// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"

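/*
 * Carrier for the anonymous fd handed to the user daemon on an OPEN
 * request: cachefiles_ondemand_get_fd() fills it in, and the daemon_read
 * path installs the file into the daemon's fd table.
 */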
struct ondemand_anon_file {
	struct file *file;
	int fd;
};

static inline void cachefiles_req_put(struct cachefiles_req *req)
{
	if (refcount_dec_and_test(&req->ref))
		kfree(req);
}

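/*
 * Called when the daemon closes (or leaks) the anonymous fd. Mark the
 * object closed, complete any pending CLOSE requests for it, and drop
 * the references pinned by the fd.
 */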
static int cachefiles_ondemand_fd_release(struct inode *inode,
					  struct file *file)
{
	struct cachefiles_object *object = file->private_data;
	struct cachefiles_cache *cache;
	struct cachefiles_ondemand_info *info;
	int object_id;
	struct cachefiles_req *req;
	XA_STATE(xas, NULL, 0);

	if (!object)
		return 0;

	info = object->ondemand;
	cache = object->volume->cache;
	xas.xa = &cache->reqs;

	xa_lock(&cache->reqs);
	spin_lock(&info->lock);
	object_id = info->ondemand_id;
	info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
	cachefiles_ondemand_set_object_close(object);
	spin_unlock(&info->lock);

	/* Only flush reqs marked CACHEFILES_REQ_NEW to avoid racing with daemon_read */
	xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
		if (req->msg.object_id == object_id &&
		    req->msg.opcode == CACHEFILES_OP_CLOSE) {
			complete(&req->done);
			xas_store(&xas, NULL);
		}
	}
	xa_unlock(&cache->reqs);

	xa_erase(&cache->ondemand_ids, object_id);
	trace_cachefiles_ondemand_fd_release(object, object_id);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	cachefiles_put_unbind_pincount(cache);
	return 0;
}

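/*
 * The daemon writes fetched data into the backing file through the
 * anonymous fd: prepare the write window, then copy the iterator into
 * the cache file at the given position.
 */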
static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
						 struct iov_iter *iter)
{
	struct cachefiles_object *object = kiocb->ki_filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file = object->file;
	size_t len = iter->count;
	loff_t pos = kiocb->ki_pos;
	const struct cred *saved_cred;
	int ret;

	if (!file)
		return -ENOBUFS;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
	cachefiles_end_secure(cache, saved_cred);
	if (ret < 0)
		return ret;

	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
	if (!ret) {
		ret = len;
		kiocb->ki_pos += ret;
	}

	return ret;
}

static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
					    int whence)
{
	struct cachefiles_object *object = filp->private_data;
	struct file *file = object->file;

	if (!file)
		return -ENOBUFS;

	return vfs_llseek(file, pos, whence);
}

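/*
 * CACHEFILES_IOC_READ_COMPLETE: the daemon signals that the READ request
 * identified by msg_id @id has been fulfilled. A sketch of the daemon
 * side (assuming the anon fd from the OPEN reply is in `fd`):
 *
 *	ioctl(fd, CACHEFILES_IOC_READ_COMPLETE, msg_id);
 */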
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
					 unsigned long id)
{
	struct cachefiles_object *object = filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, id);

	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
		return -EINVAL;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	xa_lock(&cache->reqs);
	req = xas_load(&xas);
	if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
	    req->object != object) {
		xa_unlock(&cache->reqs);
		return -EINVAL;
	}
	xas_store(&xas, NULL);
	xa_unlock(&cache->reqs);

	trace_cachefiles_ondemand_cread(object, id);
	complete(&req->done);
	return 0;
}

static const struct file_operations cachefiles_ondemand_fd_fops = {
	.owner		= THIS_MODULE,
	.release	= cachefiles_ondemand_fd_release,
	.write_iter	= cachefiles_ondemand_fd_write_iter,
	.llseek		= cachefiles_ondemand_fd_llseek,
	.unlocked_ioctl	= cachefiles_ondemand_fd_ioctl,
};

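/*
 * Example exchange (a sketch; the msg_id 3 and sizes are made up): after
 * reading an OPEN request whose msg_id is 3, the daemon answers by writing
 * "copen 3,1048576" to the cachefiles device fd for a 1 MiB object, or
 * "copen 3,-2" (-ENOENT) to fail the open.
 */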
/*
 * OPEN request completion (copen)
 * - command: "copen <id>,<cache_size>"
 *   <cache_size> indicates the object size if >=0, error code if negative
 */
int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;
	struct fscache_cookie *cookie;
	struct cachefiles_ondemand_info *info;
	char *pid, *psize;
	unsigned long id;
	long size;
	int ret;
	XA_STATE(xas, &cache->reqs, 0);

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	if (!*args) {
		pr_err("Empty id specified\n");
		return -EINVAL;
	}

	pid = args;
	psize = strchr(args, ',');
	if (!psize) {
		pr_err("Cache size is not specified\n");
		return -EINVAL;
	}

	*psize = 0;
	psize++;

	ret = kstrtoul(pid, 0, &id);
	if (ret)
		return ret;

	xa_lock(&cache->reqs);
	xas.xa_index = id;
	req = xas_load(&xas);
	if (!req || req->msg.opcode != CACHEFILES_OP_OPEN ||
	    !req->object->ondemand->ondemand_id) {
		xa_unlock(&cache->reqs);
		return -EINVAL;
	}
	xas_store(&xas, NULL);
	xa_unlock(&cache->reqs);

	info = req->object->ondemand;
	/* fail OPEN request if copen format is invalid */
	ret = kstrtol(psize, 0, &size);
	if (ret) {
		req->error = ret;
		goto out;
	}

	/* fail OPEN request if daemon reports an error */
	if (size < 0) {
		if (!IS_ERR_VALUE(size)) {
			req->error = -EINVAL;
			ret = -EINVAL;
		} else {
			req->error = size;
			ret = 0;
		}
		goto out;
	}

	spin_lock(&info->lock);
	/*
	 * Was the anonymous fd closed before copen? Fail the request.
	 *
	 *             t1             |             t2
	 * ---------------------------------------------------------
	 *                             cachefiles_ondemand_copen
	 *                             req = xa_erase(&cache->reqs, id)
	 * // Anon fd is maliciously closed.
	 * cachefiles_ondemand_fd_release
	 * xa_lock(&cache->reqs)
	 * cachefiles_ondemand_set_object_close(object)
	 * xa_unlock(&cache->reqs)
	 *                             cachefiles_ondemand_set_object_open
	 *                             // No one will ever close it again.
	 * cachefiles_ondemand_daemon_read
	 * cachefiles_ondemand_select_req
	 *
	 * The daemon then gets a read req whose fd is already closed; it
	 * can't issue a cread ioctl with a closed fd, so it would hang.
	 */
	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
		spin_unlock(&info->lock);
		req->error = -EBADFD;
		goto out;
	}
	cookie = req->object->cookie;
	cookie->object_size = size;
	if (size)
		clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	else
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	trace_cachefiles_ondemand_copen(req->object, id, size);

	cachefiles_ondemand_set_object_open(req->object);
	spin_unlock(&info->lock);
	wake_up_all(&cache->daemon_pollwq);

out:
	spin_lock(&info->lock);
	/* Set the object to close state so it doesn't stay in reopening state. */
	if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED)
		cachefiles_ondemand_set_object_close(req->object);
	spin_unlock(&info->lock);
	complete(&req->done);
	return ret;
}

int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;

	XA_STATE(xas, &cache->reqs, 0);

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	/*
	 * Reset the requests to CACHEFILES_REQ_NEW state, so that requests
	 * that were processed halfway before the user daemon crashed can be
	 * reprocessed after the recovery.
	 */
	xas_lock(&xas);
	xas_for_each(&xas, req, ULONG_MAX)
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
	xas_unlock(&xas);

	wake_up_all(&cache->daemon_pollwq);
	return 0;
}

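/*
 * Allocate an object ID and an anonymous fd for an OPEN request. The fd
 * is only reserved here; the caller (daemon_read) installs it with
 * fd_install() once the message has been copied out successfully.
 */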
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
				      struct ondemand_anon_file *anon_file)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct cachefiles_open *load;
	u32 object_id;
	int ret;

	object = cachefiles_grab_object(req->object,
			cachefiles_obj_get_ondemand_fd);
	cache = object->volume->cache;

	ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
			      XA_LIMIT(1, INT_MAX),
			      &cache->ondemand_id_next, GFP_KERNEL);
	if (ret < 0)
		goto err;

	anon_file->fd = get_unused_fd_flags(O_WRONLY);
	if (anon_file->fd < 0) {
		ret = anon_file->fd;
		goto err_free_id;
	}

	anon_file->file = anon_inode_getfile("[cachefiles]",
				&cachefiles_ondemand_fd_fops, object, O_WRONLY);
	if (IS_ERR(anon_file->file)) {
		ret = PTR_ERR(anon_file->file);
		goto err_put_fd;
	}

	spin_lock(&object->ondemand->lock);
	if (object->ondemand->ondemand_id > 0) {
		spin_unlock(&object->ondemand->lock);
		/* Pair with check in cachefiles_ondemand_fd_release(). */
		anon_file->file->private_data = NULL;
		ret = -EEXIST;
		goto err_put_file;
	}

	anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;

	load = (void *)req->msg.data;
	load->fd = anon_file->fd;
	object->ondemand->ondemand_id = object_id;
	spin_unlock(&object->ondemand->lock);

	cachefiles_get_unbind_pincount(cache);
	trace_cachefiles_ondemand_open(object, &req->msg, load);
	return 0;

err_put_file:
	fput(anon_file->file);
	anon_file->file = NULL;
err_put_fd:
	put_unused_fd(anon_file->fd);
	anon_file->fd = ret;
err_free_id:
	xa_erase(&cache->ondemand_ids, object_id);
err:
	spin_lock(&object->ondemand->lock);
	/* Avoid marking an opened object as closed. */
	if (object->ondemand->ondemand_id <= 0)
		cachefiles_ondemand_set_object_close(object);
	spin_unlock(&object->ondemand->lock);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	return ret;
}

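/*
 * Worker queued by cachefiles_ondemand_select_req() to reopen a closed
 * object by resending an OPEN request.
 */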
static void ondemand_object_worker(struct work_struct *work)
{
	struct cachefiles_ondemand_info *info =
		container_of(work, struct cachefiles_ondemand_info, ondemand_work);

	cachefiles_ondemand_init_object(info->object);
}

/*
 * If there are any inflight or subsequent READ requests on the
 * closed object, reopen it.
 * Skip read requests whose related object is reopening.
 */
static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas,
							      unsigned long xa_max)
{
	struct cachefiles_req *req;
	struct cachefiles_object *object;
	struct cachefiles_ondemand_info *info;

	xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) {
		if (req->msg.opcode != CACHEFILES_OP_READ)
			return req;
		object = req->object;
		info = object->ondemand;
		if (cachefiles_ondemand_object_is_close(object)) {
			cachefiles_ondemand_set_object_reopening(object);
			queue_work(fscache_wq, &info->ondemand_work);
			continue;
		}
		if (cachefiles_ondemand_object_is_reopening(object))
			continue;
		return req;
	}
	return NULL;
}

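/*
 * Remove @req from the xarray and complete it. Returns false if someone
 * else removed it first (i.e. they own the completion).
 */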
static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
						  struct xa_state *xas, int err)
{
	if (unlikely(!xas || !req))
		return false;

	if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
		return false;

	req->error = err;
	complete(&req->done);
	return true;
}

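/*
 * Back end of the daemon's read() on the cachefiles device: pick one
 * request marked CACHEFILES_REQ_NEW, copy its message to userspace and,
 * for OPEN requests, install the anonymous fd in the daemon.
 */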
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					char __user *_buffer, size_t buflen)
{
	struct cachefiles_req *req;
	struct cachefiles_msg *msg;
	size_t n;
	int ret = 0;
	struct ondemand_anon_file anon_file;
	XA_STATE(xas, &cache->reqs, cache->req_id_next);

	xa_lock(&cache->reqs);
	/*
	 * Cyclically search for a request that has not yet been processed,
	 * to prevent requests from being processed repeatedly, and make
	 * request distribution fair.
	 */
	req = cachefiles_ondemand_select_req(&xas, ULONG_MAX);
	if (!req && cache->req_id_next > 0) {
		xas_set(&xas, 0);
		req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1);
	}
	if (!req) {
		xa_unlock(&cache->reqs);
		return 0;
	}

	msg = &req->msg;
	n = msg->len;

	if (n > buflen) {
		xa_unlock(&cache->reqs);
		return -EMSGSIZE;
	}

	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
	cache->req_id_next = xas.xa_index + 1;
	refcount_inc(&req->ref);
	cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
	xa_unlock(&cache->reqs);

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		ret = cachefiles_ondemand_get_fd(req, &anon_file);
		if (ret)
			goto out;
	}

	msg->msg_id = xas.xa_index;
	msg->object_id = req->object->ondemand->ondemand_id;

	if (copy_to_user(_buffer, msg, n) != 0)
		ret = -EFAULT;

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		if (ret < 0) {
			fput(anon_file.file);
			put_unused_fd(anon_file.fd);
			goto out;
		}
		fd_install(anon_file.fd, anon_file.file);
	}
out:
	cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
	/* Finish the request on error; CLOSE requests expect no reply. */
	if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
		cachefiles_ondemand_finish_req(req, &xas, ret);
	cachefiles_req_put(req);
	return ret ? ret : n;
}

typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);

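/*
 * Allocate a request, let @init_req fill in the opcode-specific payload,
 * publish it in cache->reqs for the daemon to read, then block until the
 * daemon completes it (or the wait is killed).
 */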
static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
					enum cachefiles_opcode opcode,
					size_t data_len,
					init_req_fn init_req,
					void *private)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req = NULL;
	XA_STATE(xas, &cache->reqs, 0);
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return 0;

	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
		ret = -EIO;
		goto out;
	}

	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&req->ref, 1);
	req->object = object;
	init_completion(&req->done);
	req->msg.opcode = opcode;
	req->msg.len = sizeof(struct cachefiles_msg) + data_len;

	ret = init_req(req, private);
	if (ret)
		goto out;

	do {
		/*
		 * Stop enqueuing the request when the daemon is dying. The
		 * following two operations need to be atomic as a whole.
		 *   1) check cache state, and
		 *   2) enqueue request if cache is alive.
		 * Otherwise the request may be enqueued after the xarray has
		 * been flushed, leaving the orphan request never being
		 * completed.
		 *
		 * CPU 1			CPU 2
		 * =====			=====
		 *				test CACHEFILES_DEAD bit
		 * set CACHEFILES_DEAD bit
		 * flush requests in the xarray
		 *				enqueue the request
		 */
		xas_lock(&xas);

		if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
		    cachefiles_ondemand_object_is_dropping(object)) {
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/* coupled with the barrier in cachefiles_flush_reqs() */
		smp_mb();

		if (opcode == CACHEFILES_OP_CLOSE &&
		    !cachefiles_ondemand_object_is_open(object)) {
			WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/*
		 * Cyclically find a free slot to avoid msg_id reuse, which
		 * would let the daemon successfully copen a stale msg_id.
		 */
		xas.xa_index = cache->msg_id_next;
		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART) {
			xas.xa_index = 0;
			xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
		}
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);

		xas_store(&xas, req);
		if (xas_valid(&xas)) {
			cache->msg_id_next = xas.xa_index + 1;
			xas_clear_mark(&xas, XA_FREE_MARK);
			xas_set_mark(&xas, CACHEFILES_REQ_NEW);
		}
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	ret = xas_error(&xas);
	if (ret)
		goto out;

	wake_up_all(&cache->daemon_pollwq);
wait:
	ret = wait_for_completion_killable(&req->done);
	if (!ret) {
		ret = req->error;
	} else {
		ret = -EINTR;
		if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
			/* Someone will complete it soon. */
			cpu_relax();
			goto wait;
		}
	}
	cachefiles_req_put(req);
	return ret;
out:
	/*
	 * Reset the object to close state in the error handling path.
	 * If an error occurs after creating the anonymous fd,
	 * cachefiles_ondemand_fd_release() will set the object to close.
	 */
	if (opcode == CACHEFILES_OP_OPEN &&
	    !cachefiles_ondemand_object_is_dropping(object))
		cachefiles_ondemand_set_object_close(object);
	kfree(req);
	return ret;
}

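/*
 * OPEN payload layout as built below (a sketch of what lands in
 * req->msg.data):
 *
 *	[struct cachefiles_open][volume key (NUL-terminated)][cookie key]
 */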
static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	struct cachefiles_open *load = (void *)req->msg.data;
	size_t volume_key_size, cookie_key_size;
	void *volume_key, *cookie_key;

	/*
	 * Volume key is a NUL-terminated string. key[0] stores strlen() of the
	 * string, followed by the content of the string (excluding '\0').
	 */
	volume_key_size = volume->key[0] + 1;
	volume_key = volume->key + 1;

	/* Cookie key is binary data, which is netfs-specific. */
	cookie_key_size = cookie->key_len;
	cookie_key = fscache_get_key(cookie);

	if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
		pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
		return -EINVAL;
	}

	load->volume_key_size = volume_key_size;
	load->cookie_key_size = cookie_key_size;
	memcpy(load->data, volume_key, volume_key_size);
	memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);

	return 0;
}

static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
					      void *private)
{
	struct cachefiles_object *object = req->object;

	if (!cachefiles_ondemand_object_is_open(object))
		return -ENOENT;

	trace_cachefiles_ondemand_close(object, &req->msg);
	return 0;
}

struct cachefiles_read_ctx {
	loff_t off;
	size_t len;
};

static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct cachefiles_read *load = (void *)req->msg.data;
	struct cachefiles_read_ctx *read_ctx = private;

	load->off = read_ctx->off;
	load->len = read_ctx->len;
	trace_cachefiles_ondemand_read(object, &req->msg, load);
	return 0;
}

int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	size_t volume_key_size, cookie_key_size, data_len;

	if (!object->ondemand)
		return 0;

	/*
	 * CacheFiles will first check the cache file under the root cache
	 * directory. If the coherency check fails, it will fall back to
	 * creating a new tmpfile as the cache file. Reuse the previously
	 * allocated object ID if any.
	 */
	if (cachefiles_ondemand_object_is_open(object))
		return 0;

	volume_key_size = volume->key[0] + 1;
	cookie_key_size = cookie->key_len;
	data_len = sizeof(struct cachefiles_open) +
		   volume_key_size + cookie_key_size;

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
			data_len, cachefiles_ondemand_init_open_req, NULL);
}

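/*
 * Tear down the on-demand state of an object: notify the daemon with a
 * CLOSE request, fail any requests still queued for the object, and wait
 * for the reopen worker so it can't touch freed memory.
 */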
void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
	unsigned long index;
	struct cachefiles_req *req;
	struct cachefiles_cache *cache;

	if (!object->ondemand)
		return;

	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
			cachefiles_ondemand_init_close_req, NULL);

	if (!object->ondemand->ondemand_id)
		return;

	/* Cancel all requests for the object that is being dropped. */
	cache = object->volume->cache;
	xa_lock(&cache->reqs);
	cachefiles_ondemand_set_object_dropping(object);
	xa_for_each(&cache->reqs, index, req) {
		if (req->object == object) {
			req->error = -EIO;
			complete(&req->done);
			__xa_erase(&cache->reqs, index);
		}
	}
	xa_unlock(&cache->reqs);

	/* Wait for ondemand_object_worker() to finish to avoid UAF. */
	cancel_work_sync(&object->ondemand->ondemand_work);
}

int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
				struct cachefiles_volume *volume)
{
	if (!cachefiles_in_ondemand_mode(volume->cache))
		return 0;

	object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info),
					GFP_KERNEL);
	if (!object->ondemand)
		return -ENOMEM;

	object->ondemand->object = object;
	spin_lock_init(&object->ondemand->lock);
	INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
	return 0;
}

void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object)
{
	kfree(object->ondemand);
	object->ondemand = NULL;
}

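/*
 * Ask the daemon to fetch [pos, pos + len) into the cache file; blocks
 * until the daemon acknowledges with the CACHEFILES_IOC_READ_COMPLETE
 * ioctl.
 */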
int cachefiles_ondemand_read(struct cachefiles_object *object,
			     loff_t pos, size_t len)
{
	struct cachefiles_read_ctx read_ctx = {pos, len};

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
			sizeof(struct cachefiles_read),
			cachefiles_ondemand_init_read_req, &read_ctx);
}