xref: /openbmc/linux/fs/cachefiles/io.c (revision e6fa4c72)
// SPDX-License-Identifier: GPL-2.0-or-later
/* kiocb-using read/write
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/sched/mm.h>
#include <trace/events/fscache.h>
#include "internal.h"

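/*
 * State for one in-flight direct-I/O read or write on the backing file.  It
 * wraps the kiocb and carries the completion callback and the accounting that
 * the completion handler needs when the I/O finishes.
 */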
struct cachefiles_kiocb {
	struct kiocb		iocb;
	refcount_t		ki_refcnt;
	loff_t			start;
	union {
		size_t		skipped;
		size_t		len;
	};
	struct cachefiles_object *object;
	netfs_io_terminated_t	term_func;
	void			*term_func_priv;
	bool			was_async;
	unsigned int		inval_counter;	/* Copy of cookie->inval_counter */
	u64			b_writing;
};

static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
{
	if (refcount_dec_and_test(&ki->ki_refcnt)) {
		cachefiles_put_object(ki->object, cachefiles_obj_put_ioreq);
		fput(ki->iocb.ki_filp);
		kfree(ki);
	}
}

/*
 * Handle completion of a read from the cache.
 */
static void cachefiles_read_complete(struct kiocb *iocb, long ret)
{
	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
	struct inode *inode = file_inode(ki->iocb.ki_filp);

	_enter("%ld", ret);

	if (ret < 0)
		trace_cachefiles_io_error(ki->object, inode, ret,
					  cachefiles_trace_read_error);

	if (ki->term_func) {
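		/* If the cookie was invalidated while this read was in flight,
		 * the data that was read can no longer be trusted, so fail the
		 * read with -ESTALE.
		 */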
		if (ret >= 0) {
			if (ki->object->cookie->inval_counter == ki->inval_counter)
				ki->skipped += ret;
			else
				ret = -ESTALE;
		}

		ki->term_func(ki->term_func_priv, ret, ki->was_async);
	}

	cachefiles_put_kiocb(ki);
}

/*
 * Initiate a read from the cache.
 */
static int cachefiles_read(struct netfs_cache_resources *cres,
			   loff_t start_pos,
			   struct iov_iter *iter,
			   enum netfs_read_from_hole read_hole,
			   netfs_io_terminated_t term_func,
			   void *term_func_priv)
{
	struct cachefiles_object *object;
	struct cachefiles_kiocb *ki;
	struct file *file;
	unsigned int old_nofs;
	ssize_t ret = -ENOBUFS;
	size_t len = iov_iter_count(iter), skipped = 0;

	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
		goto presubmission_error;

	fscache_count_read();
	object = cachefiles_cres_object(cres);
	file = cachefiles_cres_file(cres);

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start_pos, len,
	       i_size_read(file_inode(file)));

	/* If the caller asked us to seek for data before doing the read, then
	 * we should do that now.  If we find a gap, we fill it with zeros.
	 */
	if (read_hole != NETFS_READ_HOLE_IGNORE) {
		loff_t off = start_pos, off2;

		off2 = cachefiles_inject_read_error();
		if (off2 == 0)
			off2 = vfs_llseek(file, off, SEEK_DATA);
		if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) {
			skipped = 0;
			ret = off2;
			goto presubmission_error;
		}

		if (off2 == -ENXIO || off2 >= start_pos + len) {
			/* The region is beyond the EOF or there's no more data
			 * in the region, so clear the rest of the buffer and
			 * return success.
			 */
			ret = -ENODATA;
			if (read_hole == NETFS_READ_HOLE_FAIL)
				goto presubmission_error;

			iov_iter_zero(len, iter);
			skipped = len;
			ret = 0;
			goto presubmission_error;
		}

		skipped = off2 - off;
		iov_iter_zero(skipped, iter);
	}

	ret = -ENOMEM;
	ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
	if (!ki)
		goto presubmission_error;

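	/* Two refs on the kiocb: one for this submission path (dropped at
	 * in_progress below) and one for the completion handler.
	 */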
	refcount_set(&ki->ki_refcnt, 2);
	ki->iocb.ki_filp	= file;
	ki->iocb.ki_pos		= start_pos + skipped;
	ki->iocb.ki_flags	= IOCB_DIRECT;
	ki->iocb.ki_ioprio	= get_current_ioprio();
	ki->skipped		= skipped;
	ki->object		= object;
	ki->inval_counter	= cres->inval_counter;
	ki->term_func		= term_func;
	ki->term_func_priv	= term_func_priv;
	ki->was_async		= true;

	if (ki->term_func)
		ki->iocb.ki_complete = cachefiles_read_complete;

	get_file(ki->iocb.ki_filp);
	cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

	trace_cachefiles_read(object, file_inode(file), ki->iocb.ki_pos, len - skipped);
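	/* Issue the read inside a memalloc_nofs_save() section so that any
	 * allocation made whilst submitting is implicitly GFP_NOFS and can't
	 * recurse into filesystem reclaim.
	 */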
	old_nofs = memalloc_nofs_save();
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
	memalloc_nofs_restore(old_nofs);
	switch (ret) {
	case -EIOCBQUEUED:
		goto in_progress;

	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		ki->was_async = false;
		cachefiles_read_complete(&ki->iocb, ret);
		if (ret > 0)
			ret = 0;
		break;
	}

in_progress:
	cachefiles_put_kiocb(ki);
	_leave(" = %zd", ret);
	return ret;

presubmission_error:
	if (term_func)
		term_func(term_func_priv, ret < 0 ? ret : skipped, false);
	return ret;
}

/*
 * Query the occupancy of the cache in a region, returning where the next chunk
 * of data starts and how long it is.
 */
static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
				      loff_t start, size_t len, size_t granularity,
				      loff_t *_data_start, size_t *_data_len)
{
	struct cachefiles_object *object;
	struct file *file;
	loff_t off, off2;

	*_data_start = -1;
	*_data_len = 0;

	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
		return -ENOBUFS;

	object = cachefiles_cres_object(cres);
	file = cachefiles_cres_file(cres);
	granularity = max_t(size_t, object->volume->cache->bsize, granularity);

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start, len,
	       i_size_read(file_inode(file)));

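	/* SEEK_DATA finds the start of the next populated extent and SEEK_HOLE
	 * then finds where it ends; the extent is trimmed to granularity
	 * below.
	 */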
	off = cachefiles_inject_read_error();
	if (off == 0)
		off = vfs_llseek(file, start, SEEK_DATA);
	if (off == -ENXIO)
		return -ENODATA; /* Beyond EOF */
	if (off < 0 && off >= (loff_t)-MAX_ERRNO)
		return -ENOBUFS; /* Error. */
	if (round_up(off, granularity) >= start + len)
		return -ENODATA; /* No data in range */

	off2 = cachefiles_inject_read_error();
	if (off2 == 0)
		off2 = vfs_llseek(file, off, SEEK_HOLE);
	if (off2 == -ENXIO)
		return -ENODATA; /* Beyond EOF */
	if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
		return -ENOBUFS; /* Error. */

	/* Round away partial blocks */
	off = round_up(off, granularity);
	off2 = round_down(off2, granularity);
	if (off2 <= off)
		return -ENODATA;

	*_data_start = off;
	if (off2 > start + len)
		*_data_len = len;
	else
		*_data_len = off2 - off;
	return 0;
}

/*
 * Handle completion of a write to the cache.
 */
static void cachefiles_write_complete(struct kiocb *iocb, long ret)
{
	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
	struct cachefiles_object *object = ki->object;
	struct inode *inode = file_inode(ki->iocb.ki_filp);

	_enter("%ld", ret);

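	/* Release the write access on the backing filesystem that was taken by
	 * kiocb_start_write() at submission time.
	 */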
	kiocb_end_write(iocb);

	if (ret < 0)
		trace_cachefiles_io_error(object, inode, ret,
					  cachefiles_trace_write_error);

	atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
	set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
	if (ki->term_func)
		ki->term_func(ki->term_func_priv, ret, ki->was_async);
	cachefiles_put_kiocb(ki);
}

/*
 * Initiate a write to the cache.
 */
int __cachefiles_write(struct cachefiles_object *object,
		       struct file *file,
		       loff_t start_pos,
		       struct iov_iter *iter,
		       netfs_io_terminated_t term_func,
		       void *term_func_priv)
{
	struct cachefiles_cache *cache;
	struct cachefiles_kiocb *ki;
	unsigned int old_nofs;
	ssize_t ret;
	size_t len = iov_iter_count(iter);

	fscache_count_write();
	cache = object->volume->cache;

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start_pos, len,
	       i_size_read(file_inode(file)));

	ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
	if (!ki) {
		if (term_func)
			term_func(term_func_priv, -ENOMEM, false);
		return -ENOMEM;
	}

	refcount_set(&ki->ki_refcnt, 2);
	ki->iocb.ki_filp	= file;
	ki->iocb.ki_pos		= start_pos;
	ki->iocb.ki_flags	= IOCB_DIRECT | IOCB_WRITE;
	ki->iocb.ki_ioprio	= get_current_ioprio();
	ki->object		= object;
	ki->start		= start_pos;
	ki->len			= len;
	ki->term_func		= term_func;
	ki->term_func_priv	= term_func_priv;
	ki->was_async		= true;
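	/* Number of cache blocks this write covers; added to the cache-wide
	 * total below and subtracted again on completion.
	 */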
	ki->b_writing		= (len + (1 << cache->bshift) - 1) >> cache->bshift;

	if (ki->term_func)
		ki->iocb.ki_complete = cachefiles_write_complete;
	atomic_long_add(ki->b_writing, &cache->b_writing);

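	/* Take write access (freeze protection) on the backing filesystem for
	 * the duration of the I/O; the matching kiocb_end_write() is in
	 * cachefiles_write_complete().
	 */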
	kiocb_start_write(&ki->iocb);

	get_file(ki->iocb.ki_filp);
	cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

	trace_cachefiles_write(object, file_inode(file), ki->iocb.ki_pos, len);
	old_nofs = memalloc_nofs_save();
	ret = cachefiles_inject_write_error();
	if (ret == 0)
		ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
	memalloc_nofs_restore(old_nofs);
	switch (ret) {
	case -EIOCBQUEUED:
		goto in_progress;

	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		ki->was_async = false;
		cachefiles_write_complete(&ki->iocb, ret);
		if (ret > 0)
			ret = 0;
		break;
	}

in_progress:
	cachefiles_put_kiocb(ki);
	_leave(" = %zd", ret);
	return ret;
}

static int cachefiles_write(struct netfs_cache_resources *cres,
			    loff_t start_pos,
			    struct iov_iter *iter,
			    netfs_io_terminated_t term_func,
			    void *term_func_priv)
{
	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
		if (term_func)
			term_func(term_func_priv, -ENOBUFS, false);
		return -ENOBUFS;
	}

	return __cachefiles_write(cachefiles_cres_object(cres),
				  cachefiles_cres_file(cres),
				  start_pos, iter,
				  term_func, term_func_priv);
}

static inline enum netfs_io_source
cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
			   loff_t start, size_t *_len, loff_t i_size,
			   unsigned long *_flags, ino_t netfs_ino)
{
	enum cachefiles_prepare_read_trace why;
	struct cachefiles_object *object = NULL;
	struct cachefiles_cache *cache;
	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
	const struct cred *saved_cred;
	struct file *file = cachefiles_cres_file(cres);
	enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
	size_t len = *_len;
	loff_t off, to;
	ino_t ino = file ? file_inode(file)->i_ino : 0;
	int rc;

	_enter("%zx @%llx/%llx", len, start, i_size);

	if (start >= i_size) {
		ret = NETFS_FILL_WITH_ZEROES;
		why = cachefiles_trace_read_after_eof;
		goto out_no_object;
	}

	if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
		__set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
		why = cachefiles_trace_read_no_data;
		if (!test_bit(NETFS_SREQ_ONDEMAND, _flags))
			goto out_no_object;
	}

	/* The object and the file may be being created in the background. */
	if (!file) {
		why = cachefiles_trace_read_no_file;
		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
			goto out_no_object;
		file = cachefiles_cres_file(cres);
		if (!file)
			goto out_no_object;
		ino = file_inode(file)->i_ino;
	}

	object = cachefiles_cres_object(cres);
	cache = object->volume->cache;
	cachefiles_begin_secure(cache, &saved_cred);
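	/* Probe the cache file with SEEK_DATA/SEEK_HOLE.  If the region starts
	 * in a hole, the netfs must download it from the server (and copy it
	 * into the cache); if it starts with data, trim the subrequest to the
	 * extent of that data and read it from the cache instead.
	 */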
retry:
	off = cachefiles_inject_read_error();
	if (off == 0)
		off = vfs_llseek(file, start, SEEK_DATA);
	if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
		if (off == (loff_t)-ENXIO) {
			why = cachefiles_trace_read_seek_nxio;
			goto download_and_store;
		}
		trace_cachefiles_io_error(object, file_inode(file), off,
					  cachefiles_trace_seek_error);
		why = cachefiles_trace_read_seek_error;
		goto out;
	}

	if (off >= start + len) {
		why = cachefiles_trace_read_found_hole;
		goto download_and_store;
	}

	if (off > start) {
		off = round_up(off, cache->bsize);
		len = off - start;
		*_len = len;
		why = cachefiles_trace_read_found_part;
		goto download_and_store;
	}

	to = cachefiles_inject_read_error();
	if (to == 0)
		to = vfs_llseek(file, start, SEEK_HOLE);
	if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
		trace_cachefiles_io_error(object, file_inode(file), to,
					  cachefiles_trace_seek_error);
		why = cachefiles_trace_read_seek_error;
		goto out;
	}

	if (to < start + len) {
		if (start + len >= i_size)
			to = round_up(to, cache->bsize);
		else
			to = round_down(to, cache->bsize);
		len = to - start;
		*_len = len;
	}

	why = cachefiles_trace_read_have_data;
	ret = NETFS_READ_FROM_CACHE;
	goto out;

download_and_store:
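	/* Mark the subrequest so that anything downloaded from the server gets
	 * written back into the cache.  In on-demand mode, first ask user
	 * space to load this region into the cache; if that succeeds, clear
	 * the flag and retry the probe above so the data can be read from the
	 * cache after all.
	 */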
	__set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
	if (test_bit(NETFS_SREQ_ONDEMAND, _flags)) {
		rc = cachefiles_ondemand_read(object, start, len);
		if (!rc) {
			__clear_bit(NETFS_SREQ_ONDEMAND, _flags);
			goto retry;
		}
		ret = NETFS_INVALID_READ;
	}
out:
	cachefiles_end_secure(cache, saved_cred);
out_no_object:
	trace_cachefiles_prep_read(object, start, len, *_flags, ret, why, ino, netfs_ino);
	return ret;
}

/*
 * Prepare a read operation, shortening it to a cached/uncached
 * boundary as appropriate.
 */
static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
						    loff_t i_size)
{
	return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
					  subreq->start, &subreq->len, i_size,
					  &subreq->flags, subreq->rreq->inode->i_ino);
}

/*
 * Prepare an on-demand read operation, shortening it to a cached/uncached
 * boundary as appropriate.
 */
static enum netfs_io_source
cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
				 loff_t start, size_t *_len, loff_t i_size,
				 unsigned long *_flags, ino_t ino)
{
	return cachefiles_do_prepare_read(cres, start, _len, i_size, _flags, ino);
}

/*
 * Prepare for a write to occur.
 */
int __cachefiles_prepare_write(struct cachefiles_object *object,
			       struct file *file,
			       loff_t *_start, size_t *_len,
			       bool no_space_allocated_yet)
{
	struct cachefiles_cache *cache = object->volume->cache;
	loff_t start = *_start, pos;
	size_t len = *_len, down;
	int ret;

	/* Round to DIO size */
	down = start - round_down(start, PAGE_SIZE);
	*_start = start - down;
	*_len = round_up(down + len, PAGE_SIZE);

	/* We need to work out whether there's sufficient disk space to perform
	 * the write - but we can skip that check if we have space already
	 * allocated.
	 */
	if (no_space_allocated_yet)
		goto check_space;

	pos = cachefiles_inject_read_error();
	if (pos == 0)
		pos = vfs_llseek(file, *_start, SEEK_DATA);
	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
		if (pos == -ENXIO)
			goto check_space; /* Unallocated tail */
		trace_cachefiles_io_error(object, file_inode(file), pos,
					  cachefiles_trace_seek_error);
		return pos;
	}
	if ((u64)pos >= (u64)*_start + *_len)
		goto check_space; /* Unallocated region */

	/* We have a block that's at least partially filled - if we're low on
	 * space, we need to see if it's fully allocated.  If it's not, we may
	 * want to cull it.
	 */
	if (cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
				 cachefiles_has_space_check) == 0)
		return 0; /* Enough space to simply overwrite the whole block */

	pos = cachefiles_inject_read_error();
	if (pos == 0)
		pos = vfs_llseek(file, *_start, SEEK_HOLE);
	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
		trace_cachefiles_io_error(object, file_inode(file), pos,
					  cachefiles_trace_seek_error);
		return pos;
	}
	if ((u64)pos >= (u64)*_start + *_len)
		return 0; /* Fully allocated */

	/* Partially allocated, but insufficient space: cull. */
	fscache_count_no_write_space();
	ret = cachefiles_inject_remove_error();
	if (ret == 0)
		ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				    *_start, *_len);
	if (ret < 0) {
		trace_cachefiles_io_error(object, file_inode(file), ret,
					  cachefiles_trace_fallocate_error);
		cachefiles_io_error_obj(object,
					"CacheFiles: fallocate failed (%d)\n", ret);
		ret = -EIO;
	}

	return ret;

check_space:
	return cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
				    cachefiles_has_space_for_write);
}

static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
				    loff_t *_start, size_t *_len, loff_t i_size,
				    bool no_space_allocated_yet)
{
	struct cachefiles_object *object = cachefiles_cres_object(cres);
	struct cachefiles_cache *cache = object->volume->cache;
	const struct cred *saved_cred;
	int ret;

	if (!cachefiles_cres_file(cres)) {
		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
			return -ENOBUFS;
		if (!cachefiles_cres_file(cres))
			return -ENOBUFS;
	}

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
					 _start, _len,
					 no_space_allocated_yet);
	cachefiles_end_secure(cache, saved_cred);
	return ret;
}

/*
 * Clean up an operation.
 */
static void cachefiles_end_operation(struct netfs_cache_resources *cres)
{
	struct file *file = cachefiles_cres_file(cres);

	if (file)
		fput(file);
	fscache_end_cookie_access(fscache_cres_cookie(cres), fscache_access_io_end);
}

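/*
 * Cache operations handed to the netfs library via cres->ops when a cache
 * operation begins (see cachefiles_begin_operation() below).
 */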
static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
	.end_operation		= cachefiles_end_operation,
	.read			= cachefiles_read,
	.write			= cachefiles_write,
	.prepare_read		= cachefiles_prepare_read,
	.prepare_write		= cachefiles_prepare_write,
	.prepare_ondemand_read	= cachefiles_prepare_ondemand_read,
	.query_occupancy	= cachefiles_query_occupancy,
};

/*
 * Open the cache file when beginning a cache operation.
 */
bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
				enum fscache_want_state want_state)
{
	struct cachefiles_object *object = cachefiles_cres_object(cres);

	if (!cachefiles_cres_file(cres)) {
		cres->ops = &cachefiles_netfs_cache_ops;
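		/* Pin the backing file for the duration of the operation by
		 * taking our own reference under the object lock; it's dropped
		 * again in cachefiles_end_operation().
		 */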
		if (object->file) {
			spin_lock(&object->lock);
			if (!cres->cache_priv2 && object->file)
				cres->cache_priv2 = get_file(object->file);
			spin_unlock(&object->lock);
		}
	}

	if (!cachefiles_cres_file(cres) && want_state != FSCACHE_WANT_PARAMS) {
		pr_err("failed to get cres->file\n");
		return false;
	}

	return true;
}