/* xref: /openbmc/linux/fs/netfs/buffered_read.c (revision 16211268fcb36672a84359362c2fc2c4695b0fc4) */
// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Unlock the folios in a read operation.  We need to set PG_fscache on any
 * folios we're going to write back before we unlock them.
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos, account = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
		}
	}

	/* Walk through the pagecache and the I/O request lists simultaneously.
	 * We may have a mixture of cached and uncached sections and we only
	 * really want to write out the uncached sections.  This is slightly
	 * complicated by the possibility that we might have huge pages with a
	 * mixture inside.
	 */
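	/* Illustrative example (editorial, not from the original source):
	 * suppose the request spans one 16KiB folio and was split into two
	 * 8KiB subrequests, the first fetched from the server (and flagged
	 * NETFS_SREQ_COPY_TO_CACHE) and the second read from the cache.  The
	 * loop below visits that folio once, sees the first subrequest's
	 * COPY_TO_CACHE flag and marks the folio PG_fscache, then keeps
	 * consuming subrequests until their accumulated length reaches the end
	 * of the folio, at which point the folio is marked uptodate (provided
	 * no subrequest covering it failed) and unlocked.
	 */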
	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	iopos = 0;
	subreq_failed = (subreq->error < 0);

	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + folio_size(folio);
		bool pg_failed = false;

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}
			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
				folio_start_fscache(folio);
			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			account += subreq->transferred;
			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed) {
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
			if (folio_index(folio) == rreq->no_unlock_folio &&
			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
				_debug("no unlock");
			else
				folio_unlock(folio);
		}
	}
	rcu_read_unlock();

	task_io_account_read(account);
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}

static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
					 loff_t *_start, size_t *_len, loff_t i_size)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
}

static void netfs_rreq_expand(struct netfs_io_request *rreq,
			      struct readahead_control *ractl)
{
	/* Give the cache a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

	/* Give the netfs a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);

	/* Expand the request if the cache wants it to start earlier.  Note
	 * that the expansion may get further extended if the VM wishes to
	 * insert THPs and the preferred start and/or end wind up in the middle
	 * of THPs.
	 *
	 * If this is the case, however, the THP size should be an integer
	 * multiple of the cache granule size, so we get a whole number of
	 * granules to deal with.
	 */
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);

		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
				 netfs_read_trace_expanded);
	}
}
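
/* Worked example (editorial; the granule size is only an assumption for the
 * sake of illustration): say readahead covers 20KiB at file position 300KiB
 * and the cache stores data in 256KiB granules.  netfs_cache_expand_readahead()
 * would round the request out to the enclosing granule, i.e. start 256KiB,
 * length 256KiB.  readahead_expand() then tries to populate the page cache for
 * the extra ranges; rreq->start and rreq->len are re-read from the ractl
 * afterwards because the VM may not have been able to expand the window fully
 * (for instance if some of the extra pages are already present).
 */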

/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary, the
 * readahead window can be expanded in either direction to a more convenient
 * alignment for RPC efficiency or to make storage in the cache feasible.
 *
 * The calling netfs must initialise a netfs context contiguous with the VFS
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl)
{
	struct netfs_io_request *rreq;
	struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
	int ret;

	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

	if (readahead_count(ractl) == 0)
		return;

	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
				   readahead_pos(ractl),
				   readahead_length(ractl),
				   NETFS_READAHEAD);
	if (IS_ERR(rreq))
		return;

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto cleanup_free;
	}

	netfs_stat(&netfs_n_rh_readahead);
	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
			 netfs_read_trace_readahead);

	netfs_rreq_expand(rreq, ractl);

	/* Drop the refs on the folios here rather than in the cache or
	 * filesystem.  The locks will be dropped in netfs_rreq_unlock_folios().
	 */
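	/* (Editorial note: readahead_folio() both advances the readahead
	 * iterator and puts the reference the readahead code took on each
	 * folio; the folio lock itself is retained and released later when the
	 * read completes.)
	 */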
	while (readahead_folio(ractl))
		;

	netfs_begin_read(rreq, false);
	return;

cleanup_free:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
	return;
}
EXPORT_SYMBOL(netfs_readahead);
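
/*
 * Editorial sketch (not part of the original file; every "myfs_*" name is
 * hypothetical): roughly how a network filesystem of this kernel generation
 * might wire the helpers in this file into its inode and address_space setup.
 * The netfs context is expected to sit directly after the VFS inode in the
 * filesystem's own inode structure and is initialised with the request ops;
 * myfs_issue_read is sketched after netfs_readpage() below.  This is only an
 * illustration of the contract, not a definitive implementation.
 *
 *	static const struct netfs_request_ops myfs_req_ops = {
 *		.issue_read	= myfs_issue_read,
 *	};
 *
 *	static void myfs_init_netfs_context(struct inode *inode)
 *	{
 *		netfs_i_context_init(inode, &myfs_req_ops);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readahead	= netfs_readahead,
 *		.readpage	= netfs_readpage,
 *		// plus writepage(s), write_begin/write_end, etc.
 *	};
 */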

/**
 * netfs_readpage - Helper to manage a readpage request
 * @file: The file to read from
 * @subpage: A subpage of the folio to read
 *
 * Fulfil a readpage request by drawing data from the cache if possible, or the
 * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
 * from different sources will get munged together.
 *
 * The calling netfs must initialise a netfs context contiguous with the VFS
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_readpage(struct file *file, struct page *subpage)
{
	struct folio *folio = page_folio(subpage);
	struct address_space *mapping = folio_file_mapping(folio);
	struct netfs_io_request *rreq;
	struct netfs_i_context *ctx = netfs_i_context(mapping->host);
	int ret;

	_enter("%lx", folio_index(folio));

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READPAGE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto alloc_error;
	}

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto discard;
	}

	netfs_stat(&netfs_n_rh_readpage);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
	return netfs_begin_read(rreq, true);

discard:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
	folio_unlock(folio);
	return ret;
}
EXPORT_SYMBOL(netfs_readpage);
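
/*
 * Editorial sketch (hypothetical, not from the original file): the read
 * helpers above ultimately drive the filesystem through its ->issue_read() op
 * for each subrequest.  This dummy version just zero-fills the target pages
 * and completes the subrequest, to show the shape of the contract; a real
 * netfs would issue an RPC for subreq->start/subreq->len and then call
 * netfs_subreq_terminated() with the amount transferred or a negative error.
 *
 *	static void myfs_issue_read(struct netfs_io_subrequest *subreq)
 *	{
 *		struct netfs_io_request *rreq = subreq->rreq;
 *		struct iov_iter iter;
 *
 *		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
 *				subreq->start + subreq->transferred,
 *				subreq->len - subreq->transferred);
 *		iov_iter_zero(iov_iter_count(&iter), &iter);
 *		netfs_subreq_terminated(subreq, subreq->len - subreq->transferred,
 *					false);
 *	}
 */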

/*
 * Prepare a folio for writing without reading first
 * @folio: The folio being prepared
 * @pos: starting position for the write
 * @len: length of write
 * @always_fill: T if the folio should always be completely filled/cleared
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the folio and return true. Otherwise, return false.
 */
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
				 bool always_fill)
{
	struct inode *inode = folio_inode(folio);
	loff_t i_size = i_size_read(inode);
	size_t offset = offset_in_folio(folio, pos);
	size_t plen = folio_size(folio);

	if (unlikely(always_fill)) {
		if (pos - offset + len <= i_size)
			return false; /* Page entirely before EOF */
		zero_user_segment(&folio->page, 0, plen);
		folio_mark_uptodate(folio);
		return true;
	}

	/* Full folio write */
	if (offset == 0 && len >= plen)
		return true;

	/* Page entirely beyond the end of the file */
	if (pos - offset >= i_size)
		goto zero_out;

	/* Write that covers from the start of the folio to EOF or beyond */
	if (offset == 0 && (pos + len) >= i_size)
		goto zero_out;

	return false;
zero_out:
	zero_user_segments(&folio->page, 0, offset, offset + len, plen);
	return true;
}
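
/* Worked example (editorial): with a 4KiB folio covering file positions
 * 8192-12287 and i_size == 9000:
 *  - a write of 4096 bytes at pos 8192 covers the whole folio, so no read is
 *    needed (first test);
 *  - the folio is not entirely beyond EOF (8192 < 9000), so the second test
 *    does not apply;
 *  - a write of 900 bytes at pos 8192 starts at the folio head and ends at
 *    9092 >= i_size, so the third test applies and bytes 900-4095 of the
 *    folio are zeroed instead of being read.
 */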

/**
 * netfs_write_begin - Helper to prepare for writing
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the folio chosen)
 * @aop_flags: AOP_* flags
 * @_folio: Where to put the resultant folio
 * @_fsdata: Place for the netfs to store a cookie
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_read, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the folio is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
 * be re-obtained; or return an error.
 *
 * The calling netfs must initialise a netfs context contiguous with the VFS
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len, unsigned int aop_flags,
		      struct folio **_folio, void **_fsdata)
{
	struct netfs_io_request *rreq;
	struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
	struct folio *folio;
	unsigned int fgp_flags;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
	fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
	if (aop_flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;
	folio = __filemap_get_folio(mapping, index, fgp_flags,
				    mapping_gfp_mask(mapping));
	if (!folio)
		return -ENOMEM;

	if (ctx->ops->check_write_begin) {
		/* Allow the netfs (eg. ceph) to flush conflicts. */
		ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
		if (ret < 0) {
			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
			if (ret == -EAGAIN)
				goto retry;
			goto error;
		}
	}

	if (folio_test_uptodate(folio))
		goto have_folio;

	/* If the page is beyond the EOF, we want to clear it - unless it's
	 * within the cache granule containing the EOF, in which case we need
	 * to preload the granule.
	 */
	if (!netfs_is_cache_enabled(ctx) &&
	    netfs_skip_folio_read(folio, pos, len, false)) {
		netfs_stat(&netfs_n_rh_write_zskip);
		goto have_folio_no_wait;
	}

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READ_FOR_WRITE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto error;
	}
	rreq->no_unlock_folio	= folio_index(folio);
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto error_put;
	}

	netfs_stat(&netfs_n_rh_write_begin);
	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);

	/* Expand the request to meet caching requirements and download
	 * preferences.
	 */
	ractl._nr_pages = folio_nr_pages(folio);
	netfs_rreq_expand(rreq, &ractl);

	/* We hold the folio locks, so we can drop the references */
	folio_get(folio);
	while (readahead_folio(&ractl))
		;

	ret = netfs_begin_read(rreq, true);
	if (ret < 0)
		goto error;

have_folio:
	ret = folio_wait_fscache_killable(folio);
	if (ret < 0)
		goto error;
have_folio_no_wait:
	*_folio = folio;
	_leave(" = 0");
	return 0;

error_put:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
error:
	folio_unlock(folio);
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
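
/*
 * Editorial sketch (hypothetical names, assuming the ->write_begin()
 * address_space op signature of this kernel generation, which still traffics
 * in struct page): a filesystem would typically wrap netfs_write_begin() like
 * this and let it do the preloading described in the kernel-doc above.
 *
 *	static int myfs_write_begin(struct file *file, struct address_space *mapping,
 *				    loff_t pos, unsigned int len, unsigned int aop_flags,
 *				    struct page **pagep, void **fsdata)
 *	{
 *		struct folio *folio;
 *		int ret;
 *
 *		ret = netfs_write_begin(file, mapping, pos, len, aop_flags,
 *					&folio, fsdata);
 *		if (ret < 0)
 *			return ret;
 *
 *		*pagep = &folio->page;
 *		return 0;
 *	}
 */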