xref: /openbmc/linux/fs/netfs/buffered_read.c (revision 236d93c4bf2d6da83241cc8e4625e89d9604cb43)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Unlock the folios in a read operation.  We need to set PG_fscache on any
 * folios we're going to write back before we unlock them.
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos, account = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
		}
	}

	/* Walk through the pagecache and the I/O request lists simultaneously.
	 * We may have a mixture of cached and uncached sections and we only
	 * really want to write out the uncached sections.  This is slightly
	 * complicated by the possibility that we might have huge pages with a
	 * mixture inside.
	 */
	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	iopos = 0;
	subreq_failed = (subreq->error < 0);

	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + folio_size(folio);
		bool pg_failed = false;

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}
			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
				folio_start_fscache(folio);
			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			account += subreq->transferred;
			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed) {
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
			if (folio_index(folio) == rreq->no_unlock_folio &&
			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
				_debug("no unlock");
			else
				folio_unlock(folio);
		}
	}
	rcu_read_unlock();

	task_io_account_read(account);
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}
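
/*
 * Illustrative only (not part of the original source): how the unlock walk
 * above lines up folios against subrequests.  Assume a hypothetical read of
 * 16KiB starting at file offset 0 into a single 16KiB THP, served by two
 * 8KiB subrequests, where the first came from the local cache and the second
 * came from the server and is flagged NETFS_SREQ_COPY_TO_CACHE:
 *
 *   folio (16KiB THP):  |<------------- 0..16KiB ------------->|
 *   subreq 0 (cache):   |<--- 0..8KiB --->|
 *   subreq 1 (server):                    |<---- 8..16KiB ---->|
 *
 * The inner for(;;) loop consumes both subrequests before the folio's end
 * offset (pgend) is reached; PG_fscache is set on the folio because subreq 1
 * needs writing back to the cache, and the folio is only marked uptodate if
 * neither overlapping subrequest reported an error.
 */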

static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
					 loff_t *_start, size_t *_len, loff_t i_size)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
}

static void netfs_rreq_expand(struct netfs_io_request *rreq,
			      struct readahead_control *ractl)
{
	/* Give the cache a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

	/* Give the netfs a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);

	/* Expand the request if the cache wants it to start earlier.  Note
	 * that the expansion may get further extended if the VM wishes to
	 * insert THPs and the preferred start and/or end wind up in the middle
	 * of THPs.
	 *
	 * If this is the case, however, the THP size should be an integer
	 * multiple of the cache granule size, so we get a whole number of
	 * granules to deal with.
	 */
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);

		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
				 netfs_read_trace_expanded);
	}
}
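
/*
 * Illustrative only (not from this file): a sketch of what a cache backend's
 * ->expand_readahead() hook, as invoked by netfs_cache_expand_readahead()
 * above, might do.  The granule size and all example_* names are hypothetical;
 * a real backend (e.g. cachefiles) applies its own granularity and also takes
 * what it already has stored into account.
 */
#if 0
#define EXAMPLE_CACHE_GRANULE_SIZE	0x40000	/* hypothetical 256KiB granule */

static void example_cache_expand_readahead(struct netfs_cache_resources *cres,
					   loff_t *_start, size_t *_len,
					   loff_t i_size)
{
	loff_t start = *_start, end = *_start + *_len;

	/* Round the proposed window out to whole cache granules.  The
	 * expanded window must still cover the original region.
	 */
	start = round_down(start, EXAMPLE_CACHE_GRANULE_SIZE);
	end = round_up(end, EXAMPLE_CACHE_GRANULE_SIZE);

	*_start = start;
	*_len = end - start;
}
#endif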

/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary, the
 * readahead window can be expanded in either direction to a more convenient
 * alignment for RPC efficiency or to make storage in the cache feasible.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl)
{
	struct netfs_io_request *rreq;
	struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
	int ret;

	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

	if (readahead_count(ractl) == 0)
		return;

	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
				   readahead_pos(ractl),
				   readahead_length(ractl),
				   NETFS_READAHEAD);
	if (IS_ERR(rreq))
		return;

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto cleanup_free;
	}

	netfs_stat(&netfs_n_rh_readahead);
	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
			 netfs_read_trace_readahead);

	netfs_rreq_expand(rreq, ractl);

	/* Drop the refs on the folios here rather than in the cache or
	 * filesystem.  The locks will be dropped in netfs_rreq_unlock_folios().
	 */
	while (readahead_folio(ractl))
		;

	netfs_begin_read(rreq, false);
	return;

cleanup_free:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
	return;
}
EXPORT_SYMBOL(netfs_readahead);
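
/*
 * Illustrative only (not part of the original source): a minimal sketch of
 * how a network filesystem might plug the helpers exported by this file into
 * its address_space_operations.  The examplefs_* names are hypothetical;
 * real users of these helpers at this point include afs and ceph.
 */
#if 0
static const struct address_space_operations examplefs_aops = {
	.readahead	= netfs_readahead,
	.readpage	= netfs_readpage,
	.write_begin	= examplefs_write_begin,	/* would typically wrap netfs_write_begin() */
	.write_end	= examplefs_write_end,
	/* ... plus the filesystem's writeback and invalidation ops ... */
};
#endif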

/**
 * netfs_readpage - Helper to manage a readpage request
 * @file: The file to read from
 * @subpage: A subpage of the folio to read
 *
 * Fulfil a readpage request by drawing data from the cache if possible, or the
 * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
 * from different sources will get munged together.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_readpage(struct file *file, struct page *subpage)
{
	struct folio *folio = page_folio(subpage);
	struct address_space *mapping = folio_file_mapping(folio);
	struct netfs_io_request *rreq;
	struct netfs_i_context *ctx = netfs_i_context(mapping->host);
	int ret;

	_enter("%lx", folio_index(folio));

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READPAGE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto alloc_error;
	}

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto discard;
	}

	netfs_stat(&netfs_n_rh_readpage);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
	return netfs_begin_read(rreq, true);

discard:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
	folio_unlock(folio);
	return ret;
}
EXPORT_SYMBOL(netfs_readpage);
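
/*
 * Illustrative only (not part of the original source): the kernel-doc above
 * requires a netfs context "contiguous to the vfs inode".  A sketch of what
 * that looks like for a hypothetical filesystem, assuming the usual pattern
 * of embedding struct netfs_i_context immediately after the VFS inode and
 * initialising it when the inode is set up.  examplefs_req_ops stands in for
 * the filesystem's own netfs_request_ops table.
 */
#if 0
struct examplefs_inode {
	struct inode		vfs_inode;	/* the VFS inode must come first... */
	struct netfs_i_context	netfs_ctx;	/* ...with the netfs context directly after it */
	/* filesystem-private fields follow */
};

static void examplefs_init_netfs_context(struct inode *inode)
{
	netfs_i_context_init(inode, &examplefs_req_ops);
}
#endif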

/*
 * Prepare a folio for writing without reading first
 * @folio: The folio being prepared
 * @pos: starting position for the write
 * @len: length of write
 * @always_fill: T if the folio should always be completely filled/cleared
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the folio and return true. Otherwise, return false.
 */
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
				 bool always_fill)
{
	struct inode *inode = folio_inode(folio);
	loff_t i_size = i_size_read(inode);
	size_t offset = offset_in_folio(folio, pos);
	size_t plen = folio_size(folio);

	if (unlikely(always_fill)) {
		if (pos - offset + len <= i_size)
			return false; /* Page entirely before EOF */
		zero_user_segment(&folio->page, 0, plen);
		folio_mark_uptodate(folio);
		return true;
	}

	/* Full folio write */
	if (offset == 0 && len >= plen)
		return true;

	/* Page entirely beyond the end of the file */
	if (pos - offset >= i_size)
		goto zero_out;

	/* Write that covers from the start of the folio to EOF or beyond */
	if (offset == 0 && (pos + len) >= i_size)
		goto zero_out;

	return false;
zero_out:
	zero_user_segments(&folio->page, 0, offset, offset + len, plen);
	return true;
}
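
/*
 * Illustrative only (not part of the original source): two worked examples of
 * the skip criteria above, assuming 4KiB folios.
 *
 * (1) i_size = 1000, write of len 1200 at pos 0 into the first folio:
 *     offset == 0 and pos + len (1200) >= i_size, so [1200, 4096) is zeroed
 *     and true is returned; the write itself will fill [0, 1200) and no read
 *     needs to be issued.
 *
 * (2) i_size = 1000, write of len 100 at pos 5000 into the second folio
 *     (covering file offsets [4096, 8192)): pos - offset (4096) >= i_size,
 *     i.e. the folio lies entirely beyond EOF, so [0, 904) and [1004, 4096)
 *     are zeroed and true is returned.
 */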

/**
 * netfs_write_begin - Helper to prepare for writing
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the folio chosen)
 * @aop_flags: AOP_* flags
 * @_folio: Where to put the resultant folio
 * @_fsdata: Place for the netfs to store a cookie
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_read, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the folio is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
 * be looked up and locked again; or return an error.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_write_begin(struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len, unsigned int aop_flags,
		      struct folio **_folio, void **_fsdata)
{
	struct netfs_io_request *rreq;
	struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
	struct folio *folio;
	unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
	folio = __filemap_get_folio(mapping, index, fgp_flags,
				    mapping_gfp_mask(mapping));
	if (!folio)
		return -ENOMEM;

	if (ctx->ops->check_write_begin) {
		/* Allow the netfs (eg. ceph) to flush conflicts. */
		ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
		if (ret < 0) {
			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
			if (ret == -EAGAIN)
				goto retry;
			goto error;
		}
	}

	if (folio_test_uptodate(folio))
		goto have_folio;

	/* If the page is beyond the EOF, we want to clear it - unless it's
	 * within the cache granule containing the EOF, in which case we need
	 * to preload the granule.
	 */
	if (!netfs_is_cache_enabled(ctx) &&
	    netfs_skip_folio_read(folio, pos, len, false)) {
		netfs_stat(&netfs_n_rh_write_zskip);
		goto have_folio_no_wait;
	}

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READ_FOR_WRITE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto error;
	}
	rreq->no_unlock_folio	= folio_index(folio);
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto error_put;
	}

	netfs_stat(&netfs_n_rh_write_begin);
	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);

	/* Expand the request to meet caching requirements and download
	 * preferences.
	 */
	ractl._nr_pages = folio_nr_pages(folio);
	netfs_rreq_expand(rreq, &ractl);

	/* We hold the folio locks, so we can drop the references */
	folio_get(folio);
	while (readahead_folio(&ractl))
		;

	ret = netfs_begin_read(rreq, true);
	if (ret < 0)
		goto error;

have_folio:
	ret = folio_wait_fscache_killable(folio);
	if (ret < 0)
		goto error;
have_folio_no_wait:
	*_folio = folio;
	_leave(" = 0");
	return 0;

error_put:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
error:
	folio_unlock(folio);
	folio_put(folio);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
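
/*
 * Illustrative only (not part of the original source): a minimal sketch of a
 * filesystem ->write_begin() aop built on netfs_write_begin() above.  The
 * examplefs_* names are hypothetical, and the aop signature assumed here is
 * the page/flags-based one of this era; a real implementation (e.g. afs)
 * would also manage its own fsdata and write grouping.
 */
#if 0
static int examplefs_write_begin(struct file *file, struct address_space *mapping,
				 loff_t pos, unsigned int len, unsigned int aop_flags,
				 struct page **_page, void **fsdata)
{
	struct folio *folio;
	int ret;

	/* Let the netfs library lock the folio and pre-read it if needed. */
	ret = netfs_write_begin(file, mapping, pos, len, aop_flags, &folio, fsdata);
	if (ret < 0)
		return ret;

	/* Hand the appropriate subpage back to the VFS. */
	*_page = folio_file_page(folio, pos / PAGE_SIZE);
	return 0;
}
#endif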