// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Unlock the folios in a read operation.  We need to set PG_fscache on any
 * folios we're going to write back before we unlock them.
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	size_t account = 0;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
		}
	}

	/* Walk through the pagecache and the I/O request lists simultaneously.
	 * We may have a mixture of cached and uncached sections and we only
	 * really want to write out the uncached sections.  This is slightly
	 * complicated by the possibility that we might have huge pages with a
	 * mixture inside.
	 */
	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		loff_t pg_end;
		bool pg_failed = false;
		bool folio_started;

		if (xas_retry(&xas, folio))
			continue;

		pg_end = folio_pos(folio) + folio_size(folio) - 1;

		folio_started = false;
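		/* Step through the subrequests that overlap this folio: mark
		 * the folio for writing to the cache (PG_fscache) if any of
		 * them want their data copied there, note whether any of them
		 * failed, and carry the subrequest cursor over to the next
		 * folio when a subrequest extends beyond the end of this one.
		 */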
		for (;;) {
			loff_t sreq_end;

			if (!subreq) {
				pg_failed = true;
				break;
			}
			if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
				folio_start_fscache(folio);
				folio_started = true;
			}
			pg_failed |= subreq_failed;
			sreq_end = subreq->start + subreq->len - 1;
			if (pg_end < sreq_end)
				break;

			account += subreq->transferred;
			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}

			if (pg_end == sreq_end)
				break;
		}

		if (!pg_failed) {
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
			if (folio_index(folio) == rreq->no_unlock_folio &&
			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
				_debug("no unlock");
			else
				folio_unlock(folio);
		}
	}
	rcu_read_unlock();

	task_io_account_read(account);
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}

static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
					 loff_t *_start, size_t *_len, loff_t i_size)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
}

static void netfs_rreq_expand(struct netfs_io_request *rreq,
			      struct readahead_control *ractl)
{
	/* Give the cache a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

	/* Give the netfs a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);

	/* Expand the request if the cache wants it to start earlier.  Note
	 * that the expansion may get further extended if the VM wishes to
	 * insert THPs and the preferred start and/or end wind up in the middle
	 * of THPs.
	 *
	 * If this is the case, however, the THP size should be an integer
	 * multiple of the cache granule size, so we get a whole number of
	 * granules to deal with.
	 */
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);

		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
				 netfs_read_trace_expanded);
	}
}

/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary,
 * the readahead window can be expanded in either direction to a more
 * convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl)
{
	struct netfs_io_request *rreq;
	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
	int ret;

	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

	if (readahead_count(ractl) == 0)
		return;

	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
				   readahead_pos(ractl),
				   readahead_length(ractl),
				   NETFS_READAHEAD);
	if (IS_ERR(rreq))
		return;

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto cleanup_free;
	}

	netfs_stat(&netfs_n_rh_readahead);
	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
			 netfs_read_trace_readahead);

	netfs_rreq_expand(rreq, ractl);

	/* Drop the refs on the folios here rather than in the cache or
	 * filesystem.  The locks will be dropped in netfs_rreq_unlock_folios().
	 */
	while (readahead_folio(ractl))
		;

	netfs_begin_read(rreq, false);
	return;

cleanup_free:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
	return;
}
EXPORT_SYMBOL(netfs_readahead);
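
/*
 * Illustrative sketch (not part of this file): the "netfs context" that the
 * helpers here require is, in the filesystems that use them (e.g. afs, 9p,
 * ceph), a struct netfs_inode embedded at the head of the filesystem's own
 * inode and initialised when the inode is set up, roughly along these lines:
 *
 *	struct myfs_inode {
 *		struct netfs_inode netfs;	// netfs context; placed first
 *		// ... filesystem-private fields ...
 *	};
 *
 *	static void myfs_init_netfs_context(struct myfs_inode *mi)
 *	{
 *		netfs_inode_init(&mi->netfs, &myfs_req_ops);
 *	}
 *
 * The "myfs" names are hypothetical; myfs_req_ops would be the filesystem's
 * struct netfs_request_ops table.
 */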

/**
 * netfs_read_folio - Helper to manage a read_folio request
 * @file: The file to read from
 * @folio: The folio to read
 *
 * Fulfil a read_folio request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_read_folio(struct file *file, struct folio *folio)
{
	struct address_space *mapping = folio_file_mapping(folio);
	struct netfs_io_request *rreq;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	int ret;

	_enter("%lx", folio_index(folio));

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READPAGE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto alloc_error;
	}

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto discard;
	}

	netfs_stat(&netfs_n_rh_readpage);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
	return netfs_begin_read(rreq, true);

discard:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
	folio_unlock(folio);
	return ret;
}
EXPORT_SYMBOL(netfs_read_folio);
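
/*
 * Illustrative sketch (not part of this file): filesystems built on these
 * helpers typically point their address_space_operations straight at
 * netfs_read_folio() and netfs_readahead(), for example (hypothetical
 * "myfs"; myfs_write_begin is sketched after netfs_write_begin() below):
 *
 *	const struct address_space_operations myfs_aops = {
 *		.read_folio	= netfs_read_folio,
 *		.readahead	= netfs_readahead,
 *		.write_begin	= myfs_write_begin,
 *		// ... dirty_folio, writepages, release_folio, etc.
 *	};
 */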

/*
 * Prepare a folio for writing without reading first
 * @folio: The folio being prepared
 * @pos: starting position for the write
 * @len: length of write
 * @always_fill: T if the folio should always be completely filled/cleared
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the folio and return true.  Otherwise, return false.
 */
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
				  bool always_fill)
{
	struct inode *inode = folio_inode(folio);
	loff_t i_size = i_size_read(inode);
	size_t offset = offset_in_folio(folio, pos);
	size_t plen = folio_size(folio);

	if (unlikely(always_fill)) {
		if (pos - offset + len <= i_size)
			return false; /* Page entirely before EOF */
		zero_user_segment(&folio->page, 0, plen);
		folio_mark_uptodate(folio);
		return true;
	}

	/* Full folio write */
	if (offset == 0 && len >= plen)
		return true;

	/* Page entirely beyond the end of the file */
	if (pos - offset >= i_size)
		goto zero_out;

	/* Write that covers from the start of the folio to EOF or beyond */
	if (offset == 0 && (pos + len) >= i_size)
		goto zero_out;

	return false;
zero_out:
	zero_user_segments(&folio->page, 0, offset, offset + len, plen);
	return true;
}

/**
 * netfs_write_begin - Helper to prepare for writing
 * @ctx: The netfs context
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the folio chosen)
 * @_folio: Where to put the resultant folio
 * @_fsdata: Place for the netfs to store a cookie
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the
 * cache feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_op, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the folio is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead or it may return an error.  It may also unlock and put the
 * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
 * will cause the folio to be re-got and the process to be retried.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_write_begin(struct netfs_inode *ctx,
		      struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len, struct folio **_folio,
		      void **_fsdata)
{
	struct netfs_io_request *rreq;
	struct folio *folio;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (ctx->ops->check_write_begin) {
		/* Allow the netfs (eg. ceph) to flush conflicts. */
		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
		if (ret < 0) {
			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
			goto error;
		}
		if (!folio)
			goto retry;
	}

	if (folio_test_uptodate(folio))
		goto have_folio;

	/* If the page is beyond the EOF, we want to clear it - unless it's
	 * within the cache granule containing the EOF, in which case we need
	 * to preload the granule.
	 */
	if (!netfs_is_cache_enabled(ctx) &&
	    netfs_skip_folio_read(folio, pos, len, false)) {
		netfs_stat(&netfs_n_rh_write_zskip);
		goto have_folio_no_wait;
	}

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READ_FOR_WRITE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto error;
	}
	rreq->no_unlock_folio = folio_index(folio);
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto error_put;
	}

	netfs_stat(&netfs_n_rh_write_begin);
	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);

	/* Expand the request to meet caching requirements and download
	 * preferences.
	 */
	ractl._nr_pages = folio_nr_pages(folio);
	netfs_rreq_expand(rreq, &ractl);

	/* We hold the folio locks, so we can drop the references */
	folio_get(folio);
	while (readahead_folio(&ractl))
		;

	ret = netfs_begin_read(rreq, true);
	if (ret < 0)
		goto error;

have_folio:
	ret = folio_wait_fscache_killable(folio);
	if (ret < 0)
		goto error;
have_folio_no_wait:
	*_folio = folio;
	_leave(" = 0");
	return 0;

error_put:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
error:
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
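
/*
 * Illustrative sketch (not part of this file): a filesystem's ->write_begin()
 * is usually a thin wrapper that hands its embedded netfs context to
 * netfs_write_begin() and passes the resulting folio back to the VM, roughly
 * along these lines (hypothetical "myfs"; MYFS_I() and struct myfs_inode are
 * assumed helpers - see ceph or 9p for real callers):
 *
 *	static int myfs_write_begin(struct file *file,
 *				    struct address_space *mapping,
 *				    loff_t pos, unsigned len,
 *				    struct page **pagep, void **fsdata)
 *	{
 *		struct myfs_inode *mi = MYFS_I(mapping->host);
 *		struct folio *folio;
 *		int ret;
 *
 *		ret = netfs_write_begin(&mi->netfs, file, mapping, pos, len,
 *					&folio, fsdata);
 *		if (ret < 0)
 *			return ret;
 *
 *		*pagep = &folio->page;
 *		return 0;
 *	}
 */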