// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Unlock the folios in a read operation.  We need to set PG_fscache on any
 * folios we're going to write back before we unlock them.
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	size_t account = 0;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
		}
	}

	/* Walk through the pagecache and the I/O request lists simultaneously.
	 * We may have a mixture of cached and uncached sections and we only
	 * really want to write out the uncached sections.  This is slightly
	 * complicated by the possibility that we might have huge pages with a
	 * mixture inside.
	 */
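	/* Each folio is checked against the subrequests that overlap it: it is
	 * only marked uptodate if every overlapping subrequest succeeded, and
	 * PG_fscache is set on it before unlocking if its data is due to be
	 * written back to the cache.
	 */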
	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		loff_t pg_end;
		bool pg_failed = false;

		if (xas_retry(&xas, folio))
			continue;

		pg_end = folio_pos(folio) + folio_size(folio) - 1;

		for (;;) {
			loff_t sreq_end;

			if (!subreq) {
				pg_failed = true;
				break;
			}
			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
				folio_start_fscache(folio);
			pg_failed |= subreq_failed;
			sreq_end = subreq->start + subreq->len - 1;
			if (pg_end < sreq_end)
				break;

			account += subreq->transferred;
			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}

			if (pg_end == sreq_end)
				break;
		}

		if (!pg_failed) {
			flush_dcache_folio(folio);
			folio_mark_uptodate(folio);
		}

		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
			if (folio_index(folio) == rreq->no_unlock_folio &&
			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
				_debug("no unlock");
			else
				folio_unlock(folio);
		}
	}
	rcu_read_unlock();

	task_io_account_read(account);
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}

static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
					 loff_t *_start, size_t *_len, loff_t i_size)
{
	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
}
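
/*
 * Ask the cache and then the network filesystem to expand the region to be
 * read, then try to get the VM's readahead window to match by calling
 * readahead_expand().  The request is then synced back to whatever the VM
 * actually managed to line up.
 */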
static void netfs_rreq_expand(struct netfs_io_request *rreq,
			      struct readahead_control *ractl)
{
	/* Give the cache a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

	/* Give the netfs a chance to change the request parameters.  The
	 * resultant request must contain the original region.
	 */
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);

	/* Expand the request if the cache wants it to start earlier.  Note
	 * that the expansion may get further extended if the VM wishes to
	 * insert THPs and the preferred start and/or end wind up in the middle
	 * of THPs.
	 *
	 * If this is the case, however, the THP size should be an integer
	 * multiple of the cache granule size, so we get a whole number of
	 * granules to deal with.
	 */
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);

		trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
				 netfs_read_trace_expanded);
	}
}
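
/* Illustrative sketch (not taken from any particular filesystem): a netfs
 * normally wires the read helpers below straight into its
 * address_space_operations, for example:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.read_folio	= netfs_read_folio,
 *		.readahead	= netfs_readahead,
 *		...
 *	};
 *
 * where "myfs" is a hypothetical filesystem.
 */
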
/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary, the
 * readahead window can be expanded in either direction to a more convenient
 * alignment for RPC efficiency or to make storage in the cache feasible.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl)
{
	struct netfs_io_request *rreq;
	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
	int ret;

	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

	if (readahead_count(ractl) == 0)
		return;

	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
				   readahead_pos(ractl),
				   readahead_length(ractl),
				   NETFS_READAHEAD);
	if (IS_ERR(rreq))
		return;

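	/* Note that only -ENOMEM, -EINTR and -ERESTARTSYS from the cache abort
	 * the request here; any other cache failure is ignored and the data is
	 * simply fetched from the server instead.
	 */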
	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto cleanup_free;
	}

	netfs_stat(&netfs_n_rh_readahead);
	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
			 netfs_read_trace_readahead);

	netfs_rreq_expand(rreq, ractl);

	/* Drop the refs on the folios here rather than in the cache or
	 * filesystem.  The locks will be dropped in netfs_rreq_unlock_folios().
	 */
	while (readahead_folio(ractl))
		;

	netfs_begin_read(rreq, false);
	return;

cleanup_free:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
	return;
}
EXPORT_SYMBOL(netfs_readahead);

/**
 * netfs_read_folio - Helper to manage a read_folio request
 * @file: The file to read from
 * @folio: The folio to read
 *
 * Fulfil a read_folio request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_read_folio(struct file *file, struct folio *folio)
{
	struct address_space *mapping = folio_file_mapping(folio);
	struct netfs_io_request *rreq;
	struct netfs_inode *ctx = netfs_inode(mapping->host);
	int ret;

	_enter("%lx", folio_index(folio));

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READPAGE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto alloc_error;
	}

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto discard;
	}

	netfs_stat(&netfs_n_rh_readpage);
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
	return netfs_begin_read(rreq, true);

discard:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
	folio_unlock(folio);
	return ret;
}
EXPORT_SYMBOL(netfs_read_folio);

/*
 * Prepare a folio for writing without reading first
 * @folio: The folio being prepared
 * @pos: starting position for the write
 * @len: length of write
 * @always_fill: T if the folio should always be completely filled/cleared
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the folio and return true.  Otherwise, return false.
 */
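/* For example (hypothetical numbers): with a 4KiB folio covering file bytes
 * 16384-20479 and an i_size of 18000, a 2000-byte write at position 16384
 * starts at the folio start and reaches EOF, so bytes 2000-4095 of the folio
 * are zeroed and no read needs to be issued.
 */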
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
				  bool always_fill)
{
	struct inode *inode = folio_inode(folio);
	loff_t i_size = i_size_read(inode);
	size_t offset = offset_in_folio(folio, pos);
	size_t plen = folio_size(folio);

	if (unlikely(always_fill)) {
		if (pos - offset + len <= i_size)
			return false; /* Page entirely before EOF */
		zero_user_segment(&folio->page, 0, plen);
		folio_mark_uptodate(folio);
		return true;
	}

	/* Full folio write */
	if (offset == 0 && len >= plen)
		return true;

	/* Page entirely beyond the end of the file */
	if (pos - offset >= i_size)
		goto zero_out;

	/* Write that covers from the start of the folio to EOF or beyond */
	if (offset == 0 && (pos + len) >= i_size)
		goto zero_out;

	return false;
zero_out:
	zero_user_segments(&folio->page, 0, offset, offset + len, plen);
	return true;
}
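
/* Illustrative sketch only ("myfs" is hypothetical): a filesystem's
 * ->write_begin() implementation typically wraps netfs_write_begin(), for
 * example:
 *
 *	static int myfs_write_begin(struct file *file,
 *				    struct address_space *mapping,
 *				    loff_t pos, unsigned len,
 *				    struct page **pagep, void **fsdata)
 *	{
 *		struct folio *folio;
 *		int ret;
 *
 *		ret = netfs_write_begin(netfs_inode(mapping->host), file,
 *					mapping, pos, len, &folio, fsdata);
 *		if (ret == 0)
 *			*pagep = &folio->page;
 *		return ret;
 *	}
 */
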
/**
 * netfs_write_begin - Helper to prepare for writing
 * @ctx: The netfs context
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the folio chosen)
 * @_folio: Where to put the resultant folio
 * @_fsdata: Place for the netfs to store a cookie
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_read, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the folio is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead or it may return an error.  It may also unlock and put the
 * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
 * will cause the folio to be re-got and the process to be retried.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_write_begin(struct netfs_inode *ctx,
		      struct file *file, struct address_space *mapping,
		      loff_t pos, unsigned int len, struct folio **_folio,
		      void **_fsdata)
{
	struct netfs_io_request *rreq;
	struct folio *folio;
	unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
	folio = __filemap_get_folio(mapping, index, fgp_flags,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (ctx->ops->check_write_begin) {
		/* Allow the netfs (e.g. ceph) to flush conflicts. */
		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
		if (ret < 0) {
			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
			goto error;
		}
		if (!folio)
			goto retry;
	}

	if (folio_test_uptodate(folio))
		goto have_folio;

	/* If the page is beyond the EOF, we want to clear it - unless it's
	 * within the cache granule containing the EOF, in which case we need
	 * to preload the granule.
	 */
	if (!netfs_is_cache_enabled(ctx) &&
	    netfs_skip_folio_read(folio, pos, len, false)) {
		netfs_stat(&netfs_n_rh_write_zskip);
		goto have_folio_no_wait;
	}

	rreq = netfs_alloc_request(mapping, file,
				   folio_file_pos(folio), folio_size(folio),
				   NETFS_READ_FOR_WRITE);
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto error;
	}
	rreq->no_unlock_folio = folio_index(folio);
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
			goto error_put;
	}

	netfs_stat(&netfs_n_rh_write_begin);
	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);

	/* Expand the request to meet caching requirements and download
	 * preferences.
	 */
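	/* Note: the private readahead_control set up by DEFINE_READAHEAD()
	 * above is aimed at just this folio; expanding it here lets
	 * netfs_rreq_expand() pull neighbouring folios into the page cache via
	 * readahead_expand() if the cache or the netfs wants a larger read.
	 */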
	ractl._nr_pages = folio_nr_pages(folio);
	netfs_rreq_expand(rreq, &ractl);

	/* We hold the folio locks, so we can drop the references */
	folio_get(folio);
	while (readahead_folio(&ractl))
		;

	ret = netfs_begin_read(rreq, true);
	if (ret < 0)
		goto error;

have_folio:
	ret = folio_wait_fscache_killable(folio);
	if (ret < 0)
		goto error;
have_folio_no_wait:
	*_folio = folio;
	_leave(" = 0");
	return 0;

error_put:
	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
error:
	if (folio) {
		folio_unlock(folio);
		folio_put(folio);
	}
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_write_begin);