// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Unlock the folios in a read operation.  We need to set PG_fscache on any
 * folios we're going to write back before we unlock them.
 */
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;
        struct folio *folio;
        unsigned int iopos, account = 0;
        pgoff_t start_page = rreq->start / PAGE_SIZE;
        pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
        bool subreq_failed = false;

        XA_STATE(xas, &rreq->mapping->i_pages, start_page);

        if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
                __clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
                list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                        __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
                }
        }

        /* Walk through the pagecache and the I/O request lists simultaneously.
         * We may have a mixture of cached and uncached sections and we only
         * really want to write out the uncached sections.  This is slightly
         * complicated by the possibility that we might have huge pages with a
         * mixture inside.
         */
        subreq = list_first_entry(&rreq->subrequests,
                                  struct netfs_io_subrequest, rreq_link);
        iopos = 0;
        subreq_failed = (subreq->error < 0);

        trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

        rcu_read_lock();
        xas_for_each(&xas, folio, last_page) {
                unsigned int pgpos, pgend;
                bool pg_failed = false;

                if (xas_retry(&xas, folio))
                        continue;

                pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
                pgend = pgpos + folio_size(folio);

                for (;;) {
                        if (!subreq) {
                                pg_failed = true;
                                break;
                        }
                        if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
                                folio_start_fscache(folio);
                        pg_failed |= subreq_failed;
                        if (pgend < iopos + subreq->len)
                                break;

                        account += subreq->transferred;
                        iopos += subreq->len;
                        if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
                                subreq = list_next_entry(subreq, rreq_link);
                                subreq_failed = (subreq->error < 0);
                        } else {
                                subreq = NULL;
                                subreq_failed = false;
                        }
                        if (pgend == iopos)
                                break;
                }

                if (!pg_failed) {
                        flush_dcache_folio(folio);
                        folio_mark_uptodate(folio);
                }

                if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
                        if (folio_index(folio) == rreq->no_unlock_folio &&
                            test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
                                _debug("no unlock");
                        else
                                folio_unlock(folio);
                }
        }
        rcu_read_unlock();

        task_io_account_read(account);
        if (rreq->netfs_ops->done)
                rreq->netfs_ops->done(rreq);
}
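
/*
 * Worked example for the folio/subrequest walk in netfs_rreq_unlock_folios()
 * above (the sizes are illustrative only, not taken from this code): suppose
 * a 64KiB request was split into a 16KiB subrequest served from the cache and
 * a 48KiB subrequest read from the server, and the pagecache holds 32KiB
 * folios.  The first folio (pgpos 0, pgend 32768) is covered by both
 * subrequests, so the inner loop visits both of them: it ORs in each one's
 * failure state and starts PG_fscache if either is marked for copying to the
 * cache.  Only if every covering subrequest succeeded is the folio marked
 * uptodate.
 */
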
static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
                                         loff_t *_start, size_t *_len, loff_t i_size)
{
        struct netfs_cache_resources *cres = &rreq->cache_resources;

        if (cres->ops && cres->ops->expand_readahead)
                cres->ops->expand_readahead(cres, _start, _len, i_size);
}

static void netfs_rreq_expand(struct netfs_io_request *rreq,
                              struct readahead_control *ractl)
{
        /* Give the cache a chance to change the request parameters.  The
         * resultant request must contain the original region.
         */
        netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);

        /* Give the netfs a chance to change the request parameters.  The
         * resultant request must contain the original region.
         */
        if (rreq->netfs_ops->expand_readahead)
                rreq->netfs_ops->expand_readahead(rreq);

        /* Expand the request if the cache wants it to start earlier.  Note
         * that the expansion may get further extended if the VM wishes to
         * insert THPs and the preferred start and/or end wind up in the middle
         * of THPs.
         *
         * If this is the case, however, the THP size should be an integer
         * multiple of the cache granule size, so we get a whole number of
         * granules to deal with.
         */
        if (rreq->start != readahead_pos(ractl) ||
            rreq->len != readahead_length(ractl)) {
                readahead_expand(ractl, rreq->start, rreq->len);
                rreq->start = readahead_pos(ractl);
                rreq->len = readahead_length(ractl);

                trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
                                 netfs_read_trace_expanded);
        }
}
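
/*
 * Illustrative example of the expansion done by netfs_rreq_expand() above
 * (the granule size is hypothetical): with a cache that stores data in 256KiB
 * granules, a readahead of 16KiB at file position 260KiB may be widened by
 * the cache to cover 256KiB-512KiB so that whole granules can later be
 * written back.  readahead_expand() then tries to populate the extra folios;
 * rreq->start and rreq->len are re-read from the ractl afterwards because the
 * expansion may only partially succeed (for instance if some of those folios
 * are already present in the pagecache).
 */
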
/**
 * netfs_readahead - Helper to manage a read request
 * @ractl: The description of the readahead request
 *
 * Fulfil a readahead request by drawing data from the cache if possible, or
 * the netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O
 * requests from different sources will get munged together.  If necessary, the
 * readahead window can be expanded in either direction to a more convenient
 * alignment for RPC efficiency or to make storage in the cache feasible.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
void netfs_readahead(struct readahead_control *ractl)
{
        struct netfs_io_request *rreq;
        struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
        int ret;

        _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

        if (readahead_count(ractl) == 0)
                return;

        rreq = netfs_alloc_request(ractl->mapping, ractl->file,
                                   readahead_pos(ractl),
                                   readahead_length(ractl),
                                   NETFS_READAHEAD);
        if (IS_ERR(rreq))
                return;

        if (ctx->ops->begin_cache_operation) {
                ret = ctx->ops->begin_cache_operation(rreq);
                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
                        goto cleanup_free;
        }

        netfs_stat(&netfs_n_rh_readahead);
        trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
                         netfs_read_trace_readahead);

        netfs_rreq_expand(rreq, ractl);

        /* Drop the refs on the folios here rather than in the cache or
         * filesystem.  The locks will be dropped in netfs_rreq_unlock_folios().
         */
        while (readahead_folio(ractl))
                ;

        netfs_begin_read(rreq, false);
        return;

cleanup_free:
        netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
        return;
}
EXPORT_SYMBOL(netfs_readahead);
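
/*
 * Usage sketch (not part of this file): a network filesystem that has set up
 * its netfs context can normally wire this helper straight into its
 * address_space_operations, e.g.:
 *
 *	const struct address_space_operations myfs_aops = {
 *		.readahead	= netfs_readahead,
 *		...
 *	};
 *
 * "myfs" is a hypothetical name used for illustration; see fs/afs and
 * fs/ceph for in-tree users.
 */
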
/**
 * netfs_read_folio - Helper to manage a read_folio request
 * @file: The file to read from
 * @folio: The folio to read
 *
 * Fulfil a read_folio request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_read_folio(struct file *file, struct folio *folio)
{
        struct address_space *mapping = folio_file_mapping(folio);
        struct netfs_io_request *rreq;
        struct netfs_inode *ctx = netfs_inode(mapping->host);
        int ret;

        _enter("%lx", folio_index(folio));

        rreq = netfs_alloc_request(mapping, file,
                                   folio_file_pos(folio), folio_size(folio),
                                   NETFS_READPAGE);
        if (IS_ERR(rreq)) {
                ret = PTR_ERR(rreq);
                goto alloc_error;
        }

        if (ctx->ops->begin_cache_operation) {
                ret = ctx->ops->begin_cache_operation(rreq);
                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
                        goto discard;
        }

        netfs_stat(&netfs_n_rh_readpage);
        trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
        return netfs_begin_read(rreq, true);

discard:
        netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
alloc_error:
        folio_unlock(folio);
        return ret;
}
EXPORT_SYMBOL(netfs_read_folio);
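
/*
 * Usage sketch (not part of this file): the "netfs context" that the helpers
 * above require is a struct netfs_inode wrapping the VFS inode, kept at the
 * start of the filesystem's own inode structure, e.g.:
 *
 *	struct myfs_inode {
 *		struct netfs_inode netfs;
 *		...
 *	};
 *
 * It would typically be initialised when the inode is set up, for instance:
 *
 *	netfs_inode_init(&myfs_inode(inode)->netfs, &myfs_request_ops);
 *
 * where struct myfs_inode, myfs_inode() and myfs_request_ops (a struct
 * netfs_request_ops) are hypothetical, and the filesystem's
 * address_space_operations point .readahead and .read_folio at
 * netfs_readahead() and netfs_read_folio() above.  See fs/afs and fs/ceph
 * for real examples.
 */
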
/*
 * Prepare a folio for writing without reading first
 * @folio: The folio being prepared
 * @pos: starting position for the write
 * @len: length of write
 * @always_fill: T if the folio should always be completely filled/cleared
 *
 * In some cases, write_begin doesn't need to read at all:
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
 *
 * If any of these criteria are met, then zero out the unwritten parts
 * of the folio and return true.  Otherwise, return false.
 */
static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
                                  bool always_fill)
{
        struct inode *inode = folio_inode(folio);
        loff_t i_size = i_size_read(inode);
        size_t offset = offset_in_folio(folio, pos);
        size_t plen = folio_size(folio);

        if (unlikely(always_fill)) {
                if (pos - offset + len <= i_size)
                        return false; /* Page entirely before EOF */
                zero_user_segment(&folio->page, 0, plen);
                folio_mark_uptodate(folio);
                return true;
        }

        /* Full folio write */
        if (offset == 0 && len >= plen)
                return true;

        /* Page entirely beyond the end of the file */
        if (pos - offset >= i_size)
                goto zero_out;

        /* Write that covers from the start of the folio to EOF or beyond */
        if (offset == 0 && (pos + len) >= i_size)
                goto zero_out;

        return false;
zero_out:
        zero_user_segments(&folio->page, 0, offset, offset + len, plen);
        return true;
}

/**
 * netfs_write_begin - Helper to prepare for writing
 * @ctx: The netfs context
 * @file: The file to read from
 * @mapping: The mapping to read from
 * @pos: File position at which the write will begin
 * @len: The length of the write (may extend beyond the end of the folio chosen)
 * @_folio: Where to put the resultant folio
 * @_fsdata: Place for the netfs to store a cookie
 *
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
 * Multiple I/O requests from different sources will get munged together.  If
 * necessary, the readahead window can be expanded in either direction to a
 * more convenient alignment for RPC efficiency or to make storage in the cache
 * feasible.
 *
 * The calling netfs must provide a table of operations, only one of which,
 * issue_read, is mandatory.
 *
 * The check_write_begin() operation can be provided to check for and flush
 * conflicting writes once the folio is grabbed and locked.  It is passed a
 * pointer to the fsdata cookie that gets returned to the VM to be passed to
 * write_end.  It is permitted to sleep.  It should return 0 if the request
 * should go ahead or it may return an error.  It may also unlock and put the
 * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
 * will cause the folio to be re-got and the process to be retried.
 *
 * The calling netfs must initialise a netfs context contiguous to the vfs
 * inode before calling this.
 *
 * This is usable whether or not caching is enabled.
 */
int netfs_write_begin(struct netfs_inode *ctx,
                      struct file *file, struct address_space *mapping,
                      loff_t pos, unsigned int len, struct folio **_folio,
                      void **_fsdata)
{
        struct netfs_io_request *rreq;
        struct folio *folio;
        unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;

        DEFINE_READAHEAD(ractl, file, NULL, mapping, index);

retry:
        folio = __filemap_get_folio(mapping, index, fgp_flags,
                                    mapping_gfp_mask(mapping));
        if (!folio)
                return -ENOMEM;

        if (ctx->ops->check_write_begin) {
                /* Allow the netfs (eg. ceph) to flush conflicts. */
                ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
                if (ret < 0) {
                        trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
                        goto error;
                }
                if (!folio)
                        goto retry;
        }

        if (folio_test_uptodate(folio))
                goto have_folio;

        /* If the page is beyond the EOF, we want to clear it - unless it's
         * within the cache granule containing the EOF, in which case we need
         * to preload the granule.
         */
        if (!netfs_is_cache_enabled(ctx) &&
            netfs_skip_folio_read(folio, pos, len, false)) {
                netfs_stat(&netfs_n_rh_write_zskip);
                goto have_folio_no_wait;
        }

        rreq = netfs_alloc_request(mapping, file,
                                   folio_file_pos(folio), folio_size(folio),
                                   NETFS_READ_FOR_WRITE);
        if (IS_ERR(rreq)) {
                ret = PTR_ERR(rreq);
                goto error;
        }
        rreq->no_unlock_folio = folio_index(folio);
        __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

        if (ctx->ops->begin_cache_operation) {
                ret = ctx->ops->begin_cache_operation(rreq);
                if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
                        goto error_put;
        }

        netfs_stat(&netfs_n_rh_write_begin);
        trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);

        /* Expand the request to meet caching requirements and download
         * preferences.
         */
        ractl._nr_pages = folio_nr_pages(folio);
        netfs_rreq_expand(rreq, &ractl);

        /* We hold the folio locks, so we can drop the references */
        folio_get(folio);
        while (readahead_folio(&ractl))
                ;

        ret = netfs_begin_read(rreq, true);
        if (ret < 0)
                goto error;

have_folio:
        ret = folio_wait_fscache_killable(folio);
        if (ret < 0)
                goto error;
have_folio_no_wait:
        *_folio = folio;
        _leave(" = 0");
        return 0;

error_put:
        netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
error:
        if (folio) {
                folio_unlock(folio);
                folio_put(folio);
        }
        _leave(" = %d", ret);
        return ret;
}
EXPORT_SYMBOL(netfs_write_begin);
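
/*
 * Usage sketch (not part of this file): a filesystem's ->write_begin()
 * implementation would typically wrap this helper along these lines, with
 * "myfs" being hypothetical:
 *
 *	static int myfs_write_begin(struct file *file,
 *				    struct address_space *mapping,
 *				    loff_t pos, unsigned int len,
 *				    struct page **pagep, void **fsdata)
 *	{
 *		struct folio *folio;
 *		int ret;
 *
 *		ret = netfs_write_begin(netfs_inode(mapping->host), file,
 *					mapping, pos, len, &folio, fsdata);
 *		if (ret < 0)
 *			return ret;
 *		*pagep = &folio->page;
 *		return 0;
 *	}
 *
 * A filesystem that needs to flush conflicting writes first would also supply
 * a check_write_begin() op in its netfs_request_ops (as ceph does), which
 * this helper calls once the folio has been grabbed and locked.
 */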