Lines matching "pre-filled" (search query: +full:pre +full:- +full:filled), excerpted from fs/netfs/buffered_read.c, the kernel's netfs high-level buffered read support.
// SPDX-License-Identifier: GPL-2.0-or-later
/* Network filesystem high-level buffered read support.
 ...
In netfs_rreq_unlock_folios():

	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	...
	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
		__clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
		list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
			__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
	...
	subreq = list_first_entry(&rreq->subrequests,
	...
	subreq_failed = (subreq->error < 0);
	...
		pg_end = folio_pos(folio) + folio_size(folio) - 1;
	...
			if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
	...
			sreq_end = subreq->start + subreq->len - 1;
	...
			account += subreq->transferred;
			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
	...
				subreq_failed = (subreq->error < 0);
	...
		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
			if (folio_index(folio) == rreq->no_unlock_folio &&
			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
	...
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
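The first two lines of the excerpt turn the request's byte extent into an inclusive page-index range, and the walk that follows charges each subrequest's transferred bytes to the account. A minimal userspace model of just the index arithmetic; PAGE_SIZE here is a local stand-in for the kernel constant, and the tail is assumed page-aligned, as the unlock path expects:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096ULL	/* local stand-in for the kernel constant */

	/*
	 * Model of the start_page/last_page computation: the byte extent
	 * [start, start + len) spans pages start / PAGE_SIZE through
	 * ((start + len) / PAGE_SIZE) - 1 inclusive.  Assumes len > 0 and a
	 * page-aligned tail, which is what the unlock path operates on.
	 */
	static void byte_range_to_pages(uint64_t start, uint64_t len,
					uint64_t *first, uint64_t *last)
	{
		*first = start / PAGE_SIZE;
		*last = ((start + len) / PAGE_SIZE) - 1;
	}

	int main(void)
	{
		uint64_t first, last;

		byte_range_to_pages(16384, 32768, &first, &last);	/* pages 4..11 */
		printf("pages %llu..%llu\n",
		       (unsigned long long)first, (unsigned long long)last);
		return 0;
	}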
In netfs_cache_expand_readahead():

	struct netfs_cache_resources *cres = &rreq->cache_resources;

	if (cres->ops && cres->ops->expand_readahead)
		cres->ops->expand_readahead(cres, _start, _len, i_size);
In netfs_rreq_expand():

	netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
	...
	if (rreq->netfs_ops->expand_readahead)
		rreq->netfs_ops->expand_readahead(rreq);
	...
	if (rreq->start != readahead_pos(ractl) ||
	    rreq->len != readahead_length(ractl)) {
		readahead_expand(ractl, rreq->start, rreq->len);
		rreq->start = readahead_pos(ractl);
		rreq->len = readahead_length(ractl);
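Expansion happens in two stages: first the cache and then the filesystem may widen rreq->start/rreq->len, and only if the window actually changed does readahead_expand() ask the VM for the extra folios. A sketch of the kind of granule alignment a cache's expand_readahead hook performs; the 256KiB granule and the clamping policy are illustrative assumptions, not the cachefiles implementation:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative cache granule: 256KiB, a power of two. */
	#define CACHE_GRANULE_SIZE (256 * 1024ULL)

	/*
	 * Round the requested window out to whole cache granules so cache
	 * I/O stays block-aligned, without reaching past EOF.  Policy and
	 * granule size are assumptions for illustration only.
	 */
	static void expand_to_granule(uint64_t *start, uint64_t *len, uint64_t i_size)
	{
		uint64_t end = *start + *len;

		*start = *start & ~(CACHE_GRANULE_SIZE - 1);	/* round start down */
		end = (end + CACHE_GRANULE_SIZE - 1) & ~(CACHE_GRANULE_SIZE - 1);
		if (end > i_size)				/* don't expand past EOF */
			end = i_size;
		*len = end > *start ? end - *start : 0;
	}

	int main(void)
	{
		uint64_t start = 300000, len = 10000;

		expand_to_granule(&start, &len, 1048576);
		printf("expanded to [%llu, +%llu)\n",
		       (unsigned long long)start, (unsigned long long)len);
		return 0;
	}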
From the kernel-doc for netfs_readahead():

 * netfs_readahead - Helper to manage a read request
 ...
 * the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O
 ...
In netfs_readahead():

	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
	...
	rreq = netfs_alloc_request(ractl->mapping, ractl->file,
	...
	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
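Note the error triage: -ENOMEM, -EINTR and -ERESTARTSYS from begin_cache_operation abort the readahead, while any other failure simply means the request proceeds without the cache. A hedged, kernel-context sketch of such a hook, modelled on the pattern netfs clients like afs use at this point in the API (not a verbatim copy of any one of them):

	/*
	 * Sketch (kernel context, hedged): a begin_cache_operation hook that
	 * binds the inode's fscache cookie to the request's cache resources.
	 * Modelled on the pattern used by netfs clients such as afs in this
	 * era of the API; assumes fscache is configured in.
	 */
	static int my_begin_cache_operation(struct netfs_io_request *rreq)
	{
		struct netfs_inode *ctx = netfs_inode(rreq->inode);

		return fscache_begin_read_operation(&rreq->cache_resources,
						    netfs_i_cookie(ctx));
	}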
From the kernel-doc for netfs_read_folio():

 * netfs_read_folio - Helper to manage a read_folio request
 ...
 * possible, or the netfs if not. Space beyond the EOF is zero-filled.
In netfs_read_folio():

	struct netfs_inode *ctx = netfs_inode(mapping->host);
	...
	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
	...
	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
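Both helpers are meant to be plugged directly into an inode's address_space_operations; a hedged sketch of that wiring (the table is illustrative, and a real one carries many more methods):

	/*
	 * Sketch (kernel context, hedged): wiring the two read helpers into
	 * an address_space_operations table.  A real table defines many more
	 * methods; only the two netfs entry points matter here.
	 */
	static const struct address_space_operations my_netfs_aops = {
		.read_folio	= netfs_read_folio,
		.readahead	= netfs_readahead,
	};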
From the kernel-doc for netfs_skip_folio_read(), listing the cases in which the pre-read can be skipped:

 * @always_fill: T if the folio should always be completely filled/cleared
 ...
 * - full folio write
 * - write that lies in a folio that is completely beyond EOF
 * - write that covers the folio from start to EOF or beyond it
In netfs_skip_folio_read():

	if (pos - offset + len <= i_size)
	...
	zero_user_segment(&folio->page, 0, plen);
	...
	if (pos - offset >= i_size)
	...
	zero_user_segments(&folio->page, 0, offset, offset + len, plen);
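The three skip cases in the kernel-doc above reduce to arithmetic on the write position, the folio bounds and i_size. A userspace model of the decision; the function name and parameters are mine, and the kernel version additionally zeroes the uncovered parts of the folio via the zero_user_segment*() calls shown above:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Model of the skip decision.  pos is the file position of the write,
	 * offset its offset within the folio, len the write length, plen the
	 * folio size and i_size the file size.  Mirrors the three cases from
	 * the kernel-doc; the zeroing side effects are left out.
	 */
	static bool can_skip_read(uint64_t pos, size_t offset, size_t len,
				  size_t plen, uint64_t i_size)
	{
		if (offset == 0 && len >= plen)
			return true;		/* full folio write */
		if (pos - offset >= i_size)
			return true;		/* folio entirely beyond EOF */
		if (offset == 0 && pos + len >= i_size)
			return true;		/* write covers folio start to EOF or beyond */
		return false;
	}

	int main(void)
	{
		/* 4KiB folio at offset 8192; a 100-byte write at pos 8192 in an
		 * 8292-byte file reaches EOF, so the pre-read can be skipped. */
		printf("%d\n", can_skip_read(8192, 0, 100, 4096, 8292));
		return 0;
	}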
From the kernel-doc for netfs_write_begin():

 * netfs_write_begin - Helper to prepare for writing
 ...
 * Pre-read data for a write-begin request by drawing data from the cache if
 * possible, or the netfs if not. Space beyond the EOF is zero-filled.
 ...
 * will cause the folio to be re-got and the process to be retried.
In netfs_write_begin():

	if (ctx->ops->check_write_begin) {
	...
		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
	...
	/* If the page is beyond the EOF, we want to clear it - unless it's
	...
	rreq->no_unlock_folio = folio_index(folio);
	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

	if (ctx->ops->begin_cache_operation) {
		ret = ctx->ops->begin_cache_operation(rreq);
		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
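The retry behaviour mentioned in the netfs_write_begin() kernel-doc is driven by check_write_begin(): the hook may unlock and drop the folio and clear *foliop, and netfs_write_begin() will then re-get the folio and start over. A hedged sketch of such a hook; my_folio_is_stale() is a hypothetical filesystem-specific validity test, not a real kernel function:

	/*
	 * Sketch (kernel context, hedged): a check_write_begin hook for the
	 * signature shown above.  my_folio_is_stale() is a hypothetical
	 * filesystem-specific test, not a real kernel function.  Returning 0
	 * with *foliop cleared makes netfs_write_begin() re-get the folio
	 * and retry the whole sequence.
	 */
	static int my_check_write_begin(struct file *file, loff_t pos,
					unsigned int len, struct folio **foliop,
					void **_fsdata)
	{
		if (my_folio_is_stale(*foliop)) {
			folio_unlock(*foliop);
			folio_put(*foliop);
			*foliop = NULL;
		}
		return 0;
	}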