// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */
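/*
 * Note: ext4 is expected to reach this code through its address_space
 * operations; in current trees ext4_read_folio() and ext4_readahead()
 * (fs/ext4/inode.c) call ext4_mpage_readpages() with a single folio or
 * with a readahead_control, respectively.
 */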

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

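/*
 * Slab cache and mempool for bio_post_read_ctx allocations.  The mempool
 * keeps NUM_PREALLOC_POST_READ_CTXS contexts preallocated so that reads
 * needing post-processing can always make forward progress, even under
 * memory pressure.
 */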
static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

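/*
 * Per-bio context for post-read processing.  @enabled_steps is a bitmask of
 * the bio_post_read_step values that apply to this bio, and @cur_step tracks
 * how far the state machine in bio_post_read_processing() has advanced.
 * @work is used to punt each step to the appropriate workqueue.
 */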
struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

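/*
 * Final completion: mark each folio in the bio uptodate (or clear the flag
 * on error), unlock it, free any post-read context and release the bio.
 */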
static void __read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;

		if (bio->bi_status)
			folio_clear_uptodate(folio);
		else
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

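/*
 * Workqueue function for the decryption step: decrypt the bio's data in
 * place.  On success, hand the bio to the next post-read step; on failure,
 * finish it via __read_end_io().
 */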
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

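/*
 * Workqueue function for the fs-verity step: verify the bio's data and then
 * complete the bio.  The post-read context is freed up front; see the
 * comment below about avoiding a mempool deadlock.
 */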
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

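/*
 * Advance the post-read state machine: hand the bio to the next enabled
 * step's workqueue (decryption, then verity), or finish the read once no
 * steps remain.
 */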
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

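/*
 * Post-read processing is only needed if the read succeeded and a context
 * was attached at submission time (i.e. decryption and/or verity apply).
 */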
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

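/*
 * fs-verity only verifies data within i_size; folios past i_size (which on
 * ext4 hold the verity metadata itself) must not be verified.
 */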
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

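/*
 * If the bio's data will need decryption (fs-layer fscrypt) and/or verity
 * verification after I/O completes, allocate a bio_post_read_ctx recording
 * the enabled steps and attach it via bio->bi_private.
 */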
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

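/*
 * Reads of a verity file may extend beyond i_size in order to fetch the
 * Merkle tree pages, so use s_maxbytes as the limit in that case; otherwise
 * limit reads to i_size.
 */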
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

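/*
 * Read one folio (@rac == NULL) or a whole readahead window (@rac != NULL)
 * by building large read bios directly from the extent map.  Folios that
 * have buffers, non-contiguous blocks, or a non-hole after a hole fall back
 * to block_read_full_folio() via the "confused" path.
 */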
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct folio *folio)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac)
			folio = readahead_folio(rac);
		prefetchw(&folio->flags);

		if (folio_buffers(folio))
			goto confused;

		block_in_file = next_block =
			(sector_t)folio->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this folio.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					folio_set_error(folio);
					folio_zero_segment(folio, 0,
							   folio_size(folio));
					folio_unlock(folio);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			folio_zero_segment(folio, first_hole << blkbits,
					   folio_size(folio));
			if (first_hole == 0) {
				if (ext4_need_verity(inode, folio->index) &&
				    !fsverity_verify_folio(folio))
					goto set_error_page;
				folio_mark_uptodate(folio);
				folio_unlock(folio);
				continue;
			}
		} else if (fully_mapped) {
			folio_set_mappedtodisk(folio);
		}

		/*
		 * This folio will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, folio->index);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (!bio_add_folio(bio, folio, length, 0))
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		continue;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!folio_test_uptodate(folio))
			block_read_full_folio(folio, ext4_get_block);
		else
			folio_unlock(folio);
	next_page:
		; /* A label shall be followed by a statement until C23 */
	}
	if (bio)
		submit_bio(bio);
	return 0;
}

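/*
 * Allocate the slab cache and mempool used for bio_post_read_ctx; expected
 * to be called once during ext4 initialization.
 */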
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

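/* Tear down the post-read mempool and slab cache on exit. */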
void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}