xref: /openbmc/linux/fs/ext4/readpage.c (revision c0be8e6f)
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

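/*
 * Per-bio context for the post-read steps (decryption and/or verity).
 * @bio: the read bio being post-processed
 * @work: work item used to run each step on the appropriate workqueue
 * @cur_step: the bio_post_read_step currently being processed
 * @enabled_steps: bitmask of the STEP_* values required for this bio
 */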
struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

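/*
 * Complete the read: mark each page uptodate (or clear the flag on I/O
 * error), unlock it, free any attached post-read context, and drop the bio.
 */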
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		if (bio->bi_status)
			ClearPageUptodate(page);
		else
			SetPageUptodate(page);
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

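/*
 * Decryption work, run from the fscrypt workqueue.  If decryption succeeds,
 * move on to the remaining post-read steps; otherwise complete the bio right
 * away.
 */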
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

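/*
 * Post-read processing is needed only when a post-read context was attached
 * at submission time and the bio completed without an I/O error.
 */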
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

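/*
 * Return true if the page at index @idx of @inode holds data that must be
 * verified against the inode's fs-verity Merkle tree.  Pages entirely beyond
 * i_size are not verified here; in particular, that is how the verity
 * metadata stored past EOF is read without being verified against itself.
 */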
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

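/*
 * If the blocks read by @bio will need decryption and/or verity verification
 * once the read completes, allocate a bio_post_read_ctx and hang it off
 * bio->bi_private so that mpage_end_io() can kick off the post-read steps.
 */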
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

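/*
 * Reads of a verity file are allowed to go all the way up to s_maxbytes so
 * that the verity metadata stored beyond i_size can be read through the page
 * cache; everything else is limited to i_size as usual.
 */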
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

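/*
 * Read folios into the page cache, packing as many blocks as possible into
 * each bio.  Called either for readahead (@rac != NULL; @folio is unused and
 * folios are taken from @rac) or for a single locked folio (@rac == NULL).
 * Folios this path cannot handle (see the header comment) fall back to
 * block_read_full_folio().
 */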
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct folio *folio)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;
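		/*
		 * fully_mapped stays set only while every block seen so far in
		 * this folio is mapped; first_hole records the index of the
		 * first unmapped block, or blocks_per_page if there is none.
		 */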

		if (rac)
			folio = readahead_folio(rac);
		prefetchw(&folio->flags);

		if (folio_buffers(folio))
			goto confused;

		block_in_file = next_block =
			(sector_t)folio->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this folio.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					folio_set_error(folio);
					folio_zero_segment(folio, 0,
							  folio_size(folio));
					folio_unlock(folio);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			folio_zero_segment(folio, first_hole << blkbits,
					  folio_size(folio));
			if (first_hole == 0) {
				if (ext4_need_verity(inode, folio->index) &&
				    !fsverity_verify_page(&folio->page))
					goto set_error_page;
				folio_mark_uptodate(folio);
				folio_unlock(folio);
				continue;
			}
		} else if (fully_mapped) {
			folio_set_mappedtodisk(folio);
		}

		/*
		 * This folio will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, folio->index);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (!bio_add_folio(bio, folio, length, 0))
			goto submit_and_realloc;

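		/*
		 * Submit the bio now if the last block added lies on a mapping
		 * boundary (EXT4_MAP_BOUNDARY) or if the folio ends with a
		 * hole; otherwise remember the last block in the bio so the
		 * next folio can be appended to it.
		 */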
		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		continue;
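	/*
	 * confused: this folio has buffers, non-contiguous blocks, or data
	 * after a hole, so hand it to the buffer_head-based read path instead.
	 */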
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!folio_test_uptodate(folio))
			block_read_full_folio(folio, ext4_get_block);
		else
			folio_unlock(folio);
	next_page:
		; /* A label shall be followed by a statement until C23 */
	}
	if (bio)
		submit_bio(bio);
	return 0;
}

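/*
 * Create the slab cache and mempool used for bio_post_read_ctx allocations.
 * Preallocating NUM_PREALLOC_POST_READ_CTXS contexts in the mempool keeps
 * reads making progress even under memory pressure.
 */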
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}