// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

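/*
 * Completion callback for raw (uncompressed) read bios: propagate the bio
 * status to each page, mark it up-to-date or in error accordingly, and
 * unlock it so that waiters can proceed.
 */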
static inline void read_endio(struct bio *bio)
{
	struct super_block *const sb = bio->bi_private;
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

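	/* fault injection: force an I/O error on this bio when enabled */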
	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		err = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

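/*
 * Allocate and initialize a read bio of @nr_pages vectors starting at
 * block @blkaddr; metadata reads are additionally tagged with REQ_META.
 */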
static struct bio *erofs_grab_raw_bio(struct super_block *sb,
				      erofs_blk_t blkaddr,
				      unsigned int nr_pages,
				      bool ismeta)
{
	struct bio *bio = bio_alloc(GFP_NOIO, nr_pages);

	bio->bi_end_io = read_endio;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
	bio->bi_private = sb;
	if (ismeta)
		bio->bi_opf = REQ_OP_READ | REQ_META;
	else
		bio->bi_opf = REQ_OP_READ;

	return bio;
}

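/*
 * Read metadata block @blkaddr through the block device page cache and
 * return the page locked and up-to-date, or an ERR_PTR on failure.
 */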
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS);
	struct page *page;
	int err;

repeat:
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (!page)
		return ERR_PTR(-ENOMEM);

	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		bio = erofs_grab_raw_bio(sb, blkaddr, 1, true);

		if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
			err = -EFAULT;
			goto err_out;
		}

		submit_bio(bio);
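		/* read_endio() unlocks the page on completion, wait for it */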
		lock_page(page);

		/* this page has been truncated by others */
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

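/*
 * Translate the logical offset in map->m_la into a physical extent for
 * flat (uncompressed) inodes, including the optional inline tail block.
 */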
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
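	/* the inline tail block, if any, is not addressed via raw_blkaddr */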
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bounds accesses unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there are no holes in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			errln("inline data cross block boundary @ nid %llu",
			      vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

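/*
 * Dispatch according to the inode data layout: compressed inodes go
 * through z_erofs_map_blocks_iter(), all others through flatmode.
 */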
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

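/*
 * Read a single uncompressed page.  Contiguous pages are batched into
 * the bio passed in by the caller; the still-open bio is returned so
 * that the next page can be appended, NULL if nothing is pending, or
 * an ERR_PTR on failure.
 */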
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that bio is also NULL for the readpage case */
	if (bio &&
	    /* not contiguous with the last block */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

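	/* no open bio, map the extent containing this page first */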
	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the page lying in a hole */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* err is implicitly 0 here, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline (tail-packed) data */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

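			/* copy the inline data and zero the page remainder */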
			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* err is implicitly 0 here, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* cap nblocks at the max # of contiguous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_raw_bio(sb, blknr, nblocks, false);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* the extent has ended or the bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err must be reset to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, contiguous pages have a gap */
	if (bio)
submit_bio_out:
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since there are no write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- it must be NULL */
	return 0;
}

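/*
 * ->readpages() keeps @pages in reverse index order (lowest index at
 * the tail), so take pages from the tail to read in ascending order
 * and chain contiguous reads into a single bio.
 */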
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_I(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (the read ends in a gap) */
	if (bio)
		submit_bio(bio);
	return 0;
}

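/*
 * Minimal get_block for generic_block_bmap(): the logical address is
 * formed from @iblock in 512-byte units (hence the << 9), and only
 * b_blocknr is filled in for mapped extents.
 */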
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

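	/* the tail-packed inline block has no block address to report */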
	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};