xref: /openbmc/linux/fs/erofs/data.c (revision a5c0b780)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

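/*
 * Completion callback for raw read bios: mark each page uptodate on
 * success (or set PageError on failure), unlock it, then release the bio.
 */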
static inline void read_endio(struct bio *bio)
{
	struct super_block *const sb = bio->bi_private;
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		err = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

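/* set up a bio for raw (uncompressed) reads starting at block @blkaddr */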
static struct bio *erofs_grab_raw_bio(struct super_block *sb,
				      erofs_blk_t blkaddr,
				      unsigned int nr_pages)
{
	struct bio *bio = bio_alloc(GFP_NOIO, nr_pages);

	bio->bi_end_io = read_endio;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
	bio->bi_private = sb;
	return bio;
}

/*
 * Get the page of meta block @blkaddr from the block device's page cache,
 * reading it from disk if it is not already uptodate.
 * prio -- true is used for dir;
 * nofail -- use __GFP_NOFAIL and retry failed reads a few times.
 */
struct page *__erofs_get_meta_page(struct super_block *sb,
				   erofs_blk_t blkaddr, bool prio, bool nofail)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	/* prefer retrying in the allocator to blindly looping below */
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
		(nofail ? __GFP_NOFAIL : 0);
	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
	struct page *page;
	int err;

repeat:
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (!page) {
		DBG_BUGON(nofail);
		return ERR_PTR(-ENOMEM);
	}
	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		bio = erofs_grab_raw_bio(sb, blkaddr, 1);

		if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
			err = -EFAULT;
			goto err_out;
		}

		__submit_bio(bio, REQ_OP_READ,
			     REQ_META | (prio ? REQ_PRIO : 0));

		lock_page(page);

		/* this page has been truncated by others */
		if (page->mapping != mapping) {
unlock_repeat:
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (!PageUptodate(page)) {
			if (io_retries) {
				--io_retries;
				goto unlock_repeat;
			}
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

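/*
 * Map a logical offset of a flat (uncompressed) inode to its physical
 * location: either a run of raw blocks, or the tail-packed inline data
 * stored right after the on-disk inode for EROFS_INODE_FLAT_INLINE.
 */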
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			errln("inline data cross block boundary @ nid %llu",
			      vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

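/*
 * Dispatch block mapping: compressed inodes go through the z_erofs
 * iterator, everything else is handled by the flat-mode mapper above.
 */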
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

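/*
 * Read one page of an uncompressed inode. Contiguous pages are batched
 * into the caller-provided bio; the bio is submitted whenever a gap is
 * hit or it becomes full, and a new one is returned for the caller to
 * keep chaining.
 */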
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for the readpage case, bio also equals NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the holed page */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_raw_bio(sb, blknr, nblocks);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit the bio in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reassigned to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}

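/*
 * Readahead entry point: pull pages off the list, add them to the page
 * cache and chain them into as few bios as possible with
 * erofs_read_raw_page(); readahead I/O errors are not propagated here.
 */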
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all the page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_I(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (bio)
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}

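/* get_block_t helper backing erofs_bmap() through generic_block_bmap() */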
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

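/*
 * For tail-packed inline inodes, the inline tail lives inside the inode
 * metadata rather than in a raw data block, so bmap reports it as
 * unmapped (0).
 */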
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};