xref: /openbmc/linux/fs/erofs/data.c (revision e655b5b3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

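/*
 * Completion handler for raw read bios: optionally inject a read I/O
 * fault for testing, then mark every page uptodate (or with an error)
 * and unlock it so sleeping readers can proceed.
 */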
static inline void read_endio(struct bio *bio)
{
	struct super_block *const sb = bio->bi_private;
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		err = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

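/*
 * Allocate a raw read bio aimed at filesystem block @blkaddr with room
 * for @nr_pages pages; completion is routed to read_endio() above.
 */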
static struct bio *erofs_grab_raw_bio(struct super_block *sb,
				      erofs_blk_t blkaddr,
				      unsigned int nr_pages)
{
	struct bio *bio = bio_alloc(GFP_NOIO, nr_pages);

	bio->bi_end_io = read_endio;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
	bio->bi_private = sb;
	return bio;
}

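/*
 * Read one metadata block through the block device mapping and return
 * the page still locked; callers must unlock and release it themselves.
 * A usage sketch (not part of this file):
 *
 *	page = erofs_get_meta_page(sb, blkaddr);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	ptr = page_address(page);
 *	...
 *	unlock_page(page);
 *	put_page(page);
 */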
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS);
	struct page *page;
	int err;

repeat:
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (!page)
		return ERR_PTR(-ENOMEM);

	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		bio = erofs_grab_raw_bio(sb, blkaddr, 1);

		if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
			err = -EFAULT;
			goto err_out;
		}

		__submit_bio(bio, REQ_OP_READ, REQ_META);
		lock_page(page);

		/* this page has been truncated by others */
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

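/*
 * Translate a logical offset of a flat-layout (uncompressed) inode into
 * its physical location.  Plain data maps contiguously starting at
 * vi->raw_blkaddr; for tail-end packed inodes the last block lives
 * inline right after the on-disk inode (and xattrs) and is flagged
 * EROFS_MAP_META so the caller reads it as metadata.
 */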
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bounds access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* layout 2 (inode inline): inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			errln("inline data crosses block boundary @ nid %llu",
			      vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

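/*
 * Dispatch block mapping by data layout: compressed inodes go through
 * z_erofs_map_blocks_iter() (dropping the cached metadata page it may
 * leave in map->mpage); everything else uses the flat mapping above.
 */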
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

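/*
 * Read one page of raw (uncompressed) data.  @bio carries the bio being
 * built up across consecutive calls so that physically contiguous pages
 * can be merged into a single request; it is submitted and restarted
 * whenever the block run breaks.  Returns the bio to keep filling, NULL
 * if nothing is pending, or ERR_PTR() on failure.
 */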
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for the readpage case, bio is also NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		__submit_bio(bio, REQ_OP_READ, 0);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the page if it falls in a hole */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* implies err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* implies err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_raw_bio(sb, blknr, nblocks);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reset to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		__submit_bio(bio, REQ_OP_READ, 0);

	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}

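/*
 * Readahead entry point: pull pages off the MM-supplied list in file
 * order, add each to the page cache and feed it to
 * erofs_read_raw_page(), which batches physically contiguous pages
 * into one bio.
 */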
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_I(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (the read tail ends in a gap) */
	if (bio)
		__submit_bio(bio, REQ_OP_READ, 0);
	return 0;
}

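/*
 * Minimal get_block callback for generic_block_bmap(): translate the
 * position to a physical block number on success.  EROFS is read-only,
 * so @create is ignored.
 */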
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

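/*
 * For tail-end packed inodes the last block is inline metadata rather
 * than addressable raw data, so bmap on it must report "unmapped" (0).
 */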
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};