xref: /openbmc/linux/fs/erofs/data.c (revision e2c71e74)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

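/*
 * Read completion callback for raw (uncompressed) bios: flag each page
 * uptodate or error according to the bio status, then unlock it.
 */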
static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

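/*
 * Allocate a read bio for @nr_pages pages starting at block @blkaddr;
 * metadata reads are additionally tagged with REQ_META.
 */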
static struct bio *erofs_grab_raw_bio(struct super_block *sb,
				      erofs_blk_t blkaddr,
				      unsigned int nr_pages,
				      bool ismeta)
{
	struct bio *bio = bio_alloc(GFP_NOIO, nr_pages);

	bio->bi_end_io = erofs_readendio;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
	if (ismeta)
		bio->bi_opf = REQ_OP_READ | REQ_META;
	else
		bio->bi_opf = REQ_OP_READ;

	return bio;
}

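/*
 * Get block @blkaddr from the block device page cache, reading it in
 * synchronously if it is not cached yet.  Returns the page locked and
 * uptodate on success, or an ERR_PTR() on failure.
 */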
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS);
	struct page *page;
	int err;

repeat:
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (!page)
		return ERR_PTR(-ENOMEM);

	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		bio = erofs_grab_raw_bio(sb, blkaddr, 1, true);

		if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
			err = -EFAULT;
			goto err_out;
		}

		submit_bio(bio);
		lock_page(page);

		/* this page has been truncated by others */
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

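/*
 * Map a logical extent for the flat (uncompressed) layouts: plain data
 * blocks are contiguous starting at raw_blkaddr, while the tail of an
 * EROFS_INODE_FLAT_INLINE inode sits in the metadata area right after
 * the on-disk inode and its xattrs.
 */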
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			errln("inline data crosses block boundary @ nid %llu",
			      vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

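/* dispatch to z_erofs_map_blocks_iter() or the flatmode path by layout */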
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

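/*
 * Read one raw page, merging it into @bio if it directly follows
 * *last_block; otherwise the pending bio is submitted and a new one is
 * grabbed.  Returns the bio to be continued, NULL if nothing is left
 * pending, or an ERR_PTR() on failure.
 */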
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for the readpage case, bio also equals NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the holed page */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_raw_bio(sb, blknr, nblocks, false);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit the bio in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reset to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
}

/*
 * since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}

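/*
 * Read ahead up to @nr_pages pages, chaining physically contiguous
 * blocks into as few bios as possible.
 */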
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_I(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (bio)
		submit_bio(bio);
	return 0;
}

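/* get_block callback for generic_block_bmap(), used by erofs_bmap() below */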
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

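/*
 * The inline tail of an EROFS_INODE_FLAT_INLINE inode lives in the
 * metadata area, so its block address cannot be reported and 0 is
 * returned for it instead.
 */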
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};