// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

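/*
 * Bio completion handler for uncompressed reads: mark each page either
 * uptodate or in error according to bi_status, then unlock it so waiters
 * can proceed, and finally release the bio.
 */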
static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

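/*
 * Read a metadata block through the backing block device's page cache.
 * __GFP_FS is masked off so that memory reclaim triggered by this
 * allocation cannot recurse back into the filesystem.
 */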
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;

	return read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
}

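/*
 * Map a logical offset of a flat (uncompressed) inode to its physical
 * location.  Plain data blocks live contiguously starting at raw_blkaddr;
 * for tail-packed (EROFS_INODE_FLAT_INLINE) inodes the last block is
 * stored inline right after the on-disk inode and xattrs and is flagged
 * EROFS_MAP_META.
 */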
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* layout 2 (EROFS_INODE_FLAT_INLINE): inode, [xattrs], inline last block */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

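/*
 * Dispatch block mapping: compressed inodes go through the z_erofs
 * iterator (dropping its cached metapage afterwards); everything else is
 * handled by the flatmode helper above.
 */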
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

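/*
 * Read one page of raw (uncompressed) data.  The page is either zeroed
 * (a hole), copied from the tail-packed inline block, or attached to @bio
 * for an asynchronous block-device read.  A pending bio is submitted
 * first whenever the page does not directly follow the previous one or
 * the bio is already full.  Returns the bio to keep appending to, NULL
 * if nothing is pending, or an ERR_PTR on failure.
 */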
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that bio is also NULL in the ->readpage() case */
	if (bio &&
	    /* not contiguous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the page for a hole */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = bio_alloc(GFP_NOIO, nblocks);

		bio->bi_end_io = erofs_readendio;
		bio_set_dev(bio, sb->s_bdev);
		bio->bi_iter.bi_sector = (sector_t)blknr <<
			LOG_SECTORS_PER_BLOCK;
		bio->bi_opf = REQ_OP_READ;
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err still holds the bytes added; reset it to 0 before submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if the page was updated manually, the contiguous run of pages has a gap */
	if (bio)
submit_bio_out:
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since there are no write or truncate paths, no inode lock needs to be
 * held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}

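/*
 * ->readpages() implementation: pull pages off the readahead list, insert
 * them into the page cache and chain them into as few bios as possible.
 * Any bio still pending when the loop finishes is submitted at the end.
 */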
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_I(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (bio)
		submit_bio(bio);
	return 0;
}

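/*
 * get_block_t helper for generic_block_bmap(): translate the requested
 * block into a byte address (iblock << 9), map it, and report the
 * physical block number for mapped extents.
 */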
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

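/*
 * ->bmap() entry point (FIBMAP).  For tail-packed inodes the inline last
 * block has no standalone block address, so requests at or beyond the
 * last full block report 0.
 */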
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};