// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

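/*
 * Completion callback for raw read bios: mark each page uptodate (or
 * flag it with an error on I/O failure), unlock it, and release the bio.
 */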
static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

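/*
 * Read one metadata block through the block device's page cache and
 * return it locked; the caller must unlock_page() and put_page() it.
 */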
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}

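/*
 * Translate a logical extent (map->m_la) into its physical counterpart
 * for flat (uncompressed) inodes, including the tail-packed inline case.
 * For example, assuming 4KiB blocks, a 9000-byte tail-packed file has
 * nblocks = 3 and lastblk = 2: offsets below 8192 map into the raw block
 * area, while the remaining 808 bytes live inline in the inode metadata.
 */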
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

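	/*
	 * The inline tail block (if any) is not part of the raw block
	 * area, so exclude it from lastblk.
	 */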
	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data crosses block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

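	/* the success path falls through: err is still 0 here */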
err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

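/*
 * Read one raw (uncompressed) page. Returns the bio to be extended with
 * further contiguous pages, NULL once the page has been completed without
 * a pending bio, or ERR_PTR() on failure.
 */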
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      unsigned int *eblks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/*
	 * Submit the pending bio if the next block is not contiguous with
	 * the last one or the bio is already full; note that bio is also
	 * NULL for the readpage case.
	 */
	if (bio &&
	    (*last_block + 1 != current_block || !*eblks)) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the page lying in a hole */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* err is implicitly 0 here, see erofs_map_blocks_flatmode */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with the inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* err is implicitly 0 here, see erofs_map_blocks_flatmode */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of contiguous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);

		*eblks = bio_max_segs(nblocks);
		bio = bio_alloc(GFP_NOIO, *eblks);

		bio->bi_end_io = erofs_readendio;
		bio_set_dev(bio, sb->s_bdev);
		bio->bi_iter.bi_sector = (sector_t)blknr <<
			LOG_SECTORS_PER_BLOCK;
		bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
	}

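	/*
	 * If the page cannot be added in full, submit the current bio and
	 * retry with a freshly allocated one (via submit_bio_retry above).
	 */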
	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or the bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;
	--*eblks;
	*last_block = current_block;
	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if the page was updated manually, the contiguous run has a gap */
	if (bio)
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since we don't have write or truncate flows, no inode locking
 * needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	unsigned int eblks;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, &eblks, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio)
		submit_bio(bio);
	return 0;
}

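/*
 * Like .readpage above, but batches contiguous pages into larger bios
 * and tolerates per-page failures.
 */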
static void erofs_raw_access_readahead(struct readahead_control *rac)
{
	erofs_off_t last_block;
	unsigned int eblks;
	struct bio *bio = NULL;
	struct page *page;

	trace_erofs_readpages(rac->mapping->host, readahead_index(rac),
			readahead_count(rac), true);

	while ((page = readahead_page(rac))) {
		prefetchw(&page->flags);

		bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
				readahead_count(rac), &eblks, true);

		/* all the page errors are ignored during readahead */
		if (IS_ERR(bio)) {
			pr_err("%s, readahead error at page %lu of nid %llu\n",
			       __func__, page->index,
			       EROFS_I(rac->mapping->host)->nid);

			bio = NULL;
		}

		put_page(page);
	}

	if (bio)
		submit_bio(bio);
}

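/*
 * For tail-packed inodes, the inline last block lives in the inode
 * metadata rather than in the raw block area, so it has no physical
 * block address to report; return 0 for it.
 */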
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct erofs_map_blocks map = {
		.m_la = blknr_to_addr(block),
	};

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW))
		return erofs_blknr(map.m_pa);

	return 0;
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readahead = erofs_raw_access_readahead,
	.bmap = erofs_bmap,
};