xref: /openbmc/linux/fs/erofs/data.c (revision 901181b7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}
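
/*
 * A minimal caller sketch (illustrative, mirroring what erofs_map_blocks()
 * below actually does): the meta page comes back locked and uptodate, and
 * the caller owns both the page lock and a page reference:
 *
 *	page = erofs_get_meta_page(sb, erofs_blknr(pos));
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	ptr = page_address(page) + erofs_blkoff(pos);
 *	... read metadata at ptr ...
 *	unlock_page(page);
 *	put_page(page);
 */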

static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* datalayout 2 (inode inline B): inode base, [xattrs], then the inline tail block */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

	map->m_llen = map->m_plen;
err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}
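
/*
 * Worked example of the flatmode math above, assuming 4KiB blocks
 * (EROFS_BLKSIZ == PAGE_SIZE here): for a 9000-byte FLAT_INLINE file,
 * nblocks = DIV_ROUND_UP(9000, 4096) = 3 and lastblk = 2, so offsets
 * below 8192 map to blknr_to_addr(vi->raw_blkaddr) + m_la, while the
 * 808-byte tail at offset 8192 resolves into the metadata area right
 * after the inode and xattrs and is flagged EROFS_MAP_META.
 */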

static int erofs_map_blocks(struct inode *inode,
			    struct erofs_map_blocks *map, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct page *page;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	int err = 0;

	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bounds accesses unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
		return erofs_map_blocks_flatmode(inode, map, flags);

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos));
	if (IS_ERR(page))
		return PTR_ERR(page);

	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = page_address(page) + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = page_address(page) + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	unlock_page(page);
	put_page(page);
out:
	map->m_llen = map->m_plen;
	return err;
}
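
/*
 * Example of the chunk lookup arithmetic above (illustrative numbers):
 * with 1MiB chunks (chunkbits = 20) and 4-byte block-map entries, a read
 * at m_la = 5MiB + 100KiB has chunknr = 5, so its entry lives at
 * pos = ALIGN(iloc + inode_isize + xattr_isize, 4) + 4 * 5, i.e. right
 * after the inode in the metadata area, and m_la is rounded down to the
 * 5MiB chunk start before m_plen is clamped to the chunk (or file) end.
 */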

int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	/* primary device by default */
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = blknr_to_addr(dif->mapped_blkaddr);
			length = blknr_to_addr(dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}
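
/*
 * Illustrative multi-device example (4KiB blocks assumed): if an extra
 * device was registered with mapped_blkaddr = 1024 and blocks = 512, any
 * m_pa in [4MiB, 6MiB) falls inside [startoff, startoff + length) above,
 * so it is rebased to a device-relative offset and redirected to that
 * device's bdev/daxdev; everything else stays on the primary device.
 */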

static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(inode->i_sb, &mdev);
	if (ret)
		return ret;

	iomap->bdev = mdev.m_bdev;
	iomap->dax_dev = mdev.m_daxdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		struct page *ipage;

		iomap->type = IOMAP_INLINE;
		ipage = erofs_get_meta_page(inode->i_sb,
					    erofs_blknr(mdev.m_pa));
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);
		iomap->inline_data = page_address(ipage) +
					erofs_blkoff(mdev.m_pa);
		iomap->private = ipage;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
	}
	return 0;
}

static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct page *ipage = iomap->private;

	if (ipage) {
		DBG_BUGON(iomap->type != IOMAP_INLINE);
		unlock_page(ipage);
		put_page(ipage);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};
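
/*
 * Note the pairing above: for IOMAP_INLINE extents, ->iomap_begin()
 * parks the locked meta page in iomap->private, and ->iomap_end()
 * drops the page lock and the reference once iomap has consumed the
 * tail data, so the page stays pinned for exactly one iomap operation.
 */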

int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking
 * needs to be held at the moment.
 */
static int erofs_readpage(struct file *file, struct page *page)
{
	return iomap_readpage(page, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t align = iocb->ki_pos | iov_iter_count(to) |
		iov_iter_alignment(to);
	struct block_device *bdev = inode->i_sb->s_bdev;
	unsigned int blksize_mask;

	if (bdev)
		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
	else
		blksize_mask = (1 << inode->i_blkbits) - 1;

	if (align & blksize_mask)
		return -EINVAL;
	return 0;
}
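
/*
 * The OR trick above checks all three alignment constraints at once:
 * e.g. with 512-byte logical blocks, blksize_mask = 511, so a direct
 * read of 1024 bytes at pos 4096 into a 512-byte-aligned buffer yields
 * (4096 | 1024 | 0) & 511 == 0 and is allowed, while a read at pos 100
 * leaves low bits set (100 & 511 != 0) and fails with -EINVAL.
 */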

static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		int err = erofs_prepare_dio(iocb, to);

		if (!err)
			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
					    NULL, 0, 0);
		if (err < 0)
			return err;
	}
	return filemap_read(iocb, to, 0);
}
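
/*
 * Read dispatch order above: DAX mappings go straight to dax_iomap_rw(),
 * O_DIRECT requests use iomap_dio_rw() once alignment has been checked
 * (a positive return from erofs_prepare_dio() falls through to the
 * buffered path), and everything else is served by filemap_read().
 */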

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_readpage,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif
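
/*
 * erofs is read-only, so a shared mapping that could ever become
 * writable has nowhere to be written back; erofs_file_mmap() therefore
 * rejects VM_SHARED | VM_MAYWRITE up front and only installs the DAX
 * fault handlers for the remaining (read-only or private) mappings.
 */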

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};