// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

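/*
 * Helpers for the per-caller metadata buffer (struct erofs_buf): unmapping
 * drops the kmap()/kmap_atomic() mapping according to how the page was
 * mapped, and putting additionally releases the page cache reference.
 */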
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap(buf->page);
	else if (buf->kmap_type == EROFS_KMAP_ATOMIC)
		kunmap_atomic(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

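/*
 * Read the metadata block at @blkaddr through @inode's page cache and return
 * a pointer to the start of that block, mapped as requested by @type (NULL
 * for EROFS_NO_KMAP).  The page cached in @buf is reused when it already
 * covers the block; otherwise it is released and a new page is read in.
 */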
void *erofs_bread(struct erofs_buf *buf, struct inode *inode,
		  erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	struct address_space *const mapping = inode->i_mapping;
	erofs_off_t offset = blknr_to_addr(blkaddr);
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);
		page = read_cache_page_gfp(mapping, index,
				mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(page))
			return page;
		/* should already be PageUptodate, no need to lock page */
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap(page);
		else if (type == EROFS_KMAP_ATOMIC)
			buf->base = kmap_atomic(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

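/* same as erofs_bread(), but always reads from the primary block device */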
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	return erofs_bread(buf, sb->s_bdev->bd_inode, blkaddr, type);
}

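/*
 * Map a logical extent for flat (non-chunked) inodes: data is laid out
 * contiguously from vi->raw_blkaddr, optionally followed by a tail-packed
 * last block stored inline right after the on-disk inode and xattrs.
 */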
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

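/*
 * Resolve the logical extent at map->m_la.  Flat inodes are handled by
 * erofs_map_blocks_flatmode(); chunk-based inodes look up the chunk index
 * or block-map entry in the inode metadata, which either points at a
 * (device, block) pair or marks the chunk as a hole (EROFS_NULL_ADDR).
 */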
static int erofs_map_blocks(struct inode *inode,
			    struct erofs_map_blocks *map, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map, flags);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, flags, 0);
	return err;
}

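/*
 * Translate a physical address resolved by erofs_map_blocks() to its backing
 * device.  m_deviceid selects an extra device directly; otherwise, when extra
 * devices exist, the device whose mapped range contains m_pa is picked and
 * m_pa is rebased to that device.  The primary device is used by default.
 */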
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	/* primary device by default */
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = blknr_to_addr(dif->mapped_blkaddr);
			length = blknr_to_addr(dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

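/*
 * iomap_begin: convert the (offset, length) request into an iomap extent by
 * chaining erofs_map_blocks() and erofs_map_dev().  Unmapped ranges are
 * reported as holes; tail-packed inline data is exposed as IOMAP_INLINE with
 * the kmapped metabuf pointer stashed in iomap->private so that
 * erofs_iomap_end() can release it.
 */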
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(inode->i_sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, inode->i_sb,
					 erofs_blknr(mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

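/*
 * iomap_end: rebuild the erofs_buf from the mapping kept in iomap->private
 * for IOMAP_INLINE extents and drop it; nothing to do otherwise.
 */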
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

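/*
 * fiemap is implemented on top of iomap; compressed inodes go through the
 * zmap report ops when CONFIG_EROFS_FS_ZIP is enabled.
 */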
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking needs to be
 * held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

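/*
 * Direct I/O must be aligned to the logical block size of the backing block
 * device (or to the filesystem block size when there is no block device);
 * reject misaligned position, length or buffer addresses.
 */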
static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t align = iocb->ki_pos | iov_iter_count(to) |
		iov_iter_alignment(to);
	struct block_device *bdev = inode->i_sb->s_bdev;
	unsigned int blksize_mask;

	if (bdev)
		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
	else
		blksize_mask = (1 << inode->i_blkbits) - 1;

	if (align & blksize_mask)
		return -EINVAL;
	return 0;
}

static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* no need to take the (shared) inode lock since it's a read-only filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		int err = erofs_prepare_dio(iocb, to);

		if (!err)
			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
					    NULL, 0, 0);
		if (err < 0)
			return err;
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

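/*
 * DAX mmap: writable shared mappings are rejected since the filesystem is
 * read-only, and VM_HUGEPAGE is set to allow huge-page (PMD) DAX faults.
 */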
static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};