// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

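/*
 * Unmap the buffer if it was mapped via kmap_local_page() and mark it
 * unmapped again; the page reference itself is left untouched.
 */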
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

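/*
 * Unmap and release the page backing @buf.  Safe to call on a buffer
 * that holds no page (it simply returns early).
 */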
void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

/*
 * Derive the block size from inode->i_blkbits so that this also works
 * with the anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
		  enum erofs_kmap_type type)
{
	struct inode *inode = buf->inode;
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

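/*
 * Pick the inode that backs metadata I/O: the fscache pseudo inode in
 * fscache mode, or the block device inode otherwise.
 */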
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->inode = EROFS_SB(sb)->s_fscache->inode;
	else
		buf->inode = sb->s_bdev->bd_inode;
}

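/*
 * Bind @buf to the metadata inode of @sb and read the block at @blkaddr.
 * Callers pair this with erofs_put_metabuf() once done, e.g.:
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *kaddr = erofs_read_metabuf(&buf, sb, blkaddr, EROFS_KMAP);
 *
 *	if (!IS_ERR(kaddr)) {
 *		...
 *		erofs_put_metabuf(&buf);
 *	}
 */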
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, blkaddr, type);
}

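/*
 * Map a logical range for flat inodes: data blocks are laid out
 * contiguously from vi->raw_blkaddr, optionally followed by inline
 * (tail-packed) data stored right after the on-disk inode and xattrs.
 */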
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

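/*
 * Map the logical range starting at map->m_la.  Out-of-bounds accesses
 * are left unmapped; chunk-based inodes are resolved via the on-disk
 * block map or chunk indexes, anything else goes through flat mode.
 */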
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bounds access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(sb, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
				  EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

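/*
 * Resolve a physical address to its backing device.  A non-zero
 * m_deviceid selects an extra device directly; otherwise, for
 * multi-device images without per-extent device ids, the device is
 * looked up by the address range it was mapped at.
 */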
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev_handle ?
					      dif->bdev_handle->bdev : NULL;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

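/*
 * Fill in an iomap for the given range: a hole for unmapped extents, an
 * inline mapping backed by a metabuf for tail-packed data, or a plain
 * mapped extent otherwise.
 */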
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb,
					 erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

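/*
 * Release the metabuf stashed in iomap->private by erofs_iomap_begin()
 * for IOMAP_INLINE extents; there is nothing to undo otherwise.
 */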
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

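/*
 * fiemap is implemented on top of iomap; compressed inodes are handled
 * by z_erofs_iomap_report_ops and need CONFIG_EROFS_FS_ZIP.
 */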
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

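/*
 * Read paths: DAX inodes go through dax_iomap_rw(), O_DIRECT through
 * iomap_dio_rw() after checking logical block alignment, and buffered
 * reads through the page cache via filemap_read().
 */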
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

#ifdef CONFIG_FS_DAX
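/* DAX page faults are simply forwarded to the generic iomap DAX helpers. */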
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
};