// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

/*
 * Get a metadata page from the block device mapping; it is returned
 * locked and uptodate on success.
 */
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}

static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	/* block count; assumes EROFS_BLKSIZ == PAGE_SIZE as this driver requires */
	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

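/*
 * For chunk-based inodes, the on-disk chunk array follows the inode
 * base and xattrs, aligned to the entry size: either a plain block map
 * of little-endian block addresses (EROFS_BLOCK_MAP_ENTRY_SIZE each),
 * or full chunk indexes which also carry a device id.  A blkaddr of
 * EROFS_NULL_ADDR marks the chunk as a hole.
 */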
static int erofs_map_blocks(struct inode *inode,
			    struct erofs_map_blocks *map, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct page *page;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map, flags);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos));
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = page_address(page) + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = page_address(page) + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	unlock_page(page);
	put_page(page);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, flags, err);
	return err;
}

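/*
 * Map a filesystem-wide physical address to the backing (block or DAX)
 * device and the offset within it.  The primary device is used unless a
 * non-zero device id is given; with extra devices and no explicit id,
 * the per-device mapped ranges are scanned instead.
 */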
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	/* primary device by default */
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = blknr_to_addr(dif->mapped_blkaddr);
			length = blknr_to_addr(dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

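/*
 * Convert an erofs mapping into the iomap that the generic iomap code
 * expects: unmapped extents become IOMAP_HOLE, tail-packed inline data
 * becomes IOMAP_INLINE backed by a locked meta page (released again in
 * erofs_iomap_end()), and everything else is IOMAP_MAPPED.
 */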
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(inode->i_sb, &mdev);
	if (ret)
		return ret;

	iomap->bdev = mdev.m_bdev;
	iomap->dax_dev = mdev.m_daxdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		struct page *ipage;

		iomap->type = IOMAP_INLINE;
		ipage = erofs_get_meta_page(inode->i_sb,
					    erofs_blknr(mdev.m_pa));
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);
		iomap->inline_data = page_address(ipage) +
					erofs_blkoff(mdev.m_pa);
		iomap->private = ipage;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
	}
	return 0;
}

static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct page *ipage = iomap->private;

	if (ipage) {
		DBG_BUGON(iomap->type != IOMAP_INLINE);
		unlock_page(ipage);
		put_page(ipage);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking needs to
 * be held at the moment.
 */
static int erofs_readpage(struct file *file, struct page *page)
{
	return iomap_readpage(page, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

/* direct I/O position, length and memory must be logical-block aligned */
static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t align = iocb->ki_pos | iov_iter_count(to) |
		iov_iter_alignment(to);
	struct block_device *bdev = inode->i_sb->s_bdev;
	unsigned int blksize_mask;

	if (bdev)
		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
	else
		blksize_mask = (1 << inode->i_blkbits) - 1;

	if (align & blksize_mask)
		return -EINVAL;
	return 0;
}

static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* no need to take (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		int err = erofs_prepare_dio(iocb, to);

		if (!err)
			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
					    NULL, 0, 0);
		if (err < 0)
			return err;
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_readpage,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
};

#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	/* shared writable mappings make no sense on a read-only image */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};