/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_INTERNAL_H
#define __EROFS_INTERNAL_H

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "erofs_fs.h"

/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

#define errln(x, ...)	pr_err(x "\n", ##__VA_ARGS__)
#define infoln(x, ...)	pr_info(x "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define debugln(x, ...)	pr_debug(x "\n", ##__VA_ARGS__)
#define DBG_BUGON	BUG_ON
#else
#define debugln(x, ...)	((void)0)
#define DBG_BUGON(x)	((void)(x))
#endif	/* !CONFIG_EROFS_FS_DEBUG */

enum {
	FAULT_KMALLOC,
	FAULT_READ_IO,
	FAULT_MAX,
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
extern const char *erofs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))

struct erofs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};
#endif	/* CONFIG_EROFS_FAULT_INJECTION */

/* EROFS_SUPER_MAGIC_V1 represents the whole filesystem */
#define EROFS_SUPER_MAGIC	EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;
typedef u64 erofs_off_t;
/* data type for filesystem-wide block numbers */
typedef u32 erofs_blk_t;

struct erofs_sb_info {
#ifdef CONFIG_EROFS_FS_ZIP
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	/* the dedicated workstation for compression */
	struct radix_tree_root workstn_tree;

	/* threshold for synchronous decompression */
	unsigned int max_sync_decompress_pages;

	unsigned int shrinker_run_no;

	/* current strategy of how to use managed cache */
	unsigned char cache_strategy;

	/* pseudo inode to manage cached pages */
	struct inode *managed_cache;
#endif	/* CONFIG_EROFS_FS_ZIP */
	u32 blocks;
	u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
#endif

	/* inode slot unit size in bit shift */
	unsigned char islotbits;

	u32 build_time_nsec;
	u64 build_time;

	/* what we really care about is nid, not ino.. */
	erofs_nid_t root_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u8 uuid[16];			/* 128-bit uuid for volume */
	u8 volume_name[16];		/* volume name */
	u32 feature_incompat;

	unsigned int mount_opt;

#ifdef CONFIG_EROFS_FAULT_INJECTION
	struct erofs_fault_info fault_info;	/* For fault injection */
#endif
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
#define erofs_show_injection_info(type)					\
	infoln("inject %s in %s of %pS", erofs_fault_name[type],	\
	       __func__, __builtin_return_address(0))

static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
	struct erofs_fault_info *ffi = &sbi->fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
	return false;
}

static inline void erofs_show_injection_info(int type)
{
}
#endif	/* !CONFIG_EROFS_FAULT_INJECTION */

static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
				  size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		erofs_show_injection_info(FAULT_KMALLOC);
		return NULL;
	}
	return kmalloc(size, flags);
}
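/*
 * A hedged usage sketch mirroring erofs_kmalloc() above; this helper is
 * hypothetical and not part of the original header. It shows the intended
 * caller pattern for FAULT_READ_IO: consult the injector first and fail
 * as if the underlying device had returned an I/O error.
 */
static inline int erofs_example_fake_read_error(struct erofs_sb_info *sbi)
{
	if (time_to_inject(sbi, FAULT_READ_IO)) {
		erofs_show_injection_info(FAULT_READ_IO);
		return -EIO;	/* pretend the block device failed */
	}
	return 0;
}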
#define EROFS_SB(sb)		((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode)	((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_FAULT_INJECTION	0x00000040

#define clear_opt(sbi, option)	((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(sbi, option)	((sbi)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(sbi, option)	((sbi)->mount_opt & EROFS_MOUNT_##option)

#ifdef CONFIG_EROFS_FS_ZIP
enum {
	EROFS_ZIP_CACHE_DISABLED,
	EROFS_ZIP_CACHE_READAHEAD,
	EROFS_ZIP_CACHE_READAROUND
};

#define EROFS_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
	/* the workgroup index in the workstation */
	pgoff_t index;

	/* overall workgroup reference count */
	atomic_t refcount;
};

#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	/*
	 * other observers should notice all modifications
	 * in the freezing period.
	 */
	smp_mb();
	atomic_set(&grp->refcount, orig_val);
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	return atomic_cond_read_relaxed(&grp->refcount,
					VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	/* no need to spin on UP platforms, let's just disable preemption. */
	if (val != atomic_read(&grp->refcount)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	int v = atomic_read(&grp->refcount);

	/* workgroup is never frozen on uniprocessor systems */
	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
	return v;
}
#endif	/* !CONFIG_SMP */
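/*
 * A minimal, hedged sketch of the freeze protocol above; the function is
 * hypothetical and only illustrates the intended caller pattern. Freezing
 * succeeds only if refcount still equals the expected value, so the caller
 * knows no new reference appeared in the meantime; unfreezing publishes the
 * final count and lifts the (SMP-only) lock value.
 */
static inline bool erofs_example_tryfree_workgroup(struct erofs_workgroup *grp)
{
	/* attempt teardown only if we hold the last reference */
	if (!erofs_workgroup_try_to_freeze(grp, 1))
		return false;

	/* ...private state can be torn down safely here... */

	/* drop to zero references and lift the lock value */
	erofs_workgroup_unfreeze(grp, 0);
	return true;
}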
/* hard limit of pages per compressed cluster */
#define Z_EROFS_CLUSTER_MAX_PAGES	(CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
#define EROFS_PCPUBUF_NR_PAGES		Z_EROFS_CLUSTER_MAX_PAGES
#else
#define EROFS_PCPUBUF_NR_PAGES		0
#endif	/* !CONFIG_EROFS_FS_ZIP */

/* we strictly follow PAGE_SIZE and no buffer head yet */
#define LOG_BLOCK_SIZE		PAGE_SHIFT

#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK	(PAGE_SHIFT - 9)

#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)

#define EROFS_BLKSIZ		(1 << LOG_BLOCK_SIZE)

#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used on this platform
#endif

#define EROFS_IO_MAX_RETRIES_NOFAIL	5

#define ROOT_NID(sb)		((sb)->root_nid)

#define erofs_blknr(addr)	((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr)	((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr)	((erofs_off_t)(nr) * EROFS_BLKSIZ)

/* byte address of the on-disk inode slot for a given nid */
static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
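/*
 * A hedged sketch (hypothetical helper): metadata readers consume the byte
 * address from iloc() as a block number plus an in-block offset. For
 * example, with islotbits == 5 (32-byte slots), meta_blkaddr == 2 and
 * nid == 4, the inode lives at byte 128 of block 2.
 */
static inline void erofs_example_iloc_split(struct erofs_sb_info *sbi,
					    erofs_nid_t nid,
					    erofs_blk_t *blkaddr,
					    unsigned int *ofs)
{
	const erofs_off_t pos = iloc(sbi, nid);

	*blkaddr = erofs_blknr(pos);
	*ofs = erofs_blkoff(pos);
}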
/* atomic flag definitions */
#define EROFS_V_EA_INITED_BIT	0
#define EROFS_V_Z_INITED_BIT	1

/* bitlock definitions (arranged in reverse order) */
#define EROFS_V_BL_XATTR_BIT	(BITS_PER_LONG - 1)
#define EROFS_V_BL_Z_BIT	(BITS_PER_LONG - 2)

struct erofs_vnode {
	erofs_nid_t nid;

	/* atomic flags (including bitlocks) */
	unsigned long flags;

	unsigned char datalayout;
	unsigned char inode_isize;
	unsigned short xattr_isize;

	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	union {
		erofs_blk_t raw_blkaddr;
#ifdef CONFIG_EROFS_FS_ZIP
		struct {
			unsigned short z_advise;
			unsigned char  z_algorithmtype[2];
			unsigned char  z_logical_clusterbits;
			unsigned char  z_physical_clusterbits[2];
		};
#endif	/* CONFIG_EROFS_FS_ZIP */
	};
	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_V(ptr)	\
	container_of(ptr, struct erofs_vnode, vfs_inode)

static inline unsigned long inode_datablocks(struct inode *inode)
{
	/* since i_size cannot be changed */
	return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}

static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,
					  unsigned int bits)
{
	return (value >> bit) & ((1 << bits) - 1);
}

static inline unsigned int erofs_inode_version(unsigned int value)
{
	return erofs_bitrange(value, EROFS_I_VERSION_BIT,
			      EROFS_I_VERSION_BITS);
}

static inline unsigned int erofs_inode_datalayout(unsigned int value)
{
	return erofs_bitrange(value, EROFS_I_DATALAYOUT_BIT,
			      EROFS_I_DATALAYOUT_BITS);
}

extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP
extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
#endif

/*
 * Logical to physical block mapping, used by erofs_map_blocks()
 *
 * Unlike other filesystems, it is used in two access modes:
 *
 * 1) RAW access mode:
 *
 *    Users pass a valid logical position (m_la, whose in-block offset is
 *    usually 0) and get back the valid m_pa plus the longest m_plen
 *    (in bytes).
 *
 *    Note that m_la in the RAW access mode refers to a position in the
 *    compressed on-disk data rather than in the uncompressed in-memory
 *    data for compressed files.
 *
 *    The in-block offset of m_pa equals that of m_la except for the
 *    inline data page.
 *
 * 2) Normal access mode:
 *
 *    If the inode is not compressed, this behaves the same as the RAW
 *    access mode. If it is compressed, users pass a valid m_la and get
 *    back the m_pa and m_plen needed to read the compressed data, plus
 *    updated m_la and m_llen indicating the start and length of the
 *    corresponding uncompressed extent in the file.
 */
enum {
	BH_Zipped = BH_PrivateStart,
	BH_FullMapped,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	(1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		(1 << BH_Meta)
/* The extent has been compressed */
#define EROFS_MAP_ZIPPED	(1 << BH_Zipped)
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED	(1 << BH_FullMapped)

struct erofs_map_blocks {
	erofs_off_t m_pa, m_la;
	u64 m_plen, m_llen;

	unsigned int m_flags;

	struct page *mpage;
};

/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW	0x0001

/* zmap.c */
#ifdef CONFIG_EROFS_FS_ZIP
int z_erofs_fill_inode(struct inode *inode);
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags);
#else
static inline int z_erofs_fill_inode(struct inode *inode)
{
	return -EOPNOTSUPP;
}

static inline int z_erofs_map_blocks_iter(struct inode *inode,
					  struct erofs_map_blocks *map,
					  int flags)
{
	return -EOPNOTSUPP;
}
#endif	/* !CONFIG_EROFS_FS_ZIP */

/* data.c */
static inline struct bio *erofs_grab_bio(struct super_block *sb,
					 erofs_blk_t blkaddr,
					 unsigned int nr_pages,
					 void *bi_private, bio_end_io_t endio,
					 bool nofail)
{
	const gfp_t gfp = GFP_NOIO;
	struct bio *bio;

	do {
		if (nr_pages == 1) {
			bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
			if (!bio) {
				DBG_BUGON(nofail);
				return ERR_PTR(-ENOMEM);
			}
			break;
		}
		/* halve the request on failure until a single page is left */
		bio = bio_alloc(gfp, nr_pages);
		nr_pages /= 2;
	} while (!bio);

	bio->bi_end_io = endio;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
	bio->bi_private = bi_private;
	return bio;
}

static inline void __submit_bio(struct bio *bio, unsigned int op,
				unsigned int op_flags)
{
	bio_set_op_attrs(bio, op, op_flags);
	submit_bio(bio);
}

struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr,
				   bool prio, bool nofail);

static inline struct page *erofs_get_meta_page(struct super_block *sb,
					       erofs_blk_t blkaddr, bool prio)
{
	return __erofs_get_meta_page(sb, blkaddr, prio, false);
}

int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
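/*
 * A hedged usage sketch (hypothetical helper; the real callers live in
 * data.c): resolve the on-disk byte position backing a logical offset in
 * RAW mode. Dropping the cached metapage reference at the end reflects an
 * assumption about one-shot callers; iterative callers keep mpage around
 * to avoid re-reading the same metadata block.
 */
static inline int erofs_example_raw_map(struct inode *inode,
					erofs_off_t la, erofs_off_t *pa)
{
	struct erofs_map_blocks map = { .m_la = la };	/* mpage starts NULL */
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.mpage)
		put_page(map.mpage);

	if (!(map.m_flags & EROFS_MAP_MAPPED))
		return -ENODATA;	/* e.g. the offset is beyond EOF */
	*pa = map.m_pa;
	return 0;
}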
static inline struct page *erofs_get_inline_page(struct inode *inode,
						 erofs_blk_t blkaddr)
{
	return erofs_get_meta_page(inode->i_sb, blkaddr,
				   S_ISDIR(inode->i_mode));
}

/* inode.c */
static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
{
#if BITS_PER_LONG == 32
	return (nid >> 32) ^ (nid & 0xffffffff);
#else
	return nid;
#endif
}

extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;

static inline void set_inode_fast_symlink(struct inode *inode)
{
	inode->i_op = &erofs_fast_symlink_iops;
}

static inline bool is_inode_fast_symlink(struct inode *inode)
{
	return inode->i_op == &erofs_fast_symlink_iops;
}

struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
int erofs_getattr(const struct path *path, struct kstat *stat,
		  u32 request_mask, unsigned int query_flags);

/* namei.c */
extern const struct inode_operations erofs_dir_iops;

int erofs_namei(struct inode *dir, struct qstr *name,
		erofs_nid_t *nid, unsigned int *d_type);

/* dir.c */
extern const struct file_operations erofs_dir_fops;

/* utils.c / zdata.c */
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);

#if (EROFS_PCPUBUF_NR_PAGES > 0)
void *erofs_get_pcpubuf(unsigned int pagenr);
#define erofs_put_pcpubuf(buf) do {	\
	(void)&(buf);			\
	preempt_enable();		\
} while (0)
#else
static inline void *erofs_get_pcpubuf(unsigned int pagenr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

#define erofs_put_pcpubuf(buf) do {} while (0)
#endif
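/*
 * A hedged usage sketch (hypothetical caller): judging by the matching
 * erofs_put_pcpubuf() above, a per-CPU buffer page is borrowed with
 * preemption disabled, so it must be used briefly and returned promptly.
 * The ERR_PTR check covers configurations without per-CPU buffers.
 */
static inline void erofs_example_use_pcpubuf(const void *src, size_t len)
{
	void *dst = erofs_get_pcpubuf(0);	/* pagenr 0 as an example */

	if (IS_ERR(dst))
		return;		/* no per-CPU buffers in this config */

	if (len <= PAGE_SIZE)
		memcpy(dst, src, len);	/* scratch use while pinned */
	erofs_put_pcpubuf(dst);		/* re-enables preemption */
}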
#ifdef CONFIG_EROFS_FS_ZIP
int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index, bool *tag);
int erofs_register_workgroup(struct super_block *sb,
			     struct erofs_workgroup *grp, bool tag);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp);
int erofs_try_to_free_cached_page(struct address_space *mapping,
				  struct page *page);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
#endif	/* !CONFIG_EROFS_FS_ZIP */

#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif	/* __EROFS_INTERNAL_H */