/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#ifndef __EROFS_INTERNAL_H
#define __EROFS_INTERNAL_H

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iomap.h>
#include "erofs_fs.h"

/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

__printf(3, 4) void _erofs_err(struct super_block *sb,
			       const char *function, const char *fmt, ...);
#define erofs_err(sb, fmt, ...)	\
	_erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__)
__printf(3, 4) void _erofs_info(struct super_block *sb,
			       const char *function, const char *fmt, ...);
#define erofs_info(sb, fmt, ...) \
	_erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define erofs_dbg(x, ...)       pr_debug(x "\n", ##__VA_ARGS__)
#define DBG_BUGON               BUG_ON
#else
#define erofs_dbg(x, ...)       ((void)0)
#define DBG_BUGON(x)            ((void)(x))
#endif	/* !CONFIG_EROFS_FS_DEBUG */
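
/*
 * Illustrative usage of the logging helpers above (a sketch, not code from
 * this file): the wrappers prepend the calling function name and append a
 * trailing newline, so callers pass a plain format string, e.g.
 *
 *	erofs_err(sb, "invalid nid %llu", nid);
 *	erofs_info(sb, "mounted with opts: %s", opts);
 *
 * DBG_BUGON(cond) is a real BUG_ON() only under CONFIG_EROFS_FS_DEBUG;
 * in production builds it merely evaluates (and discards) the condition.
 */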

/* EROFS_SUPER_MAGIC_V1 is used to represent the whole filesystem */
#define EROFS_SUPER_MAGIC   EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;
typedef u64 erofs_off_t;
/* data type for filesystem-wide block numbers */
typedef u32 erofs_blk_t;

struct erofs_device_info {
	char *path;
	struct block_device *bdev;
	struct dax_device *dax_dev;
	u64 dax_part_off;

	u32 blocks;
	u32 mapped_blkaddr;
};

enum {
	EROFS_SYNC_DECOMPRESS_AUTO,
	EROFS_SYNC_DECOMPRESS_FORCE_ON,
	EROFS_SYNC_DECOMPRESS_FORCE_OFF
};

struct erofs_mount_opts {
#ifdef CONFIG_EROFS_FS_ZIP
	/* current strategy of how to use managed cache */
	unsigned char cache_strategy;
	/* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
	unsigned int sync_decompress;

	/* threshold for synchronous decompression */
	unsigned int max_sync_decompress_pages;
#endif
	unsigned int mount_opt;
};

struct erofs_dev_context {
	struct idr tree;
	struct rw_semaphore rwsem;

	unsigned int extra_devices;
};

struct erofs_fs_context {
	struct erofs_mount_opts opt;
	struct erofs_dev_context *devs;
};

/* all filesystem-wide lz4 configurations */
struct erofs_sb_lz4_info {
	/* # of pages needed for EROFS lz4 rolling decompression */
	u16 max_distance_pages;
	/* maximum possible blocks for pclusters in the filesystem */
	u16 max_pclusterblks;
};

struct erofs_sb_info {
	struct erofs_mount_opts opt;	/* options */
#ifdef CONFIG_EROFS_FS_ZIP
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	/* managed XArray, indexed by physical block number */
	struct xarray managed_pslots;

	unsigned int shrinker_run_no;
	u16 available_compr_algs;

	/* pseudo inode to manage cached pages */
	struct inode *managed_cache;

	struct erofs_sb_lz4_info lz4;
#endif	/* CONFIG_EROFS_FS_ZIP */
	struct erofs_dev_context *devs;
	struct dax_device *dax_dev;
	u64 dax_part_off;
	u64 total_blocks;
	u32 primarydevice_blocks;

	u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
#endif
	u16 device_id_mask;	/* valid bits of device id to be used */

	/* inode slot unit size in bit shift */
	unsigned char islotbits;

	u32 sb_size;			/* total superblock size */
	u32 build_time_nsec;
	u64 build_time;

	/* what we really care about is nid, rather than ino */
	erofs_nid_t root_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u8 uuid[16];                    /* 128-bit uuid for volume */
	u8 volume_name[16];             /* volume name */
	u32 feature_compat;
	u32 feature_incompat;

	/* sysfs support */
	struct kobject s_kobj;		/* /sys/fs/erofs/<devname> */
	struct completion s_kobj_unregister;
};

#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_DAX_ALWAYS		0x00000040
#define EROFS_MOUNT_DAX_NEVER		0x00000080

#define clear_opt(opt, option)	((opt)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(opt, option)	((opt)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(opt, option)	((opt)->mount_opt & EROFS_MOUNT_##option)
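
/*
 * Illustrative expansion (a sketch): the helpers above token-paste the
 * option name, so, e.g.,
 *
 *	set_opt(&sbi->opt, POSIX_ACL);
 *	if (test_opt(&sbi->opt, POSIX_ACL))
 *		...
 *
 * expands to plain bit operations on sbi->opt.mount_opt against
 * EROFS_MOUNT_POSIX_ACL.
 */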

enum {
	EROFS_ZIP_CACHE_DISABLED,
	EROFS_ZIP_CACHE_READAHEAD,
	EROFS_ZIP_CACHE_READAROUND
};

#ifdef CONFIG_EROFS_FS_ZIP
#define EROFS_LOCKED_MAGIC     (INT_MIN | 0xE0F510CCL)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
	/* the workgroup index in the workstation */
	pgoff_t index;

	/* overall workgroup reference count */
	atomic_t refcount;
};
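
/*
 * A note on the freezing protocol below (added commentary): the refcount
 * doubles as a lightweight lock. A freezer atomically swaps the expected
 * refcount for EROFS_LOCKED_MAGIC; since live refcounts are non-negative,
 * the negative magic value can never collide with a real count. Waiters
 * spin (or, on UP, rely on disabled preemption) until the magic value is
 * replaced by the original refcount in erofs_workgroup_unfreeze().
 */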

#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	/*
	 * other observers should notice all modifications
	 * in the freezing period.
	 */
	smp_mb();
	atomic_set(&grp->refcount, orig_val);
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	return atomic_cond_read_relaxed(&grp->refcount,
					VAL != EROFS_LOCKED_MAGIC);
}
#else
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	/* no need to spin on UP platforms, let's just disable preemption. */
	if (val != atomic_read(&grp->refcount)) {
		preempt_enable();
		return false;
	}
	return true;
}

static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	preempt_enable();
}

static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{
	int v = atomic_read(&grp->refcount);

	/* workgroups are never frozen on uniprocessor systems */
	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
	return v;
}
#endif	/* !CONFIG_SMP */
#endif	/* !CONFIG_EROFS_FS_ZIP */

/* we strictly follow PAGE_SIZE and have no buffer heads yet */
#define LOG_BLOCK_SIZE		PAGE_SHIFT

#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK	(PAGE_SHIFT - 9)

#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)

#define EROFS_BLKSIZ		(1 << LOG_BLOCK_SIZE)

#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used on this platform
#endif

enum erofs_kmap_type {
	EROFS_NO_KMAP,		/* don't map the buffer */
	EROFS_KMAP,		/* use kmap() to map the buffer */
	EROFS_KMAP_ATOMIC,	/* use kmap_atomic() to map the buffer */
};

struct erofs_buf {
	struct page *page;
	void *base;
	enum erofs_kmap_type kmap_type;
};
#define __EROFS_BUF_INITIALIZER	((struct erofs_buf){ .page = NULL })
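
/*
 * Typical metadata-buffer usage (a sketch, built from erofs_read_metabuf()
 * and erofs_put_metabuf() declared later in this header):
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr;
 *
 *	ptr = erofs_read_metabuf(&buf, sb, blkaddr, EROFS_KMAP);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...parse on-disk metadata via ptr...
 *	erofs_put_metabuf(&buf);
 */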

#define ROOT_NID(sb)		((sb)->root_nid)

#define erofs_blknr(addr)       ((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr)      ((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr)       ((erofs_off_t)(nr) * EROFS_BLKSIZ)

static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
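
/*
 * Worked example (illustrative numbers): with 4KiB blocks, 32-byte inode
 * slots (islotbits == 5) and meta_blkaddr == 2, nid 100 lands at byte
 * offset 2 * 4096 + (100 << 5) == 11392, i.e. erofs_blknr() == 2 and
 * erofs_blkoff() == 3200.
 */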

#define EROFS_FEATURE_FUNCS(name, compat, feature) \
static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
{ \
	return sbi->feature_##compat & EROFS_FEATURE_##feature; \
}

EROFS_FEATURE_FUNCS(zero_padding, incompat, INCOMPAT_ZERO_PADDING)
EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS)
EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER)
EROFS_FEATURE_FUNCS(chunked_file, incompat, INCOMPAT_CHUNKED_FILE)
EROFS_FEATURE_FUNCS(device_table, incompat, INCOMPAT_DEVICE_TABLE)
EROFS_FEATURE_FUNCS(compr_head2, incompat, INCOMPAT_COMPR_HEAD2)
EROFS_FEATURE_FUNCS(ztailpacking, incompat, INCOMPAT_ZTAILPACKING)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
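
/*
 * For reference, the first instantiation above expands to (roughly):
 *
 *	static inline bool erofs_sb_has_zero_padding(struct erofs_sb_info *sbi)
 *	{
 *		return sbi->feature_incompat & EROFS_FEATURE_INCOMPAT_ZERO_PADDING;
 *	}
 *
 * so feature checks read as erofs_sb_has_big_pcluster(sbi) and so on.
 */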

/* atomic flag definitions */
#define EROFS_I_EA_INITED_BIT	0
#define EROFS_I_Z_INITED_BIT	1

/* bitlock definitions (arranged in reverse order) */
#define EROFS_I_BL_XATTR_BIT	(BITS_PER_LONG - 1)
#define EROFS_I_BL_Z_BIT	(BITS_PER_LONG - 2)

struct erofs_inode {
	erofs_nid_t nid;

	/* atomic flags (including bitlocks) */
	unsigned long flags;

	unsigned char datalayout;
	unsigned char inode_isize;
	unsigned short xattr_isize;

	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	union {
		erofs_blk_t raw_blkaddr;
		struct {
			unsigned short	chunkformat;
			unsigned char	chunkbits;
		};
#ifdef CONFIG_EROFS_FS_ZIP
		struct {
			unsigned short z_advise;
			unsigned char  z_algorithmtype[2];
			unsigned char  z_logical_clusterbits;
			unsigned long  z_tailextent_headlcn;
			unsigned int   z_idataoff;
			unsigned short z_idata_size;
		};
#endif	/* CONFIG_EROFS_FS_ZIP */
	};
	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_I(ptr)	\
	container_of(ptr, struct erofs_inode, vfs_inode)

static inline unsigned long erofs_inode_datablocks(struct inode *inode)
{
	/* since i_size cannot be changed */
	return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}

static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,
					  unsigned int bits)
{
	return (value >> bit) & ((1 << bits) - 1);
}

static inline unsigned int erofs_inode_version(unsigned int value)
{
	return erofs_bitrange(value, EROFS_I_VERSION_BIT,
			      EROFS_I_VERSION_BITS);
}

static inline unsigned int erofs_inode_datalayout(unsigned int value)
{
	return erofs_bitrange(value, EROFS_I_DATALAYOUT_BIT,
			      EROFS_I_DATALAYOUT_BITS);
}
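
/*
 * Illustrative use (a sketch; "dic" stands for a pointer to the on-disk
 * compact inode read from the metadata area):
 *
 *	unsigned int ifmt = le16_to_cpu(dic->i_format);
 *	unsigned int vers = erofs_inode_version(ifmt);
 *	unsigned int layout = erofs_inode_datalayout(ifmt);
 *
 * Both helpers just slice the bitfields defined in erofs_fs.h out of the
 * on-disk i_format word.
 */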

/*
 * Different from grab_cache_page_nowait(), reclaiming is never triggered
 * when allocating new pages.
 */
static inline
struct page *erofs_grab_cache_page_nowait(struct address_space *mapping,
					  pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			readahead_gfp_mask(mapping) & ~__GFP_RECLAIM);
}

extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_raw_access_aops;
extern const struct address_space_operations z_erofs_aops;

/*
 * Logical to physical block mapping
 *
 * Different from other file systems, it is used for 2 access modes:
 *
 * 1) RAW access mode:
 *
 * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
 * and get the valid m_pblk, m_pofs and the longest m_len (in bytes).
 *
 * Note that m_lblk in the RAW access mode refers to the number of
 * the compressed on-disk block rather than the uncompressed
 * in-memory block for the compressed file.
 *
 * m_pofs equals m_lofs except for the inline data page.
 *
 * 2) Normal access mode:
 *
 * If the inode is not compressed, there is no difference from
 * the RAW access mode. However, if the inode is compressed,
 * users should pass a valid (m_lblk, m_lofs) pair, and get
 * the needed m_pblk, m_pofs, m_len to get the compressed data
 * and the updated m_lblk, m_lofs which indicates the start
 * of the corresponding uncompressed data in the file.
 */
enum {
	BH_Encoded = BH_PrivateStart,
	BH_FullMapped,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED	(1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		(1 << BH_Meta)
/* The extent is encoded */
#define EROFS_MAP_ENCODED	(1 << BH_Encoded)
/* The length of the extent is full */
#define EROFS_MAP_FULL_MAPPED	(1 << BH_FullMapped)

struct erofs_map_blocks {
	struct erofs_buf buf;

	erofs_off_t m_pa, m_la;
	u64 m_plen, m_llen;

	unsigned short m_deviceid;
	char m_algorithmformat;
	unsigned int m_flags;
};

/* Flags used by erofs_map_blocks_flatmode() */
#define EROFS_GET_BLOCKS_RAW    0x0001
/*
 * Used to get the exact decompressed length, e.g. fiemap (consider the
 * lookback approach instead if possible, since it is lighter on metadata.)
 */
#define EROFS_GET_BLOCKS_FIEMAP	0x0002
/* Used to map the whole extent if non-negligible data is requested for LZMA */
#define EROFS_GET_BLOCKS_READMORE	0x0004
/* Used to map tail extent for tailpacking inline pcluster */
#define EROFS_GET_BLOCKS_FINDTAIL	0x0008

enum {
	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
	Z_EROFS_COMPRESSION_RUNTIME_MAX
};

/* zmap.c */
extern const struct iomap_ops z_erofs_iomap_report_ops;

#ifdef CONFIG_EROFS_FS_ZIP
int z_erofs_fill_inode(struct inode *inode);
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags);
#else
static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; }
static inline int z_erofs_map_blocks_iter(struct inode *inode,
					  struct erofs_map_blocks *map,
					  int flags)
{
	return -EOPNOTSUPP;
}
#endif	/* !CONFIG_EROFS_FS_ZIP */
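
/*
 * Illustrative mapping call (a sketch): to resolve the extent covering a
 * logical offset, callers fill m_la and invoke the mapper, e.g.
 *
 *	struct erofs_map_blocks map = { .buf = __EROFS_BUF_INITIALIZER };
 *
 *	map.m_la = pos;
 *	err = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
 *
 * On success, (m_pa, m_plen) describe the physical (possibly compressed)
 * extent and (m_la, m_llen) the logical one; m_flags carries the
 * EROFS_MAP_* bits above.
 */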

struct erofs_map_dev {
	struct block_device *m_bdev;
	struct dax_device *m_daxdev;
	u64 m_dax_part_off;

	erofs_off_t m_pa;
	unsigned int m_deviceid;
};

/* data.c */
extern const struct file_operations erofs_file_fops;
void erofs_unmap_metabuf(struct erofs_buf *buf);
void erofs_put_metabuf(struct erofs_buf *buf);
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type);
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len);

/* inode.c */
static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
{
#if BITS_PER_LONG == 32
	return (nid >> 32) ^ (nid & 0xffffffff);
#else
	return nid;
#endif
}

extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;

struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags);

/* namei.c */
extern const struct inode_operations erofs_dir_iops;

int erofs_namei(struct inode *dir, struct qstr *name,
		erofs_nid_t *nid, unsigned int *d_type);

/* dir.c */
extern const struct file_operations erofs_dir_fops;

static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
{
	int retried = 0;

	while (1) {
		void *p = vm_map_ram(pages, count, -1);

		/* retry two more times (3 times in total) */
		if (p || ++retried >= 3)
			return p;
		vm_unmap_aliases();
	}
}

/* pcpubuf.c */
void *erofs_get_pcpubuf(unsigned int requiredpages);
void erofs_put_pcpubuf(void *ptr);
int erofs_pcpubuf_growsize(unsigned int nrpages);
void erofs_pcpubuf_init(void);
void erofs_pcpubuf_exit(void);

/* sysfs.c */
int erofs_register_sysfs(struct super_block *sb);
void erofs_unregister_sysfs(struct super_block *sb);
int __init erofs_init_sysfs(void);
void erofs_exit_sysfs(void);

/* utils.c / zdata.c */
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
static inline void erofs_pagepool_add(struct page **pagepool,
		struct page *page)
{
	set_page_private(page, (unsigned long)*pagepool);
	*pagepool = page;
}
void erofs_release_pages(struct page **pagepool);
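
/*
 * The page pool is a singly linked list threaded through page_private():
 * erofs_pagepool_add() pushes a page, erofs_allocpage() pops one (falling
 * back to a fresh allocation when the pool is empty), and
 * erofs_release_pages() frees whatever remains. A sketch:
 *
 *	struct page *pagepool = NULL, *page;
 *
 *	page = erofs_allocpage(&pagepool, GFP_KERNEL);
 *	...use the page, then either recycle it...
 *	erofs_pagepool_add(&pagepool, page);
 *	...and finally drop the whole pool:
 *	erofs_release_pages(&pagepool);
 */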

#ifdef CONFIG_EROFS_FS_ZIP
int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index);
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
					       struct erofs_workgroup *grp);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
				       struct erofs_workgroup *egrp);
int erofs_try_to_free_cached_page(struct page *page);
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int len);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
static inline int z_erofs_load_lz4_config(struct super_block *sb,
				  struct erofs_super_block *dsb,
				  struct z_erofs_lz4_cfgs *lz4, int len)
{
	if (lz4 || dsb->u1.lz4_max_distance) {
		erofs_err(sb, "lz4 algorithm isn't enabled");
		return -EINVAL;
	}
	return 0;
}
#endif	/* !CONFIG_EROFS_FS_ZIP */

#ifdef CONFIG_EROFS_FS_ZIP_LZMA
int z_erofs_lzma_init(void);
void z_erofs_lzma_exit(void);
int z_erofs_load_lzma_config(struct super_block *sb,
			     struct erofs_super_block *dsb,
			     struct z_erofs_lzma_cfgs *lzma, int size);
#else
static inline int z_erofs_lzma_init(void) { return 0; }
static inline void z_erofs_lzma_exit(void) {}
static inline int z_erofs_load_lzma_config(struct super_block *sb,
			     struct erofs_super_block *dsb,
			     struct z_erofs_lzma_cfgs *lzma, int size)
{
	if (lzma) {
		erofs_err(sb, "lzma algorithm isn't enabled");
		return -EINVAL;
	}
	return 0;
}
#endif	/* !CONFIG_EROFS_FS_ZIP_LZMA */

#define EFSCORRUPTED    EUCLEAN         /* Filesystem is corrupted */

#endif	/* __EROFS_INTERNAL_H */