xref: /openbmc/linux/fs/btrfs/volumes.h (revision 1cd6121f)
19888c340SDavid Sterba /* SPDX-License-Identifier: GPL-2.0 */
20b86a832SChris Mason /*
30b86a832SChris Mason  * Copyright (C) 2007 Oracle.  All rights reserved.
40b86a832SChris Mason  */
50b86a832SChris Mason 
69888c340SDavid Sterba #ifndef BTRFS_VOLUMES_H
79888c340SDavid Sterba #define BTRFS_VOLUMES_H
88790d502SChris Mason 
9cea9e445SChris Mason #include <linux/bio.h>
10b2117a39SMiao Xie #include <linux/sort.h>
1155e301fdSFilipe Brandenburger #include <linux/btrfs.h>
128b712842SChris Mason #include "async-thread.h"
13cea9e445SChris Mason 
14fce466eaSQu Wenruo #define BTRFS_MAX_DATA_CHUNK_SIZE	(10ULL * SZ_1G)
15fce466eaSQu Wenruo 
1667a2c45eSMiao Xie extern struct mutex uuid_mutex;
1767a2c45eSMiao Xie 
18ee22184bSByongho Lee #define BTRFS_STRIPE_LEN	SZ_64K
19b2117a39SMiao Xie 
205f141126SNikolay Borisov struct btrfs_io_geometry {
215f141126SNikolay Borisov 	/* remaining bytes before crossing a stripe */
225f141126SNikolay Borisov 	u64 len;
235f141126SNikolay Borisov 	/* offset of logical address in chunk */
245f141126SNikolay Borisov 	u64 offset;
255f141126SNikolay Borisov 	/* length of single IO stripe */
265f141126SNikolay Borisov 	u64 stripe_len;
285f141126SNikolay Borisov 	/* number of the stripe where the address falls */
285f141126SNikolay Borisov 	u64 stripe_nr;
295f141126SNikolay Borisov 	/* offset of address in stripe */
305f141126SNikolay Borisov 	u64 stripe_offset;
315f141126SNikolay Borisov 	/* offset of raid56 stripe into the chunk */
325f141126SNikolay Borisov 	u64 raid56_stripe_offset;
335f141126SNikolay Borisov };
345f141126SNikolay Borisov 
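/*
 * Illustrative sketch only (simplified, non-RAID56 case): for a logical
 * address inside a chunk that starts at chunk_start, the fields above
 * roughly relate as
 *
 *	offset        = logical - chunk_start;
 *	stripe_nr     = offset / stripe_len;
 *	stripe_offset = offset - stripe_nr * stripe_len;
 *	len           = min(requested_len, stripe_len - stripe_offset);
 *
 * i.e. 'len' is capped so that a single IO never crosses a stripe
 * boundary.  The RAID56 case additionally tracks raid56_stripe_offset.
 */
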
357cc8e58dSMiao Xie /*
367cc8e58dSMiao Xie  * Use sequence counter to get consistent device stat data on
377cc8e58dSMiao Xie  * 32-bit processors.
387cc8e58dSMiao Xie  */
397cc8e58dSMiao Xie #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
407cc8e58dSMiao Xie #include <linux/seqlock.h>
417cc8e58dSMiao Xie #define __BTRFS_NEED_DEVICE_DATA_ORDERED
42c41ec452SSu Yue #define btrfs_device_data_ordered_init(device)	\
43c41ec452SSu Yue 	seqcount_init(&device->data_seqcount)
447cc8e58dSMiao Xie #else
45c41ec452SSu Yue #define btrfs_device_data_ordered_init(device) do { } while (0)
467cc8e58dSMiao Xie #endif
477cc8e58dSMiao Xie 
48ebbede42SAnand Jain #define BTRFS_DEV_STATE_WRITEABLE	(0)
49e12c9621SAnand Jain #define BTRFS_DEV_STATE_IN_FS_METADATA	(1)
50e6e674bdSAnand Jain #define BTRFS_DEV_STATE_MISSING		(2)
51401e29c1SAnand Jain #define BTRFS_DEV_STATE_REPLACE_TGT	(3)
521c3063b6SAnand Jain #define BTRFS_DEV_STATE_FLUSH_SENT	(4)
5366d204a1SFilipe Manana #define BTRFS_DEV_STATE_NO_READA	(5)
54ebbede42SAnand Jain 
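/*
 * These values are bit numbers for the device->dev_state bitmap, not
 * masks.  A minimal usage sketch (assuming a valid struct btrfs_device
 * pointer 'device'):
 *
 *	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
 *	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
 *		queue_write(device);	// 'queue_write' is only a placeholder
 */
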
555b316468SNaohiro Aota struct btrfs_zoned_device_info;
565b316468SNaohiro Aota 
570b86a832SChris Mason struct btrfs_device {
580b6f5d40SNikolay Borisov 	struct list_head dev_list; /* device_list_mutex */
590b6f5d40SNikolay Borisov 	struct list_head dev_alloc_list; /* chunk mutex */
60bbbf7243SNikolay Borisov 	struct list_head post_commit_list; /* chunk mutex */
612b82032cSYan Zheng 	struct btrfs_fs_devices *fs_devices;
62fb456252SJeff Mahoney 	struct btrfs_fs_info *fs_info;
63ffbd517dSChris Mason 
648d1a7aaeSMadhuparna Bhowmik 	struct rcu_string __rcu *name;
65d5ee37bcSMiao Xie 
66d5ee37bcSMiao Xie 	u64 generation;
67d5ee37bcSMiao Xie 
68d5ee37bcSMiao Xie 	struct block_device *bdev;
69d5ee37bcSMiao Xie 
705b316468SNaohiro Aota 	struct btrfs_zoned_device_info *zone_info;
715b316468SNaohiro Aota 
72d5ee37bcSMiao Xie 	/* the mode sent to blkdev_get */
73d5ee37bcSMiao Xie 	fmode_t mode;
74d5ee37bcSMiao Xie 
75ebbede42SAnand Jain 	unsigned long dev_state;
7658efbc9fSOmar Sandoval 	blk_status_t last_flush_error;
77b3075717SChris Mason 
787cc8e58dSMiao Xie #ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
79c41ec452SSu Yue 	seqcount_t data_seqcount;
807cc8e58dSMiao Xie #endif
817cc8e58dSMiao Xie 
820b86a832SChris Mason 	/* the internal btrfs device id */
830b86a832SChris Mason 	u64 devid;
840b86a832SChris Mason 
856ba40b61SMiao Xie 	/* size of the device in memory */
860b86a832SChris Mason 	u64 total_bytes;
870b86a832SChris Mason 
886ba40b61SMiao Xie 	/* size of the device on disk */
89d6397baeSChris Ball 	u64 disk_total_bytes;
90d6397baeSChris Ball 
910b86a832SChris Mason 	/* bytes used */
920b86a832SChris Mason 	u64 bytes_used;
930b86a832SChris Mason 
940b86a832SChris Mason 	/* optimal io alignment for this device */
950b86a832SChris Mason 	u32 io_align;
960b86a832SChris Mason 
970b86a832SChris Mason 	/* optimal io width for this device */
980b86a832SChris Mason 	u32 io_width;
993c45bfc1SDulshani Gunawardhana 	/* type and info about this device */
1003c45bfc1SDulshani Gunawardhana 	u64 type;
1010b86a832SChris Mason 
1020b86a832SChris Mason 	/* minimal io size for this device */
1030b86a832SChris Mason 	u32 sector_size;
1040b86a832SChris Mason 
1050b86a832SChris Mason 	/* physical drive uuid (or lvm uuid) */
106e17cade2SChris Mason 	u8 uuid[BTRFS_UUID_SIZE];
1078b712842SChris Mason 
108935e5cc9SMiao Xie 	/*
109935e5cc9SMiao Xie 	 * size of the device in the current transaction
110935e5cc9SMiao Xie 	 *
111935e5cc9SMiao Xie 	 * This value is updated when committing the transaction and is
112bbbf7243SNikolay Borisov 	 * protected by the chunk mutex.
113935e5cc9SMiao Xie 	 */
114935e5cc9SMiao Xie 	u64 commit_total_bytes;
115935e5cc9SMiao Xie 
116ce7213c7SMiao Xie 	/* bytes used in the current transaction */
117ce7213c7SMiao Xie 	u64 commit_bytes_used;
118935e5cc9SMiao Xie 
1193c45bfc1SDulshani Gunawardhana 	/* for sending down flush barriers */
1203c45bfc1SDulshani Gunawardhana 	struct bio *flush_bio;
1213c45bfc1SDulshani Gunawardhana 	struct completion flush_wait;
1223c45bfc1SDulshani Gunawardhana 
123a2de733cSArne Jansen 	/* per-device scrub information */
124cadbc0a0SAnand Jain 	struct scrub_ctx *scrub_ctx;
125a2de733cSArne Jansen 
12690519d66SArne Jansen 	/* readahead state */
12790519d66SArne Jansen 	atomic_t reada_in_flight;
12890519d66SArne Jansen 	u64 reada_next;
12990519d66SArne Jansen 	struct reada_zone *reada_curr_zone;
13090519d66SArne Jansen 	struct radix_tree_root reada_zones;
13190519d66SArne Jansen 	struct radix_tree_root reada_extents;
132387125fcSChris Mason 
133442a4f63SStefan Behrens 	/* disk I/O failure stats. For a detailed description, refer to
134442a4f63SStefan Behrens 	 * enum btrfs_dev_stat_values in ioctl.h */
135733f4fbbSStefan Behrens 	int dev_stats_valid;
136addc3fa7SMiao Xie 
137addc3fa7SMiao Xie 	/* Counter to record the change of device stats */
138addc3fa7SMiao Xie 	atomic_t dev_stats_ccnt;
139442a4f63SStefan Behrens 	atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
1401c11b63eSJeff Mahoney 
1411c11b63eSJeff Mahoney 	struct extent_io_tree alloc_state;
142668e48afSAnand Jain 
143668e48afSAnand Jain 	struct completion kobj_unregister;
144668e48afSAnand Jain 	/* For sysfs/FSID/devinfo/devid/ */
145668e48afSAnand Jain 	struct kobject devid_kobj;
1460b86a832SChris Mason };
1470b86a832SChris Mason 
1487cc8e58dSMiao Xie /*
1487cc8e58dSMiao Xie  * If we read these values while holding the lock that protects them, we
1497cc8e58dSMiao Xie  * don't need the following helpers; reading them directly is safe.
1517cc8e58dSMiao Xie  */
1527cc8e58dSMiao Xie #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
1537cc8e58dSMiao Xie #define BTRFS_DEVICE_GETSET_FUNCS(name)					\
1547cc8e58dSMiao Xie static inline u64							\
1557cc8e58dSMiao Xie btrfs_device_get_##name(const struct btrfs_device *dev)			\
1567cc8e58dSMiao Xie {									\
1577cc8e58dSMiao Xie 	u64 size;							\
1587cc8e58dSMiao Xie 	unsigned int seq;						\
1597cc8e58dSMiao Xie 									\
1607cc8e58dSMiao Xie 	do {								\
1617cc8e58dSMiao Xie 		seq = read_seqcount_begin(&dev->data_seqcount);		\
1627cc8e58dSMiao Xie 		size = dev->name;					\
1637cc8e58dSMiao Xie 	} while (read_seqcount_retry(&dev->data_seqcount, seq));	\
1647cc8e58dSMiao Xie 	return size;							\
1657cc8e58dSMiao Xie }									\
1667cc8e58dSMiao Xie 									\
1677cc8e58dSMiao Xie static inline void							\
1687cc8e58dSMiao Xie btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
1697cc8e58dSMiao Xie {									\
170c41ec452SSu Yue 	preempt_disable();						\
1717cc8e58dSMiao Xie 	write_seqcount_begin(&dev->data_seqcount);			\
1727cc8e58dSMiao Xie 	dev->name = size;						\
1737cc8e58dSMiao Xie 	write_seqcount_end(&dev->data_seqcount);			\
174c41ec452SSu Yue 	preempt_enable();						\
1757cc8e58dSMiao Xie }
17694545870SThomas Gleixner #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
1777cc8e58dSMiao Xie #define BTRFS_DEVICE_GETSET_FUNCS(name)					\
1787cc8e58dSMiao Xie static inline u64							\
1797cc8e58dSMiao Xie btrfs_device_get_##name(const struct btrfs_device *dev)			\
1807cc8e58dSMiao Xie {									\
1817cc8e58dSMiao Xie 	u64 size;							\
1827cc8e58dSMiao Xie 									\
1837cc8e58dSMiao Xie 	preempt_disable();						\
1847cc8e58dSMiao Xie 	size = dev->name;						\
1857cc8e58dSMiao Xie 	preempt_enable();						\
1867cc8e58dSMiao Xie 	return size;							\
1877cc8e58dSMiao Xie }									\
1887cc8e58dSMiao Xie 									\
1897cc8e58dSMiao Xie static inline void							\
1907cc8e58dSMiao Xie btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
1917cc8e58dSMiao Xie {									\
1927cc8e58dSMiao Xie 	preempt_disable();						\
1937cc8e58dSMiao Xie 	dev->name = size;						\
1947cc8e58dSMiao Xie 	preempt_enable();						\
1957cc8e58dSMiao Xie }
1967cc8e58dSMiao Xie #else
1977cc8e58dSMiao Xie #define BTRFS_DEVICE_GETSET_FUNCS(name)					\
1987cc8e58dSMiao Xie static inline u64							\
1997cc8e58dSMiao Xie btrfs_device_get_##name(const struct btrfs_device *dev)			\
2007cc8e58dSMiao Xie {									\
2017cc8e58dSMiao Xie 	return dev->name;						\
2027cc8e58dSMiao Xie }									\
2037cc8e58dSMiao Xie 									\
2047cc8e58dSMiao Xie static inline void							\
2057cc8e58dSMiao Xie btrfs_device_set_##name(struct btrfs_device *dev, u64 size)		\
2067cc8e58dSMiao Xie {									\
2077cc8e58dSMiao Xie 	dev->name = size;						\
2087cc8e58dSMiao Xie }
2097cc8e58dSMiao Xie #endif
2107cc8e58dSMiao Xie 
2117cc8e58dSMiao Xie BTRFS_DEVICE_GETSET_FUNCS(total_bytes);
2127cc8e58dSMiao Xie BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes);
2137cc8e58dSMiao Xie BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
2147cc8e58dSMiao Xie 
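/*
 * Usage sketch: the generated helpers give a consistent view of the size
 * fields even on 32-bit SMP, e.g.
 *
 *	u64 total = btrfs_device_get_total_bytes(device);
 *
 *	btrfs_device_set_bytes_used(device, used);
 *
 * where 'device' and 'used' are placeholders for the caller's values.
 */
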
215c4a816c6SNaohiro Aota enum btrfs_chunk_allocation_policy {
216c4a816c6SNaohiro Aota 	BTRFS_CHUNK_ALLOC_REGULAR,
217*1cd6121fSNaohiro Aota 	BTRFS_CHUNK_ALLOC_ZONED,
218c4a816c6SNaohiro Aota };
219c4a816c6SNaohiro Aota 
22033fd2f71SAnand Jain /*
22133fd2f71SAnand Jain  * Read policies for mirrored block group profiles; a read picks the stripe
22233fd2f71SAnand Jain  * based on these policies.
22333fd2f71SAnand Jain  */
22433fd2f71SAnand Jain enum btrfs_read_policy {
22533fd2f71SAnand Jain 	/* Use process PID to choose the stripe */
22633fd2f71SAnand Jain 	BTRFS_READ_POLICY_PID,
22733fd2f71SAnand Jain 	BTRFS_NR_READ_POLICY,
22833fd2f71SAnand Jain };
22933fd2f71SAnand Jain 
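/*
 * With BTRFS_READ_POLICY_PID the mirror is derived from the submitting
 * task's PID, roughly (sketch only, the details live in the chunk mapping
 * code):
 *
 *	stripe_index = first_stripe + current->pid % num_mirrors;
 */
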
2308a4b83ccSChris Mason struct btrfs_fs_devices {
2318a4b83ccSChris Mason 	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
2327239ff4bSNikolay Borisov 	u8 metadata_uuid[BTRFS_FSID_SIZE];
233d1a63002SNikolay Borisov 	bool fsid_change;
234c4babc5eSAnand Jain 	struct list_head fs_list;
2358a4b83ccSChris Mason 
2368a4b83ccSChris Mason 	u64 num_devices;
237a0af469bSChris Mason 	u64 open_devices;
2382b82032cSYan Zheng 	u64 rw_devices;
239cd02dca5SChris Mason 	u64 missing_devices;
2402b82032cSYan Zheng 	u64 total_rw_bytes;
24102db0844SJosef Bacik 	u64 total_devices;
242d1a63002SNikolay Borisov 
243d1a63002SNikolay Borisov 	/* Highest generation number of seen devices */
244d1a63002SNikolay Borisov 	u64 latest_generation;
245d1a63002SNikolay Borisov 
2468a4b83ccSChris Mason 	struct block_device *latest_bdev;
247e5e9a520SChris Mason 
248e5e9a520SChris Mason 	/* all of the devices in the FS, protected by a mutex
249e5e9a520SChris Mason 	 * so we can safely walk it to write out the supers without
2509b011adfSWang Shilong 	 * worrying about add/remove by the multi-device code.
2519b011adfSWang Shilong 	 * Scrubbing the super block can kick off super block writes while
2529b011adfSWang Shilong 	 * holding this mutex.
253e5e9a520SChris Mason 	 */
254e5e9a520SChris Mason 	struct mutex device_list_mutex;
2550b6f5d40SNikolay Borisov 
2560b6f5d40SNikolay Borisov 	/* List of all devices, protected by device_list_mutex */
2578a4b83ccSChris Mason 	struct list_head devices;
258b3075717SChris Mason 
2590b6f5d40SNikolay Borisov 	/*
2600b6f5d40SNikolay Borisov 	 * Devices which can satisfy space allocation. Protected by
2610b6f5d40SNikolay Borisov 	 * chunk_mutex
2620b6f5d40SNikolay Borisov 	 */
263b3075717SChris Mason 	struct list_head alloc_list;
2642b82032cSYan Zheng 
265944d3f9fSNikolay Borisov 	struct list_head seed_list;
2660395d84fSJohannes Thumshirn 	bool seeding;
2672b82032cSYan Zheng 
2682b82032cSYan Zheng 	int opened;
269c289811cSChris Mason 
270c289811cSChris Mason 	/* set when we find or add a device that doesn't have the
271c289811cSChris Mason 	 * nonrot flag set
272c289811cSChris Mason 	 */
2737f0432d0SJohannes Thumshirn 	bool rotating;
2742e7910d6SAnand Jain 
2755a13f430SAnand Jain 	struct btrfs_fs_info *fs_info;
2762e7910d6SAnand Jain 	/* sysfs kobjects */
277c1b7e474SAnand Jain 	struct kobject fsid_kobj;
278b5501504SAnand Jain 	struct kobject *devices_kobj;
279a013d141SAnand Jain 	struct kobject *devinfo_kobj;
2802e7910d6SAnand Jain 	struct completion kobj_unregister;
281c4a816c6SNaohiro Aota 
282c4a816c6SNaohiro Aota 	enum btrfs_chunk_allocation_policy chunk_alloc_policy;
28333fd2f71SAnand Jain 
28433fd2f71SAnand Jain 	/* Policy used to read the mirrored stripes */
28533fd2f71SAnand Jain 	enum btrfs_read_policy read_policy;
2868a4b83ccSChris Mason };
2878a4b83ccSChris Mason 
288facc8a22SMiao Xie #define BTRFS_BIO_INLINE_CSUM_SIZE	64
289facc8a22SMiao Xie 
290ab4ba2e1SQu Wenruo #define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
291ab4ba2e1SQu Wenruo 			- sizeof(struct btrfs_chunk))		\
292ab4ba2e1SQu Wenruo 			/ sizeof(struct btrfs_stripe) + 1)
293ab4ba2e1SQu Wenruo 
294ab4ba2e1SQu Wenruo #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
295ab4ba2e1SQu Wenruo 				- 2 * sizeof(struct btrfs_disk_key)	\
296ab4ba2e1SQu Wenruo 				- 2 * sizeof(struct btrfs_chunk))	\
297ab4ba2e1SQu Wenruo 				/ sizeof(struct btrfs_stripe) + 1)
298ab4ba2e1SQu Wenruo 
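/*
 * Both limits follow the same pattern: take the space available for the
 * chunk item, subtract the fixed header(s), divide by the per-device
 * struct btrfs_stripe, and add 1 because struct btrfs_chunk already embeds
 * its first stripe.  The system chunk variant reserves room for two disk
 * keys and two chunk headers in the superblock's system chunk array.
 */
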
2999be3395bSChris Mason /*
3009be3395bSChris Mason  * we need the mirror number and stripe index to be passed around
3019be3395bSChris Mason  * the call chain while we are processing end_io (especially errors).
3029be3395bSChris Mason  * Really, what we need is a btrfs_bio structure that has this info
3039be3395bSChris Mason  * and is properly sized with its stripe array, but we're not there
3049be3395bSChris Mason  * quite yet.  We have our own btrfs bioset, and all of the bios
3059be3395bSChris Mason  * we allocate are actually btrfs_io_bios.  We'll cram as much of
3069be3395bSChris Mason  * struct btrfs_bio as we can into this over time.
3079be3395bSChris Mason  */
3089be3395bSChris Mason struct btrfs_io_bio {
309c1dc0896SMiao Xie 	unsigned int mirror_num;
310c31efbdfSNikolay Borisov 	struct btrfs_device *device;
311c1dc0896SMiao Xie 	u64 logical;
312facc8a22SMiao Xie 	u8 *csum;
313facc8a22SMiao Xie 	u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
31417347cecSLiu Bo 	struct bvec_iter iter;
315fa1bcbe0SDavid Sterba 	/*
316fa1bcbe0SDavid Sterba 	 * This member must come last; bio_alloc_bioset() will allocate enough
317fa1bcbe0SDavid Sterba 	 * bytes for the entire btrfs_io_bio but relies on bio being last.
318fa1bcbe0SDavid Sterba 	 */
3199be3395bSChris Mason 	struct bio bio;
3209be3395bSChris Mason };
3219be3395bSChris Mason 
3229be3395bSChris Mason static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
3239be3395bSChris Mason {
3249be3395bSChris Mason 	return container_of(bio, struct btrfs_io_bio, bio);
3259be3395bSChris Mason }
3269be3395bSChris Mason 
327b3a0dd50SDavid Sterba static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
328b3a0dd50SDavid Sterba {
329b3a0dd50SDavid Sterba 	if (io_bio->csum != io_bio->csum_inline) {
330b3a0dd50SDavid Sterba 		kfree(io_bio->csum);
331b3a0dd50SDavid Sterba 		io_bio->csum = NULL;
332b3a0dd50SDavid Sterba 	}
333b3a0dd50SDavid Sterba }
334b3a0dd50SDavid Sterba 
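/*
 * Typical endio-side usage (sketch only):
 *
 *	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 *
 *	// ... use io_bio->mirror_num, io_bio->logical, io_bio->csum ...
 *	btrfs_io_bio_free_csum(io_bio);
 *
 * btrfs_io_bio_free_csum() only kfree()s the checksum buffer when it was
 * allocated separately, i.e. when it does not point at csum_inline.
 */
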
335cea9e445SChris Mason struct btrfs_bio_stripe {
336cea9e445SChris Mason 	struct btrfs_device *dev;
337cea9e445SChris Mason 	u64 physical;
338fce3bb9aSLi Dongyang 	u64 length; /* only used for discard mappings */
339cea9e445SChris Mason };
340cea9e445SChris Mason 
341a1d3c478SJan Schmidt struct btrfs_bio {
342140475aeSElena Reshetova 	refcount_t refs;
343cea9e445SChris Mason 	atomic_t stripes_pending;
344c404e0dcSMiao Xie 	struct btrfs_fs_info *fs_info;
34510f11900SZhao Lei 	u64 map_type; /* get from map_lookup->type */
346cea9e445SChris Mason 	bio_end_io_t *end_io;
3477d2b4daaSChris Mason 	struct bio *orig_bio;
348cea9e445SChris Mason 	void *private;
349a236aed1SChris Mason 	atomic_t error;
350a236aed1SChris Mason 	int max_errors;
351cea9e445SChris Mason 	int num_stripes;
352a1d3c478SJan Schmidt 	int mirror_num;
3532c8cdd6eSMiao Xie 	int num_tgtdevs;
3542c8cdd6eSMiao Xie 	int *tgtdev_map;
3558e5cfb55SZhao Lei 	/*
3568e5cfb55SZhao Lei 	 * Logical block numbers for the start of each stripe.  The last one
3578e5cfb55SZhao Lei 	 * or two are p/q.  These are sorted, so raid_map[0] is the start of
3588e5cfb55SZhao Lei 	 * our full stripe.
3598e5cfb55SZhao Lei 	 */
3608e5cfb55SZhao Lei 	u64 *raid_map;
361cea9e445SChris Mason 	struct btrfs_bio_stripe stripes[];
362cea9e445SChris Mason };
363cea9e445SChris Mason 
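/*
 * Rough life cycle (descriptive sketch): one btrfs_bio is built per mapped
 * request, stripes_pending counts the per-stripe bios still in flight, and
 * when the last one completes the stored end_io is invoked on orig_bio,
 * with 'error' compared against 'max_errors' to decide whether the IO as a
 * whole failed.
 */
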
364b2117a39SMiao Xie struct btrfs_device_info {
365b2117a39SMiao Xie 	struct btrfs_device *dev;
366b2117a39SMiao Xie 	u64 dev_offset;
367b2117a39SMiao Xie 	u64 max_avail;
36873c5de00SArne Jansen 	u64 total_avail;
369b2117a39SMiao Xie };
370b2117a39SMiao Xie 
37131e50229SLiu Bo struct btrfs_raid_attr {
3728c3e3582SDavid Sterba 	u8 sub_stripes;		/* sub_stripes info for map */
3738c3e3582SDavid Sterba 	u8 dev_stripes;		/* stripes per dev */
3748c3e3582SDavid Sterba 	u8 devs_max;		/* max devs to use */
3758c3e3582SDavid Sterba 	u8 devs_min;		/* min devs needed */
3768c3e3582SDavid Sterba 	u8 tolerated_failures;	/* max tolerated fail devs */
3778c3e3582SDavid Sterba 	u8 devs_increment;	/* ndevs has to be a multiple of this */
3788c3e3582SDavid Sterba 	u8 ncopies;		/* how many copies of the data there are */
3798c3e3582SDavid Sterba 	u8 nparity;		/* number of stripes worth of bytes to store
380b50836edSHans van Kranenburg 				 * parity information */
3818c3e3582SDavid Sterba 	u8 mindev_error;	/* error code if the minimum device requirement is not met */
382ed23467bSAnand Jain 	const char raid_name[8]; /* name of the raid */
38341a6e891SAnand Jain 	u64 bg_flag;		/* block group flag of the raid */
38431e50229SLiu Bo };
38531e50229SLiu Bo 
386af902047SZhao Lei extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES];
387af902047SZhao Lei 
3881abe9b8aSliubo struct map_lookup {
3891abe9b8aSliubo 	u64 type;
3901abe9b8aSliubo 	int io_align;
3911abe9b8aSliubo 	int io_width;
3923d8da678SLiu Bo 	u64 stripe_len;
3931abe9b8aSliubo 	int num_stripes;
3941abe9b8aSliubo 	int sub_stripes;
395cf90d884SQu Wenruo 	int verified_stripes; /* For mount time dev extent verification */
3961abe9b8aSliubo 	struct btrfs_bio_stripe stripes[];
3971abe9b8aSliubo };
3981abe9b8aSliubo 
399a2de733cSArne Jansen #define map_lookup_size(n) (sizeof(struct map_lookup) + \
400a2de733cSArne Jansen 			    (sizeof(struct btrfs_bio_stripe) * (n)))
401a2de733cSArne Jansen 
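/*
 * A map_lookup is allocated with its stripe array inline, e.g. (sketch,
 * 'num_stripes' is the caller's stripe count):
 *
 *	struct map_lookup *map;
 *
 *	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
 *	if (!map)
 *		return -ENOMEM;
 */
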
402c9e9f97bSIlya Dryomov struct btrfs_balance_args;
40319a39dceSIlya Dryomov struct btrfs_balance_progress;
404c9e9f97bSIlya Dryomov struct btrfs_balance_control {
405c9e9f97bSIlya Dryomov 	struct btrfs_balance_args data;
406c9e9f97bSIlya Dryomov 	struct btrfs_balance_args meta;
407c9e9f97bSIlya Dryomov 	struct btrfs_balance_args sys;
408c9e9f97bSIlya Dryomov 
409c9e9f97bSIlya Dryomov 	u64 flags;
41019a39dceSIlya Dryomov 
41119a39dceSIlya Dryomov 	struct btrfs_balance_progress stat;
412c9e9f97bSIlya Dryomov };
413c9e9f97bSIlya Dryomov 
414cf8cddd3SChristoph Hellwig enum btrfs_map_op {
415cf8cddd3SChristoph Hellwig 	BTRFS_MAP_READ,
416cf8cddd3SChristoph Hellwig 	BTRFS_MAP_WRITE,
417cf8cddd3SChristoph Hellwig 	BTRFS_MAP_DISCARD,
418cf8cddd3SChristoph Hellwig 	BTRFS_MAP_GET_READ_MIRRORS,
419cf8cddd3SChristoph Hellwig };
420cf8cddd3SChristoph Hellwig 
421cf8cddd3SChristoph Hellwig static inline enum btrfs_map_op btrfs_op(struct bio *bio)
422cf8cddd3SChristoph Hellwig {
423cf8cddd3SChristoph Hellwig 	switch (bio_op(bio)) {
424cf8cddd3SChristoph Hellwig 	case REQ_OP_DISCARD:
425cf8cddd3SChristoph Hellwig 		return BTRFS_MAP_DISCARD;
426cf8cddd3SChristoph Hellwig 	case REQ_OP_WRITE:
427cf8cddd3SChristoph Hellwig 		return BTRFS_MAP_WRITE;
428cf8cddd3SChristoph Hellwig 	default:
429cf8cddd3SChristoph Hellwig 		WARN_ON_ONCE(1);
430c730ae0cSMarcos Paulo de Souza 		fallthrough;
431cf8cddd3SChristoph Hellwig 	case REQ_OP_READ:
432cf8cddd3SChristoph Hellwig 		return BTRFS_MAP_READ;
433cf8cddd3SChristoph Hellwig 	}
434cf8cddd3SChristoph Hellwig }
435cf8cddd3SChristoph Hellwig 
4366e9606d2SZhao Lei void btrfs_get_bbio(struct btrfs_bio *bbio);
4376e9606d2SZhao Lei void btrfs_put_bbio(struct btrfs_bio *bbio);
438cf8cddd3SChristoph Hellwig int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
439cea9e445SChris Mason 		    u64 logical, u64 *length,
440a1d3c478SJan Schmidt 		    struct btrfs_bio **bbio_ret, int mirror_num);
441cf8cddd3SChristoph Hellwig int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
442af8e2d1dSMiao Xie 		     u64 logical, u64 *length,
443825ad4c9SDavid Sterba 		     struct btrfs_bio **bbio_ret);
44442034313SMichal Rostecki int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
44542034313SMichal Rostecki 			  enum btrfs_map_op op, u64 logical, u64 len,
44642034313SMichal Rostecki 			  struct btrfs_io_geometry *io_geom);
4476bccf3abSJeff Mahoney int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
4485b4aacefSJeff Mahoney int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
449c216b203SNikolay Borisov int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type);
450c8bf1b67SDavid Sterba void btrfs_mapping_tree_free(struct extent_map_tree *tree);
45158efbc9fSOmar Sandoval blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
45208635baeSChris Mason 			   int mirror_num);
4538a4b83ccSChris Mason int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
45497288f2cSChristoph Hellwig 		       fmode_t flags, void *holder);
45536350e95SGu Jinxiang struct btrfs_device *btrfs_scan_one_device(const char *path,
45636350e95SGu Jinxiang 					   fmode_t flags, void *holder);
457228a73abSAnand Jain int btrfs_forget_devices(const char *path);
45854eed6aeSNikolay Borisov void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
459bacce86aSAnand Jain void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
460d6507cf1SNikolay Borisov void btrfs_assign_next_active_device(struct btrfs_device *device,
461d6507cf1SNikolay Borisov 				     struct btrfs_device *this_dev);
462a27a94c2SNikolay Borisov struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
463a27a94c2SNikolay Borisov 						  u64 devid,
464a27a94c2SNikolay Borisov 						  const char *devpath);
46512bd2fc0SIlya Dryomov struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
46612bd2fc0SIlya Dryomov 					const u64 *devid,
46712bd2fc0SIlya Dryomov 					const u8 *uuid);
468a425f9d4SDavid Sterba void btrfs_free_device(struct btrfs_device *device);
4692ff7e61eSJeff Mahoney int btrfs_rm_device(struct btrfs_fs_info *fs_info,
470da353f6bSDavid Sterba 		    const char *device_path, u64 devid);
471ffc5a379SDavid Sterba void __exit btrfs_cleanup_fs_uuids(void);
4725d964051SStefan Behrens int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
4738f18cf13SChris Mason int btrfs_grow_device(struct btrfs_trans_handle *trans,
4748f18cf13SChris Mason 		      struct btrfs_device *device, u64 new_size);
475e4319cd9SAnand Jain struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
476b2598edfSAnand Jain 				       u64 devid, u8 *uuid, u8 *fsid);
4778f18cf13SChris Mason int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
478da353f6bSDavid Sterba int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
4796fcf6e2bSDavid Sterba int btrfs_balance(struct btrfs_fs_info *fs_info,
4806fcf6e2bSDavid Sterba 		  struct btrfs_balance_control *bctl,
481c9e9f97bSIlya Dryomov 		  struct btrfs_ioctl_balance_args *bargs);
482f89e09cfSAnand Jain void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
4832b6ba629SIlya Dryomov int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
48468310a5eSIlya Dryomov int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
485837d5b6eSIlya Dryomov int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
486a7e99c69SIlya Dryomov int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
487f7a81ea4SStefan Behrens int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
48897f4dd09SNikolay Borisov int btrfs_uuid_scan_kthread(void *data);
4892ff7e61eSJeff Mahoney int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
49060dfdf25SNikolay Borisov int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
491ba1bf481SJosef Bacik 			 u64 *start, u64 *max_avail);
492442a4f63SStefan Behrens void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
4932ff7e61eSJeff Mahoney int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
494b27f7c0cSDavid Sterba 			struct btrfs_ioctl_get_dev_stats *stats);
495cb517eabSMiao Xie void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
496733f4fbbSStefan Behrens int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
497196c9d8dSDavid Sterba int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);
49868a9db5fSNikolay Borisov void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
49965237ee3SDavid Sterba void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
5004f5ad7bdSNikolay Borisov void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);
501592d92eeSLiu Bo int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
502e4ff5fb5SNikolay Borisov 			   u64 logical, u64 len);
5032ff7e61eSJeff Mahoney unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
50453b381b3SDavid Woodhouse 				    u64 logical);
5056df9a95eSJosef Bacik int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5066df9a95eSJosef Bacik 			     u64 chunk_offset, u64 chunk_size);
50797aff912SNikolay Borisov int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
50860ca842eSOmar Sandoval struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
50960ca842eSOmar Sandoval 				       u64 logical, u64 length);
5108f32380dSJohannes Thumshirn void btrfs_release_disk_super(struct btrfs_super_block *super);
511addc3fa7SMiao Xie 
512442a4f63SStefan Behrens static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
513442a4f63SStefan Behrens 				      int index)
514442a4f63SStefan Behrens {
515442a4f63SStefan Behrens 	atomic_inc(dev->dev_stat_values + index);
5169deae968SNikolay Borisov 	/*
5179deae968SNikolay Borisov 	 * This memory barrier orders stores updating statistics before stores
5189deae968SNikolay Borisov 	 * updating dev_stats_ccnt.
5199deae968SNikolay Borisov 	 *
5209deae968SNikolay Borisov 	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
5219deae968SNikolay Borisov 	 */
522addc3fa7SMiao Xie 	smp_mb__before_atomic();
523addc3fa7SMiao Xie 	atomic_inc(&dev->dev_stats_ccnt);
524442a4f63SStefan Behrens }
525442a4f63SStefan Behrens 
526442a4f63SStefan Behrens static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
527442a4f63SStefan Behrens 				      int index)
528442a4f63SStefan Behrens {
529442a4f63SStefan Behrens 	return atomic_read(dev->dev_stat_values + index);
530442a4f63SStefan Behrens }
531442a4f63SStefan Behrens 
532442a4f63SStefan Behrens static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
533442a4f63SStefan Behrens 						int index)
534442a4f63SStefan Behrens {
535442a4f63SStefan Behrens 	int ret;
536442a4f63SStefan Behrens 
537442a4f63SStefan Behrens 	ret = atomic_xchg(dev->dev_stat_values + index, 0);
5384660c49fSNikolay Borisov 	/*
5394660c49fSNikolay Borisov 	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
5404660c49fSNikolay Borisov 	 * - RMW operations that have a return value are fully ordered;
5414660c49fSNikolay Borisov 	 *
5424660c49fSNikolay Borisov 	 * This implicit memory barrier is paired with the smp_rmb() in
5434660c49fSNikolay Borisov 	 * btrfs_run_dev_stats().
5444660c49fSNikolay Borisov 	 */
545addc3fa7SMiao Xie 	atomic_inc(&dev->dev_stats_ccnt);
546442a4f63SStefan Behrens 	return ret;
547442a4f63SStefan Behrens }
548442a4f63SStefan Behrens 
549442a4f63SStefan Behrens static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
550442a4f63SStefan Behrens 				      int index, unsigned long val)
551442a4f63SStefan Behrens {
552442a4f63SStefan Behrens 	atomic_set(dev->dev_stat_values + index, val);
5539deae968SNikolay Borisov 	/*
5549deae968SNikolay Borisov 	 * This memory barrier orders stores updating statistics before stores
5559deae968SNikolay Borisov 	 * updating dev_stats_ccnt.
5569deae968SNikolay Borisov 	 *
5579deae968SNikolay Borisov 	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
5589deae968SNikolay Borisov 	 */
559addc3fa7SMiao Xie 	smp_mb__before_atomic();
560addc3fa7SMiao Xie 	atomic_inc(&dev->dev_stats_ccnt);
561442a4f63SStefan Behrens }
562442a4f63SStefan Behrens 
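/*
 * Sketch of how the stat helpers combine (illustrative only):
 *
 *	btrfs_dev_stat_inc(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 *	errs = btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS);
 *
 * dev_stats_ccnt only records that something changed; readers such as
 * btrfs_run_dev_stats() use it to decide whether the on-disk stats item
 * needs to be rewritten.
 */
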
5633e72ee88SQu Wenruo /*
5643e72ee88SQu Wenruo  * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
5653e72ee88SQu Wenruo  * can be used as an index into btrfs_raid_array[].
5663e72ee88SQu Wenruo  */
5673e72ee88SQu Wenruo static inline enum btrfs_raid_types btrfs_bg_flags_to_raid_index(u64 flags)
5683e72ee88SQu Wenruo {
5693e72ee88SQu Wenruo 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
5703e72ee88SQu Wenruo 		return BTRFS_RAID_RAID10;
5713e72ee88SQu Wenruo 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
5723e72ee88SQu Wenruo 		return BTRFS_RAID_RAID1;
57347e6f742SDavid Sterba 	else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
57447e6f742SDavid Sterba 		return BTRFS_RAID_RAID1C3;
5758d6fac00SDavid Sterba 	else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
5768d6fac00SDavid Sterba 		return BTRFS_RAID_RAID1C4;
5773e72ee88SQu Wenruo 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
5783e72ee88SQu Wenruo 		return BTRFS_RAID_DUP;
5793e72ee88SQu Wenruo 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
5803e72ee88SQu Wenruo 		return BTRFS_RAID_RAID0;
5813e72ee88SQu Wenruo 	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
5823e72ee88SQu Wenruo 		return BTRFS_RAID_RAID5;
5833e72ee88SQu Wenruo 	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
5843e72ee88SQu Wenruo 		return BTRFS_RAID_RAID6;
5853e72ee88SQu Wenruo 
5863e72ee88SQu Wenruo 	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
5873e72ee88SQu Wenruo }
5883e72ee88SQu Wenruo 
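/*
 * Typical use is as an index into btrfs_raid_array[], e.g. (sketch):
 *
 *	const struct btrfs_raid_attr *attr;
 *
 *	attr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(flags)];
 *	ncopies = attr->ncopies;
 */
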
589bbbf7243SNikolay Borisov void btrfs_commit_device_sizes(struct btrfs_transaction *trans);
59004216820SFilipe Manana 
5914143cb8bSDavid Sterba struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
5926528b99dSAnand Jain bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
5936528b99dSAnand Jain 					struct btrfs_device *failing_dev);
594313b0858SJosef Bacik void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
595313b0858SJosef Bacik 			       struct block_device *bdev,
596313b0858SJosef Bacik 			       const char *device_path);
59721634a19SQu Wenruo 
59846df06b8SDavid Sterba int btrfs_bg_type_to_factor(u64 flags);
599158da513SDavid Sterba const char *btrfs_bg_type_to_raid_name(u64 flags);
600cf90d884SQu Wenruo int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
60146df06b8SDavid Sterba 
6020b86a832SChris Mason #endif
603