19888c340SDavid Sterba /* SPDX-License-Identifier: GPL-2.0 */ 20b86a832SChris Mason /* 30b86a832SChris Mason * Copyright (C) 2007 Oracle. All rights reserved. 40b86a832SChris Mason */ 50b86a832SChris Mason 69888c340SDavid Sterba #ifndef BTRFS_VOLUMES_H 79888c340SDavid Sterba #define BTRFS_VOLUMES_H 88790d502SChris Mason 9cea9e445SChris Mason #include <linux/bio.h> 10b2117a39SMiao Xie #include <linux/sort.h> 1155e301fdSFilipe Brandenburger #include <linux/btrfs.h> 128b712842SChris Mason #include "async-thread.h" 13cea9e445SChris Mason 14fce466eaSQu Wenruo #define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G) 15fce466eaSQu Wenruo 1667a2c45eSMiao Xie extern struct mutex uuid_mutex; 1767a2c45eSMiao Xie 18ee22184bSByongho Lee #define BTRFS_STRIPE_LEN SZ_64K 19b2117a39SMiao Xie 20719fae89SQu Wenruo /* Used by sanity check for btrfs_raid_types. */ 21719fae89SQu Wenruo #define const_ffs(n) (__builtin_ctzll(n) + 1) 22719fae89SQu Wenruo 23719fae89SQu Wenruo /* 24719fae89SQu Wenruo * The conversion from BTRFS_BLOCK_GROUP_* bits to btrfs_raid_type requires 25719fae89SQu Wenruo * RAID0 always to be the lowest profile bit. 26719fae89SQu Wenruo * Although it's part of on-disk format and should never change, do extra 27719fae89SQu Wenruo * compile-time sanity checks. 28719fae89SQu Wenruo */ 29719fae89SQu Wenruo static_assert(const_ffs(BTRFS_BLOCK_GROUP_RAID0) < 30719fae89SQu Wenruo const_ffs(BTRFS_BLOCK_GROUP_PROFILE_MASK & ~BTRFS_BLOCK_GROUP_RAID0)); 31719fae89SQu Wenruo static_assert(const_ilog2(BTRFS_BLOCK_GROUP_RAID0) > 32719fae89SQu Wenruo ilog2(BTRFS_BLOCK_GROUP_TYPE_MASK)); 33719fae89SQu Wenruo 34719fae89SQu Wenruo /* ilog2() can handle both constants and variables */ 35719fae89SQu Wenruo #define BTRFS_BG_FLAG_TO_INDEX(profile) \ 36719fae89SQu Wenruo ilog2((profile) >> (ilog2(BTRFS_BLOCK_GROUP_RAID0) - 1)) 37719fae89SQu Wenruo 38f04fbcc6SQu Wenruo enum btrfs_raid_types { 39719fae89SQu Wenruo /* SINGLE is the special one as it doesn't have on-disk bit. 
*/ 40719fae89SQu Wenruo BTRFS_RAID_SINGLE = 0, 41719fae89SQu Wenruo 42719fae89SQu Wenruo BTRFS_RAID_RAID0 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID0), 43719fae89SQu Wenruo BTRFS_RAID_RAID1 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1), 44719fae89SQu Wenruo BTRFS_RAID_DUP = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_DUP), 45719fae89SQu Wenruo BTRFS_RAID_RAID10 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID10), 46719fae89SQu Wenruo BTRFS_RAID_RAID5 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID5), 47719fae89SQu Wenruo BTRFS_RAID_RAID6 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID6), 48719fae89SQu Wenruo BTRFS_RAID_RAID1C3 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C3), 49719fae89SQu Wenruo BTRFS_RAID_RAID1C4 = BTRFS_BG_FLAG_TO_INDEX(BTRFS_BLOCK_GROUP_RAID1C4), 50719fae89SQu Wenruo 51f04fbcc6SQu Wenruo BTRFS_NR_RAID_TYPES 52f04fbcc6SQu Wenruo }; 53f04fbcc6SQu Wenruo 545f141126SNikolay Borisov struct btrfs_io_geometry { 555f141126SNikolay Borisov /* remaining bytes before crossing a stripe */ 565f141126SNikolay Borisov u64 len; 575f141126SNikolay Borisov /* offset of logical address in chunk */ 585f141126SNikolay Borisov u64 offset; 595f141126SNikolay Borisov /* length of single IO stripe */ 60cc353a8bSQu Wenruo u32 stripe_len; 61cc353a8bSQu Wenruo /* offset of address in stripe */ 62cc353a8bSQu Wenruo u32 stripe_offset; 635f141126SNikolay Borisov /* number of stripe where address falls */ 645f141126SNikolay Borisov u64 stripe_nr; 655f141126SNikolay Borisov /* offset of raid56 stripe into the chunk */ 665f141126SNikolay Borisov u64 raid56_stripe_offset; 675f141126SNikolay Borisov }; 685f141126SNikolay Borisov 697cc8e58dSMiao Xie /* 707cc8e58dSMiao Xie * Use sequence counter to get consistent device stat data on 717cc8e58dSMiao Xie * 32-bit processors. 
727cc8e58dSMiao Xie */ 737cc8e58dSMiao Xie #if BITS_PER_LONG==32 && defined(CONFIG_SMP) 747cc8e58dSMiao Xie #include <linux/seqlock.h> 757cc8e58dSMiao Xie #define __BTRFS_NEED_DEVICE_DATA_ORDERED 76c41ec452SSu Yue #define btrfs_device_data_ordered_init(device) \ 77c41ec452SSu Yue seqcount_init(&device->data_seqcount) 787cc8e58dSMiao Xie #else 79c41ec452SSu Yue #define btrfs_device_data_ordered_init(device) do { } while (0) 807cc8e58dSMiao Xie #endif 817cc8e58dSMiao Xie 82ebbede42SAnand Jain #define BTRFS_DEV_STATE_WRITEABLE (0) 83e12c9621SAnand Jain #define BTRFS_DEV_STATE_IN_FS_METADATA (1) 84e6e674bdSAnand Jain #define BTRFS_DEV_STATE_MISSING (2) 85401e29c1SAnand Jain #define BTRFS_DEV_STATE_REPLACE_TGT (3) 861c3063b6SAnand Jain #define BTRFS_DEV_STATE_FLUSH_SENT (4) 8766d204a1SFilipe Manana #define BTRFS_DEV_STATE_NO_READA (5) 88ebbede42SAnand Jain 895b316468SNaohiro Aota struct btrfs_zoned_device_info; 905b316468SNaohiro Aota 910b86a832SChris Mason struct btrfs_device { 920b6f5d40SNikolay Borisov struct list_head dev_list; /* device_list_mutex */ 930b6f5d40SNikolay Borisov struct list_head dev_alloc_list; /* chunk mutex */ 94bbbf7243SNikolay Borisov struct list_head post_commit_list; /* chunk mutex */ 952b82032cSYan Zheng struct btrfs_fs_devices *fs_devices; 96fb456252SJeff Mahoney struct btrfs_fs_info *fs_info; 97ffbd517dSChris Mason 988d1a7aaeSMadhuparna Bhowmik struct rcu_string __rcu *name; 99d5ee37bcSMiao Xie 100d5ee37bcSMiao Xie u64 generation; 101d5ee37bcSMiao Xie 102d5ee37bcSMiao Xie struct block_device *bdev; 103d5ee37bcSMiao Xie 1045b316468SNaohiro Aota struct btrfs_zoned_device_info *zone_info; 1055b316468SNaohiro Aota 106d5ee37bcSMiao Xie /* the mode sent to blkdev_get */ 107d5ee37bcSMiao Xie fmode_t mode; 108d5ee37bcSMiao Xie 1094889bc05SAnand Jain /* 1104889bc05SAnand Jain * Device's major-minor number. Must be set even if the device is not 1114889bc05SAnand Jain * opened (bdev == NULL), unless the device is missing. 
1124889bc05SAnand Jain */ 1134889bc05SAnand Jain dev_t devt; 114ebbede42SAnand Jain unsigned long dev_state; 11558efbc9fSOmar Sandoval blk_status_t last_flush_error; 116b3075717SChris Mason 1177cc8e58dSMiao Xie #ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED 118c41ec452SSu Yue seqcount_t data_seqcount; 1197cc8e58dSMiao Xie #endif 1207cc8e58dSMiao Xie 1210b86a832SChris Mason /* the internal btrfs device id */ 1220b86a832SChris Mason u64 devid; 1230b86a832SChris Mason 1246ba40b61SMiao Xie /* size of the device in memory */ 1250b86a832SChris Mason u64 total_bytes; 1260b86a832SChris Mason 1276ba40b61SMiao Xie /* size of the device on disk */ 128d6397baeSChris Ball u64 disk_total_bytes; 129d6397baeSChris Ball 1300b86a832SChris Mason /* bytes used */ 1310b86a832SChris Mason u64 bytes_used; 1320b86a832SChris Mason 1330b86a832SChris Mason /* optimal io alignment for this device */ 1340b86a832SChris Mason u32 io_align; 1350b86a832SChris Mason 1360b86a832SChris Mason /* optimal io width for this device */ 1370b86a832SChris Mason u32 io_width; 1383c45bfc1SDulshani Gunawardhana /* type and info about this device */ 1393c45bfc1SDulshani Gunawardhana u64 type; 1400b86a832SChris Mason 1410b86a832SChris Mason /* minimal io size for this device */ 1420b86a832SChris Mason u32 sector_size; 1430b86a832SChris Mason 1440b86a832SChris Mason /* physical drive uuid (or lvm uuid) */ 145e17cade2SChris Mason u8 uuid[BTRFS_UUID_SIZE]; 1468b712842SChris Mason 147935e5cc9SMiao Xie /* 148935e5cc9SMiao Xie * size of the device on the current transaction 149935e5cc9SMiao Xie * 150935e5cc9SMiao Xie * This variant is update when committing the transaction, 151bbbf7243SNikolay Borisov * and protected by chunk mutex 152935e5cc9SMiao Xie */ 153935e5cc9SMiao Xie u64 commit_total_bytes; 154935e5cc9SMiao Xie 155ce7213c7SMiao Xie /* bytes used on the current transaction */ 156ce7213c7SMiao Xie u64 commit_bytes_used; 157935e5cc9SMiao Xie 158f9e69aa9SChristoph Hellwig /* Bio used for flushing device barriers */ 
159f9e69aa9SChristoph Hellwig struct bio flush_bio; 1603c45bfc1SDulshani Gunawardhana struct completion flush_wait; 1613c45bfc1SDulshani Gunawardhana 162a2de733cSArne Jansen /* per-device scrub information */ 163cadbc0a0SAnand Jain struct scrub_ctx *scrub_ctx; 164a2de733cSArne Jansen 165442a4f63SStefan Behrens /* disk I/O failure stats. For detailed description refer to 166442a4f63SStefan Behrens * enum btrfs_dev_stat_values in ioctl.h */ 167733f4fbbSStefan Behrens int dev_stats_valid; 168addc3fa7SMiao Xie 169addc3fa7SMiao Xie /* Counter to record the change of device stats */ 170addc3fa7SMiao Xie atomic_t dev_stats_ccnt; 171442a4f63SStefan Behrens atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX]; 1721c11b63eSJeff Mahoney 1731c11b63eSJeff Mahoney struct extent_io_tree alloc_state; 174668e48afSAnand Jain 175668e48afSAnand Jain struct completion kobj_unregister; 176668e48afSAnand Jain /* For sysfs/FSID/devinfo/devid/ */ 177668e48afSAnand Jain struct kobject devid_kobj; 178eb3b5053SDavid Sterba 179eb3b5053SDavid Sterba /* Bandwidth limit for scrub, in bytes */ 180eb3b5053SDavid Sterba u64 scrub_speed_max; 1810b86a832SChris Mason }; 1820b86a832SChris Mason 1837cc8e58dSMiao Xie /* 1842103da3bSJosef Bacik * Block group or device which contains an active swapfile. Used for preventing 1852103da3bSJosef Bacik * unsafe operations while a swapfile is active. 1862103da3bSJosef Bacik * 1872103da3bSJosef Bacik * These are sorted on (ptr, inode) (note that a block group or device can 1882103da3bSJosef Bacik * contain more than one swapfile). We compare the pointer values because we 1892103da3bSJosef Bacik * don't actually care what the object is, we just need a quick check whether 1902103da3bSJosef Bacik * the object exists in the rbtree. 
1912103da3bSJosef Bacik */ 1922103da3bSJosef Bacik struct btrfs_swapfile_pin { 1932103da3bSJosef Bacik struct rb_node node; 1942103da3bSJosef Bacik void *ptr; 1952103da3bSJosef Bacik struct inode *inode; 1962103da3bSJosef Bacik /* 1972103da3bSJosef Bacik * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr 1982103da3bSJosef Bacik * points to a struct btrfs_device. 1992103da3bSJosef Bacik */ 2002103da3bSJosef Bacik bool is_block_group; 2012103da3bSJosef Bacik /* 2022103da3bSJosef Bacik * Only used when 'is_block_group' is true and it is the number of 2032103da3bSJosef Bacik * extents used by a swapfile for this block group ('ptr' field). 2042103da3bSJosef Bacik */ 2052103da3bSJosef Bacik int bg_extent_count; 2062103da3bSJosef Bacik }; 2072103da3bSJosef Bacik 2082103da3bSJosef Bacik /* 2097cc8e58dSMiao Xie * If we read those variants at the context of their own lock, we needn't 2107cc8e58dSMiao Xie * use the following helpers, reading them directly is safe. 2117cc8e58dSMiao Xie */ 2127cc8e58dSMiao Xie #if BITS_PER_LONG==32 && defined(CONFIG_SMP) 2137cc8e58dSMiao Xie #define BTRFS_DEVICE_GETSET_FUNCS(name) \ 2147cc8e58dSMiao Xie static inline u64 \ 2157cc8e58dSMiao Xie btrfs_device_get_##name(const struct btrfs_device *dev) \ 2167cc8e58dSMiao Xie { \ 2177cc8e58dSMiao Xie u64 size; \ 2187cc8e58dSMiao Xie unsigned int seq; \ 2197cc8e58dSMiao Xie \ 2207cc8e58dSMiao Xie do { \ 2217cc8e58dSMiao Xie seq = read_seqcount_begin(&dev->data_seqcount); \ 2227cc8e58dSMiao Xie size = dev->name; \ 2237cc8e58dSMiao Xie } while (read_seqcount_retry(&dev->data_seqcount, seq)); \ 2247cc8e58dSMiao Xie return size; \ 2257cc8e58dSMiao Xie } \ 2267cc8e58dSMiao Xie \ 2277cc8e58dSMiao Xie static inline void \ 2287cc8e58dSMiao Xie btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \ 2297cc8e58dSMiao Xie { \ 230c41ec452SSu Yue preempt_disable(); \ 2317cc8e58dSMiao Xie write_seqcount_begin(&dev->data_seqcount); \ 2327cc8e58dSMiao Xie dev->name = size; \ 2337cc8e58dSMiao 
Xie write_seqcount_end(&dev->data_seqcount); \ 234c41ec452SSu Yue preempt_enable(); \ 2357cc8e58dSMiao Xie } 23694545870SThomas Gleixner #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION) 2377cc8e58dSMiao Xie #define BTRFS_DEVICE_GETSET_FUNCS(name) \ 2387cc8e58dSMiao Xie static inline u64 \ 2397cc8e58dSMiao Xie btrfs_device_get_##name(const struct btrfs_device *dev) \ 2407cc8e58dSMiao Xie { \ 2417cc8e58dSMiao Xie u64 size; \ 2427cc8e58dSMiao Xie \ 2437cc8e58dSMiao Xie preempt_disable(); \ 2447cc8e58dSMiao Xie size = dev->name; \ 2457cc8e58dSMiao Xie preempt_enable(); \ 2467cc8e58dSMiao Xie return size; \ 2477cc8e58dSMiao Xie } \ 2487cc8e58dSMiao Xie \ 2497cc8e58dSMiao Xie static inline void \ 2507cc8e58dSMiao Xie btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \ 2517cc8e58dSMiao Xie { \ 2527cc8e58dSMiao Xie preempt_disable(); \ 2537cc8e58dSMiao Xie dev->name = size; \ 2547cc8e58dSMiao Xie preempt_enable(); \ 2557cc8e58dSMiao Xie } 2567cc8e58dSMiao Xie #else 2577cc8e58dSMiao Xie #define BTRFS_DEVICE_GETSET_FUNCS(name) \ 2587cc8e58dSMiao Xie static inline u64 \ 2597cc8e58dSMiao Xie btrfs_device_get_##name(const struct btrfs_device *dev) \ 2607cc8e58dSMiao Xie { \ 2617cc8e58dSMiao Xie return dev->name; \ 2627cc8e58dSMiao Xie } \ 2637cc8e58dSMiao Xie \ 2647cc8e58dSMiao Xie static inline void \ 2657cc8e58dSMiao Xie btrfs_device_set_##name(struct btrfs_device *dev, u64 size) \ 2667cc8e58dSMiao Xie { \ 2677cc8e58dSMiao Xie dev->name = size; \ 2687cc8e58dSMiao Xie } 2697cc8e58dSMiao Xie #endif 2707cc8e58dSMiao Xie 2717cc8e58dSMiao Xie BTRFS_DEVICE_GETSET_FUNCS(total_bytes); 2727cc8e58dSMiao Xie BTRFS_DEVICE_GETSET_FUNCS(disk_total_bytes); 2737cc8e58dSMiao Xie BTRFS_DEVICE_GETSET_FUNCS(bytes_used); 2747cc8e58dSMiao Xie 275c4a816c6SNaohiro Aota enum btrfs_chunk_allocation_policy { 276c4a816c6SNaohiro Aota BTRFS_CHUNK_ALLOC_REGULAR, 2771cd6121fSNaohiro Aota BTRFS_CHUNK_ALLOC_ZONED, 278c4a816c6SNaohiro Aota }; 279c4a816c6SNaohiro Aota 28033fd2f71SAnand Jain /* 
28133fd2f71SAnand Jain * Read policies for mirrored block group profiles, read picks the stripe based 28233fd2f71SAnand Jain * on these policies. 28333fd2f71SAnand Jain */ 28433fd2f71SAnand Jain enum btrfs_read_policy { 28533fd2f71SAnand Jain /* Use process PID to choose the stripe */ 28633fd2f71SAnand Jain BTRFS_READ_POLICY_PID, 28733fd2f71SAnand Jain BTRFS_NR_READ_POLICY, 28833fd2f71SAnand Jain }; 28933fd2f71SAnand Jain 2908a4b83ccSChris Mason struct btrfs_fs_devices { 2918a4b83ccSChris Mason u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */ 2927239ff4bSNikolay Borisov u8 metadata_uuid[BTRFS_FSID_SIZE]; 293d1a63002SNikolay Borisov bool fsid_change; 294c4babc5eSAnand Jain struct list_head fs_list; 2958a4b83ccSChris Mason 296add9745aSAnand Jain /* 297add9745aSAnand Jain * Number of devices under this fsid including missing and 298add9745aSAnand Jain * replace-target device and excludes seed devices. 299add9745aSAnand Jain */ 3008a4b83ccSChris Mason u64 num_devices; 301add9745aSAnand Jain 302add9745aSAnand Jain /* 303add9745aSAnand Jain * The number of devices that successfully opened, including 304add9745aSAnand Jain * replace-target, excludes seed devices. 305add9745aSAnand Jain */ 306a0af469bSChris Mason u64 open_devices; 307add9745aSAnand Jain 308add9745aSAnand Jain /* The number of devices that are under the chunk allocation list. */ 3092b82032cSYan Zheng u64 rw_devices; 310add9745aSAnand Jain 311add9745aSAnand Jain /* Count of missing devices under this fsid excluding seed device. */ 312cd02dca5SChris Mason u64 missing_devices; 3132b82032cSYan Zheng u64 total_rw_bytes; 314add9745aSAnand Jain 315add9745aSAnand Jain /* 316add9745aSAnand Jain * Count of devices from btrfs_super_block::num_devices for this fsid, 317add9745aSAnand Jain * which includes the seed device, excludes the transient replace-target 318add9745aSAnand Jain * device. 
319add9745aSAnand Jain */ 32002db0844SJosef Bacik u64 total_devices; 321d1a63002SNikolay Borisov 322d1a63002SNikolay Borisov /* Highest generation number of seen devices */ 323d1a63002SNikolay Borisov u64 latest_generation; 324d1a63002SNikolay Borisov 325d24fa5c1SAnand Jain /* 326d24fa5c1SAnand Jain * The mount device or a device with highest generation after removal 327d24fa5c1SAnand Jain * or replace. 328d24fa5c1SAnand Jain */ 329d24fa5c1SAnand Jain struct btrfs_device *latest_dev; 330e5e9a520SChris Mason 331e5e9a520SChris Mason /* all of the devices in the FS, protected by a mutex 332e5e9a520SChris Mason * so we can safely walk it to write out the supers without 3339b011adfSWang Shilong * worrying about add/remove by the multi-device code. 3349b011adfSWang Shilong * Scrubbing super can kick off supers writing by holding 3359b011adfSWang Shilong * this mutex lock. 336e5e9a520SChris Mason */ 337e5e9a520SChris Mason struct mutex device_list_mutex; 3380b6f5d40SNikolay Borisov 3390b6f5d40SNikolay Borisov /* List of all devices, protected by device_list_mutex */ 3408a4b83ccSChris Mason struct list_head devices; 341b3075717SChris Mason 3420b6f5d40SNikolay Borisov /* 3430b6f5d40SNikolay Borisov * Devices which can satisfy space allocation. 
Protected by 3440b6f5d40SNikolay Borisov * chunk_mutex 3450b6f5d40SNikolay Borisov */ 346b3075717SChris Mason struct list_head alloc_list; 3472b82032cSYan Zheng 348944d3f9fSNikolay Borisov struct list_head seed_list; 3490395d84fSJohannes Thumshirn bool seeding; 3502b82032cSYan Zheng 3512b82032cSYan Zheng int opened; 352c289811cSChris Mason 353c289811cSChris Mason /* set when we find or add a device that doesn't have the 354c289811cSChris Mason * nonrot flag set 355c289811cSChris Mason */ 3567f0432d0SJohannes Thumshirn bool rotating; 3572e7910d6SAnand Jain 3585a13f430SAnand Jain struct btrfs_fs_info *fs_info; 3592e7910d6SAnand Jain /* sysfs kobjects */ 360c1b7e474SAnand Jain struct kobject fsid_kobj; 361b5501504SAnand Jain struct kobject *devices_kobj; 362a013d141SAnand Jain struct kobject *devinfo_kobj; 3632e7910d6SAnand Jain struct completion kobj_unregister; 364c4a816c6SNaohiro Aota 365c4a816c6SNaohiro Aota enum btrfs_chunk_allocation_policy chunk_alloc_policy; 36633fd2f71SAnand Jain 36733fd2f71SAnand Jain /* Policy used to read the mirrored stripes */ 36833fd2f71SAnand Jain enum btrfs_read_policy read_policy; 3698a4b83ccSChris Mason }; 3708a4b83ccSChris Mason 371facc8a22SMiao Xie #define BTRFS_BIO_INLINE_CSUM_SIZE 64 372facc8a22SMiao Xie 373ab4ba2e1SQu Wenruo #define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info) \ 374ab4ba2e1SQu Wenruo - sizeof(struct btrfs_chunk)) \ 375ab4ba2e1SQu Wenruo / sizeof(struct btrfs_stripe) + 1) 376ab4ba2e1SQu Wenruo 377ab4ba2e1SQu Wenruo #define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE \ 378ab4ba2e1SQu Wenruo - 2 * sizeof(struct btrfs_disk_key) \ 379ab4ba2e1SQu Wenruo - 2 * sizeof(struct btrfs_chunk)) \ 380ab4ba2e1SQu Wenruo / sizeof(struct btrfs_stripe) + 1) 381ab4ba2e1SQu Wenruo 3829be3395bSChris Mason /* 383ee5b46a3SChristoph Hellwig * Maximum number of sectors for a single bio to limit the size of the 384ee5b46a3SChristoph Hellwig * checksum array. 
This matches the number of bio_vecs per bio and thus the 385ee5b46a3SChristoph Hellwig * I/O size for buffered I/O. 386ee5b46a3SChristoph Hellwig */ 387ee5b46a3SChristoph Hellwig #define BTRFS_MAX_BIO_SECTORS (256) 388ee5b46a3SChristoph Hellwig 389917f32a2SChristoph Hellwig typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio); 390917f32a2SChristoph Hellwig 391ee5b46a3SChristoph Hellwig /* 392c3a3b19bSQu Wenruo * Additional info to pass along bio. 393c3a3b19bSQu Wenruo * 394c3a3b19bSQu Wenruo * Mostly for btrfs specific features like csum and mirror_num. 3959be3395bSChris Mason */ 396c3a3b19bSQu Wenruo struct btrfs_bio { 397c1dc0896SMiao Xie unsigned int mirror_num; 398*ae0e5df4SDavid Sterba struct bvec_iter iter; 399c3a3b19bSQu Wenruo 40000d82525SChristoph Hellwig /* for direct I/O */ 40100d82525SChristoph Hellwig u64 file_offset; 40200d82525SChristoph Hellwig 403c3a3b19bSQu Wenruo /* @device is for stripe IO submission. */ 404c31efbdfSNikolay Borisov struct btrfs_device *device; 405facc8a22SMiao Xie u8 *csum; 406facc8a22SMiao Xie u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE]; 407c3a3b19bSQu Wenruo 408917f32a2SChristoph Hellwig /* End I/O information supplied to btrfs_bio_alloc */ 409917f32a2SChristoph Hellwig btrfs_bio_end_io_t end_io; 410917f32a2SChristoph Hellwig void *private; 411917f32a2SChristoph Hellwig 412d7b9416fSChristoph Hellwig /* For read end I/O handling */ 413d7b9416fSChristoph Hellwig struct work_struct end_io_work; 414d7b9416fSChristoph Hellwig 415fa1bcbe0SDavid Sterba /* 416fa1bcbe0SDavid Sterba * This member must come last, bio_alloc_bioset will allocate enough 417c3a3b19bSQu Wenruo * bytes for entire btrfs_bio but relies on bio being last. 
418fa1bcbe0SDavid Sterba */ 4199be3395bSChris Mason struct bio bio; 4209be3395bSChris Mason }; 4219be3395bSChris Mason 422c3a3b19bSQu Wenruo static inline struct btrfs_bio *btrfs_bio(struct bio *bio) 4239be3395bSChris Mason { 424c3a3b19bSQu Wenruo return container_of(bio, struct btrfs_bio, bio); 4259be3395bSChris Mason } 4269be3395bSChris Mason 427d45cfb88SChristoph Hellwig int __init btrfs_bioset_init(void); 428d45cfb88SChristoph Hellwig void __cold btrfs_bioset_exit(void); 429d45cfb88SChristoph Hellwig 430917f32a2SChristoph Hellwig struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, 431917f32a2SChristoph Hellwig btrfs_bio_end_io_t end_io, void *private); 432917f32a2SChristoph Hellwig struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size, 433917f32a2SChristoph Hellwig btrfs_bio_end_io_t end_io, void *private); 434917f32a2SChristoph Hellwig 435917f32a2SChristoph Hellwig static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) 436917f32a2SChristoph Hellwig { 437917f32a2SChristoph Hellwig bbio->bio.bi_status = status; 438917f32a2SChristoph Hellwig bbio->end_io(bbio); 439917f32a2SChristoph Hellwig } 440d45cfb88SChristoph Hellwig 441c3a3b19bSQu Wenruo static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio) 442b3a0dd50SDavid Sterba { 443c3a3b19bSQu Wenruo if (bbio->csum != bbio->csum_inline) { 444c3a3b19bSQu Wenruo kfree(bbio->csum); 445c3a3b19bSQu Wenruo bbio->csum = NULL; 446b3a0dd50SDavid Sterba } 447b3a0dd50SDavid Sterba } 448b3a0dd50SDavid Sterba 449261d812bSQu Wenruo /* 450261d812bSQu Wenruo * Iterate through a btrfs_bio (@bbio) on a per-sector basis. 
451261d812bSQu Wenruo * 452261d812bSQu Wenruo * bvl - struct bio_vec 453261d812bSQu Wenruo * bbio - struct btrfs_bio 454261d812bSQu Wenruo * iters - struct bvec_iter 455261d812bSQu Wenruo * bio_offset - unsigned int 456261d812bSQu Wenruo */ 457261d812bSQu Wenruo #define btrfs_bio_for_each_sector(fs_info, bvl, bbio, iter, bio_offset) \ 458261d812bSQu Wenruo for ((iter) = (bbio)->iter, (bio_offset) = 0; \ 459261d812bSQu Wenruo (iter).bi_size && \ 460261d812bSQu Wenruo (((bvl) = bio_iter_iovec((&(bbio)->bio), (iter))), 1); \ 461261d812bSQu Wenruo (bio_offset) += fs_info->sectorsize, \ 462261d812bSQu Wenruo bio_advance_iter_single(&(bbio)->bio, &(iter), \ 463261d812bSQu Wenruo (fs_info)->sectorsize)) 464261d812bSQu Wenruo 4654c664611SQu Wenruo struct btrfs_io_stripe { 466cea9e445SChris Mason struct btrfs_device *dev; 4679ff7ddd3SChristoph Hellwig union { 4689ff7ddd3SChristoph Hellwig /* Block mapping */ 469cea9e445SChris Mason u64 physical; 4709ff7ddd3SChristoph Hellwig /* For the endio handler */ 4719ff7ddd3SChristoph Hellwig struct btrfs_io_context *bioc; 4729ff7ddd3SChristoph Hellwig }; 473a4012f06SChristoph Hellwig }; 474a4012f06SChristoph Hellwig 475a4012f06SChristoph Hellwig struct btrfs_discard_stripe { 476a4012f06SChristoph Hellwig struct btrfs_device *dev; 477a4012f06SChristoph Hellwig u64 physical; 478a4012f06SChristoph Hellwig u64 length; 479cea9e445SChris Mason }; 480cea9e445SChris Mason 4814c664611SQu Wenruo /* 4824c664611SQu Wenruo * Context for IO subsmission for device stripe. 4834c664611SQu Wenruo * 4844c664611SQu Wenruo * - Track the unfinished mirrors for mirror based profiles 4854c664611SQu Wenruo * Mirror based profiles are SINGLE/DUP/RAID1/RAID10. 4864c664611SQu Wenruo * 4874c664611SQu Wenruo * - Contain the logical -> physical mapping info 4884c664611SQu Wenruo * Used by submit_stripe_bio() for mapping logical bio 4894c664611SQu Wenruo * into physical device address. 
4904c664611SQu Wenruo * 4914c664611SQu Wenruo * - Contain device replace info 4924c664611SQu Wenruo * Used by handle_ops_on_dev_replace() to copy logical bios 4934c664611SQu Wenruo * into the new device. 4944c664611SQu Wenruo * 4954c664611SQu Wenruo * - Contain RAID56 full stripe logical bytenrs 4964c664611SQu Wenruo */ 4974c664611SQu Wenruo struct btrfs_io_context { 498140475aeSElena Reshetova refcount_t refs; 499c404e0dcSMiao Xie struct btrfs_fs_info *fs_info; 50010f11900SZhao Lei u64 map_type; /* get from map_lookup->type */ 5017d2b4daaSChris Mason struct bio *orig_bio; 502a236aed1SChris Mason atomic_t error; 503a236aed1SChris Mason int max_errors; 504cea9e445SChris Mason int num_stripes; 505a1d3c478SJan Schmidt int mirror_num; 5062c8cdd6eSMiao Xie int num_tgtdevs; 5072c8cdd6eSMiao Xie int *tgtdev_map; 5088e5cfb55SZhao Lei /* 5098e5cfb55SZhao Lei * logical block numbers for the start of each stripe 5108e5cfb55SZhao Lei * The last one or two are p/q. These are sorted, 5118e5cfb55SZhao Lei * so raid_map[0] is the start of our full stripe 5128e5cfb55SZhao Lei */ 5138e5cfb55SZhao Lei u64 *raid_map; 5144c664611SQu Wenruo struct btrfs_io_stripe stripes[]; 515cea9e445SChris Mason }; 516cea9e445SChris Mason 517b2117a39SMiao Xie struct btrfs_device_info { 518b2117a39SMiao Xie struct btrfs_device *dev; 519b2117a39SMiao Xie u64 dev_offset; 520b2117a39SMiao Xie u64 max_avail; 52173c5de00SArne Jansen u64 total_avail; 522b2117a39SMiao Xie }; 523b2117a39SMiao Xie 52431e50229SLiu Bo struct btrfs_raid_attr { 5258c3e3582SDavid Sterba u8 sub_stripes; /* sub_stripes info for map */ 5268c3e3582SDavid Sterba u8 dev_stripes; /* stripes per dev */ 5278c3e3582SDavid Sterba u8 devs_max; /* max devs to use */ 5288c3e3582SDavid Sterba u8 devs_min; /* min devs needed */ 5298c3e3582SDavid Sterba u8 tolerated_failures; /* max tolerated fail devs */ 5308c3e3582SDavid Sterba u8 devs_increment; /* ndevs has to be a multiple of this */ 5318c3e3582SDavid Sterba u8 ncopies; /* how many copies to 
data has */ 5328c3e3582SDavid Sterba u8 nparity; /* number of stripes worth of bytes to store 533b50836edSHans van Kranenburg * parity information */ 5348c3e3582SDavid Sterba u8 mindev_error; /* error code if min devs requisite is unmet */ 535ed23467bSAnand Jain const char raid_name[8]; /* name of the raid */ 53641a6e891SAnand Jain u64 bg_flag; /* block group flag of the raid */ 53731e50229SLiu Bo }; 53831e50229SLiu Bo 539af902047SZhao Lei extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES]; 540af902047SZhao Lei 5411abe9b8aSliubo struct map_lookup { 5421abe9b8aSliubo u64 type; 5431abe9b8aSliubo int io_align; 5441abe9b8aSliubo int io_width; 545cc353a8bSQu Wenruo u32 stripe_len; 5461abe9b8aSliubo int num_stripes; 5471abe9b8aSliubo int sub_stripes; 548cf90d884SQu Wenruo int verified_stripes; /* For mount time dev extent verification */ 5494c664611SQu Wenruo struct btrfs_io_stripe stripes[]; 5501abe9b8aSliubo }; 5511abe9b8aSliubo 552a2de733cSArne Jansen #define map_lookup_size(n) (sizeof(struct map_lookup) + \ 5534c664611SQu Wenruo (sizeof(struct btrfs_io_stripe) * (n))) 554a2de733cSArne Jansen 555c9e9f97bSIlya Dryomov struct btrfs_balance_args; 55619a39dceSIlya Dryomov struct btrfs_balance_progress; 557c9e9f97bSIlya Dryomov struct btrfs_balance_control { 558c9e9f97bSIlya Dryomov struct btrfs_balance_args data; 559c9e9f97bSIlya Dryomov struct btrfs_balance_args meta; 560c9e9f97bSIlya Dryomov struct btrfs_balance_args sys; 561c9e9f97bSIlya Dryomov 562c9e9f97bSIlya Dryomov u64 flags; 56319a39dceSIlya Dryomov 56419a39dceSIlya Dryomov struct btrfs_balance_progress stat; 565c9e9f97bSIlya Dryomov }; 566c9e9f97bSIlya Dryomov 567562d7b15SJosef Bacik /* 568562d7b15SJosef Bacik * Search for a given device by the set parameters 569562d7b15SJosef Bacik */ 570562d7b15SJosef Bacik struct btrfs_dev_lookup_args { 571562d7b15SJosef Bacik u64 devid; 572562d7b15SJosef Bacik u8 *uuid; 573562d7b15SJosef Bacik u8 *fsid; 574562d7b15SJosef Bacik bool missing; 
575562d7b15SJosef Bacik }; 576562d7b15SJosef Bacik 577562d7b15SJosef Bacik /* We have to initialize to -1 because BTRFS_DEV_REPLACE_DEVID is 0 */ 578562d7b15SJosef Bacik #define BTRFS_DEV_LOOKUP_ARGS_INIT { .devid = (u64)-1 } 579562d7b15SJosef Bacik 580562d7b15SJosef Bacik #define BTRFS_DEV_LOOKUP_ARGS(name) \ 581562d7b15SJosef Bacik struct btrfs_dev_lookup_args name = BTRFS_DEV_LOOKUP_ARGS_INIT 582562d7b15SJosef Bacik 583cf8cddd3SChristoph Hellwig enum btrfs_map_op { 584cf8cddd3SChristoph Hellwig BTRFS_MAP_READ, 585cf8cddd3SChristoph Hellwig BTRFS_MAP_WRITE, 586cf8cddd3SChristoph Hellwig BTRFS_MAP_DISCARD, 587cf8cddd3SChristoph Hellwig BTRFS_MAP_GET_READ_MIRRORS, 588cf8cddd3SChristoph Hellwig }; 589cf8cddd3SChristoph Hellwig 590cf8cddd3SChristoph Hellwig static inline enum btrfs_map_op btrfs_op(struct bio *bio) 591cf8cddd3SChristoph Hellwig { 592cf8cddd3SChristoph Hellwig switch (bio_op(bio)) { 593cf8cddd3SChristoph Hellwig case REQ_OP_DISCARD: 594cf8cddd3SChristoph Hellwig return BTRFS_MAP_DISCARD; 595cf8cddd3SChristoph Hellwig case REQ_OP_WRITE: 596cfe94440SNaohiro Aota case REQ_OP_ZONE_APPEND: 597cf8cddd3SChristoph Hellwig return BTRFS_MAP_WRITE; 598cf8cddd3SChristoph Hellwig default: 599cf8cddd3SChristoph Hellwig WARN_ON_ONCE(1); 600c730ae0cSMarcos Paulo de Souza fallthrough; 601cf8cddd3SChristoph Hellwig case REQ_OP_READ: 602cf8cddd3SChristoph Hellwig return BTRFS_MAP_READ; 603cf8cddd3SChristoph Hellwig } 604cf8cddd3SChristoph Hellwig } 605cf8cddd3SChristoph Hellwig 6064c664611SQu Wenruo void btrfs_get_bioc(struct btrfs_io_context *bioc); 6074c664611SQu Wenruo void btrfs_put_bioc(struct btrfs_io_context *bioc); 608cf8cddd3SChristoph Hellwig int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 609cea9e445SChris Mason u64 logical, u64 *length, 6104c664611SQu Wenruo struct btrfs_io_context **bioc_ret, int mirror_num); 611cf8cddd3SChristoph Hellwig int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, 612af8e2d1dSMiao Xie 
		     u64 logical, u64 *length,
		     struct btrfs_io_context **bioc_ret);
/* Map a logical range to the per-device stripes a discard must hit. */
struct btrfs_discard_stripe *btrfs_map_discard(struct btrfs_fs_info *fs_info,
					       u64 logical, u64 *length_ret,
					       u32 *num_stripes);
/* Fill @io_geom with the stripe geometry of @logical within @map. */
int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *map,
			  enum btrfs_map_op op, u64 logical,
			  struct btrfs_io_geometry *io_geom);

/* Mount-time bootstrap: superblock system chunk array, then the chunk tree. */
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);

struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
					     u64 type);
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror_num);

/* Scanning, opening and closing of the devices backing a filesystem. */
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path,
					   fmode_t flags, void *holder);
int btrfs_forget_devices(dev_t devt);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev);

/* Device lookup by devid or path, and device object lifetime. */
struct btrfs_device *btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info,
						  u64 devid,
						  const char *devpath);
int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
				 struct btrfs_dev_lookup_args *args,
				 const char *path);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid);
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args);
void btrfs_free_device(struct btrfs_device *device);

/* Device add/remove and resize. */
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
		    struct btrfs_dev_lookup_args *args,
		    struct block_device **bdev, fmode_t *mode);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
				       const struct btrfs_dev_lookup_args *args);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);

/* Balance: start/resume/recover/pause/cancel a profile conversion run. */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs);
void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);

/* UUID tree creation and background scan. */
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);

bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *max_avail);

/* Per-device I/O error statistics. */
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans);

/* Device-replace teardown of the source and target devices. */
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev);
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev);
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev);

/* RAID56 stripe geometry queries. */
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info,
			   u64 logical, u64 len);
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical);
u64 btrfs_calc_stripe_length(const struct extent_map *em);
int btrfs_nr_parity_stripes(u64 type);
int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
				     struct btrfs_block_group *bg);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
struct extent_map
*btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
		     u64 logical, u64 length);
void btrfs_release_disk_super(struct btrfs_super_block *super);

/*
 * Bump the atomic counter for stat @index and advance dev_stats_ccnt so
 * readers can detect that the stats changed.
 */
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
				      int index)
{
	atomic_inc(dev->dev_stat_values + index);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

/* Read the current value of stat @index without modifying it. */
static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
				      int index)
{
	return atomic_read(dev->dev_stat_values + index);
}

/*
 * Atomically fetch stat @index and reset it to zero, bumping dev_stats_ccnt
 * to signal the change.  Returns the value the counter had before the reset.
 */
static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
						int index)
{
	int ret;

	ret = atomic_xchg(dev->dev_stat_values + index, 0);
	/*
	 * atomic_xchg implies a full memory barrier as per atomic_t.txt:
	 * - RMW operations that have a return value are fully ordered;
	 *
	 * This implicit memory barrier is paired with the smp_rmb() in
	 * btrfs_run_dev_stats().
	 */
	atomic_inc(&dev->dev_stats_ccnt);
	return ret;
}

/*
 * Set stat @index to @val and advance dev_stats_ccnt so readers can detect
 * that the stats changed.
 */
static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
				      int index, unsigned long val)
{
	atomic_set(dev->dev_stat_values + index, val);
	/*
	 * This memory barrier orders stores updating statistics before stores
	 * updating dev_stats_ccnt.
	 *
	 * It pairs with smp_rmb() in btrfs_run_dev_stats().
	 */
	smp_mb__before_atomic();
	atomic_inc(&dev->dev_stats_ccnt);
}

void btrfs_commit_device_sizes(struct btrfs_transaction *trans);

struct list_head * __attribute_const__ btrfs_get_fs_uuids(void);
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
			       struct btrfs_device *failing_dev);
void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
			       struct block_device *bdev,
			       const char *device_path);

/* Translation between block group profile bits and RAID naming/indices. */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags);
int btrfs_bg_type_to_factor(u64 flags);
const char *btrfs_bg_type_to_raid_name(u64 flags);
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);

bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
Bacik 7610b86a832SChris Mason #endif 762