/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef __BTRFS_CTREE__
#define __BTRFS_CTREE__

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include <linux/pagemap.h>
#include <linux/btrfs.h>
#include <linux/btrfs_tree.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"

struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
#define STATIC noinline
#else
#define STATIC static noinline
#endif

#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */

#define BTRFS_MAX_MIRRORS 3

#define BTRFS_MAX_LEVEL 8

#define BTRFS_COMPAT_EXTENT_TREE_V0

/*
 * the max metadata block size. This limit is somewhat artificial,
 * but the memmove costs go through the roof for larger blocks.
 */
#define BTRFS_MAX_METADATA_BLOCKSIZE 65536

/*
 * we can actually store much bigger names, but let's not confuse the rest
 * of linux
 */
#define BTRFS_NAME_LEN 255

/*
 * Theoretical limit is larger, but we keep this down to a sane
 * value. That should limit greatly the possibility of collisions on
 * inode ref items.
 */
#define BTRFS_LINK_MAX 65535U

static const int btrfs_csum_sizes[] = { 4 };

/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0

/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
#define REQ_GET_READ_MIRRORS	(1 << 30)

/* ioprio of readahead is set to idle */
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))

#define BTRFS_DIRTY_METADATA_THRESH	SZ_32M

#define BTRFS_MAX_EXTENT_SIZE	SZ_128M

struct btrfs_mapping_tree {
	struct extent_map_tree map_tree;
};

static inline unsigned long btrfs_chunk_item_size(int num_stripes)
{
	BUG_ON(num_stripes == 0);
	return sizeof(struct btrfs_chunk) +
		sizeof(struct btrfs_stripe) * (num_stripes - 1);
}
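/*
 * Example (illustrative sketch, not used by the code below): because the
 * first stripe is embedded in struct btrfs_chunk itself, a chunk item that
 * describes three stripes occupies
 *
 *	btrfs_chunk_item_size(3) == sizeof(struct btrfs_chunk) +
 *				    2 * sizeof(struct btrfs_stripe)
 *
 * bytes in the leaf that holds it.
 */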

/*
 * File system states
 */
#define BTRFS_FS_STATE_ERROR		0
#define BTRFS_FS_STATE_REMOUNTING	1
#define BTRFS_FS_STATE_TRANS_ABORTED	2
#define BTRFS_FS_STATE_DEV_REPLACING	3
#define BTRFS_FS_STATE_DUMMY_FS_INFO	4

#define BTRFS_BACKREF_REV_MAX		256
#define BTRFS_BACKREF_REV_SHIFT		56
#define BTRFS_BACKREF_REV_MASK		(((u64)BTRFS_BACKREF_REV_MAX - 1) << \
					 BTRFS_BACKREF_REV_SHIFT)

#define BTRFS_OLD_BACKREF_REV		0
#define BTRFS_MIXED_BACKREF_REV		1

/*
 * every tree block (leaf or node) starts with this header.
 */
struct btrfs_header {
	/* these first four must match the super block */
	u8 csum[BTRFS_CSUM_SIZE];
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	__le64 bytenr; /* which block this node is supposed to live in */
	__le64 flags;

	/* allowed to be different from the super from here on down */
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	__le64 generation;
	__le64 owner;
	__le32 nritems;
	u8 level;
} __attribute__ ((__packed__));
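/*
 * Example (illustrative sketch): the backref revision is stored in the top
 * bits of the header flags, so, assuming 'eb' is an extent buffer holding a
 * tree block, reading it back looks roughly like:
 *
 *	u64 flags = btrfs_header_flags(eb);
 *	int backref_rev = (flags & BTRFS_BACKREF_REV_MASK) >>
 *			  BTRFS_BACKREF_REV_SHIFT;
 */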
/*
 * this is a very generous portion of the super block, giving us
 * room to translate 14 chunks with 3 stripes each.
 */
#define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048

/*
 * just in case we somehow lose the roots and are not able to mount,
 * we store an array of the roots from previous transactions
 * in the super.
 */
#define BTRFS_NUM_BACKUP_ROOTS 4
struct btrfs_root_backup {
	__le64 tree_root;
	__le64 tree_root_gen;

	__le64 chunk_root;
	__le64 chunk_root_gen;

	__le64 extent_root;
	__le64 extent_root_gen;

	__le64 fs_root;
	__le64 fs_root_gen;

	__le64 dev_root;
	__le64 dev_root_gen;

	__le64 csum_root;
	__le64 csum_root_gen;

	__le64 total_bytes;
	__le64 bytes_used;
	__le64 num_devices;
	/* future */
	__le64 unused_64[4];

	u8 tree_root_level;
	u8 chunk_root_level;
	u8 extent_root_level;
	u8 fs_root_level;
	u8 dev_root_level;
	u8 csum_root_level;
	/* future and to align */
	u8 unused_8[10];
} __attribute__ ((__packed__));

/*
 * the super block basically lists the main trees of the FS
 * it currently lacks any block count etc etc
 */
struct btrfs_super_block {
	u8 csum[BTRFS_CSUM_SIZE];
	/* the first 4 fields must match struct btrfs_header */
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	__le64 bytenr; /* this block number */
	__le64 flags;

	/* allowed to be different from the btrfs_header from here on down */
	__le64 magic;
	__le64 generation;
	__le64 root;
	__le64 chunk_root;
	__le64 log_root;

	/* this will help find the new super based on the log root */
	__le64 log_root_transid;
	__le64 total_bytes;
	__le64 bytes_used;
	__le64 root_dir_objectid;
	__le64 num_devices;
	__le32 sectorsize;
	__le32 nodesize;
	__le32 __unused_leafsize;
	__le32 stripesize;
	__le32 sys_chunk_array_size;
	__le64 chunk_root_generation;
	__le64 compat_flags;
	__le64 compat_ro_flags;
	__le64 incompat_flags;
	__le16 csum_type;
	u8 root_level;
	u8 chunk_root_level;
	u8 log_root_level;
	struct btrfs_dev_item dev_item;

	char label[BTRFS_LABEL_SIZE];

	__le64 cache_generation;
	__le64 uuid_tree_generation;

	/* future expansion */
	__le64 reserved[30];
	u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
	struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
} __attribute__ ((__packed__));

/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below then we will fail to mount
 */
#define BTRFS_FEATURE_COMPAT_SUPP		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP			\
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES)

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET			\
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR		0ULL
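/*
 * Example (illustrative sketch): a mount-time check against these masks
 * rejects filesystems that use incompat features this code does not
 * understand.  Assuming 'disk_super' points at the super block copy, it
 * looks roughly like:
 *
 *	u64 features = btrfs_super_incompat_flags(disk_super) &
 *		       ~BTRFS_FEATURE_INCOMPAT_SUPP;
 *	if (features)
 *		return -EINVAL;
 */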

/*
 * A leaf is full of items. offset and size tell us where to find
 * the item in the leaf (relative to the start of the data area)
 */
struct btrfs_item {
	struct btrfs_disk_key key;
	__le32 offset;
	__le32 size;
} __attribute__ ((__packed__));

/*
 * leaves have an item area and a data area:
 * [item0, item1....itemN] [free space] [dataN...data1, data0]
 *
 * The data is separate from the items to get the keys closer together
 * during searches.
 */
struct btrfs_leaf {
	struct btrfs_header header;
	struct btrfs_item items[];
} __attribute__ ((__packed__));

/*
 * all non-leaf blocks are nodes, they hold only keys and pointers to
 * other blocks
 */
struct btrfs_key_ptr {
	struct btrfs_disk_key key;
	__le64 blockptr;
	__le64 generation;
} __attribute__ ((__packed__));

struct btrfs_node {
	struct btrfs_header header;
	struct btrfs_key_ptr ptrs[];
} __attribute__ ((__packed__));

/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
enum { READA_NONE = 0, READA_BACK, READA_FORWARD };
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int leave_spinning:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
};
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \
					sizeof(struct btrfs_item))
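/*
 * Example (illustrative sketch): after a successful btrfs_search_slot(),
 * the leaf and slot that hold (or would hold) the key are read back from
 * the path like this:
 *
 *	struct extent_buffer *leaf = path->nodes[0];
 *	int slot = path->slots[0];
 *	struct btrfs_key found_key;
 *
 *	btrfs_item_key_to_cpu(leaf, &found_key, slot);
 */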
struct btrfs_dev_replace {
	u64 replace_state;	/* see #define above */
	u64 time_started;	/* seconds since 1-Jan-1970 */
	u64 time_stopped;	/* seconds since 1-Jan-1970 */
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	u64 cont_reading_from_srcdev_mode;	/* see #define above */

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	pid_t lock_owner;
	atomic_t nesting_level;
	struct mutex lock_finishing_cancel_unmount;
	rwlock_t lock;
	atomic_t read_locks;
	atomic_t blocking_readers;
	wait_queue_head_t read_lock_wq;

	struct btrfs_scrub_progress scrub_progress;
};

/* For raid type sysfs entries */
struct raid_kobject {
	int raid_type;
	struct kobject kobj;
};

struct btrfs_space_info {
	spinlock_t lock;

	u64 total_bytes;	/* total bytes in the space,
				   this doesn't take mirrors into account */
	u64 bytes_used;		/* total bytes used,
				   this doesn't take mirrors into account */
	u64 bytes_pinned;	/* total bytes pinned, will be freed when the
				   transaction finishes */
	u64 bytes_reserved;	/* total bytes the allocator has reserved for
				   current allocations */
	u64 bytes_may_use;	/* number of bytes that may be used for
				   delalloc/allocations */
	u64 bytes_readonly;	/* total bytes that are read only */

	u64 max_extent_size;	/* This will hold the maximum extent size of
				   the space info if we had an ENOSPC in the
				   allocator. */

	unsigned int full:1;	/* indicates that we cannot allocate any more
				   chunks for this space */
	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */

	unsigned int flush:1;		/* set if we are trying to make space */

	unsigned int force_alloc;	/* set if we need to force a chunk
					   alloc for this space */

	u64 disk_used;		/* total bytes used on disk */
	u64 disk_total;		/* total bytes on disk, takes mirrors into
				   account */

	u64 flags;

	/*
	 * bytes_pinned is kept in line with what is actually pinned, as in
	 * we've called update_block_group and dropped the bytes_used counter
	 * and increased the bytes_pinned counter. However this means that
	 * bytes_pinned does not reflect the bytes that will be pinned once the
	 * delayed refs are flushed, so this counter is inc'ed every time we
	 * call btrfs_free_extent so it is a realtime count of what will be
	 * freed once the transaction is committed. It will be zeroed every
	 * time the transaction commits.
	 */
	struct percpu_counter total_bytes_pinned;

	struct list_head list;
	/* Protected by the spinlock 'lock'. */
	struct list_head ro_bgs;
	struct list_head priority_tickets;
	struct list_head tickets;

	struct rw_semaphore groups_sem;
	/* for block groups in our same type */
	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
	wait_queue_head_t wait;

	struct kobject kobj;
	struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
};
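/*
 * Example (illustrative sketch): the byte counters above are combined to
 * estimate how much of this space is still available.  Assuming 'sinfo'
 * points at a struct btrfs_space_info and sinfo->lock is held, the usual
 * pattern is along the lines of:
 *
 *	u64 used = sinfo->bytes_used + sinfo->bytes_reserved +
 *		   sinfo->bytes_pinned + sinfo->bytes_readonly +
 *		   sinfo->bytes_may_use;
 *	u64 avail = sinfo->total_bytes - used;
 */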

#define BTRFS_BLOCK_RSV_GLOBAL		1
#define BTRFS_BLOCK_RSV_DELALLOC	2
#define BTRFS_BLOCK_RSV_TRANS		3
#define BTRFS_BLOCK_RSV_CHUNK		4
#define BTRFS_BLOCK_RSV_DELOPS		5
#define BTRFS_BLOCK_RSV_EMPTY		6
#define BTRFS_BLOCK_RSV_TEMP		7

struct btrfs_block_rsv {
	u64 size;
	u64 reserved;
	struct btrfs_space_info *space_info;
	spinlock_t lock;
	unsigned short full;
	unsigned short type;
	unsigned short failfast;
};

/*
 * free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations and data allocations in ssd mode.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* largest extent in this cluster */
	u64 max_size;

	/* first extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group_cache *block_group;
	/*
	 * when a cluster is allocated from a block group, we put the
	 * cluster onto a list in the block group so that it can
	 * be freed before the block group is freed.
	 */
	struct list_head block_group_list;
};

enum btrfs_caching_type {
	BTRFS_CACHE_NO		= 0,
	BTRFS_CACHE_STARTED	= 1,
	BTRFS_CACHE_FAST	= 2,
	BTRFS_CACHE_FINISHED	= 3,
	BTRFS_CACHE_ERROR	= 4,
};

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN	= 0,
	BTRFS_DC_ERROR		= 1,
	BTRFS_DC_CLEAR		= 2,
	BTRFS_DC_SETUP		= 3,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group_cache *block_group;
	u64 progress;
	atomic_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)

struct btrfs_io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	struct inode *inode;
	unsigned long size;
	int index;
	int num_pages;
	int entries;
	int bitmaps;
	unsigned check_crcs:1;
};

struct btrfs_block_group_cache {
	struct btrfs_key key;
	struct btrfs_block_group_item item;
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 pinned;
	u64 reserved;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u32 sectorsize;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * It is just used for the delayed data space allocation because
	 * only the data space allocation and the related metadata update
	 * can be done across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* for raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;

	int disk_cache_state;

	/* cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* block group cache stuff */
	struct rb_node cache_node;

	/* for block groups in the same raid type */
	struct list_head list;

	/* usage count */
	atomic_t count;

	/* List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	atomic_t trimming;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
612 * Decremented when an ordered extent that represents an IO against this 613 * block group's range is created (after it's added to its inode's 614 * root's list of ordered extents) or immediately after the allocation 615 * if it's a metadata extent or fallocate extent (for these cases we 616 * don't create ordered extents). 617 */ 618 atomic_t reservations; 619 620 /* 621 * Incremented while holding the spinlock *lock* by a task checking if 622 * it can perform a nocow write (incremented if the value for the *ro* 623 * field is 0). Decremented by such tasks once they create an ordered 624 * extent or before that if some error happens before reaching that step. 625 * This is to prevent races between block group relocation and nocow 626 * writes through direct IO. 627 */ 628 atomic_t nocow_writers; 629 630 /* Lock for free space tree operations. */ 631 struct mutex free_space_lock; 632 633 /* 634 * Does the block group need to be added to the free space tree? 635 * Protected by free_space_lock. 636 */ 637 int needs_free_space; 638 }; 639 640 /* delayed seq elem */ 641 struct seq_list { 642 struct list_head list; 643 u64 seq; 644 }; 645 646 #define SEQ_LIST_INIT(name) { .list = LIST_HEAD_INIT((name).list), .seq = 0 } 647 648 enum btrfs_orphan_cleanup_state { 649 ORPHAN_CLEANUP_STARTED = 1, 650 ORPHAN_CLEANUP_DONE = 2, 651 }; 652 653 /* used by the raid56 code to lock stripes for read/modify/write */ 654 struct btrfs_stripe_hash { 655 struct list_head hash_list; 656 wait_queue_head_t wait; 657 spinlock_t lock; 658 }; 659 660 /* used by the raid56 code to lock stripes for read/modify/write */ 661 struct btrfs_stripe_hash_table { 662 struct list_head stripe_cache; 663 spinlock_t cache_lock; 664 int cache_size; 665 struct btrfs_stripe_hash table[]; 666 }; 667 668 #define BTRFS_STRIPE_HASH_TABLE_BITS 11 669 670 void btrfs_init_async_reclaim_work(struct work_struct *work); 671 672 /* fs_info */ 673 struct reloc_control; 674 struct btrfs_device; 675 struct btrfs_fs_devices; 676 struct btrfs_balance_control; 677 struct btrfs_delayed_root; 678 struct btrfs_fs_info { 679 u8 fsid[BTRFS_FSID_SIZE]; 680 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 681 struct btrfs_root *extent_root; 682 struct btrfs_root *tree_root; 683 struct btrfs_root *chunk_root; 684 struct btrfs_root *dev_root; 685 struct btrfs_root *fs_root; 686 struct btrfs_root *csum_root; 687 struct btrfs_root *quota_root; 688 struct btrfs_root *uuid_root; 689 struct btrfs_root *free_space_root; 690 691 /* the log root tree is a directory of all the other log roots */ 692 struct btrfs_root *log_root_tree; 693 694 spinlock_t fs_roots_radix_lock; 695 struct radix_tree_root fs_roots_radix; 696 697 /* block group cache stuff */ 698 spinlock_t block_group_cache_lock; 699 u64 first_logical_byte; 700 struct rb_root block_group_cache_tree; 701 702 /* keep track of unallocated space */ 703 spinlock_t free_chunk_lock; 704 u64 free_chunk_space; 705 706 struct extent_io_tree freed_extents[2]; 707 struct extent_io_tree *pinned_extents; 708 709 /* logical->physical extent mapping */ 710 struct btrfs_mapping_tree mapping_tree; 711 712 /* 713 * block reservation for extent, checksum, root tree and 714 * delayed dir index item 715 */ 716 struct btrfs_block_rsv global_block_rsv; 717 /* block reservation for delay allocation */ 718 struct btrfs_block_rsv delalloc_block_rsv; 719 /* block reservation for metadata operations */ 720 struct btrfs_block_rsv trans_block_rsv; 721 /* block reservation for chunk tree */ 722 struct btrfs_block_rsv chunk_block_rsv; 723 /* block 
reservation for delayed operations */ 724 struct btrfs_block_rsv delayed_block_rsv; 725 726 struct btrfs_block_rsv empty_block_rsv; 727 728 u64 generation; 729 u64 last_trans_committed; 730 u64 avg_delayed_ref_runtime; 731 732 /* 733 * this is updated to the current trans every time a full commit 734 * is required instead of the faster short fsync log commits 735 */ 736 u64 last_trans_log_full_commit; 737 unsigned long mount_opt; 738 /* 739 * Track requests for actions that need to be done during transaction 740 * commit (like for some mount options). 741 */ 742 unsigned long pending_changes; 743 unsigned long compress_type:4; 744 int commit_interval; 745 /* 746 * It is a suggestive number, the read side is safe even it gets a 747 * wrong number because we will write out the data into a regular 748 * extent. The write side(mount/remount) is under ->s_umount lock, 749 * so it is also safe. 750 */ 751 u64 max_inline; 752 /* 753 * Protected by ->chunk_mutex and sb->s_umount. 754 * 755 * The reason that we use two lock to protect it is because only 756 * remount and mount operations can change it and these two operations 757 * are under sb->s_umount, but the read side (chunk allocation) can not 758 * acquire sb->s_umount or the deadlock would happen. So we use two 759 * locks to protect it. On the write side, we must acquire two locks, 760 * and on the read side, we just need acquire one of them. 761 */ 762 u64 alloc_start; 763 struct btrfs_transaction *running_transaction; 764 wait_queue_head_t transaction_throttle; 765 wait_queue_head_t transaction_wait; 766 wait_queue_head_t transaction_blocked_wait; 767 wait_queue_head_t async_submit_wait; 768 769 /* 770 * Used to protect the incompat_flags, compat_flags, compat_ro_flags 771 * when they are updated. 772 * 773 * Because we do not clear the flags for ever, so we needn't use 774 * the lock on the read side. 775 * 776 * We also needn't use the lock when we mount the fs, because 777 * there is no other task which will update the flag. 778 */ 779 spinlock_t super_lock; 780 struct btrfs_super_block *super_copy; 781 struct btrfs_super_block *super_for_commit; 782 struct block_device *__bdev; 783 struct super_block *sb; 784 struct inode *btree_inode; 785 struct backing_dev_info bdi; 786 struct mutex tree_log_mutex; 787 struct mutex transaction_kthread_mutex; 788 struct mutex cleaner_mutex; 789 struct mutex chunk_mutex; 790 struct mutex volume_mutex; 791 792 /* 793 * this is taken to make sure we don't set block groups ro after 794 * the free space cache has been allocated on them 795 */ 796 struct mutex ro_block_group_mutex; 797 798 /* this is used during read/modify/write to make sure 799 * no two ios are trying to mod the same stripe at the same 800 * time 801 */ 802 struct btrfs_stripe_hash_table *stripe_hash_table; 803 804 /* 805 * this protects the ordered operations list only while we are 806 * processing all of the entries on it. This way we make 807 * sure the commit code doesn't find the list temporarily empty 808 * because another function happens to be doing non-waiting preflush 809 * before jumping into the main commit. 
810 */ 811 struct mutex ordered_operations_mutex; 812 813 struct rw_semaphore commit_root_sem; 814 815 struct rw_semaphore cleanup_work_sem; 816 817 struct rw_semaphore subvol_sem; 818 struct srcu_struct subvol_srcu; 819 820 spinlock_t trans_lock; 821 /* 822 * the reloc mutex goes with the trans lock, it is taken 823 * during commit to protect us from the relocation code 824 */ 825 struct mutex reloc_mutex; 826 827 struct list_head trans_list; 828 struct list_head dead_roots; 829 struct list_head caching_block_groups; 830 831 spinlock_t delayed_iput_lock; 832 struct list_head delayed_iputs; 833 struct mutex cleaner_delayed_iput_mutex; 834 835 /* this protects tree_mod_seq_list */ 836 spinlock_t tree_mod_seq_lock; 837 atomic64_t tree_mod_seq; 838 struct list_head tree_mod_seq_list; 839 840 /* this protects tree_mod_log */ 841 rwlock_t tree_mod_log_lock; 842 struct rb_root tree_mod_log; 843 844 atomic_t nr_async_submits; 845 atomic_t async_submit_draining; 846 atomic_t nr_async_bios; 847 atomic_t async_delalloc_pages; 848 atomic_t open_ioctl_trans; 849 850 /* 851 * this is used to protect the following list -- ordered_roots. 852 */ 853 spinlock_t ordered_root_lock; 854 855 /* 856 * all fs/file tree roots in which there are data=ordered extents 857 * pending writeback are added into this list. 858 * 859 * these can span multiple transactions and basically include 860 * every dirty data page that isn't from nodatacow 861 */ 862 struct list_head ordered_roots; 863 864 struct mutex delalloc_root_mutex; 865 spinlock_t delalloc_root_lock; 866 /* all fs/file tree roots that have delalloc inodes. */ 867 struct list_head delalloc_roots; 868 869 /* 870 * there is a pool of worker threads for checksumming during writes 871 * and a pool for checksumming after reads. This is because readers 872 * can run with FS locks held, and the writers may be waiting for 873 * those locks. We don't want ordering in the pending list to cause 874 * deadlocks, and so the two are serviced separately. 875 * 876 * A third pool does submit_bio to avoid deadlocking with the other 877 * two 878 */ 879 struct btrfs_workqueue *workers; 880 struct btrfs_workqueue *delalloc_workers; 881 struct btrfs_workqueue *flush_workers; 882 struct btrfs_workqueue *endio_workers; 883 struct btrfs_workqueue *endio_meta_workers; 884 struct btrfs_workqueue *endio_raid56_workers; 885 struct btrfs_workqueue *endio_repair_workers; 886 struct btrfs_workqueue *rmw_workers; 887 struct btrfs_workqueue *endio_meta_write_workers; 888 struct btrfs_workqueue *endio_write_workers; 889 struct btrfs_workqueue *endio_freespace_worker; 890 struct btrfs_workqueue *submit_workers; 891 struct btrfs_workqueue *caching_workers; 892 struct btrfs_workqueue *readahead_workers; 893 894 /* 895 * fixup workers take dirty pages that didn't properly go through 896 * the cow mechanism and make them safe to write. 
It happens 897 * for the sys_munmap function call path 898 */ 899 struct btrfs_workqueue *fixup_workers; 900 struct btrfs_workqueue *delayed_workers; 901 902 /* the extent workers do delayed refs on the extent allocation tree */ 903 struct btrfs_workqueue *extent_workers; 904 struct task_struct *transaction_kthread; 905 struct task_struct *cleaner_kthread; 906 int thread_pool_size; 907 908 struct kobject *space_info_kobj; 909 int do_barriers; 910 int closing; 911 int log_root_recovering; 912 int open; 913 914 u64 total_pinned; 915 916 /* used to keep from writing metadata until there is a nice batch */ 917 struct percpu_counter dirty_metadata_bytes; 918 struct percpu_counter delalloc_bytes; 919 s32 dirty_metadata_batch; 920 s32 delalloc_batch; 921 922 struct list_head dirty_cowonly_roots; 923 924 struct btrfs_fs_devices *fs_devices; 925 926 /* 927 * the space_info list is almost entirely read only. It only changes 928 * when we add a new raid type to the FS, and that happens 929 * very rarely. RCU is used to protect it. 930 */ 931 struct list_head space_info; 932 933 struct btrfs_space_info *data_sinfo; 934 935 struct reloc_control *reloc_ctl; 936 937 /* data_alloc_cluster is only used in ssd mode */ 938 struct btrfs_free_cluster data_alloc_cluster; 939 940 /* all metadata allocations go through this cluster */ 941 struct btrfs_free_cluster meta_alloc_cluster; 942 943 /* auto defrag inodes go here */ 944 spinlock_t defrag_inodes_lock; 945 struct rb_root defrag_inodes; 946 atomic_t defrag_running; 947 948 /* Used to protect avail_{data, metadata, system}_alloc_bits */ 949 seqlock_t profiles_lock; 950 /* 951 * these three are in extended format (availability of single 952 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other 953 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits) 954 */ 955 u64 avail_data_alloc_bits; 956 u64 avail_metadata_alloc_bits; 957 u64 avail_system_alloc_bits; 958 959 /* restriper state */ 960 spinlock_t balance_lock; 961 struct mutex balance_mutex; 962 atomic_t balance_running; 963 atomic_t balance_pause_req; 964 atomic_t balance_cancel_req; 965 struct btrfs_balance_control *balance_ctl; 966 wait_queue_head_t balance_wait_q; 967 968 unsigned data_chunk_allocations; 969 unsigned metadata_ratio; 970 971 void *bdev_holder; 972 973 /* private scrub information */ 974 struct mutex scrub_lock; 975 atomic_t scrubs_running; 976 atomic_t scrub_pause_req; 977 atomic_t scrubs_paused; 978 atomic_t scrub_cancel_req; 979 wait_queue_head_t scrub_pause_wait; 980 int scrub_workers_refcnt; 981 struct btrfs_workqueue *scrub_workers; 982 struct btrfs_workqueue *scrub_wr_completion_workers; 983 struct btrfs_workqueue *scrub_nocow_workers; 984 struct btrfs_workqueue *scrub_parity_workers; 985 986 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 987 u32 check_integrity_print_mask; 988 #endif 989 /* 990 * quota information 991 */ 992 unsigned int quota_enabled:1; 993 994 /* 995 * quota_enabled only changes state after a commit. This holds the 996 * next state. 997 */ 998 unsigned int pending_quota_state:1; 999 1000 /* is qgroup tracking in a consistent state? */ 1001 u64 qgroup_flags; 1002 1003 /* holds configuration and tracking. Protected by qgroup_lock */ 1004 struct rb_root qgroup_tree; 1005 struct rb_root qgroup_op_tree; 1006 spinlock_t qgroup_lock; 1007 spinlock_t qgroup_op_lock; 1008 atomic_t qgroup_op_seq; 1009 1010 /* 1011 * used to avoid frequently calling ulist_alloc()/ulist_free() 1012 * when doing qgroup accounting, it must be protected by qgroup_lock. 
1013 */ 1014 struct ulist *qgroup_ulist; 1015 1016 /* protect user change for quota operations */ 1017 struct mutex qgroup_ioctl_lock; 1018 1019 /* list of dirty qgroups to be written at next commit */ 1020 struct list_head dirty_qgroups; 1021 1022 /* used by qgroup for an efficient tree traversal */ 1023 u64 qgroup_seq; 1024 1025 /* qgroup rescan items */ 1026 struct mutex qgroup_rescan_lock; /* protects the progress item */ 1027 struct btrfs_key qgroup_rescan_progress; 1028 struct btrfs_workqueue *qgroup_rescan_workers; 1029 struct completion qgroup_rescan_completion; 1030 struct btrfs_work qgroup_rescan_work; 1031 1032 /* filesystem state */ 1033 unsigned long fs_state; 1034 1035 struct btrfs_delayed_root *delayed_root; 1036 1037 /* readahead tree */ 1038 spinlock_t reada_lock; 1039 struct radix_tree_root reada_tree; 1040 1041 /* readahead works cnt */ 1042 atomic_t reada_works_cnt; 1043 1044 /* Extent buffer radix tree */ 1045 spinlock_t buffer_lock; 1046 struct radix_tree_root buffer_radix; 1047 1048 /* next backup root to be overwritten */ 1049 int backup_root_index; 1050 1051 int num_tolerated_disk_barrier_failures; 1052 1053 /* device replace state */ 1054 struct btrfs_dev_replace dev_replace; 1055 1056 atomic_t mutually_exclusive_operation_running; 1057 1058 struct percpu_counter bio_counter; 1059 wait_queue_head_t replace_wait; 1060 1061 struct semaphore uuid_tree_rescan_sem; 1062 unsigned int update_uuid_tree_gen:1; 1063 1064 /* Used to reclaim the metadata space in the background. */ 1065 struct work_struct async_reclaim_work; 1066 1067 spinlock_t unused_bgs_lock; 1068 struct list_head unused_bgs; 1069 struct mutex unused_bg_unpin_mutex; 1070 struct mutex delete_unused_bgs_mutex; 1071 1072 /* For btrfs to record security options */ 1073 struct security_mnt_opts security_opts; 1074 1075 /* 1076 * Chunks that can't be freed yet (under a trim/discard operation) 1077 * and will be latter freed. Protected by fs_info->chunk_mutex. 1078 */ 1079 struct list_head pinned_chunks; 1080 1081 int creating_free_space_tree; 1082 }; 1083 1084 struct btrfs_subvolume_writers { 1085 struct percpu_counter counter; 1086 wait_queue_head_t wait; 1087 }; 1088 1089 /* 1090 * The state of btrfs root 1091 */ 1092 /* 1093 * btrfs_record_root_in_trans is a multi-step process, 1094 * and it can race with the balancing code. But the 1095 * race is very small, and only the first time the root 1096 * is added to each transaction. So IN_TRANS_SETUP 1097 * is used to tell us when more checks are required 1098 */ 1099 #define BTRFS_ROOT_IN_TRANS_SETUP 0 1100 #define BTRFS_ROOT_REF_COWS 1 1101 #define BTRFS_ROOT_TRACK_DIRTY 2 1102 #define BTRFS_ROOT_IN_RADIX 3 1103 #define BTRFS_ROOT_ORPHAN_ITEM_INSERTED 4 1104 #define BTRFS_ROOT_DEFRAG_RUNNING 5 1105 #define BTRFS_ROOT_FORCE_COW 6 1106 #define BTRFS_ROOT_MULTI_LOG_TASKS 7 1107 #define BTRFS_ROOT_DIRTY 8 1108 1109 /* 1110 * in ram representation of the tree. extent_root is used for all allocations 1111 * and for the extent tree extent_root root. 
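 *
 * Each root is looked up in the tree of tree roots by its root_key; for
 * example the default subvolume (the fs tree) is found with a key roughly
 * like this (illustrative sketch only):
 *
 *	struct btrfs_key key = {
 *		.objectid = BTRFS_FS_TREE_OBJECTID,
 *		.type = BTRFS_ROOT_ITEM_KEY,
 *		.offset = 0,
 *	};
 *
 * while snapshots and other subvolumes use their own objectids.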
1112 */ 1113 struct btrfs_root { 1114 struct extent_buffer *node; 1115 1116 struct extent_buffer *commit_root; 1117 struct btrfs_root *log_root; 1118 struct btrfs_root *reloc_root; 1119 1120 unsigned long state; 1121 struct btrfs_root_item root_item; 1122 struct btrfs_key root_key; 1123 struct btrfs_fs_info *fs_info; 1124 struct extent_io_tree dirty_log_pages; 1125 1126 struct mutex objectid_mutex; 1127 1128 spinlock_t accounting_lock; 1129 struct btrfs_block_rsv *block_rsv; 1130 1131 /* free ino cache stuff */ 1132 struct btrfs_free_space_ctl *free_ino_ctl; 1133 enum btrfs_caching_type ino_cache_state; 1134 spinlock_t ino_cache_lock; 1135 wait_queue_head_t ino_cache_wait; 1136 struct btrfs_free_space_ctl *free_ino_pinned; 1137 u64 ino_cache_progress; 1138 struct inode *ino_cache_inode; 1139 1140 struct mutex log_mutex; 1141 wait_queue_head_t log_writer_wait; 1142 wait_queue_head_t log_commit_wait[2]; 1143 struct list_head log_ctxs[2]; 1144 atomic_t log_writers; 1145 atomic_t log_commit[2]; 1146 atomic_t log_batch; 1147 int log_transid; 1148 /* No matter the commit succeeds or not*/ 1149 int log_transid_committed; 1150 /* Just be updated when the commit succeeds. */ 1151 int last_log_commit; 1152 pid_t log_start_pid; 1153 1154 u64 objectid; 1155 u64 last_trans; 1156 1157 /* data allocations are done in sectorsize units */ 1158 u32 sectorsize; 1159 1160 /* node allocations are done in nodesize units */ 1161 u32 nodesize; 1162 1163 u32 stripesize; 1164 1165 u32 type; 1166 1167 u64 highest_objectid; 1168 1169 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 1170 /* only used with CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */ 1171 u64 alloc_bytenr; 1172 #endif 1173 1174 u64 defrag_trans_start; 1175 struct btrfs_key defrag_progress; 1176 struct btrfs_key defrag_max; 1177 char *name; 1178 1179 /* the dirty list is only used by non-reference counted roots */ 1180 struct list_head dirty_list; 1181 1182 struct list_head root_list; 1183 1184 spinlock_t log_extents_lock[2]; 1185 struct list_head logged_list[2]; 1186 1187 spinlock_t orphan_lock; 1188 atomic_t orphan_inodes; 1189 struct btrfs_block_rsv *orphan_block_rsv; 1190 int orphan_cleanup_state; 1191 1192 spinlock_t inode_lock; 1193 /* red-black tree that keeps track of in-memory inodes */ 1194 struct rb_root inode_tree; 1195 1196 /* 1197 * radix tree that keeps track of delayed nodes of every inode, 1198 * protected by inode_lock 1199 */ 1200 struct radix_tree_root delayed_nodes_tree; 1201 /* 1202 * right now this just gets used so that a root has its own devid 1203 * for stat. It may be used for more later 1204 */ 1205 dev_t anon_dev; 1206 1207 spinlock_t root_item_lock; 1208 atomic_t refs; 1209 1210 struct mutex delalloc_mutex; 1211 spinlock_t delalloc_lock; 1212 /* 1213 * all of the inodes that have delalloc bytes. It is possible for 1214 * this list to be empty even when there is still dirty data=ordered 1215 * extents waiting to finish IO. 
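 *
 * For example, flushing delalloc walks this list under delalloc_lock,
 * roughly like this (illustrative sketch; each struct btrfs_inode, defined
 * in btrfs_inode.h, links in through its own delalloc_inodes list head):
 *
 *	struct btrfs_inode *binode;
 *
 *	spin_lock(&root->delalloc_lock);
 *	list_for_each_entry(binode, &root->delalloc_inodes, delalloc_inodes)
 *		nr++;
 *	spin_unlock(&root->delalloc_lock);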
1216 */ 1217 struct list_head delalloc_inodes; 1218 struct list_head delalloc_root; 1219 u64 nr_delalloc_inodes; 1220 1221 struct mutex ordered_extent_mutex; 1222 /* 1223 * this is used by the balancing code to wait for all the pending 1224 * ordered extents 1225 */ 1226 spinlock_t ordered_extent_lock; 1227 1228 /* 1229 * all of the data=ordered extents pending writeback 1230 * these can span multiple transactions and basically include 1231 * every dirty data page that isn't from nodatacow 1232 */ 1233 struct list_head ordered_extents; 1234 struct list_head ordered_root; 1235 u64 nr_ordered_extents; 1236 1237 /* 1238 * Number of currently running SEND ioctls to prevent 1239 * manipulation with the read-only status via SUBVOL_SETFLAGS 1240 */ 1241 int send_in_progress; 1242 struct btrfs_subvolume_writers *subv_writers; 1243 atomic_t will_be_snapshoted; 1244 1245 /* For qgroup metadata space reserve */ 1246 atomic_t qgroup_meta_rsv; 1247 }; 1248 1249 static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize) 1250 { 1251 return blocksize - sizeof(struct btrfs_header); 1252 } 1253 1254 static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_root *root) 1255 { 1256 return __BTRFS_LEAF_DATA_SIZE(root->nodesize); 1257 } 1258 1259 static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_root *root) 1260 { 1261 return BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item); 1262 } 1263 1264 static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_root *root) 1265 { 1266 return BTRFS_LEAF_DATA_SIZE(root) / sizeof(struct btrfs_key_ptr); 1267 } 1268 1269 #define BTRFS_FILE_EXTENT_INLINE_DATA_START \ 1270 (offsetof(struct btrfs_file_extent_item, disk_bytenr)) 1271 static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_root *root) 1272 { 1273 return BTRFS_MAX_ITEM_SIZE(root) - 1274 BTRFS_FILE_EXTENT_INLINE_DATA_START; 1275 } 1276 1277 static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root) 1278 { 1279 return BTRFS_MAX_ITEM_SIZE(root) - sizeof(struct btrfs_dir_item); 1280 } 1281 1282 /* 1283 * Flags for mount options. 
1284 * 1285 * Note: don't forget to add new options to btrfs_show_options() 1286 */ 1287 #define BTRFS_MOUNT_NODATASUM (1 << 0) 1288 #define BTRFS_MOUNT_NODATACOW (1 << 1) 1289 #define BTRFS_MOUNT_NOBARRIER (1 << 2) 1290 #define BTRFS_MOUNT_SSD (1 << 3) 1291 #define BTRFS_MOUNT_DEGRADED (1 << 4) 1292 #define BTRFS_MOUNT_COMPRESS (1 << 5) 1293 #define BTRFS_MOUNT_NOTREELOG (1 << 6) 1294 #define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7) 1295 #define BTRFS_MOUNT_SSD_SPREAD (1 << 8) 1296 #define BTRFS_MOUNT_NOSSD (1 << 9) 1297 #define BTRFS_MOUNT_DISCARD (1 << 10) 1298 #define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) 1299 #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) 1300 #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) 1301 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 1302 #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) 1303 #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) 1304 #define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) 1305 #define BTRFS_MOUNT_USEBACKUPROOT (1 << 18) 1306 #define BTRFS_MOUNT_SKIP_BALANCE (1 << 19) 1307 #define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20) 1308 #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21) 1309 #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22) 1310 #define BTRFS_MOUNT_RESCAN_UUID_TREE (1 << 23) 1311 #define BTRFS_MOUNT_FRAGMENT_DATA (1 << 24) 1312 #define BTRFS_MOUNT_FRAGMENT_METADATA (1 << 25) 1313 #define BTRFS_MOUNT_FREE_SPACE_TREE (1 << 26) 1314 #define BTRFS_MOUNT_NOLOGREPLAY (1 << 27) 1315 1316 #define BTRFS_DEFAULT_COMMIT_INTERVAL (30) 1317 #define BTRFS_DEFAULT_MAX_INLINE (2048) 1318 1319 #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 1320 #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 1321 #define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt) 1322 #define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \ 1323 BTRFS_MOUNT_##opt) 1324 1325 #define btrfs_set_and_info(fs_info, opt, fmt, args...) \ 1326 { \ 1327 if (!btrfs_test_opt(fs_info, opt)) \ 1328 btrfs_info(fs_info, fmt, ##args); \ 1329 btrfs_set_opt(fs_info->mount_opt, opt); \ 1330 } 1331 1332 #define btrfs_clear_and_info(fs_info, opt, fmt, args...) \ 1333 { \ 1334 if (btrfs_test_opt(fs_info, opt)) \ 1335 btrfs_info(fs_info, fmt, ##args); \ 1336 btrfs_clear_opt(fs_info->mount_opt, opt); \ 1337 } 1338 1339 #ifdef CONFIG_BTRFS_DEBUG 1340 static inline int 1341 btrfs_should_fragment_free_space(struct btrfs_root *root, 1342 struct btrfs_block_group_cache *block_group) 1343 { 1344 return (btrfs_test_opt(root->fs_info, FRAGMENT_METADATA) && 1345 block_group->flags & BTRFS_BLOCK_GROUP_METADATA) || 1346 (btrfs_test_opt(root->fs_info, FRAGMENT_DATA) && 1347 block_group->flags & BTRFS_BLOCK_GROUP_DATA); 1348 } 1349 #endif 1350 1351 /* 1352 * Requests for changes that need to be done during transaction commit. 1353 * 1354 * Internal mount options that are used for special handling of the real 1355 * mount options (eg. cannot be set during remount and have to be set during 1356 * transaction commit) 1357 */ 1358 1359 #define BTRFS_PENDING_SET_INODE_MAP_CACHE (0) 1360 #define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE (1) 1361 #define BTRFS_PENDING_COMMIT (2) 1362 1363 #define btrfs_test_pending(info, opt) \ 1364 test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 1365 #define btrfs_set_pending(info, opt) \ 1366 set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 1367 #define btrfs_clear_pending(info, opt) \ 1368 clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 1369 1370 /* 1371 * Helpers for setting pending mount option changes. 
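 *
 * For example, turning on the inode map cache at remount is queued for the
 * next transaction commit roughly like this (illustrative sketch):
 *
 *	btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
 *				   "enabling inode map caching");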
1372 * 1373 * Expects corresponding macros 1374 * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name 1375 */ 1376 #define btrfs_set_pending_and_info(info, opt, fmt, args...) \ 1377 do { \ 1378 if (!btrfs_raw_test_opt((info)->mount_opt, opt)) { \ 1379 btrfs_info((info), fmt, ##args); \ 1380 btrfs_set_pending((info), SET_##opt); \ 1381 btrfs_clear_pending((info), CLEAR_##opt); \ 1382 } \ 1383 } while(0) 1384 1385 #define btrfs_clear_pending_and_info(info, opt, fmt, args...) \ 1386 do { \ 1387 if (btrfs_raw_test_opt((info)->mount_opt, opt)) { \ 1388 btrfs_info((info), fmt, ##args); \ 1389 btrfs_set_pending((info), CLEAR_##opt); \ 1390 btrfs_clear_pending((info), SET_##opt); \ 1391 } \ 1392 } while(0) 1393 1394 /* 1395 * Inode flags 1396 */ 1397 #define BTRFS_INODE_NODATASUM (1 << 0) 1398 #define BTRFS_INODE_NODATACOW (1 << 1) 1399 #define BTRFS_INODE_READONLY (1 << 2) 1400 #define BTRFS_INODE_NOCOMPRESS (1 << 3) 1401 #define BTRFS_INODE_PREALLOC (1 << 4) 1402 #define BTRFS_INODE_SYNC (1 << 5) 1403 #define BTRFS_INODE_IMMUTABLE (1 << 6) 1404 #define BTRFS_INODE_APPEND (1 << 7) 1405 #define BTRFS_INODE_NODUMP (1 << 8) 1406 #define BTRFS_INODE_NOATIME (1 << 9) 1407 #define BTRFS_INODE_DIRSYNC (1 << 10) 1408 #define BTRFS_INODE_COMPRESS (1 << 11) 1409 1410 #define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) 1411 1412 struct btrfs_map_token { 1413 struct extent_buffer *eb; 1414 char *kaddr; 1415 unsigned long offset; 1416 }; 1417 1418 #define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \ 1419 ((bytes) >> (fs_info)->sb->s_blocksize_bits) 1420 1421 static inline void btrfs_init_map_token (struct btrfs_map_token *token) 1422 { 1423 token->kaddr = NULL; 1424 } 1425 1426 /* some macros to generate set/get functions for the struct fields. This 1427 * assumes there is a lefoo_to_cpu for every type, so lets make a simple 1428 * one for u8: 1429 */ 1430 #define le8_to_cpu(v) (v) 1431 #define cpu_to_le8(v) (v) 1432 #define __le8 u8 1433 1434 #define read_eb_member(eb, ptr, type, member, result) ( \ 1435 read_extent_buffer(eb, (char *)(result), \ 1436 ((unsigned long)(ptr)) + \ 1437 offsetof(type, member), \ 1438 sizeof(((type *)0)->member))) 1439 1440 #define write_eb_member(eb, ptr, type, member, result) ( \ 1441 write_extent_buffer(eb, (char *)(result), \ 1442 ((unsigned long)(ptr)) + \ 1443 offsetof(type, member), \ 1444 sizeof(((type *)0)->member))) 1445 1446 #define DECLARE_BTRFS_SETGET_BITS(bits) \ 1447 u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ 1448 unsigned long off, \ 1449 struct btrfs_map_token *token); \ 1450 void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \ 1451 unsigned long off, u##bits val, \ 1452 struct btrfs_map_token *token); \ 1453 static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \ 1454 unsigned long off) \ 1455 { \ 1456 return btrfs_get_token_##bits(eb, ptr, off, NULL); \ 1457 } \ 1458 static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ 1459 unsigned long off, u##bits val) \ 1460 { \ 1461 btrfs_set_token_##bits(eb, ptr, off, val, NULL); \ 1462 } 1463 1464 DECLARE_BTRFS_SETGET_BITS(8) 1465 DECLARE_BTRFS_SETGET_BITS(16) 1466 DECLARE_BTRFS_SETGET_BITS(32) 1467 DECLARE_BTRFS_SETGET_BITS(64) 1468 1469 #define BTRFS_SETGET_FUNCS(name, type, member, bits) \ 1470 static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \ 1471 { \ 1472 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 1473 return btrfs_get_##bits(eb, s, offsetof(type, member)); \ 1474 } \ 1475 static inline void 
btrfs_set_##name(struct extent_buffer *eb, type *s, \ 1476 u##bits val) \ 1477 { \ 1478 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 1479 btrfs_set_##bits(eb, s, offsetof(type, member), val); \ 1480 } \ 1481 static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \ 1482 struct btrfs_map_token *token) \ 1483 { \ 1484 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 1485 return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \ 1486 } \ 1487 static inline void btrfs_set_token_##name(struct extent_buffer *eb, \ 1488 type *s, u##bits val, \ 1489 struct btrfs_map_token *token) \ 1490 { \ 1491 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 1492 btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \ 1493 } 1494 1495 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ 1496 static inline u##bits btrfs_##name(struct extent_buffer *eb) \ 1497 { \ 1498 type *p = page_address(eb->pages[0]); \ 1499 u##bits res = le##bits##_to_cpu(p->member); \ 1500 return res; \ 1501 } \ 1502 static inline void btrfs_set_##name(struct extent_buffer *eb, \ 1503 u##bits val) \ 1504 { \ 1505 type *p = page_address(eb->pages[0]); \ 1506 p->member = cpu_to_le##bits(val); \ 1507 } 1508 1509 #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ 1510 static inline u##bits btrfs_##name(type *s) \ 1511 { \ 1512 return le##bits##_to_cpu(s->member); \ 1513 } \ 1514 static inline void btrfs_set_##name(type *s, u##bits val) \ 1515 { \ 1516 s->member = cpu_to_le##bits(val); \ 1517 } 1518 1519 BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64); 1520 BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64); 1521 BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64); 1522 BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32); 1523 BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32); 1524 BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item, 1525 start_offset, 64); 1526 BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32); 1527 BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64); 1528 BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32); 1529 BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8); 1530 BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8); 1531 BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64); 1532 1533 BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64); 1534 BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item, 1535 total_bytes, 64); 1536 BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item, 1537 bytes_used, 64); 1538 BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item, 1539 io_align, 32); 1540 BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item, 1541 io_width, 32); 1542 BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item, 1543 sector_size, 32); 1544 BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64); 1545 BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item, 1546 dev_group, 32); 1547 BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item, 1548 seek_speed, 8); 1549 BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item, 1550 bandwidth, 8); 1551 
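/*
 * Example (illustrative sketch): the BTRFS_SETGET_FUNCS() and
 * BTRFS_SETGET_STACK_FUNCS() invocations above expand into small
 * endian-safe accessors.  The device item declarations, for instance,
 * provide helpers used roughly like this, assuming 'leaf' is an extent
 * buffer and 'dev_item' points at a dev item inside it:
 *
 *	u64 devid = btrfs_device_id(leaf, dev_item);
 *	btrfs_set_device_bytes_used(leaf, dev_item, bytes);
 *
 * while the _STACK_ variants operate on a plain in-memory copy:
 *
 *	struct btrfs_dev_item di;
 *
 *	btrfs_set_stack_device_id(&di, devid);
 */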
BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item, 1552 generation, 64); 1553 1554 static inline unsigned long btrfs_device_uuid(struct btrfs_dev_item *d) 1555 { 1556 return (unsigned long)d + offsetof(struct btrfs_dev_item, uuid); 1557 } 1558 1559 static inline unsigned long btrfs_device_fsid(struct btrfs_dev_item *d) 1560 { 1561 return (unsigned long)d + offsetof(struct btrfs_dev_item, fsid); 1562 } 1563 1564 BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64); 1565 BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64); 1566 BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64); 1567 BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32); 1568 BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32); 1569 BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32); 1570 BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64); 1571 BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16); 1572 BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16); 1573 BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64); 1574 BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64); 1575 1576 static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s) 1577 { 1578 return (char *)s + offsetof(struct btrfs_stripe, dev_uuid); 1579 } 1580 1581 BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64); 1582 BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64); 1583 BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk, 1584 stripe_len, 64); 1585 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk, 1586 io_align, 32); 1587 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk, 1588 io_width, 32); 1589 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk, 1590 sector_size, 32); 1591 BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64); 1592 BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk, 1593 num_stripes, 16); 1594 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk, 1595 sub_stripes, 16); 1596 BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64); 1597 BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64); 1598 1599 static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, 1600 int nr) 1601 { 1602 unsigned long offset = (unsigned long)c; 1603 offset += offsetof(struct btrfs_chunk, stripe); 1604 offset += nr * sizeof(struct btrfs_stripe); 1605 return (struct btrfs_stripe *)offset; 1606 } 1607 1608 static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr) 1609 { 1610 return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); 1611 } 1612 1613 static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, 1614 struct btrfs_chunk *c, int nr) 1615 { 1616 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); 1617 } 1618 1619 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, 1620 struct btrfs_chunk *c, int nr) 1621 { 1622 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); 1623 } 1624 1625 /* struct btrfs_block_group_item */ 1626 BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, 1627 used, 64); 1628 BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, 1629 used, 64); 1630 BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid, 1631 struct 
btrfs_block_group_item, chunk_objectid, 64); 1632 1633 BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, 1634 struct btrfs_block_group_item, chunk_objectid, 64); 1635 BTRFS_SETGET_FUNCS(disk_block_group_flags, 1636 struct btrfs_block_group_item, flags, 64); 1637 BTRFS_SETGET_STACK_FUNCS(block_group_flags, 1638 struct btrfs_block_group_item, flags, 64); 1639 1640 /* struct btrfs_free_space_info */ 1641 BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info, 1642 extent_count, 32); 1643 BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32); 1644 1645 /* struct btrfs_inode_ref */ 1646 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); 1647 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64); 1648 1649 /* struct btrfs_inode_extref */ 1650 BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref, 1651 parent_objectid, 64); 1652 BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref, 1653 name_len, 16); 1654 BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64); 1655 1656 /* struct btrfs_inode_item */ 1657 BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64); 1658 BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64); 1659 BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64); 1660 BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64); 1661 BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64); 1662 BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64); 1663 BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); 1664 BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); 1665 BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); 1666 BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); 1667 BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); 1668 BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64); 1669 BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, 1670 generation, 64); 1671 BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, 1672 sequence, 64); 1673 BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, 1674 transid, 64); 1675 BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); 1676 BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, 1677 nbytes, 64); 1678 BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, 1679 block_group, 64); 1680 BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); 1681 BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); 1682 BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); 1683 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); 1684 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); 1685 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); 1686 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); 1687 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); 1688 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); 1689 BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); 1690 1691 /* struct btrfs_dev_extent */ 1692 BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, 
1693 chunk_tree, 64); 1694 BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent, 1695 chunk_objectid, 64); 1696 BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, 1697 chunk_offset, 64); 1698 BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); 1699 1700 static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) 1701 { 1702 unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); 1703 return (unsigned long)dev + ptr; 1704 } 1705 1706 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); 1707 BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, 1708 generation, 64); 1709 BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); 1710 1711 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); 1712 1713 1714 BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); 1715 1716 static inline void btrfs_tree_block_key(struct extent_buffer *eb, 1717 struct btrfs_tree_block_info *item, 1718 struct btrfs_disk_key *key) 1719 { 1720 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 1721 } 1722 1723 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, 1724 struct btrfs_tree_block_info *item, 1725 struct btrfs_disk_key *key) 1726 { 1727 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 1728 } 1729 1730 BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref, 1731 root, 64); 1732 BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref, 1733 objectid, 64); 1734 BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref, 1735 offset, 64); 1736 BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, 1737 count, 32); 1738 1739 BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, 1740 count, 32); 1741 1742 BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref, 1743 type, 8); 1744 BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref, 1745 offset, 64); 1746 1747 static inline u32 btrfs_extent_inline_ref_size(int type) 1748 { 1749 if (type == BTRFS_TREE_BLOCK_REF_KEY || 1750 type == BTRFS_SHARED_BLOCK_REF_KEY) 1751 return sizeof(struct btrfs_extent_inline_ref); 1752 if (type == BTRFS_SHARED_DATA_REF_KEY) 1753 return sizeof(struct btrfs_shared_data_ref) + 1754 sizeof(struct btrfs_extent_inline_ref); 1755 if (type == BTRFS_EXTENT_DATA_REF_KEY) 1756 return sizeof(struct btrfs_extent_data_ref) + 1757 offsetof(struct btrfs_extent_inline_ref, offset); 1758 BUG(); 1759 return 0; 1760 } 1761 1762 BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); 1763 BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, 1764 generation, 64); 1765 BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); 1766 BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); 1767 1768 /* struct btrfs_node */ 1769 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); 1770 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64); 1771 BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, 1772 blockptr, 64); 1773 BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, 1774 generation, 64); 1775 1776 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) 1777 { 1778 unsigned long ptr; 1779 ptr = offsetof(struct btrfs_node, ptrs) + 1780 sizeof(struct 
btrfs_key_ptr) * nr; 1781 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); 1782 } 1783 1784 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, 1785 int nr, u64 val) 1786 { 1787 unsigned long ptr; 1788 ptr = offsetof(struct btrfs_node, ptrs) + 1789 sizeof(struct btrfs_key_ptr) * nr; 1790 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); 1791 } 1792 1793 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) 1794 { 1795 unsigned long ptr; 1796 ptr = offsetof(struct btrfs_node, ptrs) + 1797 sizeof(struct btrfs_key_ptr) * nr; 1798 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); 1799 } 1800 1801 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, 1802 int nr, u64 val) 1803 { 1804 unsigned long ptr; 1805 ptr = offsetof(struct btrfs_node, ptrs) + 1806 sizeof(struct btrfs_key_ptr) * nr; 1807 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); 1808 } 1809 1810 static inline unsigned long btrfs_node_key_ptr_offset(int nr) 1811 { 1812 return offsetof(struct btrfs_node, ptrs) + 1813 sizeof(struct btrfs_key_ptr) * nr; 1814 } 1815 1816 void btrfs_node_key(struct extent_buffer *eb, 1817 struct btrfs_disk_key *disk_key, int nr); 1818 1819 static inline void btrfs_set_node_key(struct extent_buffer *eb, 1820 struct btrfs_disk_key *disk_key, int nr) 1821 { 1822 unsigned long ptr; 1823 ptr = btrfs_node_key_ptr_offset(nr); 1824 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, 1825 struct btrfs_key_ptr, key, disk_key); 1826 } 1827 1828 /* struct btrfs_item */ 1829 BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32); 1830 BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32); 1831 BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32); 1832 BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32); 1833 1834 static inline unsigned long btrfs_item_nr_offset(int nr) 1835 { 1836 return offsetof(struct btrfs_leaf, items) + 1837 sizeof(struct btrfs_item) * nr; 1838 } 1839 1840 static inline struct btrfs_item *btrfs_item_nr(int nr) 1841 { 1842 return (struct btrfs_item *)btrfs_item_nr_offset(nr); 1843 } 1844 1845 static inline u32 btrfs_item_end(struct extent_buffer *eb, 1846 struct btrfs_item *item) 1847 { 1848 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); 1849 } 1850 1851 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) 1852 { 1853 return btrfs_item_end(eb, btrfs_item_nr(nr)); 1854 } 1855 1856 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) 1857 { 1858 return btrfs_item_offset(eb, btrfs_item_nr(nr)); 1859 } 1860 1861 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) 1862 { 1863 return btrfs_item_size(eb, btrfs_item_nr(nr)); 1864 } 1865 1866 static inline void btrfs_item_key(struct extent_buffer *eb, 1867 struct btrfs_disk_key *disk_key, int nr) 1868 { 1869 struct btrfs_item *item = btrfs_item_nr(nr); 1870 read_eb_member(eb, item, struct btrfs_item, key, disk_key); 1871 } 1872 1873 static inline void btrfs_set_item_key(struct extent_buffer *eb, 1874 struct btrfs_disk_key *disk_key, int nr) 1875 { 1876 struct btrfs_item *item = btrfs_item_nr(nr); 1877 write_eb_member(eb, item, struct btrfs_item, key, disk_key); 1878 } 1879 1880 BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64); 1881 1882 /* 1883 * struct btrfs_root_ref 1884 */ 1885 BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64); 1886 BTRFS_SETGET_FUNCS(root_ref_sequence, struct 
btrfs_root_ref, sequence, 64); 1887 BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16); 1888 1889 /* struct btrfs_dir_item */ 1890 BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16); 1891 BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8); 1892 BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16); 1893 BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64); 1894 BTRFS_SETGET_STACK_FUNCS(stack_dir_type, struct btrfs_dir_item, type, 8); 1895 BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item, 1896 data_len, 16); 1897 BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, 1898 name_len, 16); 1899 BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, 1900 transid, 64); 1901 1902 static inline void btrfs_dir_item_key(struct extent_buffer *eb, 1903 struct btrfs_dir_item *item, 1904 struct btrfs_disk_key *key) 1905 { 1906 read_eb_member(eb, item, struct btrfs_dir_item, location, key); 1907 } 1908 1909 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, 1910 struct btrfs_dir_item *item, 1911 struct btrfs_disk_key *key) 1912 { 1913 write_eb_member(eb, item, struct btrfs_dir_item, location, key); 1914 } 1915 1916 BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header, 1917 num_entries, 64); 1918 BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, 1919 num_bitmaps, 64); 1920 BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, 1921 generation, 64); 1922 1923 static inline void btrfs_free_space_key(struct extent_buffer *eb, 1924 struct btrfs_free_space_header *h, 1925 struct btrfs_disk_key *key) 1926 { 1927 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); 1928 } 1929 1930 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, 1931 struct btrfs_free_space_header *h, 1932 struct btrfs_disk_key *key) 1933 { 1934 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); 1935 } 1936 1937 /* struct btrfs_disk_key */ 1938 BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, 1939 objectid, 64); 1940 BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); 1941 BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); 1942 1943 static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, 1944 struct btrfs_disk_key *disk) 1945 { 1946 cpu->offset = le64_to_cpu(disk->offset); 1947 cpu->type = disk->type; 1948 cpu->objectid = le64_to_cpu(disk->objectid); 1949 } 1950 1951 static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk, 1952 struct btrfs_key *cpu) 1953 { 1954 disk->offset = cpu_to_le64(cpu->offset); 1955 disk->type = cpu->type; 1956 disk->objectid = cpu_to_le64(cpu->objectid); 1957 } 1958 1959 static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, 1960 struct btrfs_key *key, int nr) 1961 { 1962 struct btrfs_disk_key disk_key; 1963 btrfs_node_key(eb, &disk_key, nr); 1964 btrfs_disk_key_to_cpu(key, &disk_key); 1965 } 1966 1967 static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, 1968 struct btrfs_key *key, int nr) 1969 { 1970 struct btrfs_disk_key disk_key; 1971 btrfs_item_key(eb, &disk_key, nr); 1972 btrfs_disk_key_to_cpu(key, &disk_key); 1973 } 1974 1975 static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, 1976 struct btrfs_dir_item *item, 1977 struct btrfs_key *key) 1978 { 1979 struct btrfs_disk_key disk_key; 1980 btrfs_dir_item_key(eb, item, &disk_key); 
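	/* the on-disk location key is little-endian; hand back a CPU-order key */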
1981 btrfs_disk_key_to_cpu(key, &disk_key); 1982 } 1983 1984 1985 static inline u8 btrfs_key_type(struct btrfs_key *key) 1986 { 1987 return key->type; 1988 } 1989 1990 static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val) 1991 { 1992 key->type = val; 1993 } 1994 1995 /* struct btrfs_header */ 1996 BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); 1997 BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, 1998 generation, 64); 1999 BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64); 2000 BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32); 2001 BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64); 2002 BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8); 2003 BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header, 2004 generation, 64); 2005 BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64); 2006 BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, 2007 nritems, 32); 2008 BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64); 2009 2010 static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) 2011 { 2012 return (btrfs_header_flags(eb) & flag) == flag; 2013 } 2014 2015 static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) 2016 { 2017 u64 flags = btrfs_header_flags(eb); 2018 btrfs_set_header_flags(eb, flags | flag); 2019 return (flags & flag) == flag; 2020 } 2021 2022 static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) 2023 { 2024 u64 flags = btrfs_header_flags(eb); 2025 btrfs_set_header_flags(eb, flags & ~flag); 2026 return (flags & flag) == flag; 2027 } 2028 2029 static inline int btrfs_header_backref_rev(struct extent_buffer *eb) 2030 { 2031 u64 flags = btrfs_header_flags(eb); 2032 return flags >> BTRFS_BACKREF_REV_SHIFT; 2033 } 2034 2035 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, 2036 int rev) 2037 { 2038 u64 flags = btrfs_header_flags(eb); 2039 flags &= ~BTRFS_BACKREF_REV_MASK; 2040 flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT; 2041 btrfs_set_header_flags(eb, flags); 2042 } 2043 2044 static inline unsigned long btrfs_header_fsid(void) 2045 { 2046 return offsetof(struct btrfs_header, fsid); 2047 } 2048 2049 static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) 2050 { 2051 return offsetof(struct btrfs_header, chunk_tree_uuid); 2052 } 2053 2054 static inline int btrfs_is_leaf(struct extent_buffer *eb) 2055 { 2056 return btrfs_header_level(eb) == 0; 2057 } 2058 2059 /* struct btrfs_root_item */ 2060 BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item, 2061 generation, 64); 2062 BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32); 2063 BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64); 2064 BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8); 2065 2066 BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item, 2067 generation, 64); 2068 BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64); 2069 BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8); 2070 BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64); 2071 BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); 2072 BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64); 2073 BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, 
bytes_used, 64); 2074 BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); 2075 BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, 2076 last_snapshot, 64); 2077 BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item, 2078 generation_v2, 64); 2079 BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item, 2080 ctransid, 64); 2081 BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item, 2082 otransid, 64); 2083 BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, 2084 stransid, 64); 2085 BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, 2086 rtransid, 64); 2087 2088 static inline bool btrfs_root_readonly(struct btrfs_root *root) 2089 { 2090 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; 2091 } 2092 2093 static inline bool btrfs_root_dead(struct btrfs_root *root) 2094 { 2095 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; 2096 } 2097 2098 /* struct btrfs_root_backup */ 2099 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup, 2100 tree_root, 64); 2101 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup, 2102 tree_root_gen, 64); 2103 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup, 2104 tree_root_level, 8); 2105 2106 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup, 2107 chunk_root, 64); 2108 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup, 2109 chunk_root_gen, 64); 2110 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup, 2111 chunk_root_level, 8); 2112 2113 BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup, 2114 extent_root, 64); 2115 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup, 2116 extent_root_gen, 64); 2117 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup, 2118 extent_root_level, 8); 2119 2120 BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup, 2121 fs_root, 64); 2122 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup, 2123 fs_root_gen, 64); 2124 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup, 2125 fs_root_level, 8); 2126 2127 BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup, 2128 dev_root, 64); 2129 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup, 2130 dev_root_gen, 64); 2131 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup, 2132 dev_root_level, 8); 2133 2134 BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup, 2135 csum_root, 64); 2136 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup, 2137 csum_root_gen, 64); 2138 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup, 2139 csum_root_level, 8); 2140 BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup, 2141 total_bytes, 64); 2142 BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup, 2143 bytes_used, 64); 2144 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, 2145 num_devices, 64); 2146 2147 /* struct btrfs_balance_item */ 2148 BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); 2149 2150 static inline void btrfs_balance_data(struct extent_buffer *eb, 2151 struct btrfs_balance_item *bi, 2152 struct btrfs_disk_balance_args *ba) 2153 { 2154 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2155 } 2156 2157 static inline void 
btrfs_set_balance_data(struct extent_buffer *eb, 2158 struct btrfs_balance_item *bi, 2159 struct btrfs_disk_balance_args *ba) 2160 { 2161 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2162 } 2163 2164 static inline void btrfs_balance_meta(struct extent_buffer *eb, 2165 struct btrfs_balance_item *bi, 2166 struct btrfs_disk_balance_args *ba) 2167 { 2168 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2169 } 2170 2171 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, 2172 struct btrfs_balance_item *bi, 2173 struct btrfs_disk_balance_args *ba) 2174 { 2175 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2176 } 2177 2178 static inline void btrfs_balance_sys(struct extent_buffer *eb, 2179 struct btrfs_balance_item *bi, 2180 struct btrfs_disk_balance_args *ba) 2181 { 2182 read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 2183 } 2184 2185 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, 2186 struct btrfs_balance_item *bi, 2187 struct btrfs_disk_balance_args *ba) 2188 { 2189 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 2190 } 2191 2192 static inline void 2193 btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, 2194 struct btrfs_disk_balance_args *disk) 2195 { 2196 memset(cpu, 0, sizeof(*cpu)); 2197 2198 cpu->profiles = le64_to_cpu(disk->profiles); 2199 cpu->usage = le64_to_cpu(disk->usage); 2200 cpu->devid = le64_to_cpu(disk->devid); 2201 cpu->pstart = le64_to_cpu(disk->pstart); 2202 cpu->pend = le64_to_cpu(disk->pend); 2203 cpu->vstart = le64_to_cpu(disk->vstart); 2204 cpu->vend = le64_to_cpu(disk->vend); 2205 cpu->target = le64_to_cpu(disk->target); 2206 cpu->flags = le64_to_cpu(disk->flags); 2207 cpu->limit = le64_to_cpu(disk->limit); 2208 } 2209 2210 static inline void 2211 btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, 2212 struct btrfs_balance_args *cpu) 2213 { 2214 memset(disk, 0, sizeof(*disk)); 2215 2216 disk->profiles = cpu_to_le64(cpu->profiles); 2217 disk->usage = cpu_to_le64(cpu->usage); 2218 disk->devid = cpu_to_le64(cpu->devid); 2219 disk->pstart = cpu_to_le64(cpu->pstart); 2220 disk->pend = cpu_to_le64(cpu->pend); 2221 disk->vstart = cpu_to_le64(cpu->vstart); 2222 disk->vend = cpu_to_le64(cpu->vend); 2223 disk->target = cpu_to_le64(cpu->target); 2224 disk->flags = cpu_to_le64(cpu->flags); 2225 disk->limit = cpu_to_le64(cpu->limit); 2226 } 2227 2228 /* struct btrfs_super_block */ 2229 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); 2230 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64); 2231 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block, 2232 generation, 64); 2233 BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64); 2234 BTRFS_SETGET_STACK_FUNCS(super_sys_array_size, 2235 struct btrfs_super_block, sys_chunk_array_size, 32); 2236 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation, 2237 struct btrfs_super_block, chunk_root_generation, 64); 2238 BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block, 2239 root_level, 8); 2240 BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block, 2241 chunk_root, 64); 2242 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block, 2243 chunk_root_level, 8); 2244 BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block, 2245 log_root, 64); 2246 BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block, 2247 log_root_transid, 64); 2248 
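/*
 * A note on the BTRFS_SETGET_* accessor blocks above and below (the macros
 * themselves are defined earlier in this file; the expansion shown here is
 * a rough sketch, not a verbatim copy):
 *
 * - BTRFS_SETGET_FUNCS(name, type, member, bits) emits btrfs_<name>() and
 *   btrfs_set_<name>() that read/write @member through an extent_buffer,
 *   converting between on-disk little-endian and CPU byte order, plus
 *   btrfs_token_<name>() variants that reuse a mapping cached in a
 *   struct btrfs_map_token.
 * - BTRFS_SETGET_STACK_FUNCS() does the same for a structure sitting in
 *   ordinary memory, e.g. fs_info->super_copy; roughly:
 *
 *	static inline u64 btrfs_super_generation(struct btrfs_super_block *s)
 *	{
 *		return le64_to_cpu(s->generation);
 *	}
 *	static inline void btrfs_set_super_generation(struct btrfs_super_block *s,
 *						      u64 val)
 *	{
 *		s->generation = cpu_to_le64(val);
 *	}
 *
 * - BTRFS_SETGET_HEADER_FUNCS() is the extent_buffer flavour specialised
 *   for the struct btrfs_header at offset zero of every tree block.
 */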
BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block, 2249 log_root_level, 8); 2250 BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block, 2251 total_bytes, 64); 2252 BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block, 2253 bytes_used, 64); 2254 BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block, 2255 sectorsize, 32); 2256 BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block, 2257 nodesize, 32); 2258 BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block, 2259 stripesize, 32); 2260 BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block, 2261 root_dir_objectid, 64); 2262 BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block, 2263 num_devices, 64); 2264 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, 2265 compat_flags, 64); 2266 BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, 2267 compat_ro_flags, 64); 2268 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, 2269 incompat_flags, 64); 2270 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, 2271 csum_type, 16); 2272 BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block, 2273 cache_generation, 64); 2274 BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); 2275 BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, 2276 uuid_tree_generation, 64); 2277 2278 static inline int btrfs_super_csum_size(struct btrfs_super_block *s) 2279 { 2280 u16 t = btrfs_super_csum_type(s); 2281 /* 2282 * csum type is validated at mount time 2283 */ 2284 return btrfs_csum_sizes[t]; 2285 } 2286 2287 static inline unsigned long btrfs_leaf_data(struct extent_buffer *l) 2288 { 2289 return offsetof(struct btrfs_leaf, items); 2290 } 2291 2292 /* struct btrfs_file_extent_item */ 2293 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); 2294 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, 2295 struct btrfs_file_extent_item, disk_bytenr, 64); 2296 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset, 2297 struct btrfs_file_extent_item, offset, 64); 2298 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation, 2299 struct btrfs_file_extent_item, generation, 64); 2300 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes, 2301 struct btrfs_file_extent_item, num_bytes, 64); 2302 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes, 2303 struct btrfs_file_extent_item, disk_num_bytes, 64); 2304 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression, 2305 struct btrfs_file_extent_item, compression, 8); 2306 2307 static inline unsigned long 2308 btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e) 2309 { 2310 return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START; 2311 } 2312 2313 static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) 2314 { 2315 return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize; 2316 } 2317 2318 BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, 2319 disk_bytenr, 64); 2320 BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, 2321 generation, 64); 2322 BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item, 2323 disk_num_bytes, 64); 2324 BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item, 2325 offset, 64); 2326 BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item, 2327 num_bytes, 64); 2328 
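/*
 * How the size fields of btrfs_file_extent_item relate for regular extents
 * (an informal summary with a made-up example, not normative documentation):
 *
 *   disk_num_bytes  bytes the extent occupies on disk (the compressed size
 *                   if the extent is compressed)
 *   ram_bytes       uncompressed size of the entire extent
 *   offset          byte offset into the (decompressed) extent where the
 *                   range described by this item starts
 *   num_bytes       bytes of the file covered by this item
 *
 * e.g. a 128K buffered write compressed down to 16K, later partially
 * overwritten so that only its last 32K is still referenced, could leave an
 * item with disk_num_bytes = 16K, ram_bytes = 128K, offset = 96K and
 * num_bytes = 32K.  Inline extents instead store their data in the item
 * itself; see the inline helpers below.
 */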
BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item,
		   ram_bytes, 64);
BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item,
		   compression, 8);
BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item,
		   encryption, 8);
BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
		   other_encoding, 16);

/*
 * this returns the number of bytes used by the item on disk, minus the
 * size of any extent headers.  If a file is compressed on disk, this is
 * the compressed size
 */
static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
						    struct btrfs_item *e)
{
	return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
}

/* this returns the number of file bytes represented by the inline item.
 * If an item is compressed, this is the uncompressed size
 */
static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
					       int slot,
					       struct btrfs_file_extent_item *fi)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);
	/*
	 * return the space used on disk if this item isn't
	 * compressed or encoded
	 */
	if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 &&
	    btrfs_token_file_extent_encryption(eb, fi, &token) == 0 &&
	    btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) {
		return btrfs_file_extent_inline_item_len(eb,
							 btrfs_item_nr(slot));
	}

	/* otherwise use the ram bytes field */
	return btrfs_token_file_extent_ram_bytes(eb, fi, &token);
}


/* btrfs_dev_stats_item */
static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb,
					struct btrfs_dev_stats_item *ptr,
					int index)
{
	u64 val;

	read_extent_buffer(eb, &val,
			   offsetof(struct btrfs_dev_stats_item, values) +
			   ((unsigned long)ptr) + (index * sizeof(u64)),
			   sizeof(val));
	return val;
}

static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb,
					     struct btrfs_dev_stats_item *ptr,
					     int index, u64 val)
{
	write_extent_buffer(eb, &val,
			    offsetof(struct btrfs_dev_stats_item, values) +
			    ((unsigned long)ptr) + (index * sizeof(u64)),
			    sizeof(val));
}

/* btrfs_qgroup_status_item */
BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item,
		   generation, 64);
BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item,
		   version, 64);
BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item,
		   flags, 64);
BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item,
		   rescan, 64);

/* btrfs_qgroup_info_item */
BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item,
		   generation, 64);
BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64);
BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item,
		   rfer_cmpr, 64);
BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64);
BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item,
		   excl_cmpr, 64);

BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation,
			 struct btrfs_qgroup_info_item, generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item,
			 rfer, 64);
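/*
 * For the qgroup_info counters handled above and below: rfer counts bytes
 * referenced by the qgroup, excl counts bytes referenced exclusively by it
 * (extents shared with other qgroups only contribute to rfer), and the
 * _cmpr fields are the corresponding "compressed" counters.
 */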
BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr, 2423 struct btrfs_qgroup_info_item, rfer_cmpr, 64); 2424 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item, 2425 excl, 64); 2426 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr, 2427 struct btrfs_qgroup_info_item, excl_cmpr, 64); 2428 2429 /* btrfs_qgroup_limit_item */ 2430 BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item, 2431 flags, 64); 2432 BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item, 2433 max_rfer, 64); 2434 BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item, 2435 max_excl, 64); 2436 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item, 2437 rsv_rfer, 64); 2438 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item, 2439 rsv_excl, 64); 2440 2441 /* btrfs_dev_replace_item */ 2442 BTRFS_SETGET_FUNCS(dev_replace_src_devid, 2443 struct btrfs_dev_replace_item, src_devid, 64); 2444 BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode, 2445 struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode, 2446 64); 2447 BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item, 2448 replace_state, 64); 2449 BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item, 2450 time_started, 64); 2451 BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item, 2452 time_stopped, 64); 2453 BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item, 2454 num_write_errors, 64); 2455 BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors, 2456 struct btrfs_dev_replace_item, num_uncorrectable_read_errors, 2457 64); 2458 BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item, 2459 cursor_left, 64); 2460 BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item, 2461 cursor_right, 64); 2462 2463 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid, 2464 struct btrfs_dev_replace_item, src_devid, 64); 2465 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode, 2466 struct btrfs_dev_replace_item, 2467 cont_reading_from_srcdev_mode, 64); 2468 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state, 2469 struct btrfs_dev_replace_item, replace_state, 64); 2470 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started, 2471 struct btrfs_dev_replace_item, time_started, 64); 2472 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped, 2473 struct btrfs_dev_replace_item, time_stopped, 64); 2474 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors, 2475 struct btrfs_dev_replace_item, num_write_errors, 64); 2476 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors, 2477 struct btrfs_dev_replace_item, 2478 num_uncorrectable_read_errors, 64); 2479 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left, 2480 struct btrfs_dev_replace_item, cursor_left, 64); 2481 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right, 2482 struct btrfs_dev_replace_item, cursor_right, 64); 2483 2484 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) 2485 { 2486 return sb->s_fs_info; 2487 } 2488 2489 /* helper function to cast into the data area of the leaf. 
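 *
 * Item headers grow from the front of the leaf while the item bodies are
 * packed at the end and grow backwards, so the body of item @nr starts at
 * btrfs_leaf_data() + btrfs_item_offset_nr(leaf, nr).  The pointer these
 * macros return is an offset in disguise: it is only meaningful when fed
 * back into the extent_buffer accessors (or, via btrfs_item_ptr_offset(),
 * into read_extent_buffer() and friends).  A rough usage sketch, assuming
 * @leaf and @slot come from path->nodes[0] / path->slots[0] after a
 * btrfs_search_slot():
 *
 *	struct btrfs_inode_item *ii;
 *	u64 isize;
 *
 *	ii = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
 *	isize = btrfs_inode_size(leaf, ii);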
*/ 2490 #define btrfs_item_ptr(leaf, slot, type) \ 2491 ((type *)(btrfs_leaf_data(leaf) + \ 2492 btrfs_item_offset_nr(leaf, slot))) 2493 2494 #define btrfs_item_ptr_offset(leaf, slot) \ 2495 ((unsigned long)(btrfs_leaf_data(leaf) + \ 2496 btrfs_item_offset_nr(leaf, slot))) 2497 2498 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) 2499 { 2500 return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && 2501 (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); 2502 } 2503 2504 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) 2505 { 2506 return mapping_gfp_constraint(mapping, ~__GFP_FS); 2507 } 2508 2509 /* extent-tree.c */ 2510 2511 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes); 2512 2513 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, 2514 unsigned num_items) 2515 { 2516 return root->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; 2517 } 2518 2519 /* 2520 * Doing a truncate won't result in new nodes or leaves, just what we need for 2521 * COW. 2522 */ 2523 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root, 2524 unsigned num_items) 2525 { 2526 return root->nodesize * BTRFS_MAX_LEVEL * num_items; 2527 } 2528 2529 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 2530 struct btrfs_root *root); 2531 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, 2532 struct btrfs_root *root); 2533 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, 2534 const u64 start); 2535 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); 2536 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); 2537 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); 2538 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); 2539 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 2540 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2541 struct btrfs_root *root, unsigned long count); 2542 int btrfs_async_run_delayed_refs(struct btrfs_root *root, 2543 unsigned long count, u64 transid, int wait); 2544 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); 2545 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 2546 struct btrfs_root *root, u64 bytenr, 2547 u64 offset, int metadata, u64 *refs, u64 *flags); 2548 int btrfs_pin_extent(struct btrfs_root *root, 2549 u64 bytenr, u64 num, int reserved); 2550 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root, 2551 u64 bytenr, u64 num_bytes); 2552 int btrfs_exclude_logged_extents(struct btrfs_root *root, 2553 struct extent_buffer *eb); 2554 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 2555 struct btrfs_root *root, 2556 u64 objectid, u64 offset, u64 bytenr); 2557 struct btrfs_block_group_cache *btrfs_lookup_block_group( 2558 struct btrfs_fs_info *info, 2559 u64 bytenr); 2560 void btrfs_get_block_group(struct btrfs_block_group_cache *cache); 2561 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 2562 int get_block_group_index(struct btrfs_block_group_cache *cache); 2563 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 2564 struct btrfs_root *root, u64 parent, 2565 u64 root_objectid, 2566 struct btrfs_disk_key *key, int level, 2567 u64 hint, u64 empty_size); 2568 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 2569 struct btrfs_root *root, 2570 struct extent_buffer *buf, 2571 u64 parent, int 
last_ref); 2572 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 2573 struct btrfs_root *root, 2574 u64 root_objectid, u64 owner, 2575 u64 offset, u64 ram_bytes, 2576 struct btrfs_key *ins); 2577 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 2578 struct btrfs_root *root, 2579 u64 root_objectid, u64 owner, u64 offset, 2580 struct btrfs_key *ins); 2581 int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes, 2582 u64 min_alloc_size, u64 empty_size, u64 hint_byte, 2583 struct btrfs_key *ins, int is_data, int delalloc); 2584 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2585 struct extent_buffer *buf, int full_backref); 2586 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2587 struct extent_buffer *buf, int full_backref); 2588 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2589 struct btrfs_root *root, 2590 u64 bytenr, u64 num_bytes, u64 flags, 2591 int level, int is_data); 2592 int btrfs_free_extent(struct btrfs_trans_handle *trans, 2593 struct btrfs_root *root, 2594 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 2595 u64 owner, u64 offset); 2596 2597 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len, 2598 int delalloc); 2599 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 2600 u64 start, u64 len); 2601 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 2602 struct btrfs_root *root); 2603 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, 2604 struct btrfs_root *root); 2605 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 2606 struct btrfs_root *root, 2607 u64 bytenr, u64 num_bytes, u64 parent, 2608 u64 root_objectid, u64 owner, u64 offset); 2609 2610 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans, 2611 struct btrfs_root *root); 2612 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 2613 struct btrfs_root *root); 2614 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, 2615 struct btrfs_root *root); 2616 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); 2617 int btrfs_free_block_groups(struct btrfs_fs_info *info); 2618 int btrfs_read_block_groups(struct btrfs_root *root); 2619 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr); 2620 int btrfs_make_block_group(struct btrfs_trans_handle *trans, 2621 struct btrfs_root *root, u64 bytes_used, 2622 u64 type, u64 chunk_objectid, u64 chunk_offset, 2623 u64 size); 2624 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 2625 struct btrfs_fs_info *fs_info, 2626 const u64 chunk_offset); 2627 int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 2628 struct btrfs_root *root, u64 group_start, 2629 struct extent_map *em); 2630 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); 2631 void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); 2632 void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache); 2633 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, 2634 struct btrfs_root *root); 2635 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data); 2636 void btrfs_clear_space_info_full(struct btrfs_fs_info *info); 2637 2638 enum btrfs_reserve_flush_enum { 2639 /* If we are in the transaction, we can't flush anything.*/ 2640 BTRFS_RESERVE_NO_FLUSH, 2641 /* 2642 * Flushing delalloc may cause deadlock somewhere, in this 2643 * case, use FLUSH LIMIT 2644 */ 2645 
BTRFS_RESERVE_FLUSH_LIMIT, 2646 BTRFS_RESERVE_FLUSH_ALL, 2647 }; 2648 2649 enum btrfs_flush_state { 2650 FLUSH_DELAYED_ITEMS_NR = 1, 2651 FLUSH_DELAYED_ITEMS = 2, 2652 FLUSH_DELALLOC = 3, 2653 FLUSH_DELALLOC_WAIT = 4, 2654 ALLOC_CHUNK = 5, 2655 COMMIT_TRANS = 6, 2656 }; 2657 2658 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len); 2659 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes); 2660 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len); 2661 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, 2662 u64 len); 2663 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, 2664 struct btrfs_root *root); 2665 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans); 2666 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, 2667 struct inode *inode); 2668 void btrfs_orphan_release_metadata(struct inode *inode); 2669 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, 2670 struct btrfs_block_rsv *rsv, 2671 int nitems, 2672 u64 *qgroup_reserved, bool use_global_rsv); 2673 void btrfs_subvolume_release_metadata(struct btrfs_root *root, 2674 struct btrfs_block_rsv *rsv, 2675 u64 qgroup_reserved); 2676 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes); 2677 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes); 2678 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len); 2679 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len); 2680 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type); 2681 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root, 2682 unsigned short type); 2683 void btrfs_free_block_rsv(struct btrfs_root *root, 2684 struct btrfs_block_rsv *rsv); 2685 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv); 2686 int btrfs_block_rsv_add(struct btrfs_root *root, 2687 struct btrfs_block_rsv *block_rsv, u64 num_bytes, 2688 enum btrfs_reserve_flush_enum flush); 2689 int btrfs_block_rsv_check(struct btrfs_root *root, 2690 struct btrfs_block_rsv *block_rsv, int min_factor); 2691 int btrfs_block_rsv_refill(struct btrfs_root *root, 2692 struct btrfs_block_rsv *block_rsv, u64 min_reserved, 2693 enum btrfs_reserve_flush_enum flush); 2694 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 2695 struct btrfs_block_rsv *dst_rsv, u64 num_bytes, 2696 int update_size); 2697 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info, 2698 struct btrfs_block_rsv *dest, u64 num_bytes, 2699 int min_factor); 2700 void btrfs_block_rsv_release(struct btrfs_root *root, 2701 struct btrfs_block_rsv *block_rsv, 2702 u64 num_bytes); 2703 int btrfs_inc_block_group_ro(struct btrfs_root *root, 2704 struct btrfs_block_group_cache *cache); 2705 void btrfs_dec_block_group_ro(struct btrfs_root *root, 2706 struct btrfs_block_group_cache *cache); 2707 void btrfs_put_block_group_cache(struct btrfs_fs_info *info); 2708 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); 2709 int btrfs_error_unpin_extent_range(struct btrfs_root *root, 2710 u64 start, u64 end); 2711 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, 2712 u64 num_bytes, u64 *actual_bytes); 2713 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, 2714 struct btrfs_root *root, u64 type); 2715 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range); 2716 2717 int btrfs_init_space_info(struct btrfs_fs_info *fs_info); 2718 int 
btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans, 2719 struct btrfs_fs_info *fs_info); 2720 int __get_raid_index(u64 flags); 2721 int btrfs_start_write_no_snapshoting(struct btrfs_root *root); 2722 void btrfs_end_write_no_snapshoting(struct btrfs_root *root); 2723 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); 2724 void check_system_chunk(struct btrfs_trans_handle *trans, 2725 struct btrfs_root *root, 2726 const u64 type); 2727 u64 add_new_free_space(struct btrfs_block_group_cache *block_group, 2728 struct btrfs_fs_info *info, u64 start, u64 end); 2729 2730 /* ctree.c */ 2731 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 2732 int level, int *slot); 2733 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2); 2734 int btrfs_previous_item(struct btrfs_root *root, 2735 struct btrfs_path *path, u64 min_objectid, 2736 int type); 2737 int btrfs_previous_extent_item(struct btrfs_root *root, 2738 struct btrfs_path *path, u64 min_objectid); 2739 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 2740 struct btrfs_path *path, 2741 struct btrfs_key *new_key); 2742 struct extent_buffer *btrfs_root_node(struct btrfs_root *root); 2743 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); 2744 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 2745 struct btrfs_key *key, int lowest_level, 2746 u64 min_trans); 2747 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 2748 struct btrfs_path *path, 2749 u64 min_trans); 2750 enum btrfs_compare_tree_result { 2751 BTRFS_COMPARE_TREE_NEW, 2752 BTRFS_COMPARE_TREE_DELETED, 2753 BTRFS_COMPARE_TREE_CHANGED, 2754 BTRFS_COMPARE_TREE_SAME, 2755 }; 2756 typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root, 2757 struct btrfs_root *right_root, 2758 struct btrfs_path *left_path, 2759 struct btrfs_path *right_path, 2760 struct btrfs_key *key, 2761 enum btrfs_compare_tree_result result, 2762 void *ctx); 2763 int btrfs_compare_trees(struct btrfs_root *left_root, 2764 struct btrfs_root *right_root, 2765 btrfs_changed_cb_t cb, void *ctx); 2766 int btrfs_cow_block(struct btrfs_trans_handle *trans, 2767 struct btrfs_root *root, struct extent_buffer *buf, 2768 struct extent_buffer *parent, int parent_slot, 2769 struct extent_buffer **cow_ret); 2770 int btrfs_copy_root(struct btrfs_trans_handle *trans, 2771 struct btrfs_root *root, 2772 struct extent_buffer *buf, 2773 struct extent_buffer **cow_ret, u64 new_root_objectid); 2774 int btrfs_block_can_be_shared(struct btrfs_root *root, 2775 struct extent_buffer *buf); 2776 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path, 2777 u32 data_size); 2778 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path, 2779 u32 new_size, int from_end); 2780 int btrfs_split_item(struct btrfs_trans_handle *trans, 2781 struct btrfs_root *root, 2782 struct btrfs_path *path, 2783 struct btrfs_key *new_key, 2784 unsigned long split_offset); 2785 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 2786 struct btrfs_root *root, 2787 struct btrfs_path *path, 2788 struct btrfs_key *new_key); 2789 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 2790 u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key); 2791 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root 2792 *root, struct btrfs_key *key, struct btrfs_path *p, int 2793 ins_len, int cow); 2794 int btrfs_search_old_slot(struct btrfs_root *root, struct 
btrfs_key *key, 2795 struct btrfs_path *p, u64 time_seq); 2796 int btrfs_search_slot_for_read(struct btrfs_root *root, 2797 struct btrfs_key *key, struct btrfs_path *p, 2798 int find_higher, int return_any); 2799 int btrfs_realloc_node(struct btrfs_trans_handle *trans, 2800 struct btrfs_root *root, struct extent_buffer *parent, 2801 int start_slot, u64 *last_ret, 2802 struct btrfs_key *progress); 2803 void btrfs_release_path(struct btrfs_path *p); 2804 struct btrfs_path *btrfs_alloc_path(void); 2805 void btrfs_free_path(struct btrfs_path *p); 2806 void btrfs_set_path_blocking(struct btrfs_path *p); 2807 void btrfs_clear_path_blocking(struct btrfs_path *p, 2808 struct extent_buffer *held, int held_rw); 2809 void btrfs_unlock_up_safe(struct btrfs_path *p, int level); 2810 2811 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2812 struct btrfs_path *path, int slot, int nr); 2813 static inline int btrfs_del_item(struct btrfs_trans_handle *trans, 2814 struct btrfs_root *root, 2815 struct btrfs_path *path) 2816 { 2817 return btrfs_del_items(trans, root, path, path->slots[0], 1); 2818 } 2819 2820 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 2821 struct btrfs_key *cpu_key, u32 *data_size, 2822 u32 total_data, u32 total_size, int nr); 2823 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root 2824 *root, struct btrfs_key *key, void *data, u32 data_size); 2825 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 2826 struct btrfs_root *root, 2827 struct btrfs_path *path, 2828 struct btrfs_key *cpu_key, u32 *data_size, int nr); 2829 2830 static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, 2831 struct btrfs_root *root, 2832 struct btrfs_path *path, 2833 struct btrfs_key *key, 2834 u32 data_size) 2835 { 2836 return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1); 2837 } 2838 2839 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); 2840 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 2841 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 2842 u64 time_seq); 2843 static inline int btrfs_next_old_item(struct btrfs_root *root, 2844 struct btrfs_path *p, u64 time_seq) 2845 { 2846 ++p->slots[0]; 2847 if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) 2848 return btrfs_next_old_leaf(root, p, time_seq); 2849 return 0; 2850 } 2851 static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) 2852 { 2853 return btrfs_next_old_item(root, p, 0); 2854 } 2855 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); 2856 int __must_check btrfs_drop_snapshot(struct btrfs_root *root, 2857 struct btrfs_block_rsv *block_rsv, 2858 int update_ref, int for_reloc); 2859 int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 2860 struct btrfs_root *root, 2861 struct extent_buffer *node, 2862 struct extent_buffer *parent); 2863 static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info) 2864 { 2865 /* 2866 * Get synced with close_ctree() 2867 */ 2868 smp_mb(); 2869 return fs_info->closing; 2870 } 2871 2872 /* 2873 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do 2874 * anything except sleeping. This function is used to check the status of 2875 * the fs. 
2876 */ 2877 static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root) 2878 { 2879 return (root->fs_info->sb->s_flags & MS_RDONLY || 2880 btrfs_fs_closing(root->fs_info)); 2881 } 2882 2883 static inline void free_fs_info(struct btrfs_fs_info *fs_info) 2884 { 2885 kfree(fs_info->balance_ctl); 2886 kfree(fs_info->delayed_root); 2887 kfree(fs_info->extent_root); 2888 kfree(fs_info->tree_root); 2889 kfree(fs_info->chunk_root); 2890 kfree(fs_info->dev_root); 2891 kfree(fs_info->csum_root); 2892 kfree(fs_info->quota_root); 2893 kfree(fs_info->uuid_root); 2894 kfree(fs_info->free_space_root); 2895 kfree(fs_info->super_copy); 2896 kfree(fs_info->super_for_commit); 2897 security_free_mnt_opts(&fs_info->security_opts); 2898 kfree(fs_info); 2899 } 2900 2901 /* tree mod log functions from ctree.c */ 2902 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, 2903 struct seq_list *elem); 2904 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, 2905 struct seq_list *elem); 2906 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq); 2907 2908 /* root-item.c */ 2909 int btrfs_add_root_ref(struct btrfs_trans_handle *trans, 2910 struct btrfs_root *tree_root, 2911 u64 root_id, u64 ref_id, u64 dirid, u64 sequence, 2912 const char *name, int name_len); 2913 int btrfs_del_root_ref(struct btrfs_trans_handle *trans, 2914 struct btrfs_root *tree_root, 2915 u64 root_id, u64 ref_id, u64 dirid, u64 *sequence, 2916 const char *name, int name_len); 2917 int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2918 struct btrfs_key *key); 2919 int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root 2920 *root, struct btrfs_key *key, struct btrfs_root_item 2921 *item); 2922 int __must_check btrfs_update_root(struct btrfs_trans_handle *trans, 2923 struct btrfs_root *root, 2924 struct btrfs_key *key, 2925 struct btrfs_root_item *item); 2926 int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key, 2927 struct btrfs_path *path, struct btrfs_root_item *root_item, 2928 struct btrfs_key *root_key); 2929 int btrfs_find_orphan_roots(struct btrfs_root *tree_root); 2930 void btrfs_set_root_node(struct btrfs_root_item *item, 2931 struct extent_buffer *node); 2932 void btrfs_check_and_init_root_item(struct btrfs_root_item *item); 2933 void btrfs_update_root_times(struct btrfs_trans_handle *trans, 2934 struct btrfs_root *root); 2935 2936 /* uuid-tree.c */ 2937 int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, 2938 struct btrfs_root *uuid_root, u8 *uuid, u8 type, 2939 u64 subid); 2940 int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans, 2941 struct btrfs_root *uuid_root, u8 *uuid, u8 type, 2942 u64 subid); 2943 int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info, 2944 int (*check_func)(struct btrfs_fs_info *, u8 *, u8, 2945 u64)); 2946 2947 /* dir-item.c */ 2948 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, 2949 const char *name, int name_len); 2950 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, 2951 struct btrfs_root *root, const char *name, 2952 int name_len, struct inode *dir, 2953 struct btrfs_key *location, u8 type, u64 index); 2954 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, 2955 struct btrfs_root *root, 2956 struct btrfs_path *path, u64 dir, 2957 const char *name, int name_len, 2958 int mod); 2959 struct btrfs_dir_item * 2960 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, 2961 struct btrfs_root *root, 2962 struct btrfs_path *path, u64 dir, 
2963 u64 objectid, const char *name, int name_len, 2964 int mod); 2965 struct btrfs_dir_item * 2966 btrfs_search_dir_index_item(struct btrfs_root *root, 2967 struct btrfs_path *path, u64 dirid, 2968 const char *name, int name_len); 2969 int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, 2970 struct btrfs_root *root, 2971 struct btrfs_path *path, 2972 struct btrfs_dir_item *di); 2973 int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, 2974 struct btrfs_root *root, 2975 struct btrfs_path *path, u64 objectid, 2976 const char *name, u16 name_len, 2977 const void *data, u16 data_len); 2978 struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, 2979 struct btrfs_root *root, 2980 struct btrfs_path *path, u64 dir, 2981 const char *name, u16 name_len, 2982 int mod); 2983 int verify_dir_item(struct btrfs_root *root, 2984 struct extent_buffer *leaf, 2985 struct btrfs_dir_item *dir_item); 2986 struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, 2987 struct btrfs_path *path, 2988 const char *name, 2989 int name_len); 2990 2991 /* orphan.c */ 2992 int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, 2993 struct btrfs_root *root, u64 offset); 2994 int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, 2995 struct btrfs_root *root, u64 offset); 2996 int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); 2997 2998 /* inode-item.c */ 2999 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, 3000 struct btrfs_root *root, 3001 const char *name, int name_len, 3002 u64 inode_objectid, u64 ref_objectid, u64 index); 3003 int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, 3004 struct btrfs_root *root, 3005 const char *name, int name_len, 3006 u64 inode_objectid, u64 ref_objectid, u64 *index); 3007 int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, 3008 struct btrfs_root *root, 3009 struct btrfs_path *path, u64 objectid); 3010 int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root 3011 *root, struct btrfs_path *path, 3012 struct btrfs_key *location, int mod); 3013 3014 struct btrfs_inode_extref * 3015 btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, 3016 struct btrfs_root *root, 3017 struct btrfs_path *path, 3018 const char *name, int name_len, 3019 u64 inode_objectid, u64 ref_objectid, int ins_len, 3020 int cow); 3021 3022 int btrfs_find_name_in_ext_backref(struct btrfs_path *path, 3023 u64 ref_objectid, const char *name, 3024 int name_len, 3025 struct btrfs_inode_extref **extref_ret); 3026 3027 /* file-item.c */ 3028 struct btrfs_dio_private; 3029 int btrfs_del_csums(struct btrfs_trans_handle *trans, 3030 struct btrfs_root *root, u64 bytenr, u64 len); 3031 int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, 3032 struct bio *bio, u32 *dst); 3033 int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, 3034 struct bio *bio, u64 logical_offset); 3035 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, 3036 struct btrfs_root *root, 3037 u64 objectid, u64 pos, 3038 u64 disk_offset, u64 disk_num_bytes, 3039 u64 num_bytes, u64 offset, u64 ram_bytes, 3040 u8 compression, u8 encryption, u16 other_encoding); 3041 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, 3042 struct btrfs_root *root, 3043 struct btrfs_path *path, u64 objectid, 3044 u64 bytenr, int mod); 3045 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, 3046 struct btrfs_root *root, 3047 struct btrfs_ordered_sum *sums); 3048 int 
btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, 3049 struct bio *bio, u64 file_start, int contig); 3050 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 3051 struct list_head *list, int search_commit); 3052 void btrfs_extent_item_to_extent_map(struct inode *inode, 3053 const struct btrfs_path *path, 3054 struct btrfs_file_extent_item *fi, 3055 const bool new_inline, 3056 struct extent_map *em); 3057 3058 /* inode.c */ 3059 struct btrfs_delalloc_work { 3060 struct inode *inode; 3061 int delay_iput; 3062 struct completion completion; 3063 struct list_head list; 3064 struct btrfs_work work; 3065 }; 3066 3067 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, 3068 int delay_iput); 3069 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work); 3070 3071 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 3072 size_t pg_offset, u64 start, u64 len, 3073 int create); 3074 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 3075 u64 *orig_start, u64 *orig_block_len, 3076 u64 *ram_bytes); 3077 3078 /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ 3079 #if defined(ClearPageFsMisc) && !defined(ClearPageChecked) 3080 #define ClearPageChecked ClearPageFsMisc 3081 #define SetPageChecked SetPageFsMisc 3082 #define PageChecked PageFsMisc 3083 #endif 3084 3085 /* This forces readahead on a given range of bytes in an inode */ 3086 static inline void btrfs_force_ra(struct address_space *mapping, 3087 struct file_ra_state *ra, struct file *file, 3088 pgoff_t offset, unsigned long req_size) 3089 { 3090 page_cache_sync_readahead(mapping, ra, file, offset, req_size); 3091 } 3092 3093 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); 3094 int btrfs_set_inode_index(struct inode *dir, u64 *index); 3095 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 3096 struct btrfs_root *root, 3097 struct inode *dir, struct inode *inode, 3098 const char *name, int name_len); 3099 int btrfs_add_link(struct btrfs_trans_handle *trans, 3100 struct inode *parent_inode, struct inode *inode, 3101 const char *name, int name_len, int add_backref, u64 index); 3102 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 3103 struct btrfs_root *root, 3104 struct inode *dir, u64 objectid, 3105 const char *name, int name_len); 3106 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, 3107 int front); 3108 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 3109 struct btrfs_root *root, 3110 struct inode *inode, u64 new_size, 3111 u32 min_type); 3112 3113 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); 3114 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, 3115 int nr); 3116 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 3117 struct extent_state **cached_state); 3118 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 3119 struct btrfs_root *new_root, 3120 struct btrfs_root *parent_root, 3121 u64 new_dirid); 3122 int btrfs_merge_bio_hook(struct page *page, unsigned long offset, 3123 size_t size, struct bio *bio, 3124 unsigned long bio_flags); 3125 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); 3126 int btrfs_readpage(struct file *file, struct page *page); 3127 void btrfs_evict_inode(struct inode *inode); 3128 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); 3129 struct inode 
*btrfs_alloc_inode(struct super_block *sb); 3130 void btrfs_destroy_inode(struct inode *inode); 3131 int btrfs_drop_inode(struct inode *inode); 3132 int btrfs_init_cachep(void); 3133 void btrfs_destroy_cachep(void); 3134 long btrfs_ioctl_trans_end(struct file *file); 3135 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 3136 struct btrfs_root *root, int *was_new); 3137 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 3138 size_t pg_offset, u64 start, u64 end, 3139 int create); 3140 int btrfs_update_inode(struct btrfs_trans_handle *trans, 3141 struct btrfs_root *root, 3142 struct inode *inode); 3143 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 3144 struct btrfs_root *root, struct inode *inode); 3145 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); 3146 int btrfs_orphan_cleanup(struct btrfs_root *root); 3147 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 3148 struct btrfs_root *root); 3149 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); 3150 void btrfs_invalidate_inodes(struct btrfs_root *root); 3151 void btrfs_add_delayed_iput(struct inode *inode); 3152 void btrfs_run_delayed_iputs(struct btrfs_root *root); 3153 int btrfs_prealloc_file_range(struct inode *inode, int mode, 3154 u64 start, u64 num_bytes, u64 min_size, 3155 loff_t actual_len, u64 *alloc_hint); 3156 int btrfs_prealloc_file_range_trans(struct inode *inode, 3157 struct btrfs_trans_handle *trans, int mode, 3158 u64 start, u64 num_bytes, u64 min_size, 3159 loff_t actual_len, u64 *alloc_hint); 3160 int btrfs_inode_check_errors(struct inode *inode); 3161 extern const struct dentry_operations btrfs_dentry_operations; 3162 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 3163 void btrfs_test_inode_set_ops(struct inode *inode); 3164 #endif 3165 3166 /* ioctl.c */ 3167 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3168 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3169 int btrfs_ioctl_get_supported_features(void __user *arg); 3170 void btrfs_update_iflags(struct inode *inode); 3171 void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); 3172 int btrfs_is_empty_uuid(u8 *uuid); 3173 int btrfs_defrag_file(struct inode *inode, struct file *file, 3174 struct btrfs_ioctl_defrag_range_args *range, 3175 u64 newer_than, unsigned long max_pages); 3176 void btrfs_get_block_group_info(struct list_head *groups_list, 3177 struct btrfs_ioctl_space_info *space); 3178 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock, 3179 struct btrfs_ioctl_balance_args *bargs); 3180 ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen, 3181 struct file *dst_file, u64 dst_loff); 3182 3183 /* file.c */ 3184 int btrfs_auto_defrag_init(void); 3185 void btrfs_auto_defrag_exit(void); 3186 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, 3187 struct inode *inode); 3188 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); 3189 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info); 3190 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3191 void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, 3192 int skip_pinned); 3193 extern const struct file_operations btrfs_file_operations; 3194 int __btrfs_drop_extents(struct btrfs_trans_handle *trans, 3195 struct btrfs_root *root, struct inode *inode, 3196 struct btrfs_path *path, u64 start, u64 end, 3197 u64 
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted);
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
ssize_t btrfs_copy_file_range(struct file *file_in, loff_t pos_in,
			      struct file *file_out, loff_t pos_out,
			      size_t len, unsigned int flags);
int btrfs_clone_file_range(struct file *file_in, loff_t pos_in,
			   struct file *file_out, loff_t pos_out, u64 len);

/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			struct btrfs_root *root);

/* sysfs.c */
int btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);

/* xattr.c */
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);

/* super.c */
int btrfs_parse_options(struct btrfs_root *root, char *options,
			unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);

#ifdef CONFIG_PRINTK
__printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
#else
static inline __printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
}
#endif

#define btrfs_emerg(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_INFO fmt, ##args)
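
/*
 * Usage sketch (illustrative only, not part of this interface): the wrappers
 * above take the fs_info as their first argument so btrfs_printk() can tag
 * the message with the filesystem it belongs to; the remaining arguments
 * follow the usual printk() conventions.  A hypothetical caller might do:
 *
 *	btrfs_info(fs_info, "disk space caching is enabled");
 *	btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
 *
 * Pick the helper matching the severity; the btrfs_debug() variants defined
 * below additionally compile away to no_printk() unless DEBUG is defined.
 */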
/*
 * Wrappers that use printk_in_rcu
 */
#define btrfs_emerg_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_INFO fmt, ##args)

/*
 * Wrappers that use a ratelimited printk_in_rcu
 */
#define btrfs_emerg_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args)

/*
 * Wrappers that use a ratelimited printk
 */
#define btrfs_emerg_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args)

#ifdef DEBUG
#define btrfs_debug(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args)
#else
#define btrfs_debug(fs_info, fmt, args...) \
	no_printk(KERN_DEBUG fmt, ##args)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
	no_printk(KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
	no_printk(KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl(fs_info, fmt, args...) \
	no_printk(KERN_DEBUG fmt, ##args)
#endif

#define btrfs_printk_in_rcu(fs_info, fmt, args...)	\
do {							\
	rcu_read_lock();				\
	btrfs_printk(fs_info, fmt, ##args);		\
	rcu_read_unlock();				\
} while (0)

#define btrfs_printk_ratelimited(fs_info, fmt, args...)		\
do {								\
	static DEFINE_RATELIMIT_STATE(_rs,			\
				      DEFAULT_RATELIMIT_INTERVAL, \
				      DEFAULT_RATELIMIT_BURST); \
	if (__ratelimit(&_rs))					\
		btrfs_printk(fs_info, fmt, ##args);		\
} while (0)
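
/*
 * Usage sketch (illustrative only, not part of this interface): the _in_rcu
 * variants are for messages whose arguments are only valid under
 * rcu_read_lock(), e.g. RCU-protected device name strings, and the _rl
 * variants additionally rate-limit the output.  A hypothetical caller
 * (rcu_str_deref() from rcu-string.h is shown purely as an example) might
 * look like:
 *
 *	btrfs_err_rl_in_rcu(fs_info, "i/o error on device %s",
 *			    rcu_str_deref(device->name));
 *
 * which expands, via btrfs_printk_rl_in_rcu() below, to rcu_read_lock(),
 * a ratelimited btrfs_printk() and rcu_read_unlock().
 */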
#define btrfs_printk_rl_in_rcu(fs_info, fmt, args...)		\
do {								\
	rcu_read_lock();					\
	btrfs_printk_ratelimited(fs_info, fmt, ##args);		\
	rcu_read_unlock();					\
} while (0)

#ifdef CONFIG_BTRFS_ASSERT

__cold
static inline void assfail(char *expr, char *file, int line)
{
	pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
	       expr, file, line);
	BUG();
}

#define ASSERT(expr)	\
	(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#else
#define ASSERT(expr)	((void)0)
#endif

__printf(5, 6)
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
			     unsigned int line, int errno, const char *fmt, ...);

const char *btrfs_decode_error(int errno);

__cold
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
			       const char *function,
			       unsigned int line, int errno);

/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, that way the exact line number is reported.
 */
#define btrfs_abort_transaction(trans, errno)			\
do {								\
	/* Report first abort since mount */			\
	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
			&((trans)->fs_info->fs_state))) {	\
		WARN(1, KERN_DEBUG				\
		"BTRFS: Transaction aborted (error %d)\n",	\
		(errno));					\
	}							\
	__btrfs_abort_transaction((trans), __func__,		\
				  __LINE__, (errno));		\
} while (0)
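
/*
 * Usage sketch (illustrative only, not part of this interface): per the
 * comment above, callers abort right where the failure is detected so the
 * reported function and line point at the real cause, then bail out with
 * the original error:
 *
 *	ret = btrfs_update_inode(trans, root, inode);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		goto out;
 *	}
 *
 * The first abort since mount also triggers the WARN() above; later aborts
 * only go through __btrfs_abort_transaction().
 */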
#define btrfs_handle_fs_error(fs_info, errno, fmt, args...)	\
do {								\
	__btrfs_handle_fs_error((fs_info), __func__, __LINE__,	\
				(errno), fmt, ##args);		\
} while (0)

__printf(5, 6)
__cold
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
		   unsigned int line, int errno, const char *fmt, ...);
/*
 * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
 * will panic().  Otherwise we BUG() here.
 */
#define btrfs_panic(fs_info, errno, fmt, args...)			\
do {									\
	__btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args);	\
	BUG();								\
} while (0)


/* compatibility and incompatibility defines */

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
					   u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (!(features & flag)) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_incompat_flags(disk_super);
		if (!(features & flag)) {
			features |= flag;
			btrfs_set_super_incompat_flags(disk_super, features);
			btrfs_info(fs_info, "setting %llu feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
					     u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (features & flag) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_incompat_flags(disk_super);
		if (features & flag) {
			features &= ~flag;
			btrfs_set_super_incompat_flags(disk_super, features);
			btrfs_info(fs_info, "clearing %llu feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
{
	struct btrfs_super_block *disk_super;
	disk_super = fs_info->super_copy;
	return !!(btrfs_super_incompat_flags(disk_super) & flag);
}
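
/*
 * Usage sketch (illustrative only, not part of this interface): the opt
 * argument is the suffix of a BTRFS_FEATURE_INCOMPAT_* name, so callers
 * write for example:
 *
 *	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS))
 *		handle_mixed_block_groups();
 *
 *	btrfs_set_fs_incompat(fs_info, BIG_METADATA);
 *
 * handle_mixed_block_groups() is a made-up placeholder; MIXED_GROUPS and
 * BIG_METADATA name real BTRFS_FEATURE_INCOMPAT_* bits.  Setting a flag
 * that is already present is cheap: __btrfs_set_fs_incompat() only takes
 * super_lock and rewrites the in-memory super copy when the bit still has
 * to be set.
 */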
#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info,
					    u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_compat_ro_flags(disk_super);
	if (!(features & flag)) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_compat_ro_flags(disk_super);
		if (!(features & flag)) {
			features |= flag;
			btrfs_set_super_compat_ro_flags(disk_super, features);
			btrfs_info(fs_info, "setting %llu ro feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

static inline void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info,
					      u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_compat_ro_flags(disk_super);
	if (features & flag) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_compat_ro_flags(disk_super);
		if (features & flag) {
			features &= ~flag;
			btrfs_set_super_compat_ro_flags(disk_super, features);
			btrfs_info(fs_info, "clearing %llu ro feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag)
{
	struct btrfs_super_block *disk_super;
	disk_super = fs_info->super_copy;
	return !!(btrfs_super_compat_ro_flags(disk_super) & flag);
}

/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
int btrfs_init_acl(struct btrfs_trans_handle *trans,
		   struct inode *inode, struct inode *dir);
#else
#define btrfs_get_acl NULL
#define btrfs_set_acl NULL
static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
				 struct inode *inode, struct inode *dir)
{
	return 0;
}
#endif

/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
int btrfs_recover_relocation(struct btrfs_root *root);
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve);
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending);

/* scrub.c */
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace);
void btrfs_scrub_pause(struct btrfs_root *root);
void btrfs_scrub_continue(struct btrfs_root *root);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
			   struct btrfs_device *dev);
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress);

/* dev-replace.c */
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);

static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
{
	btrfs_bio_counter_sub(fs_info, 1);
}

/* reada.c */
struct reada_control {
	struct btrfs_root	*root;		/* tree to prefetch */
	struct btrfs_key	key_start;
	struct btrfs_key	key_end;	/* exclusive */
	atomic_t		elems;
	struct kref		refcnt;
	wait_queue_head_t	wait;
};
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
				      struct btrfs_key *start,
				      struct btrfs_key *end);
int btrfs_reada_wait(void *handle);
void btrfs_reada_detach(void *handle);
int btree_readahead_hook(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, u64 start, int err);
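
/*
 * Usage sketch (illustrative only, not part of this interface): a caller
 * that wants the key range [key_start, key_end) of a tree prefetched starts
 * a readahead and either waits for it or lets it finish in the background
 * (key_start and key_end are assumed to be local struct btrfs_key variables):
 *
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);
 *
 * btrfs_reada_wait() blocks until the outstanding element count drops to
 * zero and then drops the caller's reference; btrfs_reada_detach() drops
 * the reference without waiting.
 */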
static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
	      !btrfs_qgroup_level(rootid)))
		return 1;
	return 0;
}

static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
{
	return signal_pending(current);
}

/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode);
#endif

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
			      &fs_info->fs_state)))
		return 1;
#endif
	return 0;
}
#endif