/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#ifndef __BTRFS_CTREE__
#define __BTRFS_CTREE__

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include <linux/pagemap.h>
#include <linux/btrfs.h>
#include <linux/btrfs_tree.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include <linux/dynamic_debug.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"

struct btrfs_trans_handle;
struct btrfs_transaction;
struct btrfs_pending_snapshot;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;
extern struct kmem_cache *btrfs_bit_radix_cachep;
extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
#define STATIC noinline
#else
#define STATIC static noinline
#endif

#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */

#define BTRFS_MAX_MIRRORS 3

#define BTRFS_MAX_LEVEL 8

#define BTRFS_COMPAT_EXTENT_TREE_V0

/*
 * the max metadata block size.  This limit is somewhat artificial,
 * but the memmove costs go through the roof for larger blocks.
 */
#define BTRFS_MAX_METADATA_BLOCKSIZE 65536

/*
 * we can actually store much bigger names, but lets not confuse the rest
 * of linux
 */
#define BTRFS_NAME_LEN 255

/*
 * Theoretical limit is larger, but we keep this down to a sane
 * value. That should limit greatly the possibility of collisions on
 * inode ref items.
 */
#define BTRFS_LINK_MAX 65535U

static const int btrfs_csum_sizes[] = { 4 };

/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0

/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
#define REQ_GET_READ_MIRRORS	(1 << 30)

/* ioprio of readahead is set to idle */
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))

#define BTRFS_DIRTY_METADATA_THRESH	SZ_32M

#define BTRFS_MAX_EXTENT_SIZE SZ_128M

struct btrfs_mapping_tree {
	struct extent_map_tree map_tree;
};

static inline unsigned long btrfs_chunk_item_size(int num_stripes)
{
	BUG_ON(num_stripes == 0);
	return sizeof(struct btrfs_chunk) +
		sizeof(struct btrfs_stripe) * (num_stripes - 1);
}

/*
 * File system states
 */
#define BTRFS_FS_STATE_ERROR		0
#define BTRFS_FS_STATE_REMOUNTING	1
#define BTRFS_FS_STATE_TRANS_ABORTED	2
#define BTRFS_FS_STATE_DEV_REPLACING	3
#define BTRFS_FS_STATE_DUMMY_FS_INFO	4

#define BTRFS_BACKREF_REV_MAX		256
#define BTRFS_BACKREF_REV_SHIFT		56
#define BTRFS_BACKREF_REV_MASK		(((u64)BTRFS_BACKREF_REV_MAX - 1) << \
					 BTRFS_BACKREF_REV_SHIFT)

#define BTRFS_OLD_BACKREF_REV		0
#define BTRFS_MIXED_BACKREF_REV		1

/*
 * every tree block (leaf or node) starts with this header.
 */
struct btrfs_header {
	/* these first four must match the super block */
	u8 csum[BTRFS_CSUM_SIZE];
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	__le64 bytenr; /* which block this node is supposed to live in */
	__le64 flags;

	/* allowed to be different from the super from here on down */
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	__le64 generation;
	__le64 owner;
	__le32 nritems;
	u8 level;
} __attribute__ ((__packed__));
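
/*
 * Illustration (a sketch, not part of the on-disk format definitions): the
 * "must match" requirement above means csum, fsid, bytenr and flags sit at
 * the same offsets in struct btrfs_header and in struct btrfs_super_block
 * defined below, so a compile-time check along these lines would hold:
 *
 *	BUILD_BUG_ON(offsetof(struct btrfs_header, fsid) !=
 *		     offsetof(struct btrfs_super_block, fsid));
 */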

/*
 * this is a very generous portion of the super block, giving us
 * room to translate 14 chunks with 3 stripes each.
 */
#define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048

/*
 * just in case we somehow lose the roots and are not able to mount,
 * we store an array of the roots from previous transactions
 * in the super.
 */
#define BTRFS_NUM_BACKUP_ROOTS 4
struct btrfs_root_backup {
	__le64 tree_root;
	__le64 tree_root_gen;

	__le64 chunk_root;
	__le64 chunk_root_gen;

	__le64 extent_root;
	__le64 extent_root_gen;

	__le64 fs_root;
	__le64 fs_root_gen;

	__le64 dev_root;
	__le64 dev_root_gen;

	__le64 csum_root;
	__le64 csum_root_gen;

	__le64 total_bytes;
	__le64 bytes_used;
	__le64 num_devices;
	/* future */
	__le64 unused_64[4];

	u8 tree_root_level;
	u8 chunk_root_level;
	u8 extent_root_level;
	u8 fs_root_level;
	u8 dev_root_level;
	u8 csum_root_level;
	/* future and to align */
	u8 unused_8[10];
} __attribute__ ((__packed__));

/*
 * the super block basically lists the main trees of the FS
 * it currently lacks any block count etc etc
 */
struct btrfs_super_block {
	u8 csum[BTRFS_CSUM_SIZE];
	/* the first 4 fields must match struct btrfs_header */
	u8 fsid[BTRFS_FSID_SIZE];	/* FS specific uuid */
	__le64 bytenr;	/* this block number */
	__le64 flags;

	/* allowed to be different from the btrfs_header from here on down */
	__le64 magic;
	__le64 generation;
	__le64 root;
	__le64 chunk_root;
	__le64 log_root;

	/* this will help find the new super based on the log root */
	__le64 log_root_transid;
	__le64 total_bytes;
	__le64 bytes_used;
	__le64 root_dir_objectid;
	__le64 num_devices;
	__le32 sectorsize;
	__le32 nodesize;
	__le32 __unused_leafsize;
	__le32 stripesize;
	__le32 sys_chunk_array_size;
	__le64 chunk_root_generation;
	__le64 compat_flags;
	__le64 compat_ro_flags;
	__le64 incompat_flags;
	__le16 csum_type;
	u8 root_level;
	u8 chunk_root_level;
	u8 log_root_level;
	struct btrfs_dev_item dev_item;

	char label[BTRFS_LABEL_SIZE];

	__le64 cache_generation;
	__le64 uuid_tree_generation;

	/* future expansion */
	__le64 reserved[30];
	u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
	struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
} __attribute__ ((__packed__));

/*
 * Compat flags that we support.  If any incompat flags are set other than the
 * ones specified below then we will fail to mount
 */
#define BTRFS_FEATURE_COMPAT_SUPP		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL

#define BTRFS_FEATURE_COMPAT_RO_SUPP			\
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE |	\
	 BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID)

#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL

#define BTRFS_FEATURE_INCOMPAT_SUPP			\
	(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF |		\
	 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL |	\
	 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS |		\
	 BTRFS_FEATURE_INCOMPAT_BIG_METADATA |		\
	 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO |		\
	 BTRFS_FEATURE_INCOMPAT_RAID56 |		\
	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
	 BTRFS_FEATURE_INCOMPAT_NO_HOLES)

#define BTRFS_FEATURE_INCOMPAT_SAFE_SET			\
	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
#define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR		0ULL
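
/*
 * For illustration only: the "fail to mount" rule above boils down to a
 * check of this shape in the mount path (the super block accessors such as
 * btrfs_super_incompat_flags() are generated elsewhere in this header):
 *
 *	u64 features = btrfs_super_incompat_flags(disk_super);
 *	if (features & ~BTRFS_FEATURE_INCOMPAT_SUPP)
 *		return -EINVAL;
 */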

/*
 * A leaf is full of items. offset and size tell us where to find
 * the item in the leaf (relative to the start of the data area)
 */
struct btrfs_item {
	struct btrfs_disk_key key;
	__le32 offset;
	__le32 size;
} __attribute__ ((__packed__));

/*
 * leaves have an item area and a data area:
 * [item0, item1....itemN] [free space] [dataN...data1, data0]
 *
 * The data is separate from the items to get the keys closer together
 * during searches.
 */
struct btrfs_leaf {
	struct btrfs_header header;
	struct btrfs_item items[];
} __attribute__ ((__packed__));

/*
 * all non-leaf blocks are nodes, they hold only keys and pointers to
 * other blocks
 */
struct btrfs_key_ptr {
	struct btrfs_disk_key key;
	__le64 blockptr;
	__le64 generation;
} __attribute__ ((__packed__));

struct btrfs_node {
	struct btrfs_header header;
	struct btrfs_key_ptr ptrs[];
} __attribute__ ((__packed__));

/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
enum { READA_NONE = 0, READA_BACK, READA_FORWARD };
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int leave_spinning:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
};
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \
					sizeof(struct btrfs_item))
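
/*
 * Typical usage sketch, shown only to illustrate the nodes[]/slots[] layout
 * described above (btrfs_alloc_path(), btrfs_search_slot() and
 * btrfs_free_path() are declared elsewhere in this header):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *		int slot = path->slots[0];
 *		... read the item at 'slot' from 'leaf' ...
 *	}
 *	btrfs_free_path(path);
 */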

struct btrfs_dev_replace {
	u64 replace_state;	/* see #define above */
	u64 time_started;	/* seconds since 1-Jan-1970 */
	u64 time_stopped;	/* seconds since 1-Jan-1970 */
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	u64 cont_reading_from_srcdev_mode;	/* see #define above */

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	pid_t lock_owner;
	atomic_t nesting_level;
	struct mutex lock_finishing_cancel_unmount;
	rwlock_t lock;
	atomic_t read_locks;
	atomic_t blocking_readers;
	wait_queue_head_t read_lock_wq;

	struct btrfs_scrub_progress scrub_progress;
};

/* For raid type sysfs entries */
struct raid_kobject {
	int raid_type;
	struct kobject kobj;
};

struct btrfs_space_info {
	spinlock_t lock;

	u64 total_bytes;	/* total bytes in the space,
				   this doesn't take mirrors into account */
	u64 bytes_used;		/* total bytes used,
				   this doesn't take mirrors into account */
	u64 bytes_pinned;	/* total bytes pinned, will be freed when the
				   transaction finishes */
	u64 bytes_reserved;	/* total bytes the allocator has reserved for
				   current allocations */
	u64 bytes_may_use;	/* number of bytes that may be used for
				   delalloc/allocations */
	u64 bytes_readonly;	/* total bytes that are read only */

	u64 max_extent_size;	/* This will hold the maximum extent size of
				   the space info if we had an ENOSPC in the
				   allocator. */

	unsigned int full:1;	/* indicates that we cannot allocate any more
				   chunks for this space */
	unsigned int chunk_alloc:1;	/* set if we are allocating a chunk */

	unsigned int flush:1;		/* set if we are trying to make space */

	unsigned int force_alloc;	/* set if we need to force a chunk
					   alloc for this space */

	u64 disk_used;		/* total bytes used on disk */
	u64 disk_total;		/* total bytes on disk, takes mirrors into
				   account */

	u64 flags;

	/*
	 * bytes_pinned is kept in line with what is actually pinned, as in
	 * we've called update_block_group and dropped the bytes_used counter
	 * and increased the bytes_pinned counter.  However this means that
	 * bytes_pinned does not reflect the bytes that will be pinned once the
	 * delayed refs are flushed, so this counter is inc'ed every time we
	 * call btrfs_free_extent so it is a realtime count of what will be
	 * freed once the transaction is committed.  It will be zeroed every
	 * time the transaction commits.
	 */
	struct percpu_counter total_bytes_pinned;

	struct list_head list;
	/* Protected by the spinlock 'lock'. */
	struct list_head ro_bgs;
	struct list_head priority_tickets;
	struct list_head tickets;
	u64 tickets_id;

	struct rw_semaphore groups_sem;
	/* for block groups in our same type */
	struct list_head block_groups[BTRFS_NR_RAID_TYPES];
	wait_queue_head_t wait;

	struct kobject kobj;
	struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
};

#define	BTRFS_BLOCK_RSV_GLOBAL		1
#define	BTRFS_BLOCK_RSV_DELALLOC	2
#define	BTRFS_BLOCK_RSV_TRANS		3
#define	BTRFS_BLOCK_RSV_CHUNK		4
#define	BTRFS_BLOCK_RSV_DELOPS		5
#define	BTRFS_BLOCK_RSV_EMPTY		6
#define	BTRFS_BLOCK_RSV_TEMP		7

struct btrfs_block_rsv {
	u64 size;
	u64 reserved;
	struct btrfs_space_info *space_info;
	spinlock_t lock;
	unsigned short full;
	unsigned short type;
	unsigned short failfast;
};

/*
 * free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes.  They are used for all metadata
 * allocations and data allocations in ssd mode.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* largest extent in this cluster */
	u64 max_size;

	/* first extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group_cache *block_group;
	/*
	 * when a cluster is allocated from a block group, we put the
	 * cluster onto a list in the block group so that it can
	 * be freed before the block group is freed.
	 */
	struct list_head block_group_list;
};

enum btrfs_caching_type {
	BTRFS_CACHE_NO		= 0,
	BTRFS_CACHE_STARTED	= 1,
	BTRFS_CACHE_FAST	= 2,
	BTRFS_CACHE_FINISHED	= 3,
	BTRFS_CACHE_ERROR	= 4,
};

enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN	= 0,
	BTRFS_DC_ERROR		= 1,
	BTRFS_DC_CLEAR		= 2,
	BTRFS_DC_SETUP		= 3,
};

struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group_cache *block_group;
	u64 progress;
	atomic_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)

struct btrfs_io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	struct inode *inode;
	unsigned long size;
	int index;
	int num_pages;
	int entries;
	int bitmaps;
	unsigned check_crcs:1;
};

struct btrfs_block_group_cache {
	struct btrfs_key key;
	struct btrfs_block_group_item item;
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 pinned;
	u64 reserved;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u32 sectorsize;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * It is just used for the delayed data space allocation because
	 * only the data space allocation and the relative metadata update
	 * can be done cross the transaction.
	 */
	struct rw_semaphore data_rwsem;

	/* for raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;

	unsigned int ro;
	unsigned int iref:1;
	unsigned int has_caching_ctl:1;
	unsigned int removed:1;

	int disk_cache_state;

	/* cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;
	u64 last_byte_to_unpin;

	struct btrfs_space_info *space_info;

	/* free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* block group cache stuff */
	struct rb_node cache_node;

	/* for block groups in the same raid type */
	struct list_head list;

	/* usage count */
	atomic_t count;

	/* List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	atomic_t trimming;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;
};

/* delayed seq elem */
struct seq_list {
	struct list_head list;
	u64 seq;
};

#define SEQ_LIST_INIT(name)	{ .list = LIST_HEAD_INIT((name).list), .seq = 0 }

enum btrfs_orphan_cleanup_state {
	ORPHAN_CLEANUP_STARTED	= 1,
	ORPHAN_CLEANUP_DONE	= 2,
};

/* used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	wait_queue_head_t wait;
	spinlock_t lock;
};

/* used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};

#define BTRFS_STRIPE_HASH_TABLE_BITS 11

void btrfs_init_async_reclaim_work(struct work_struct *work);

/* fs_info */
struct reloc_control;
struct btrfs_device;
struct btrfs_fs_devices;
struct btrfs_balance_control;
struct btrfs_delayed_root;

#define BTRFS_FS_BARRIER			1
#define BTRFS_FS_CLOSING_START			2
#define BTRFS_FS_CLOSING_DONE			3
#define BTRFS_FS_LOG_RECOVERING			4
#define BTRFS_FS_OPEN				5
#define BTRFS_FS_QUOTA_ENABLED			6
#define BTRFS_FS_QUOTA_ENABLING			7
#define BTRFS_FS_QUOTA_DISABLING		8
#define BTRFS_FS_UPDATE_UUID_TREE_GEN		9
#define BTRFS_FS_CREATING_FREE_SPACE_TREE	10
#define BTRFS_FS_BTREE_ERR			11
#define BTRFS_FS_LOG1_ERR			12
#define BTRFS_FS_LOG2_ERR			13
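
/*
 * For illustration only: these are bit numbers, not masks, and the word they
 * index is the 'flags' member of struct btrfs_fs_info below, so they are
 * meant for the atomic bitops, e.g.:
 *
 *	if (test_bit(BTRFS_FS_OPEN, &fs_info->flags))
 *		...
 */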

struct btrfs_fs_info {
	u8 fsid[BTRFS_FSID_SIZE];
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	struct btrfs_root *extent_root;
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *csum_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *free_space_root;

	/* the log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* block group cache stuff */
	spinlock_t block_group_cache_lock;
	u64 first_logical_byte;
	struct rb_root block_group_cache_tree;

	/* keep track of unallocated space */
	spinlock_t free_chunk_lock;
	u64 free_chunk_space;

	struct extent_io_tree freed_extents[2];
	struct extent_io_tree *pinned_extents;

	/* logical->physical extent mapping */
	struct btrfs_mapping_tree mapping_tree;

	/*
	 * block reservation for extent, checksum, root tree and
	 * delayed dir index item
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* block reservation for delay allocation */
	struct btrfs_block_rsv delalloc_block_rsv;
	/* block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	u64 generation;
	u64 last_trans_committed;
	u64 avg_delayed_ref_runtime;

	/*
	 * this is updated to the current trans every time a full commit
	 * is required instead of the faster short fsync log commits
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;
	/*
	 * Track requests for actions that need to be done during transaction
	 * commit (like for some mount options).
	 */
	unsigned long pending_changes;
	unsigned long compress_type:4;
	int commit_interval;
	/*
	 * It is a suggestive number, the read side is safe even if it gets a
	 * wrong number because we will write out the data into a regular
	 * extent. The write side (mount/remount) is under ->s_umount lock,
	 * so it is also safe.
	 */
	u64 max_inline;
	/*
	 * Protected by ->chunk_mutex and sb->s_umount.
	 *
	 * The reason that we use two locks to protect it is because only
	 * remount and mount operations can change it and these two operations
	 * are under sb->s_umount, but the read side (chunk allocation) can not
	 * acquire sb->s_umount or the deadlock would happen. So we use two
	 * locks to protect it. On the write side, we must acquire two locks,
	 * and on the read side, we just need to acquire one of them.
	 */
	u64 alloc_start;
	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because we never clear the flags, we needn't use
	 * the lock on the read side.
	 *
	 * We also needn't use the lock when we mount the fs, because
	 * there is no other task which will update the flag.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct block_device *__bdev;
	struct super_block *sb;
	struct inode *btree_inode;
	struct backing_dev_info bdi;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;
	struct mutex volume_mutex;

	/*
	 * this is taken to make sure we don't set block groups ro after
	 * the free space cache has been allocated on them
	 */
	struct mutex ro_block_group_mutex;

	/* this is used during read/modify/write to make sure
	 * no two ios are trying to mod the same stripe at the same
	 * time
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * this protects the ordered operations list only while we are
	 * processing all of the entries on it.
	 * This way we make sure the commit code doesn't find the list
	 * temporarily empty because another function happens to be doing
	 * non-waiting preflush before jumping into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;
	struct srcu_struct subvol_srcu;

	spinlock_t trans_lock;
	/*
	 * the reloc mutex goes with the trans lock, it is taken
	 * during commit to protect us from the relocation code
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	struct mutex cleaner_delayed_iput_mutex;

	/* this protects tree_mod_seq_list */
	spinlock_t tree_mod_seq_lock;
	atomic64_t tree_mod_seq;
	struct list_head tree_mod_seq_list;

	/* this protects tree_mod_log */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;

	atomic_t nr_async_submits;
	atomic_t async_submit_draining;
	atomic_t nr_async_bios;
	atomic_t async_delalloc_pages;
	atomic_t open_ioctl_trans;

	/*
	 * this is used to protect the following list -- ordered_roots.
	 */
	spinlock_t ordered_root_lock;

	/*
	 * all fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * these can span multiple transactions and basically include
	 * every dirty data page that isn't from nodatacow
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* all fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * there is a pool of worker threads for checksumming during writes
	 * and a pool for checksumming after reads.  This is because readers
	 * can run with FS locks held, and the writers may be waiting for
	 * those locks.  We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other
	 * two
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct btrfs_workqueue *endio_workers;
	struct btrfs_workqueue *endio_meta_workers;
	struct btrfs_workqueue *endio_raid56_workers;
	struct btrfs_workqueue *endio_repair_workers;
	struct btrfs_workqueue *rmw_workers;
	struct btrfs_workqueue *endio_meta_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *submit_workers;
	struct btrfs_workqueue *caching_workers;
	struct btrfs_workqueue *readahead_workers;

	/*
	 * fixup workers take dirty pages that didn't properly go through
	 * the cow mechanism and make them safe to write.
	 * It happens for the sys_munmap function call path
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	/* the extent workers do delayed refs on the extent allocation tree */
	struct btrfs_workqueue *extent_workers;
	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	int thread_pool_size;

	struct kobject *space_info_kobj;

	u64 total_pinned;

	/* used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * the space_info list is almost entirely read only.  It only changes
	 * when we add a new raid type to the FS, and that happens
	 * very rarely.  RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* all metadata allocations go through this cluster */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* auto defrag inodes go here */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * these three are in extended format (availability of single
	 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
	 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* restriper state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_running;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	unsigned data_chunk_allocations;
	unsigned metadata_ratio;

	void *bdev_holder;

	/* private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	int scrub_workers_refcnt;
	struct btrfs_workqueue *scrub_workers;
	struct btrfs_workqueue *scrub_wr_completion_workers;
	struct btrfs_workqueue *scrub_nocow_workers;
	struct btrfs_workqueue *scrub_parity_workers;

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	u32 check_integrity_print_mask;
#endif
	/* is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* holds configuration and tracking. Protected by qgroup_lock */
	struct rb_root qgroup_tree;
	struct rb_root qgroup_op_tree;
	spinlock_t qgroup_lock;
	spinlock_t qgroup_op_lock;
	atomic_t qgroup_op_seq;

	/*
	 * used to avoid frequently calling ulist_alloc()/ulist_free()
	 * when doing qgroup accounting, it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/* protect user change for quota operations */
	struct mutex qgroup_ioctl_lock;

	/* list of dirty qgroups to be written at next commit */
	struct list_head dirty_qgroups;

	/* used by qgroup for an efficient tree traversal */
	u64 qgroup_seq;

	/* qgroup rescan items */
	struct mutex qgroup_rescan_lock; /* protects the progress item */
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	bool qgroup_rescan_running;	/* protected by qgroup_rescan_lock */

	/* filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* readahead tree */
	spinlock_t reada_lock;
	struct radix_tree_root reada_tree;

	/* readahead works cnt */
	atomic_t reada_works_cnt;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	struct radix_tree_root buffer_radix;

	/* next backup root to be overwritten */
	int backup_root_index;

	int num_tolerated_disk_barrier_failures;

	/* device replace state */
	struct btrfs_dev_replace dev_replace;

	atomic_t mutually_exclusive_operation_running;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;

	spinlock_t unused_bgs_lock;
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	struct mutex delete_unused_bgs_mutex;

	/* For btrfs to record security options */
	struct security_mnt_opts security_opts;

	/*
	 * Chunks that can't be freed yet (under a trim/discard operation)
	 * and will be freed later. Protected by fs_info->chunk_mutex.
	 */
	struct list_head pinned_chunks;

	/* Used to record internally whether fs has been frozen */
	int fs_frozen;
};

struct btrfs_subvolume_writers {
	struct percpu_counter counter;
	wait_queue_head_t wait;
};

/*
 * The state of btrfs root
 */
/*
 * btrfs_record_root_in_trans is a multi-step process,
 * and it can race with the balancing code. But the
 * race is very small, and only the first time the root
 * is added to each transaction. So IN_TRANS_SETUP
 * is used to tell us when more checks are required
 */
#define BTRFS_ROOT_IN_TRANS_SETUP	0
#define BTRFS_ROOT_REF_COWS		1
#define BTRFS_ROOT_TRACK_DIRTY		2
#define BTRFS_ROOT_IN_RADIX		3
#define BTRFS_ROOT_ORPHAN_ITEM_INSERTED	4
#define BTRFS_ROOT_DEFRAG_RUNNING	5
#define BTRFS_ROOT_FORCE_COW		6
#define BTRFS_ROOT_MULTI_LOG_TASKS	7
#define BTRFS_ROOT_DIRTY		8
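
/*
 * For illustration only: like the BTRFS_FS_* values above, these are bit
 * numbers indexing the 'state' member of struct btrfs_root below, used with
 * the atomic bitops, e.g.:
 *
 *	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
 *		...
 */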

/*
 * in ram representation of the tree.  extent_root is used for all allocations
 * and for the extent tree's own root.
 */
struct btrfs_root {
	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	/* free ino cache stuff */
	struct btrfs_free_space_ctl *free_ino_ctl;
	enum btrfs_caching_type ino_cache_state;
	spinlock_t ino_cache_lock;
	wait_queue_head_t ino_cache_wait;
	struct btrfs_free_space_ctl *free_ino_pinned;
	u64 ino_cache_progress;
	struct inode *ino_cache_inode;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	atomic_t log_writers;
	atomic_t log_commit[2];
	atomic_t log_batch;
	int log_transid;
	/* updated no matter whether the commit succeeds or not */
	int log_transid_committed;
	/* only updated when the commit succeeds */
	int last_log_commit;
	pid_t log_start_pid;

	u64 objectid;
	u64 last_trans;

	/* data allocations are done in sectorsize units */
	u32 sectorsize;

	/* node allocations are done in nodesize units */
	u32 nodesize;

	u32 stripesize;

	u32 type;

	u64 highest_objectid;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/* only used when CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */
	u64 alloc_bytenr;
#endif

	u64 defrag_trans_start;
	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;
	char *name;

	/* the dirty list is only used by non-reference counted roots */
	struct list_head dirty_list;

	struct list_head root_list;

	spinlock_t log_extents_lock[2];
	struct list_head logged_list[2];

	spinlock_t orphan_lock;
	atomic_t orphan_inodes;
	struct btrfs_block_rsv *orphan_block_rsv;
	int orphan_cleanup_state;

	spinlock_t inode_lock;
	/* red-black tree that keeps track of in-memory inodes */
	struct rb_root inode_tree;

	/*
	 * radix tree that keeps track of delayed nodes of every inode,
	 * protected by inode_lock
	 */
	struct radix_tree_root delayed_nodes_tree;
	/*
	 * right now this just gets used so that a root has its own devid
	 * for stat. It may be used for more later
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	atomic_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * all of the inodes that have delalloc bytes.  It is possible for
	 * this list to be empty even when there is still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * this is used by the balancing code to wait for all the pending
	 * ordered extents
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * all of the data=ordered extents pending writeback
	 * these can span multiple transactions and basically include
	 * every dirty data page that isn't from nodatacow
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Number of currently running SEND ioctls to prevent
	 * manipulation with the read-only status via SUBVOL_SETFLAGS
	 */
	int send_in_progress;
	struct btrfs_subvolume_writers *subv_writers;
	atomic_t will_be_snapshoted;

	/* For qgroup metadata space reserve */
	atomic_t qgroup_meta_rsv;
};

static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize)
{
	return blocksize - sizeof(struct btrfs_header);
}

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_root *root)
{
	return __BTRFS_LEAF_DATA_SIZE(root->nodesize);
}

static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_root *root)
{
	return BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
}

static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_root *root)
{
	return BTRFS_LEAF_DATA_SIZE(root) / sizeof(struct btrfs_key_ptr);
}

#define BTRFS_FILE_EXTENT_INLINE_DATA_START		\
		(offsetof(struct btrfs_file_extent_item, disk_bytenr))
static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_root *root)
{
	return BTRFS_MAX_ITEM_SIZE(root) -
	       BTRFS_FILE_EXTENT_INLINE_DATA_START;
}

static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root)
{
	return BTRFS_MAX_ITEM_SIZE(root) - sizeof(struct btrfs_dir_item);
}
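
/*
 * Worked example (illustrative): the packed struct btrfs_header above is
 * 32 + 16 + 8 + 8 + 16 + 8 + 8 + 4 + 1 = 101 bytes, so with a 16K nodesize
 * BTRFS_LEAF_DATA_SIZE() is 16384 - 101 = 16283 bytes, and
 * BTRFS_MAX_ITEM_SIZE() is that minus the 25-byte struct btrfs_item
 * (17-byte disk key plus two __le32), i.e. 16258 bytes.
 */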

/*
 * Flags for mount options.
 *
 * Note: don't forget to add new options to btrfs_show_options()
 */
#define BTRFS_MOUNT_NODATASUM		(1 << 0)
#define BTRFS_MOUNT_NODATACOW		(1 << 1)
#define BTRFS_MOUNT_NOBARRIER		(1 << 2)
#define BTRFS_MOUNT_SSD			(1 << 3)
#define BTRFS_MOUNT_DEGRADED		(1 << 4)
#define BTRFS_MOUNT_COMPRESS		(1 << 5)
#define BTRFS_MOUNT_NOTREELOG		(1 << 6)
#define BTRFS_MOUNT_FLUSHONCOMMIT	(1 << 7)
#define BTRFS_MOUNT_SSD_SPREAD		(1 << 8)
#define BTRFS_MOUNT_NOSSD		(1 << 9)
#define BTRFS_MOUNT_DISCARD		(1 << 10)
#define BTRFS_MOUNT_FORCE_COMPRESS	(1 << 11)
#define BTRFS_MOUNT_SPACE_CACHE		(1 << 12)
#define BTRFS_MOUNT_CLEAR_CACHE		(1 << 13)
#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
#define BTRFS_MOUNT_ENOSPC_DEBUG	(1 << 15)
#define BTRFS_MOUNT_AUTO_DEFRAG		(1 << 16)
#define BTRFS_MOUNT_INODE_MAP_CACHE	(1 << 17)
#define BTRFS_MOUNT_USEBACKUPROOT	(1 << 18)
#define BTRFS_MOUNT_SKIP_BALANCE	(1 << 19)
#define BTRFS_MOUNT_CHECK_INTEGRITY	(1 << 20)
#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
#define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	(1 << 22)
#define BTRFS_MOUNT_RESCAN_UUID_TREE	(1 << 23)
#define BTRFS_MOUNT_FRAGMENT_DATA	(1 << 24)
#define BTRFS_MOUNT_FRAGMENT_METADATA	(1 << 25)
#define BTRFS_MOUNT_FREE_SPACE_TREE	(1 << 26)
#define BTRFS_MOUNT_NOLOGREPLAY		(1 << 27)

#define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
#define BTRFS_DEFAULT_MAX_INLINE	(2048)

#define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(fs_info, opt)	((fs_info)->mount_opt & \
					 BTRFS_MOUNT_##opt)

#define btrfs_set_and_info(fs_info, opt, fmt, args...)		\
{								\
	if (!btrfs_test_opt(fs_info, opt))			\
		btrfs_info(fs_info, fmt, ##args);		\
	btrfs_set_opt(fs_info->mount_opt, opt);			\
}

#define btrfs_clear_and_info(fs_info, opt, fmt, args...)	\
{								\
	if (btrfs_test_opt(fs_info, opt))			\
		btrfs_info(fs_info, fmt, ##args);		\
	btrfs_clear_opt(fs_info->mount_opt, opt);		\
}

#ifdef CONFIG_BTRFS_DEBUG
static inline int
btrfs_should_fragment_free_space(struct btrfs_root *root,
				 struct btrfs_block_group_cache *block_group)
{
	return (btrfs_test_opt(root->fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(root->fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

/*
 * Requests for changes that need to be done during transaction commit.
 *
 * Internal mount options that are used for special handling of the real
 * mount options (eg. cannot be set during remount and have to be set during
 * transaction commit)
 */

#define BTRFS_PENDING_SET_INODE_MAP_CACHE	(0)
#define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE	(1)
#define BTRFS_PENDING_COMMIT			(2)

#define btrfs_test_pending(info, opt)	\
	test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
#define btrfs_set_pending(info, opt)	\
	set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
#define btrfs_clear_pending(info, opt)	\
	clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
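
/*
 * Rough sketch (illustrative, not the exact implementation): transaction
 * commit turns a recorded pending change into the real mount option and
 * then drops the request, e.g.:
 *
 *	if (btrfs_test_pending(fs_info, SET_INODE_MAP_CACHE))
 *		btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
 *	if (btrfs_test_pending(fs_info, CLEAR_INODE_MAP_CACHE))
 *		btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
 *	fs_info->pending_changes = 0;
 */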

/*
 * Helpers for setting pending mount option changes.
 *
 * Expects corresponding macros
 * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name
 */
#define btrfs_set_pending_and_info(info, opt, fmt, args...)	\
do {								\
	if (!btrfs_raw_test_opt((info)->mount_opt, opt)) {	\
		btrfs_info((info), fmt, ##args);		\
		btrfs_set_pending((info), SET_##opt);		\
		btrfs_clear_pending((info), CLEAR_##opt);	\
	}							\
} while(0)

#define btrfs_clear_pending_and_info(info, opt, fmt, args...)	\
do {								\
	if (btrfs_raw_test_opt((info)->mount_opt, opt)) {	\
		btrfs_info((info), fmt, ##args);		\
		btrfs_set_pending((info), CLEAR_##opt);		\
		btrfs_clear_pending((info), SET_##opt);		\
	}							\
} while(0)

/*
 * Inode flags
 */
#define BTRFS_INODE_NODATASUM		(1 << 0)
#define BTRFS_INODE_NODATACOW		(1 << 1)
#define BTRFS_INODE_READONLY		(1 << 2)
#define BTRFS_INODE_NOCOMPRESS		(1 << 3)
#define BTRFS_INODE_PREALLOC		(1 << 4)
#define BTRFS_INODE_SYNC		(1 << 5)
#define BTRFS_INODE_IMMUTABLE		(1 << 6)
#define BTRFS_INODE_APPEND		(1 << 7)
#define BTRFS_INODE_NODUMP		(1 << 8)
#define BTRFS_INODE_NOATIME		(1 << 9)
#define BTRFS_INODE_DIRSYNC		(1 << 10)
#define BTRFS_INODE_COMPRESS		(1 << 11)

#define BTRFS_INODE_ROOT_ITEM_INIT	(1 << 31)

struct btrfs_map_token {
	struct extent_buffer *eb;
	char *kaddr;
	unsigned long offset;
};

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
				((bytes) >> (fs_info)->sb->s_blocksize_bits)

static inline void btrfs_init_map_token(struct btrfs_map_token *token)
{
	token->kaddr = NULL;
}

/* some macros to generate set/get functions for the struct fields.  This
 * assumes there is a lefoo_to_cpu for every type, so lets make a simple
 * one for u8:
 */
#define le8_to_cpu(v) (v)
#define cpu_to_le8(v) (v)
#define __le8 u8

#define read_eb_member(eb, ptr, type, member, result) (\
	read_extent_buffer(eb, (char *)(result),		\
			   ((unsigned long)(ptr)) +		\
			    offsetof(type, member),		\
			   sizeof(((type *)0)->member)))

#define write_eb_member(eb, ptr, type, member, result) (\
	write_extent_buffer(eb, (char *)(result),		\
			  ((unsigned long)(ptr)) +		\
			   offsetof(type, member),		\
			   sizeof(((type *)0)->member)))

#define DECLARE_BTRFS_SETGET_BITS(bits)					\
u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr,	\
			       unsigned long off,			\
			       struct btrfs_map_token *token);		\
void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr,	\
			    unsigned long off, u##bits val,		\
			    struct btrfs_map_token *token);		\
static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \
				       unsigned long off)		\
{									\
	return btrfs_get_token_##bits(eb, ptr, off, NULL);		\
}									\
static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
				    unsigned long off, u##bits val)	\
{									\
	btrfs_set_token_##bits(eb, ptr, off, val, NULL);		\
}

DECLARE_BTRFS_SETGET_BITS(8)
DECLARE_BTRFS_SETGET_BITS(16)
DECLARE_BTRFS_SETGET_BITS(32)
DECLARE_BTRFS_SETGET_BITS(64)
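
/*
 * Usage sketch (illustrative): the token variants cache the mapped page of
 * the extent buffer, so a run of get/set calls against the same leaf avoids
 * remapping it for every field, e.g. when filling an inode item:
 *
 *	struct btrfs_map_token token;
 *
 *	btrfs_init_map_token(&token);
 *	btrfs_set_token_inode_size(leaf, item, size, &token);
 *	btrfs_set_token_inode_mode(leaf, item, mode, &token);
 *
 * (btrfs_set_token_inode_size() and friends are generated by the
 * BTRFS_SETGET_FUNCS() invocations further down.)
 */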

#define BTRFS_SETGET_FUNCS(name, type, member, bits)			\
static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s)	\
{									\
	BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member);	\
	return btrfs_get_##bits(eb, s, offsetof(type, member));		\
}									\
static inline void btrfs_set_##name(struct extent_buffer *eb, type *s,	\
				    u##bits val)			\
{									\
	BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member);	\
	btrfs_set_##bits(eb, s, offsetof(type, member), val);		\
}									\
static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \
					 struct btrfs_map_token *token)	\
{									\
	BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member);	\
	return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \
}									\
static inline void btrfs_set_token_##name(struct extent_buffer *eb,	\
					  type *s, u##bits val,		\
					  struct btrfs_map_token *token) \
{									\
	BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member);	\
	btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \
}

#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits)		\
static inline u##bits btrfs_##name(struct extent_buffer *eb)		\
{									\
	type *p = page_address(eb->pages[0]);				\
	u##bits res = le##bits##_to_cpu(p->member);			\
	return res;							\
}									\
static inline void btrfs_set_##name(struct extent_buffer *eb,		\
				    u##bits val)			\
{									\
	type *p = page_address(eb->pages[0]);				\
	p->member = cpu_to_le##bits(val);				\
}

#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits)		\
static inline u##bits btrfs_##name(type *s)				\
{									\
	return le##bits##_to_cpu(s->member);				\
}									\
static inline void btrfs_set_##name(type *s, u##bits val)		\
{									\
	s->member = cpu_to_le##bits(val);				\
}

BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64);
BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64);
BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64);
BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32);
BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32);
BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item,
		   start_offset, 64);
BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32);
BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64);
BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32);
BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8);
BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8);
BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64);
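
/*
 * For illustration only: BTRFS_SETGET_FUNCS() generates accessors that read
 * and write a field while it still lives inside an extent_buffer (doing the
 * endian conversion on the fly), while BTRFS_SETGET_STACK_FUNCS() below
 * generates the same accessors for a structure already copied into ordinary
 * memory, e.g.:
 *
 *	devid = btrfs_device_id(leaf, dev_item);		(on-disk leaf)
 *	devid = btrfs_stack_device_id(&super_copy->dev_item);	(in-memory copy)
 */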

BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64);
BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item,
			 total_bytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item,
			 bytes_used, 64);
BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item,
			 io_align, 32);
BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item,
			 io_width, 32);
BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item,
			 sector_size, 32);
BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item,
			 dev_group, 32);
BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item,
			 seek_speed, 8);
BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item,
			 bandwidth, 8);
BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item,
			 generation, 64);

static inline unsigned long btrfs_device_uuid(struct btrfs_dev_item *d)
{
	return (unsigned long)d + offsetof(struct btrfs_dev_item, uuid);
}

static inline unsigned long btrfs_device_fsid(struct btrfs_dev_item *d)
{
	return (unsigned long)d + offsetof(struct btrfs_dev_item, fsid);
}

BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64);
BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64);
BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64);
BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32);
BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32);
BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32);
BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64);
BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16);
BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16);
BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64);
BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64);

static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s)
{
	return (char *)s + offsetof(struct btrfs_stripe, dev_uuid);
}

BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk,
			 stripe_len, 64);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk,
			 io_align, 32);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk,
			 io_width, 32);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk,
			 sector_size, 32);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk,
			 num_stripes, 16);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk,
			 sub_stripes, 16);
BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64);

static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c,
						   int nr)
{
	unsigned long offset = (unsigned long)c;
	offset += offsetof(struct btrfs_chunk, stripe);
	offset += nr * sizeof(struct btrfs_stripe);
	return (struct btrfs_stripe *)offset;
}

static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr)
{
	return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr));
}

static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
					 struct btrfs_chunk *c, int nr)
{
	return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
}

static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
					 struct btrfs_chunk *c, int nr)
{
	return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
}
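
/*
 * Usage sketch (illustrative): walking the stripes of a chunk item found in
 * a leaf, the same pattern the chunk mapping code uses when it reads the
 * chunk tree:
 *
 *	int i, num = btrfs_chunk_num_stripes(leaf, chunk);
 *
 *	for (i = 0; i < num; i++) {
 *		u64 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
 *		u64 physical = btrfs_stripe_offset_nr(leaf, chunk, i);
 *		...
 *	}
 */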

/* struct btrfs_block_group_item */
BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
			 used, 64);
BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item,
			 used, 64);
BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid,
			struct btrfs_block_group_item, chunk_objectid, 64);

BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid,
			struct btrfs_block_group_item, chunk_objectid, 64);
BTRFS_SETGET_FUNCS(disk_block_group_flags,
			struct btrfs_block_group_item, flags, 64);
BTRFS_SETGET_STACK_FUNCS(block_group_flags,
			struct btrfs_block_group_item, flags, 64);

/* struct btrfs_free_space_info */
BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info,
		   extent_count, 32);
BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32);

/* struct btrfs_inode_ref */
BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);

/* struct btrfs_inode_extref */
BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref,
		   parent_objectid, 64);
BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref,
		   name_len, 16);
BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64);

/* struct btrfs_inode_item */
BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64);
BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64);
BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64);
BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64);
BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64);
BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);

/* struct btrfs_dev_extent */
BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent,
1698 chunk_tree, 64); 1699 BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent, 1700 chunk_objectid, 64); 1701 BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, 1702 chunk_offset, 64); 1703 BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); 1704 1705 static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) 1706 { 1707 unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); 1708 return (unsigned long)dev + ptr; 1709 } 1710 1711 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); 1712 BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, 1713 generation, 64); 1714 BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); 1715 1716 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); 1717 1718 1719 BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); 1720 1721 static inline void btrfs_tree_block_key(struct extent_buffer *eb, 1722 struct btrfs_tree_block_info *item, 1723 struct btrfs_disk_key *key) 1724 { 1725 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 1726 } 1727 1728 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, 1729 struct btrfs_tree_block_info *item, 1730 struct btrfs_disk_key *key) 1731 { 1732 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 1733 } 1734 1735 BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref, 1736 root, 64); 1737 BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref, 1738 objectid, 64); 1739 BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref, 1740 offset, 64); 1741 BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, 1742 count, 32); 1743 1744 BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, 1745 count, 32); 1746 1747 BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref, 1748 type, 8); 1749 BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref, 1750 offset, 64); 1751 1752 static inline u32 btrfs_extent_inline_ref_size(int type) 1753 { 1754 if (type == BTRFS_TREE_BLOCK_REF_KEY || 1755 type == BTRFS_SHARED_BLOCK_REF_KEY) 1756 return sizeof(struct btrfs_extent_inline_ref); 1757 if (type == BTRFS_SHARED_DATA_REF_KEY) 1758 return sizeof(struct btrfs_shared_data_ref) + 1759 sizeof(struct btrfs_extent_inline_ref); 1760 if (type == BTRFS_EXTENT_DATA_REF_KEY) 1761 return sizeof(struct btrfs_extent_data_ref) + 1762 offsetof(struct btrfs_extent_inline_ref, offset); 1763 BUG(); 1764 return 0; 1765 } 1766 1767 BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); 1768 BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, 1769 generation, 64); 1770 BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); 1771 BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); 1772 1773 /* struct btrfs_node */ 1774 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); 1775 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64); 1776 BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, 1777 blockptr, 64); 1778 BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, 1779 generation, 64); 1780 1781 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) 1782 { 1783 unsigned long ptr; 1784 ptr = offsetof(struct btrfs_node, ptrs) + 1785 sizeof(struct 
btrfs_key_ptr) * nr; 1786 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); 1787 } 1788 1789 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, 1790 int nr, u64 val) 1791 { 1792 unsigned long ptr; 1793 ptr = offsetof(struct btrfs_node, ptrs) + 1794 sizeof(struct btrfs_key_ptr) * nr; 1795 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); 1796 } 1797 1798 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) 1799 { 1800 unsigned long ptr; 1801 ptr = offsetof(struct btrfs_node, ptrs) + 1802 sizeof(struct btrfs_key_ptr) * nr; 1803 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); 1804 } 1805 1806 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, 1807 int nr, u64 val) 1808 { 1809 unsigned long ptr; 1810 ptr = offsetof(struct btrfs_node, ptrs) + 1811 sizeof(struct btrfs_key_ptr) * nr; 1812 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); 1813 } 1814 1815 static inline unsigned long btrfs_node_key_ptr_offset(int nr) 1816 { 1817 return offsetof(struct btrfs_node, ptrs) + 1818 sizeof(struct btrfs_key_ptr) * nr; 1819 } 1820 1821 void btrfs_node_key(struct extent_buffer *eb, 1822 struct btrfs_disk_key *disk_key, int nr); 1823 1824 static inline void btrfs_set_node_key(struct extent_buffer *eb, 1825 struct btrfs_disk_key *disk_key, int nr) 1826 { 1827 unsigned long ptr; 1828 ptr = btrfs_node_key_ptr_offset(nr); 1829 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, 1830 struct btrfs_key_ptr, key, disk_key); 1831 } 1832 1833 /* struct btrfs_item */ 1834 BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32); 1835 BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32); 1836 BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32); 1837 BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32); 1838 1839 static inline unsigned long btrfs_item_nr_offset(int nr) 1840 { 1841 return offsetof(struct btrfs_leaf, items) + 1842 sizeof(struct btrfs_item) * nr; 1843 } 1844 1845 static inline struct btrfs_item *btrfs_item_nr(int nr) 1846 { 1847 return (struct btrfs_item *)btrfs_item_nr_offset(nr); 1848 } 1849 1850 static inline u32 btrfs_item_end(struct extent_buffer *eb, 1851 struct btrfs_item *item) 1852 { 1853 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); 1854 } 1855 1856 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) 1857 { 1858 return btrfs_item_end(eb, btrfs_item_nr(nr)); 1859 } 1860 1861 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) 1862 { 1863 return btrfs_item_offset(eb, btrfs_item_nr(nr)); 1864 } 1865 1866 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) 1867 { 1868 return btrfs_item_size(eb, btrfs_item_nr(nr)); 1869 } 1870 1871 static inline void btrfs_item_key(struct extent_buffer *eb, 1872 struct btrfs_disk_key *disk_key, int nr) 1873 { 1874 struct btrfs_item *item = btrfs_item_nr(nr); 1875 read_eb_member(eb, item, struct btrfs_item, key, disk_key); 1876 } 1877 1878 static inline void btrfs_set_item_key(struct extent_buffer *eb, 1879 struct btrfs_disk_key *disk_key, int nr) 1880 { 1881 struct btrfs_item *item = btrfs_item_nr(nr); 1882 write_eb_member(eb, item, struct btrfs_item, key, disk_key); 1883 } 1884 1885 BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64); 1886 1887 /* 1888 * struct btrfs_root_ref 1889 */ 1890 BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64); 1891 BTRFS_SETGET_FUNCS(root_ref_sequence, struct 
btrfs_root_ref, sequence, 64); 1892 BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16); 1893 1894 /* struct btrfs_dir_item */ 1895 BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16); 1896 BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8); 1897 BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16); 1898 BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64); 1899 BTRFS_SETGET_STACK_FUNCS(stack_dir_type, struct btrfs_dir_item, type, 8); 1900 BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item, 1901 data_len, 16); 1902 BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, 1903 name_len, 16); 1904 BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, 1905 transid, 64); 1906 1907 static inline void btrfs_dir_item_key(struct extent_buffer *eb, 1908 struct btrfs_dir_item *item, 1909 struct btrfs_disk_key *key) 1910 { 1911 read_eb_member(eb, item, struct btrfs_dir_item, location, key); 1912 } 1913 1914 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, 1915 struct btrfs_dir_item *item, 1916 struct btrfs_disk_key *key) 1917 { 1918 write_eb_member(eb, item, struct btrfs_dir_item, location, key); 1919 } 1920 1921 BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header, 1922 num_entries, 64); 1923 BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, 1924 num_bitmaps, 64); 1925 BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, 1926 generation, 64); 1927 1928 static inline void btrfs_free_space_key(struct extent_buffer *eb, 1929 struct btrfs_free_space_header *h, 1930 struct btrfs_disk_key *key) 1931 { 1932 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); 1933 } 1934 1935 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, 1936 struct btrfs_free_space_header *h, 1937 struct btrfs_disk_key *key) 1938 { 1939 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); 1940 } 1941 1942 /* struct btrfs_disk_key */ 1943 BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, 1944 objectid, 64); 1945 BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); 1946 BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); 1947 1948 static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, 1949 struct btrfs_disk_key *disk) 1950 { 1951 cpu->offset = le64_to_cpu(disk->offset); 1952 cpu->type = disk->type; 1953 cpu->objectid = le64_to_cpu(disk->objectid); 1954 } 1955 1956 static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk, 1957 struct btrfs_key *cpu) 1958 { 1959 disk->offset = cpu_to_le64(cpu->offset); 1960 disk->type = cpu->type; 1961 disk->objectid = cpu_to_le64(cpu->objectid); 1962 } 1963 1964 static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, 1965 struct btrfs_key *key, int nr) 1966 { 1967 struct btrfs_disk_key disk_key; 1968 btrfs_node_key(eb, &disk_key, nr); 1969 btrfs_disk_key_to_cpu(key, &disk_key); 1970 } 1971 1972 static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, 1973 struct btrfs_key *key, int nr) 1974 { 1975 struct btrfs_disk_key disk_key; 1976 btrfs_item_key(eb, &disk_key, nr); 1977 btrfs_disk_key_to_cpu(key, &disk_key); 1978 } 1979 1980 static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, 1981 struct btrfs_dir_item *item, 1982 struct btrfs_key *key) 1983 { 1984 struct btrfs_disk_key disk_key; 1985 btrfs_dir_item_key(eb, item, &disk_key); 
1986 btrfs_disk_key_to_cpu(key, &disk_key); 1987 } 1988 1989 1990 static inline u8 btrfs_key_type(struct btrfs_key *key) 1991 { 1992 return key->type; 1993 } 1994 1995 static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val) 1996 { 1997 key->type = val; 1998 } 1999 2000 /* struct btrfs_header */ 2001 BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); 2002 BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, 2003 generation, 64); 2004 BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64); 2005 BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32); 2006 BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64); 2007 BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8); 2008 BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header, 2009 generation, 64); 2010 BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64); 2011 BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, 2012 nritems, 32); 2013 BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64); 2014 2015 static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) 2016 { 2017 return (btrfs_header_flags(eb) & flag) == flag; 2018 } 2019 2020 static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) 2021 { 2022 u64 flags = btrfs_header_flags(eb); 2023 btrfs_set_header_flags(eb, flags | flag); 2024 return (flags & flag) == flag; 2025 } 2026 2027 static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) 2028 { 2029 u64 flags = btrfs_header_flags(eb); 2030 btrfs_set_header_flags(eb, flags & ~flag); 2031 return (flags & flag) == flag; 2032 } 2033 2034 static inline int btrfs_header_backref_rev(struct extent_buffer *eb) 2035 { 2036 u64 flags = btrfs_header_flags(eb); 2037 return flags >> BTRFS_BACKREF_REV_SHIFT; 2038 } 2039 2040 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, 2041 int rev) 2042 { 2043 u64 flags = btrfs_header_flags(eb); 2044 flags &= ~BTRFS_BACKREF_REV_MASK; 2045 flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT; 2046 btrfs_set_header_flags(eb, flags); 2047 } 2048 2049 static inline unsigned long btrfs_header_fsid(void) 2050 { 2051 return offsetof(struct btrfs_header, fsid); 2052 } 2053 2054 static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) 2055 { 2056 return offsetof(struct btrfs_header, chunk_tree_uuid); 2057 } 2058 2059 static inline int btrfs_is_leaf(struct extent_buffer *eb) 2060 { 2061 return btrfs_header_level(eb) == 0; 2062 } 2063 2064 /* struct btrfs_root_item */ 2065 BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item, 2066 generation, 64); 2067 BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32); 2068 BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64); 2069 BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8); 2070 2071 BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item, 2072 generation, 64); 2073 BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64); 2074 BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8); 2075 BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64); 2076 BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); 2077 BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64); 2078 BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, 
bytes_used, 64); 2079 BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); 2080 BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, 2081 last_snapshot, 64); 2082 BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item, 2083 generation_v2, 64); 2084 BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item, 2085 ctransid, 64); 2086 BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item, 2087 otransid, 64); 2088 BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, 2089 stransid, 64); 2090 BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, 2091 rtransid, 64); 2092 2093 static inline bool btrfs_root_readonly(struct btrfs_root *root) 2094 { 2095 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; 2096 } 2097 2098 static inline bool btrfs_root_dead(struct btrfs_root *root) 2099 { 2100 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; 2101 } 2102 2103 /* struct btrfs_root_backup */ 2104 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup, 2105 tree_root, 64); 2106 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup, 2107 tree_root_gen, 64); 2108 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup, 2109 tree_root_level, 8); 2110 2111 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup, 2112 chunk_root, 64); 2113 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup, 2114 chunk_root_gen, 64); 2115 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup, 2116 chunk_root_level, 8); 2117 2118 BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup, 2119 extent_root, 64); 2120 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup, 2121 extent_root_gen, 64); 2122 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup, 2123 extent_root_level, 8); 2124 2125 BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup, 2126 fs_root, 64); 2127 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup, 2128 fs_root_gen, 64); 2129 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup, 2130 fs_root_level, 8); 2131 2132 BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup, 2133 dev_root, 64); 2134 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup, 2135 dev_root_gen, 64); 2136 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup, 2137 dev_root_level, 8); 2138 2139 BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup, 2140 csum_root, 64); 2141 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup, 2142 csum_root_gen, 64); 2143 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup, 2144 csum_root_level, 8); 2145 BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup, 2146 total_bytes, 64); 2147 BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup, 2148 bytes_used, 64); 2149 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, 2150 num_devices, 64); 2151 2152 /* struct btrfs_balance_item */ 2153 BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); 2154 2155 static inline void btrfs_balance_data(struct extent_buffer *eb, 2156 struct btrfs_balance_item *bi, 2157 struct btrfs_disk_balance_args *ba) 2158 { 2159 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2160 } 2161 2162 static inline void 
btrfs_set_balance_data(struct extent_buffer *eb, 2163 struct btrfs_balance_item *bi, 2164 struct btrfs_disk_balance_args *ba) 2165 { 2166 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2167 } 2168 2169 static inline void btrfs_balance_meta(struct extent_buffer *eb, 2170 struct btrfs_balance_item *bi, 2171 struct btrfs_disk_balance_args *ba) 2172 { 2173 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2174 } 2175 2176 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, 2177 struct btrfs_balance_item *bi, 2178 struct btrfs_disk_balance_args *ba) 2179 { 2180 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2181 } 2182 2183 static inline void btrfs_balance_sys(struct extent_buffer *eb, 2184 struct btrfs_balance_item *bi, 2185 struct btrfs_disk_balance_args *ba) 2186 { 2187 read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 2188 } 2189 2190 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, 2191 struct btrfs_balance_item *bi, 2192 struct btrfs_disk_balance_args *ba) 2193 { 2194 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 2195 } 2196 2197 static inline void 2198 btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, 2199 struct btrfs_disk_balance_args *disk) 2200 { 2201 memset(cpu, 0, sizeof(*cpu)); 2202 2203 cpu->profiles = le64_to_cpu(disk->profiles); 2204 cpu->usage = le64_to_cpu(disk->usage); 2205 cpu->devid = le64_to_cpu(disk->devid); 2206 cpu->pstart = le64_to_cpu(disk->pstart); 2207 cpu->pend = le64_to_cpu(disk->pend); 2208 cpu->vstart = le64_to_cpu(disk->vstart); 2209 cpu->vend = le64_to_cpu(disk->vend); 2210 cpu->target = le64_to_cpu(disk->target); 2211 cpu->flags = le64_to_cpu(disk->flags); 2212 cpu->limit = le64_to_cpu(disk->limit); 2213 } 2214 2215 static inline void 2216 btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, 2217 struct btrfs_balance_args *cpu) 2218 { 2219 memset(disk, 0, sizeof(*disk)); 2220 2221 disk->profiles = cpu_to_le64(cpu->profiles); 2222 disk->usage = cpu_to_le64(cpu->usage); 2223 disk->devid = cpu_to_le64(cpu->devid); 2224 disk->pstart = cpu_to_le64(cpu->pstart); 2225 disk->pend = cpu_to_le64(cpu->pend); 2226 disk->vstart = cpu_to_le64(cpu->vstart); 2227 disk->vend = cpu_to_le64(cpu->vend); 2228 disk->target = cpu_to_le64(cpu->target); 2229 disk->flags = cpu_to_le64(cpu->flags); 2230 disk->limit = cpu_to_le64(cpu->limit); 2231 } 2232 2233 /* struct btrfs_super_block */ 2234 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); 2235 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64); 2236 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block, 2237 generation, 64); 2238 BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64); 2239 BTRFS_SETGET_STACK_FUNCS(super_sys_array_size, 2240 struct btrfs_super_block, sys_chunk_array_size, 32); 2241 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation, 2242 struct btrfs_super_block, chunk_root_generation, 64); 2243 BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block, 2244 root_level, 8); 2245 BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block, 2246 chunk_root, 64); 2247 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block, 2248 chunk_root_level, 8); 2249 BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block, 2250 log_root, 64); 2251 BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block, 2252 log_root_transid, 64); 2253 
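/*
 * Illustrative sketch (example only, assuming the usual expansion of the
 * setget macros defined earlier in this file): the BTRFS_SETGET_STACK_FUNCS()
 * invocations in this block generate accessors that operate on a plain
 * in-memory copy of the structure and do the little-endian conversion
 * internally, unlike the BTRFS_SETGET_FUNCS() variants that read through an
 * extent_buffer.  btrfs_example_* below is not a real helper, just a usage
 * sketch, e.g. on the in-memory fs_info->super_copy:
 */
static inline void btrfs_example_bump_super_generation(struct btrfs_super_block *sb)
{
	/* getter expands to roughly le64_to_cpu(sb->generation) */
	u64 gen = btrfs_super_generation(sb);

	/* setter stores the value back with cpu_to_le64() */
	btrfs_set_super_generation(sb, gen + 1);
}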
BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block,
2254 			 log_root_level, 8);
2255 BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block,
2256 			 total_bytes, 64);
2257 BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block,
2258 			 bytes_used, 64);
2259 BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block,
2260 			 sectorsize, 32);
2261 BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block,
2262 			 nodesize, 32);
2263 BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block,
2264 			 stripesize, 32);
2265 BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block,
2266 			 root_dir_objectid, 64);
2267 BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block,
2268 			 num_devices, 64);
2269 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block,
2270 			 compat_flags, 64);
2271 BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block,
2272 			 compat_ro_flags, 64);
2273 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block,
2274 			 incompat_flags, 64);
2275 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block,
2276 			 csum_type, 16);
2277 BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block,
2278 			 cache_generation, 64);
2279 BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64);
2280 BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
2281 			 uuid_tree_generation, 64);
2282
2283 static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
2284 {
2285 	u16 t = btrfs_super_csum_type(s);
2286 	/*
2287 	 * csum type is validated at mount time
2288 	 */
2289 	return btrfs_csum_sizes[t];
2290 }
2291
2292 static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
2293 {
2294 	return offsetof(struct btrfs_leaf, items);
2295 }
2296
2297 /*
2298  * The leaf data grows from end-to-front in the node.
2299  * this returns the address of the start of the last item,
2300  * which is the stop of the leaf data stack
2301  */
2302 static inline unsigned int leaf_data_end(struct btrfs_root *root,
2303 					 struct extent_buffer *leaf)
2304 {
2305 	u32 nr = btrfs_header_nritems(leaf);
2306
2307 	if (nr == 0)
2308 		return BTRFS_LEAF_DATA_SIZE(root);
2309 	return btrfs_item_offset_nr(leaf, nr - 1);
2310 }
2311
2312 /* struct btrfs_file_extent_item */
2313 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
2314 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr,
2315 			 struct btrfs_file_extent_item, disk_bytenr, 64);
2316 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset,
2317 			 struct btrfs_file_extent_item, offset, 64);
2318 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation,
2319 			 struct btrfs_file_extent_item, generation, 64);
2320 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes,
2321 			 struct btrfs_file_extent_item, num_bytes, 64);
2322 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes,
2323 			 struct btrfs_file_extent_item, disk_num_bytes, 64);
2324 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression,
2325 			 struct btrfs_file_extent_item, compression, 8);
2326
2327 static inline unsigned long
2328 btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e)
2329 {
2330 	return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START;
2331 }
2332
2333 static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize)
2334 {
2335 	return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize;
2336 }
2337
2338 BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item,
2339 		   disk_bytenr, 64);
2340 BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item,
2341 		   generation, 64);
2342 BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item,
2343 		   disk_num_bytes, 64);
2344 BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item,
2345 		   offset, 64);
2346 BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item,
2347 		   num_bytes, 64);
2348 BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item,
2349 		   ram_bytes, 64);
2350 BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item,
2351 		   compression, 8);
2352 BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item,
2353 		   encryption, 8);
2354 BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
2355 		   other_encoding, 16);
2356
2357 /*
2358  * this returns the number of bytes used by the item on disk, minus the
2359  * size of any extent headers. If a file is compressed on disk, this is
2360  * the compressed size
2361  */
2362 static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
2363 						    struct btrfs_item *e)
2364 {
2365 	return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
2366 }
2367
2368 /* this returns the number of file bytes represented by the inline item.
2369 * If an item is compressed, this is the uncompressed size 2370 */ 2371 static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, 2372 int slot, 2373 struct btrfs_file_extent_item *fi) 2374 { 2375 struct btrfs_map_token token; 2376 2377 btrfs_init_map_token(&token); 2378 /* 2379 * return the space used on disk if this item isn't 2380 * compressed or encoded 2381 */ 2382 if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 && 2383 btrfs_token_file_extent_encryption(eb, fi, &token) == 0 && 2384 btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) { 2385 return btrfs_file_extent_inline_item_len(eb, 2386 btrfs_item_nr(slot)); 2387 } 2388 2389 /* otherwise use the ram bytes field */ 2390 return btrfs_token_file_extent_ram_bytes(eb, fi, &token); 2391 } 2392 2393 2394 /* btrfs_dev_stats_item */ 2395 static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb, 2396 struct btrfs_dev_stats_item *ptr, 2397 int index) 2398 { 2399 u64 val; 2400 2401 read_extent_buffer(eb, &val, 2402 offsetof(struct btrfs_dev_stats_item, values) + 2403 ((unsigned long)ptr) + (index * sizeof(u64)), 2404 sizeof(val)); 2405 return val; 2406 } 2407 2408 static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb, 2409 struct btrfs_dev_stats_item *ptr, 2410 int index, u64 val) 2411 { 2412 write_extent_buffer(eb, &val, 2413 offsetof(struct btrfs_dev_stats_item, values) + 2414 ((unsigned long)ptr) + (index * sizeof(u64)), 2415 sizeof(val)); 2416 } 2417 2418 /* btrfs_qgroup_status_item */ 2419 BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item, 2420 generation, 64); 2421 BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item, 2422 version, 64); 2423 BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item, 2424 flags, 64); 2425 BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item, 2426 rescan, 64); 2427 2428 /* btrfs_qgroup_info_item */ 2429 BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item, 2430 generation, 64); 2431 BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64); 2432 BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item, 2433 rfer_cmpr, 64); 2434 BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64); 2435 BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item, 2436 excl_cmpr, 64); 2437 2438 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation, 2439 struct btrfs_qgroup_info_item, generation, 64); 2440 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item, 2441 rfer, 64); 2442 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr, 2443 struct btrfs_qgroup_info_item, rfer_cmpr, 64); 2444 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item, 2445 excl, 64); 2446 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr, 2447 struct btrfs_qgroup_info_item, excl_cmpr, 64); 2448 2449 /* btrfs_qgroup_limit_item */ 2450 BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item, 2451 flags, 64); 2452 BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item, 2453 max_rfer, 64); 2454 BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item, 2455 max_excl, 64); 2456 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item, 2457 rsv_rfer, 64); 2458 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item, 2459 rsv_excl, 64); 2460 2461 /* btrfs_dev_replace_item */ 2462 
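/*
 * Illustrative sketch (example only, not a real helper): how the
 * extent-buffer based accessors generated throughout this file are normally
 * combined with the item offset helpers to read an on-disk item out of a
 * leaf slot that btrfs_search_slot() has located.  The "pointer" built here
 * is really an offset inside the extent buffer, which is exactly what the
 * generated getters expect; the btrfs_item_ptr() macro further down wraps
 * the same arithmetic.
 */
static inline u64 btrfs_example_leaf_inode_size(struct extent_buffer *leaf,
						int slot)
{
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.type != BTRFS_INODE_ITEM_KEY)
		return 0;

	ii = (struct btrfs_inode_item *)(btrfs_leaf_data(leaf) +
					 btrfs_item_offset_nr(leaf, slot));
	/* reads the le64 size field through the extent buffer pages */
	return btrfs_inode_size(leaf, ii);
}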
BTRFS_SETGET_FUNCS(dev_replace_src_devid, 2463 struct btrfs_dev_replace_item, src_devid, 64); 2464 BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode, 2465 struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode, 2466 64); 2467 BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item, 2468 replace_state, 64); 2469 BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item, 2470 time_started, 64); 2471 BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item, 2472 time_stopped, 64); 2473 BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item, 2474 num_write_errors, 64); 2475 BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors, 2476 struct btrfs_dev_replace_item, num_uncorrectable_read_errors, 2477 64); 2478 BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item, 2479 cursor_left, 64); 2480 BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item, 2481 cursor_right, 64); 2482 2483 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid, 2484 struct btrfs_dev_replace_item, src_devid, 64); 2485 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode, 2486 struct btrfs_dev_replace_item, 2487 cont_reading_from_srcdev_mode, 64); 2488 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state, 2489 struct btrfs_dev_replace_item, replace_state, 64); 2490 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started, 2491 struct btrfs_dev_replace_item, time_started, 64); 2492 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped, 2493 struct btrfs_dev_replace_item, time_stopped, 64); 2494 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors, 2495 struct btrfs_dev_replace_item, num_write_errors, 64); 2496 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors, 2497 struct btrfs_dev_replace_item, 2498 num_uncorrectable_read_errors, 64); 2499 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left, 2500 struct btrfs_dev_replace_item, cursor_left, 64); 2501 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right, 2502 struct btrfs_dev_replace_item, cursor_right, 64); 2503 2504 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) 2505 { 2506 return sb->s_fs_info; 2507 } 2508 2509 /* helper function to cast into the data area of the leaf. */ 2510 #define btrfs_item_ptr(leaf, slot, type) \ 2511 ((type *)(btrfs_leaf_data(leaf) + \ 2512 btrfs_item_offset_nr(leaf, slot))) 2513 2514 #define btrfs_item_ptr_offset(leaf, slot) \ 2515 ((unsigned long)(btrfs_leaf_data(leaf) + \ 2516 btrfs_item_offset_nr(leaf, slot))) 2517 2518 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) 2519 { 2520 return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && 2521 (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); 2522 } 2523 2524 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) 2525 { 2526 return mapping_gfp_constraint(mapping, ~__GFP_FS); 2527 } 2528 2529 /* extent-tree.c */ 2530 2531 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes); 2532 2533 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, 2534 unsigned num_items) 2535 { 2536 return root->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; 2537 } 2538 2539 /* 2540 * Doing a truncate won't result in new nodes or leaves, just what we need for 2541 * COW. 
2542 */ 2543 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root, 2544 unsigned num_items) 2545 { 2546 return root->nodesize * BTRFS_MAX_LEVEL * num_items; 2547 } 2548 2549 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 2550 struct btrfs_root *root); 2551 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, 2552 struct btrfs_root *root); 2553 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, 2554 const u64 start); 2555 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); 2556 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); 2557 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); 2558 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); 2559 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 2560 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 2561 struct btrfs_root *root, unsigned long count); 2562 int btrfs_async_run_delayed_refs(struct btrfs_root *root, 2563 unsigned long count, u64 transid, int wait); 2564 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); 2565 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 2566 struct btrfs_root *root, u64 bytenr, 2567 u64 offset, int metadata, u64 *refs, u64 *flags); 2568 int btrfs_pin_extent(struct btrfs_root *root, 2569 u64 bytenr, u64 num, int reserved); 2570 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root, 2571 u64 bytenr, u64 num_bytes); 2572 int btrfs_exclude_logged_extents(struct btrfs_root *root, 2573 struct extent_buffer *eb); 2574 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 2575 struct btrfs_root *root, 2576 u64 objectid, u64 offset, u64 bytenr); 2577 struct btrfs_block_group_cache *btrfs_lookup_block_group( 2578 struct btrfs_fs_info *info, 2579 u64 bytenr); 2580 void btrfs_get_block_group(struct btrfs_block_group_cache *cache); 2581 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 2582 int get_block_group_index(struct btrfs_block_group_cache *cache); 2583 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 2584 struct btrfs_root *root, u64 parent, 2585 u64 root_objectid, 2586 struct btrfs_disk_key *key, int level, 2587 u64 hint, u64 empty_size); 2588 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 2589 struct btrfs_root *root, 2590 struct extent_buffer *buf, 2591 u64 parent, int last_ref); 2592 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 2593 struct btrfs_root *root, 2594 u64 root_objectid, u64 owner, 2595 u64 offset, u64 ram_bytes, 2596 struct btrfs_key *ins); 2597 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 2598 struct btrfs_root *root, 2599 u64 root_objectid, u64 owner, u64 offset, 2600 struct btrfs_key *ins); 2601 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes, 2602 u64 min_alloc_size, u64 empty_size, u64 hint_byte, 2603 struct btrfs_key *ins, int is_data, int delalloc); 2604 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2605 struct extent_buffer *buf, int full_backref); 2606 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2607 struct extent_buffer *buf, int full_backref); 2608 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2609 struct btrfs_root *root, 2610 u64 bytenr, u64 num_bytes, u64 flags, 2611 int level, int is_data); 2612 int 
btrfs_free_extent(struct btrfs_trans_handle *trans, 2613 struct btrfs_root *root, 2614 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 2615 u64 owner, u64 offset); 2616 2617 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len, 2618 int delalloc); 2619 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 2620 u64 start, u64 len); 2621 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 2622 struct btrfs_root *root); 2623 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, 2624 struct btrfs_root *root); 2625 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 2626 struct btrfs_root *root, 2627 u64 bytenr, u64 num_bytes, u64 parent, 2628 u64 root_objectid, u64 owner, u64 offset); 2629 2630 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans, 2631 struct btrfs_root *root); 2632 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 2633 struct btrfs_root *root); 2634 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, 2635 struct btrfs_root *root); 2636 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); 2637 int btrfs_free_block_groups(struct btrfs_fs_info *info); 2638 int btrfs_read_block_groups(struct btrfs_root *root); 2639 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr); 2640 int btrfs_make_block_group(struct btrfs_trans_handle *trans, 2641 struct btrfs_root *root, u64 bytes_used, 2642 u64 type, u64 chunk_objectid, u64 chunk_offset, 2643 u64 size); 2644 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 2645 struct btrfs_fs_info *fs_info, 2646 const u64 chunk_offset); 2647 int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 2648 struct btrfs_root *root, u64 group_start, 2649 struct extent_map *em); 2650 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); 2651 void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); 2652 void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache); 2653 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, 2654 struct btrfs_root *root); 2655 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data); 2656 void btrfs_clear_space_info_full(struct btrfs_fs_info *info); 2657 2658 enum btrfs_reserve_flush_enum { 2659 /* If we are in the transaction, we can't flush anything.*/ 2660 BTRFS_RESERVE_NO_FLUSH, 2661 /* 2662 * Flushing delalloc may cause deadlock somewhere, in this 2663 * case, use FLUSH LIMIT 2664 */ 2665 BTRFS_RESERVE_FLUSH_LIMIT, 2666 BTRFS_RESERVE_FLUSH_ALL, 2667 }; 2668 2669 enum btrfs_flush_state { 2670 FLUSH_DELAYED_ITEMS_NR = 1, 2671 FLUSH_DELAYED_ITEMS = 2, 2672 FLUSH_DELALLOC = 3, 2673 FLUSH_DELALLOC_WAIT = 4, 2674 ALLOC_CHUNK = 5, 2675 COMMIT_TRANS = 6, 2676 }; 2677 2678 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len); 2679 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes); 2680 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len); 2681 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, 2682 u64 len); 2683 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, 2684 struct btrfs_root *root); 2685 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans); 2686 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, 2687 struct inode *inode); 2688 void btrfs_orphan_release_metadata(struct inode *inode); 2689 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, 2690 struct 
btrfs_block_rsv *rsv, 2691 int nitems, 2692 u64 *qgroup_reserved, bool use_global_rsv); 2693 void btrfs_subvolume_release_metadata(struct btrfs_root *root, 2694 struct btrfs_block_rsv *rsv, 2695 u64 qgroup_reserved); 2696 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes); 2697 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes); 2698 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len); 2699 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len); 2700 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type); 2701 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root, 2702 unsigned short type); 2703 void btrfs_free_block_rsv(struct btrfs_root *root, 2704 struct btrfs_block_rsv *rsv); 2705 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv); 2706 int btrfs_block_rsv_add(struct btrfs_root *root, 2707 struct btrfs_block_rsv *block_rsv, u64 num_bytes, 2708 enum btrfs_reserve_flush_enum flush); 2709 int btrfs_block_rsv_check(struct btrfs_root *root, 2710 struct btrfs_block_rsv *block_rsv, int min_factor); 2711 int btrfs_block_rsv_refill(struct btrfs_root *root, 2712 struct btrfs_block_rsv *block_rsv, u64 min_reserved, 2713 enum btrfs_reserve_flush_enum flush); 2714 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 2715 struct btrfs_block_rsv *dst_rsv, u64 num_bytes, 2716 int update_size); 2717 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info, 2718 struct btrfs_block_rsv *dest, u64 num_bytes, 2719 int min_factor); 2720 void btrfs_block_rsv_release(struct btrfs_root *root, 2721 struct btrfs_block_rsv *block_rsv, 2722 u64 num_bytes); 2723 int btrfs_inc_block_group_ro(struct btrfs_root *root, 2724 struct btrfs_block_group_cache *cache); 2725 void btrfs_dec_block_group_ro(struct btrfs_root *root, 2726 struct btrfs_block_group_cache *cache); 2727 void btrfs_put_block_group_cache(struct btrfs_fs_info *info); 2728 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); 2729 int btrfs_error_unpin_extent_range(struct btrfs_root *root, 2730 u64 start, u64 end); 2731 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, 2732 u64 num_bytes, u64 *actual_bytes); 2733 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, 2734 struct btrfs_root *root, u64 type); 2735 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range); 2736 2737 int btrfs_init_space_info(struct btrfs_fs_info *fs_info); 2738 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans, 2739 struct btrfs_fs_info *fs_info); 2740 int __get_raid_index(u64 flags); 2741 int btrfs_start_write_no_snapshoting(struct btrfs_root *root); 2742 void btrfs_end_write_no_snapshoting(struct btrfs_root *root); 2743 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); 2744 void check_system_chunk(struct btrfs_trans_handle *trans, 2745 struct btrfs_root *root, 2746 const u64 type); 2747 u64 add_new_free_space(struct btrfs_block_group_cache *block_group, 2748 struct btrfs_fs_info *info, u64 start, u64 end); 2749 2750 /* ctree.c */ 2751 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 2752 int level, int *slot); 2753 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2); 2754 int btrfs_previous_item(struct btrfs_root *root, 2755 struct btrfs_path *path, u64 min_objectid, 2756 int type); 2757 int btrfs_previous_extent_item(struct btrfs_root *root, 2758 struct btrfs_path *path, u64 min_objectid); 2759 void 
btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 2760 struct btrfs_path *path, 2761 struct btrfs_key *new_key); 2762 struct extent_buffer *btrfs_root_node(struct btrfs_root *root); 2763 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); 2764 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 2765 struct btrfs_key *key, int lowest_level, 2766 u64 min_trans); 2767 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 2768 struct btrfs_path *path, 2769 u64 min_trans); 2770 enum btrfs_compare_tree_result { 2771 BTRFS_COMPARE_TREE_NEW, 2772 BTRFS_COMPARE_TREE_DELETED, 2773 BTRFS_COMPARE_TREE_CHANGED, 2774 BTRFS_COMPARE_TREE_SAME, 2775 }; 2776 typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root, 2777 struct btrfs_root *right_root, 2778 struct btrfs_path *left_path, 2779 struct btrfs_path *right_path, 2780 struct btrfs_key *key, 2781 enum btrfs_compare_tree_result result, 2782 void *ctx); 2783 int btrfs_compare_trees(struct btrfs_root *left_root, 2784 struct btrfs_root *right_root, 2785 btrfs_changed_cb_t cb, void *ctx); 2786 int btrfs_cow_block(struct btrfs_trans_handle *trans, 2787 struct btrfs_root *root, struct extent_buffer *buf, 2788 struct extent_buffer *parent, int parent_slot, 2789 struct extent_buffer **cow_ret); 2790 int btrfs_copy_root(struct btrfs_trans_handle *trans, 2791 struct btrfs_root *root, 2792 struct extent_buffer *buf, 2793 struct extent_buffer **cow_ret, u64 new_root_objectid); 2794 int btrfs_block_can_be_shared(struct btrfs_root *root, 2795 struct extent_buffer *buf); 2796 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path, 2797 u32 data_size); 2798 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path, 2799 u32 new_size, int from_end); 2800 int btrfs_split_item(struct btrfs_trans_handle *trans, 2801 struct btrfs_root *root, 2802 struct btrfs_path *path, 2803 struct btrfs_key *new_key, 2804 unsigned long split_offset); 2805 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 2806 struct btrfs_root *root, 2807 struct btrfs_path *path, 2808 struct btrfs_key *new_key); 2809 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 2810 u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key); 2811 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root 2812 *root, struct btrfs_key *key, struct btrfs_path *p, int 2813 ins_len, int cow); 2814 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, 2815 struct btrfs_path *p, u64 time_seq); 2816 int btrfs_search_slot_for_read(struct btrfs_root *root, 2817 struct btrfs_key *key, struct btrfs_path *p, 2818 int find_higher, int return_any); 2819 int btrfs_realloc_node(struct btrfs_trans_handle *trans, 2820 struct btrfs_root *root, struct extent_buffer *parent, 2821 int start_slot, u64 *last_ret, 2822 struct btrfs_key *progress); 2823 void btrfs_release_path(struct btrfs_path *p); 2824 struct btrfs_path *btrfs_alloc_path(void); 2825 void btrfs_free_path(struct btrfs_path *p); 2826 void btrfs_set_path_blocking(struct btrfs_path *p); 2827 void btrfs_clear_path_blocking(struct btrfs_path *p, 2828 struct extent_buffer *held, int held_rw); 2829 void btrfs_unlock_up_safe(struct btrfs_path *p, int level); 2830 2831 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2832 struct btrfs_path *path, int slot, int nr); 2833 static inline int btrfs_del_item(struct btrfs_trans_handle *trans, 2834 struct btrfs_root *root, 2835 struct 
btrfs_path *path) 2836 { 2837 return btrfs_del_items(trans, root, path, path->slots[0], 1); 2838 } 2839 2840 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 2841 struct btrfs_key *cpu_key, u32 *data_size, 2842 u32 total_data, u32 total_size, int nr); 2843 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root 2844 *root, struct btrfs_key *key, void *data, u32 data_size); 2845 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 2846 struct btrfs_root *root, 2847 struct btrfs_path *path, 2848 struct btrfs_key *cpu_key, u32 *data_size, int nr); 2849 2850 static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, 2851 struct btrfs_root *root, 2852 struct btrfs_path *path, 2853 struct btrfs_key *key, 2854 u32 data_size) 2855 { 2856 return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1); 2857 } 2858 2859 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); 2860 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 2861 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 2862 u64 time_seq); 2863 static inline int btrfs_next_old_item(struct btrfs_root *root, 2864 struct btrfs_path *p, u64 time_seq) 2865 { 2866 ++p->slots[0]; 2867 if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) 2868 return btrfs_next_old_leaf(root, p, time_seq); 2869 return 0; 2870 } 2871 static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) 2872 { 2873 return btrfs_next_old_item(root, p, 0); 2874 } 2875 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); 2876 int __must_check btrfs_drop_snapshot(struct btrfs_root *root, 2877 struct btrfs_block_rsv *block_rsv, 2878 int update_ref, int for_reloc); 2879 int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 2880 struct btrfs_root *root, 2881 struct extent_buffer *node, 2882 struct extent_buffer *parent); 2883 static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info) 2884 { 2885 /* 2886 * Do it this way so we only ever do one test_bit in the normal case. 2887 */ 2888 if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) { 2889 if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags)) 2890 return 2; 2891 return 1; 2892 } 2893 return 0; 2894 } 2895 2896 /* 2897 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do 2898 * anything except sleeping. This function is used to check the status of 2899 * the fs. 
2900 */ 2901 static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root) 2902 { 2903 return (root->fs_info->sb->s_flags & MS_RDONLY || 2904 btrfs_fs_closing(root->fs_info)); 2905 } 2906 2907 static inline void free_fs_info(struct btrfs_fs_info *fs_info) 2908 { 2909 kfree(fs_info->balance_ctl); 2910 kfree(fs_info->delayed_root); 2911 kfree(fs_info->extent_root); 2912 kfree(fs_info->tree_root); 2913 kfree(fs_info->chunk_root); 2914 kfree(fs_info->dev_root); 2915 kfree(fs_info->csum_root); 2916 kfree(fs_info->quota_root); 2917 kfree(fs_info->uuid_root); 2918 kfree(fs_info->free_space_root); 2919 kfree(fs_info->super_copy); 2920 kfree(fs_info->super_for_commit); 2921 security_free_mnt_opts(&fs_info->security_opts); 2922 kfree(fs_info); 2923 } 2924 2925 /* tree mod log functions from ctree.c */ 2926 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, 2927 struct seq_list *elem); 2928 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, 2929 struct seq_list *elem); 2930 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq); 2931 2932 /* root-item.c */ 2933 int btrfs_add_root_ref(struct btrfs_trans_handle *trans, 2934 struct btrfs_root *tree_root, 2935 u64 root_id, u64 ref_id, u64 dirid, u64 sequence, 2936 const char *name, int name_len); 2937 int btrfs_del_root_ref(struct btrfs_trans_handle *trans, 2938 struct btrfs_root *tree_root, 2939 u64 root_id, u64 ref_id, u64 dirid, u64 *sequence, 2940 const char *name, int name_len); 2941 int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, 2942 struct btrfs_key *key); 2943 int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root 2944 *root, struct btrfs_key *key, struct btrfs_root_item 2945 *item); 2946 int __must_check btrfs_update_root(struct btrfs_trans_handle *trans, 2947 struct btrfs_root *root, 2948 struct btrfs_key *key, 2949 struct btrfs_root_item *item); 2950 int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key, 2951 struct btrfs_path *path, struct btrfs_root_item *root_item, 2952 struct btrfs_key *root_key); 2953 int btrfs_find_orphan_roots(struct btrfs_root *tree_root); 2954 void btrfs_set_root_node(struct btrfs_root_item *item, 2955 struct extent_buffer *node); 2956 void btrfs_check_and_init_root_item(struct btrfs_root_item *item); 2957 void btrfs_update_root_times(struct btrfs_trans_handle *trans, 2958 struct btrfs_root *root); 2959 2960 /* uuid-tree.c */ 2961 int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, 2962 struct btrfs_root *uuid_root, u8 *uuid, u8 type, 2963 u64 subid); 2964 int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans, 2965 struct btrfs_root *uuid_root, u8 *uuid, u8 type, 2966 u64 subid); 2967 int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info, 2968 int (*check_func)(struct btrfs_fs_info *, u8 *, u8, 2969 u64)); 2970 2971 /* dir-item.c */ 2972 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, 2973 const char *name, int name_len); 2974 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, 2975 struct btrfs_root *root, const char *name, 2976 int name_len, struct inode *dir, 2977 struct btrfs_key *location, u8 type, u64 index); 2978 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, 2979 struct btrfs_root *root, 2980 struct btrfs_path *path, u64 dir, 2981 const char *name, int name_len, 2982 int mod); 2983 struct btrfs_dir_item * 2984 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, 2985 struct btrfs_root *root, 2986 struct btrfs_path *path, u64 dir, 
2987 u64 objectid, const char *name, int name_len, 2988 int mod); 2989 struct btrfs_dir_item * 2990 btrfs_search_dir_index_item(struct btrfs_root *root, 2991 struct btrfs_path *path, u64 dirid, 2992 const char *name, int name_len); 2993 int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, 2994 struct btrfs_root *root, 2995 struct btrfs_path *path, 2996 struct btrfs_dir_item *di); 2997 int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, 2998 struct btrfs_root *root, 2999 struct btrfs_path *path, u64 objectid, 3000 const char *name, u16 name_len, 3001 const void *data, u16 data_len); 3002 struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, 3003 struct btrfs_root *root, 3004 struct btrfs_path *path, u64 dir, 3005 const char *name, u16 name_len, 3006 int mod); 3007 int verify_dir_item(struct btrfs_root *root, 3008 struct extent_buffer *leaf, 3009 struct btrfs_dir_item *dir_item); 3010 struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, 3011 struct btrfs_path *path, 3012 const char *name, 3013 int name_len); 3014 3015 /* orphan.c */ 3016 int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, 3017 struct btrfs_root *root, u64 offset); 3018 int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, 3019 struct btrfs_root *root, u64 offset); 3020 int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); 3021 3022 /* inode-item.c */ 3023 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, 3024 struct btrfs_root *root, 3025 const char *name, int name_len, 3026 u64 inode_objectid, u64 ref_objectid, u64 index); 3027 int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, 3028 struct btrfs_root *root, 3029 const char *name, int name_len, 3030 u64 inode_objectid, u64 ref_objectid, u64 *index); 3031 int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, 3032 struct btrfs_root *root, 3033 struct btrfs_path *path, u64 objectid); 3034 int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root 3035 *root, struct btrfs_path *path, 3036 struct btrfs_key *location, int mod); 3037 3038 struct btrfs_inode_extref * 3039 btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, 3040 struct btrfs_root *root, 3041 struct btrfs_path *path, 3042 const char *name, int name_len, 3043 u64 inode_objectid, u64 ref_objectid, int ins_len, 3044 int cow); 3045 3046 int btrfs_find_name_in_ext_backref(struct btrfs_path *path, 3047 u64 ref_objectid, const char *name, 3048 int name_len, 3049 struct btrfs_inode_extref **extref_ret); 3050 3051 /* file-item.c */ 3052 struct btrfs_dio_private; 3053 int btrfs_del_csums(struct btrfs_trans_handle *trans, 3054 struct btrfs_root *root, u64 bytenr, u64 len); 3055 int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, 3056 struct bio *bio, u32 *dst); 3057 int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, 3058 struct bio *bio, u64 logical_offset); 3059 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, 3060 struct btrfs_root *root, 3061 u64 objectid, u64 pos, 3062 u64 disk_offset, u64 disk_num_bytes, 3063 u64 num_bytes, u64 offset, u64 ram_bytes, 3064 u8 compression, u8 encryption, u16 other_encoding); 3065 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, 3066 struct btrfs_root *root, 3067 struct btrfs_path *path, u64 objectid, 3068 u64 bytenr, int mod); 3069 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, 3070 struct btrfs_root *root, 3071 struct btrfs_ordered_sum *sums); 3072 int 
btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, 3073 struct bio *bio, u64 file_start, int contig); 3074 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 3075 struct list_head *list, int search_commit); 3076 void btrfs_extent_item_to_extent_map(struct inode *inode, 3077 const struct btrfs_path *path, 3078 struct btrfs_file_extent_item *fi, 3079 const bool new_inline, 3080 struct extent_map *em); 3081 3082 /* inode.c */ 3083 struct btrfs_delalloc_work { 3084 struct inode *inode; 3085 int delay_iput; 3086 struct completion completion; 3087 struct list_head list; 3088 struct btrfs_work work; 3089 }; 3090 3091 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, 3092 int delay_iput); 3093 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work); 3094 3095 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 3096 size_t pg_offset, u64 start, u64 len, 3097 int create); 3098 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 3099 u64 *orig_start, u64 *orig_block_len, 3100 u64 *ram_bytes); 3101 3102 /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ 3103 #if defined(ClearPageFsMisc) && !defined(ClearPageChecked) 3104 #define ClearPageChecked ClearPageFsMisc 3105 #define SetPageChecked SetPageFsMisc 3106 #define PageChecked PageFsMisc 3107 #endif 3108 3109 /* This forces readahead on a given range of bytes in an inode */ 3110 static inline void btrfs_force_ra(struct address_space *mapping, 3111 struct file_ra_state *ra, struct file *file, 3112 pgoff_t offset, unsigned long req_size) 3113 { 3114 page_cache_sync_readahead(mapping, ra, file, offset, req_size); 3115 } 3116 3117 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); 3118 int btrfs_set_inode_index(struct inode *dir, u64 *index); 3119 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 3120 struct btrfs_root *root, 3121 struct inode *dir, struct inode *inode, 3122 const char *name, int name_len); 3123 int btrfs_add_link(struct btrfs_trans_handle *trans, 3124 struct inode *parent_inode, struct inode *inode, 3125 const char *name, int name_len, int add_backref, u64 index); 3126 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 3127 struct btrfs_root *root, 3128 struct inode *dir, u64 objectid, 3129 const char *name, int name_len); 3130 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, 3131 int front); 3132 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 3133 struct btrfs_root *root, 3134 struct inode *inode, u64 new_size, 3135 u32 min_type); 3136 3137 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); 3138 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, 3139 int nr); 3140 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 3141 struct extent_state **cached_state, int dedupe); 3142 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 3143 struct btrfs_root *new_root, 3144 struct btrfs_root *parent_root, 3145 u64 new_dirid); 3146 int btrfs_merge_bio_hook(struct page *page, unsigned long offset, 3147 size_t size, struct bio *bio, 3148 unsigned long bio_flags); 3149 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); 3150 int btrfs_readpage(struct file *file, struct page *page); 3151 void btrfs_evict_inode(struct inode *inode); 3152 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); 3153 struct 

struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
					   size_t pg_offset, u64 start, u64 len,
					   int create);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes);

/* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
#if defined(ClearPageFsMisc) && !defined(ClearPageChecked)
#define ClearPageChecked ClearPageFsMisc
#define SetPageChecked SetPageFsMisc
#define PageChecked PageFsMisc
#endif

/* This forces readahead on a given range of bytes in an inode */
static inline void btrfs_force_ra(struct address_space *mapping,
				  struct file_ra_state *ra, struct file *file,
				  pgoff_t offset, unsigned long req_size)
{
	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
}

struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len);
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index);
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len);
int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
			 int front);
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode, u64 new_size,
			       u32 min_type);

int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
			       int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state, int dedupe);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root,
			     struct btrfs_root *parent_root,
			     u64 new_dirid);
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int btrfs_init_cachep(void);
void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 end,
				    int create);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
void btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint);
int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint);
extern const struct dentry_operations btrfs_dentry_operations;
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode);
#endif
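
/*
 * Example only (the key setup is an illustration, not a quote of a call
 * site): an inode is looked up by filling a key with the inode number and
 * BTRFS_INODE_ITEM_KEY and handing it to btrfs_iget():
 *
 *	struct btrfs_key location;
 *	struct inode *inode;
 *
 *	location.objectid = ino;
 *	location.type = BTRFS_INODE_ITEM_KEY;
 *	location.offset = 0;
 *	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 */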

/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int btrfs_ioctl_get_supported_features(void __user *arg);
void btrfs_update_iflags(struct inode *inode);
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
int btrfs_is_empty_uuid(u8 *uuid);
int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_pages);
void btrfs_get_block_group_info(struct list_head *groups_list,
				struct btrfs_ioctl_space_info *space);
void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
			       struct btrfs_ioctl_balance_args *bargs);
ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
				struct file *dst_file, u64 dst_loff);

/* file.c */
int btrfs_auto_defrag_init(void);
void btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned);
extern const struct file_operations btrfs_file_operations;
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted);
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
ssize_t btrfs_copy_file_range(struct file *file_in, loff_t pos_in,
			      struct file *file_out, loff_t pos_out,
			      size_t len, unsigned int flags);
int btrfs_clone_file_range(struct file *file_in, loff_t pos_in,
			   struct file *file_out, loff_t pos_out, u64 len);

/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			struct btrfs_root *root);

/* sysfs.c */
int btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);

/* xattr.c */
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);

/* super.c */
int btrfs_parse_options(struct btrfs_root *root, char *options,
			unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);

static inline __printf(2, 3)
void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
}

#ifdef CONFIG_PRINTK
__printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
#else
#define btrfs_printk(fs_info, fmt, args...) \
	btrfs_no_printk(fs_info, fmt, ##args)
#endif

#define btrfs_emerg(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_INFO fmt, ##args)
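
/*
 * Example use of the message helpers above (illustrative only): every
 * wrapper takes the fs_info first so the message is tagged with the
 * filesystem it belongs to.  The _rl variants below add ratelimiting and
 * the _in_rcu variants are for format arguments that live under RCU, such
 * as device names.
 *
 *	btrfs_info(fs_info, "disk space caching is enabled");
 *	btrfs_warn_rl(fs_info, "read error corrected: ino %llu off %llu",
 *		      ino, offset);
 */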

/*
 * Wrappers that use printk_in_rcu
 */
#define btrfs_emerg_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_INFO fmt, ##args)

/*
 * Wrappers that use a ratelimited printk_in_rcu
 */
#define btrfs_emerg_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args)

/*
 * Wrappers that use a ratelimited printk
 */
#define btrfs_emerg_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define btrfs_debug(fs_info, fmt, args...)				\
do {									\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))		\
		btrfs_printk(fs_info, KERN_DEBUG fmt, ##args);		\
} while (0)
#define btrfs_debug_in_rcu(fs_info, fmt, args...)			\
do {									\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))		\
		btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args);	\
} while (0)
#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...)			\
do {									\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))		\
		btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt,		\
				       ##args);				\
} while (0)
#define btrfs_debug_rl(fs_info, fmt, args...)				\
do {									\
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);			\
	if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))		\
		btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt,	\
					 ##args);			\
} while (0)
#elif defined(DEBUG)
#define btrfs_debug(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl(fs_info, fmt, args...) \
	btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args)
#else
#define btrfs_debug(fs_info, fmt, args...) \
	btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
	btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
	btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#define btrfs_debug_rl(fs_info, fmt, args...) \
	btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
#endif
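
/*
 * Note (illustrative): with CONFIG_DYNAMIC_DEBUG the btrfs_debug variants
 * above compile in their call sites but stay quiet until enabled at runtime
 * through the generic dynamic debug control file, e.g.:
 *
 *	echo 'file fs/btrfs/inode.c +p' > /sys/kernel/debug/dynamic_debug/control
 */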

#define btrfs_printk_in_rcu(fs_info, fmt, args...)	\
do {							\
	rcu_read_lock();				\
	btrfs_printk(fs_info, fmt, ##args);		\
	rcu_read_unlock();				\
} while (0)

#define btrfs_printk_ratelimited(fs_info, fmt, args...)		\
do {								\
	static DEFINE_RATELIMIT_STATE(_rs,			\
		DEFAULT_RATELIMIT_INTERVAL,			\
		DEFAULT_RATELIMIT_BURST);			\
	if (__ratelimit(&_rs))					\
		btrfs_printk(fs_info, fmt, ##args);		\
} while (0)

#define btrfs_printk_rl_in_rcu(fs_info, fmt, args...)		\
do {								\
	rcu_read_lock();					\
	btrfs_printk_ratelimited(fs_info, fmt, ##args);		\
	rcu_read_unlock();					\
} while (0)

#ifdef CONFIG_BTRFS_ASSERT

__cold
static inline void assfail(char *expr, char *file, int line)
{
	pr_err("assertion failed: %s, file: %s, line: %d\n",
	       expr, file, line);
	BUG();
}

#define ASSERT(expr)	\
	(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#else
#define ASSERT(expr)	((void)0)
#endif

__printf(5, 6)
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
			     unsigned int line, int errno, const char *fmt, ...);

const char *btrfs_decode_error(int errno);

__cold
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
			       const char *function,
			       unsigned int line, int errno);

/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, that way the exact line number is reported.
 */
#define btrfs_abort_transaction(trans, errno)			\
do {								\
	/* Report first abort since mount */			\
	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
			&((trans)->fs_info->fs_state))) {	\
		WARN(1, KERN_DEBUG				\
		"BTRFS: Transaction aborted (error %d)\n",	\
		(errno));					\
	}							\
	__btrfs_abort_transaction((trans), __func__,		\
				  __LINE__, (errno));		\
} while (0)

#define btrfs_handle_fs_error(fs_info, errno, fmt, args...)	\
do {								\
	__btrfs_handle_fs_error((fs_info), __func__, __LINE__,	\
				(errno), fmt, ##args);		\
} while (0)
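
/*
 * Typical shape of the error handling above (an illustrative sketch, not a
 * quote of any call site): ASSERT() checks developer invariants,
 * btrfs_abort_transaction() is called right where a failure is detected
 * inside a transaction, and btrfs_handle_fs_error() covers fatal errors
 * outside one.
 *
 *	ASSERT(list_empty(&block_group->list));
 *
 *	ret = btrfs_update_inode(trans, root, inode);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		return ret;
 *	}
 */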

__printf(5, 6)
__cold
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
		   unsigned int line, int errno, const char *fmt, ...);
/*
 * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
 * will panic().  Otherwise we BUG() here.
 */
#define btrfs_panic(fs_info, errno, fmt, args...)			\
do {									\
	__btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args);	\
	BUG();								\
} while (0)


/* compatibility and incompatibility defines */

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
					   u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (!(features & flag)) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_incompat_flags(disk_super);
		if (!(features & flag)) {
			features |= flag;
			btrfs_set_super_incompat_flags(disk_super, features);
			btrfs_info(fs_info, "setting %llu feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
					     u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (features & flag) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_incompat_flags(disk_super);
		if (features & flag) {
			features &= ~flag;
			btrfs_set_super_incompat_flags(disk_super, features);
			btrfs_info(fs_info, "clearing %llu feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
{
	struct btrfs_super_block *disk_super;
	disk_super = fs_info->super_copy;
	return !!(btrfs_super_incompat_flags(disk_super) & flag);
}
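
/*
 * Example (illustrative only): the wrappers take the feature name suffix
 * rather than the raw flag value, so marking and testing an incompat
 * feature looks like this:
 *
 *	if (range->compress_type == BTRFS_COMPRESS_LZO)
 *		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
 *
 *	if (btrfs_fs_incompat(fs_info, RAID56))
 *		max_tolerated = 1;
 */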

#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info,
					    u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_compat_ro_flags(disk_super);
	if (!(features & flag)) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_compat_ro_flags(disk_super);
		if (!(features & flag)) {
			features |= flag;
			btrfs_set_super_compat_ro_flags(disk_super, features);
			btrfs_info(fs_info, "setting %llu ro feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

static inline void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info,
					      u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_compat_ro_flags(disk_super);
	if (features & flag) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_compat_ro_flags(disk_super);
		if (features & flag) {
			features &= ~flag;
			btrfs_set_super_compat_ro_flags(disk_super, features);
			btrfs_info(fs_info, "clearing %llu ro feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag)
{
	struct btrfs_super_block *disk_super;
	disk_super = fs_info->super_copy;
	return !!(btrfs_super_compat_ro_flags(disk_super) & flag);
}

/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
int btrfs_init_acl(struct btrfs_trans_handle *trans,
		   struct inode *inode, struct inode *dir);
#else
#define btrfs_get_acl NULL
#define btrfs_set_acl NULL
static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
				 struct inode *inode, struct inode *dir)
{
	return 0;
}
#endif

/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
int btrfs_recover_relocation(struct btrfs_root *root);
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve);
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending);

/* scrub.c */
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace);
void btrfs_scrub_pause(struct btrfs_root *root);
void btrfs_scrub_continue(struct btrfs_root *root);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
			   struct btrfs_device *dev);
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress);

/* dev-replace.c */
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);

static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
{
	btrfs_bio_counter_sub(fs_info, 1);
}

/* reada.c */
struct reada_control {
	struct btrfs_root	*root;		/* tree to prefetch */
	struct btrfs_key	key_start;
	struct btrfs_key	key_end;	/* exclusive */
	atomic_t		elems;
	struct kref		refcnt;
	wait_queue_head_t	wait;
};
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			      struct btrfs_key *start, struct btrfs_key *end);
int btrfs_reada_wait(void *handle);
void btrfs_reada_detach(void *handle);
int btree_readahead_hook(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, u64 start, int err);
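
/*
 * Illustrative use of the readahead interface above (this mirrors how scrub
 * drives it; the key range setup is omitted): btrfs_reada_add() starts
 * background prefetch of a key range in a tree, and the caller can either
 * wait for it to finish or detach and let it complete on its own.
 *
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);
 */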

static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
	      !btrfs_qgroup_level(rootid)))
		return 1;
	return 0;
}

static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
{
	return signal_pending(current);
}

/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode);
#endif

static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
			      &fs_info->fs_state)))
		return 1;
#endif
	return 0;
}
#endif