1 /* 2 * Copyright (C) 2007 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #ifndef __BTRFS_CTREE__ 20 #define __BTRFS_CTREE__ 21 22 #include <linux/mm.h> 23 #include <linux/highmem.h> 24 #include <linux/fs.h> 25 #include <linux/rwsem.h> 26 #include <linux/semaphore.h> 27 #include <linux/completion.h> 28 #include <linux/backing-dev.h> 29 #include <linux/wait.h> 30 #include <linux/slab.h> 31 #include <linux/kobject.h> 32 #include <trace/events/btrfs.h> 33 #include <asm/kmap_types.h> 34 #include <linux/pagemap.h> 35 #include <linux/btrfs.h> 36 #include <linux/workqueue.h> 37 #include <linux/security.h> 38 #include "extent_io.h" 39 #include "extent_map.h" 40 #include "async-thread.h" 41 42 struct btrfs_trans_handle; 43 struct btrfs_transaction; 44 struct btrfs_pending_snapshot; 45 extern struct kmem_cache *btrfs_trans_handle_cachep; 46 extern struct kmem_cache *btrfs_transaction_cachep; 47 extern struct kmem_cache *btrfs_bit_radix_cachep; 48 extern struct kmem_cache *btrfs_path_cachep; 49 extern struct kmem_cache *btrfs_free_space_cachep; 50 struct btrfs_ordered_sum; 51 52 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 53 #define STATIC noinline 54 #else 55 #define STATIC static noinline 56 #endif 57 58 #define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */ 59 60 #define BTRFS_MAX_MIRRORS 3 61 62 #define BTRFS_MAX_LEVEL 8 63 64 #define BTRFS_COMPAT_EXTENT_TREE_V0 65 66 /* holds pointers to all of the tree roots */ 67 #define BTRFS_ROOT_TREE_OBJECTID 1ULL 68 69 /* stores information about which extents are in use, and reference counts */ 70 #define BTRFS_EXTENT_TREE_OBJECTID 2ULL 71 72 /* 73 * chunk tree stores translations from logical -> physical block numbering 74 * the super block points to the chunk tree 75 */ 76 #define BTRFS_CHUNK_TREE_OBJECTID 3ULL 77 78 /* 79 * stores information about which areas of a given device are in use. 80 * one per device. The tree of tree roots points to the device tree 81 */ 82 #define BTRFS_DEV_TREE_OBJECTID 4ULL 83 84 /* one per subvolume, storing files and directories */ 85 #define BTRFS_FS_TREE_OBJECTID 5ULL 86 87 /* directory objectid inside the root tree */ 88 #define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL 89 90 /* holds checksums of all the data extents */ 91 #define BTRFS_CSUM_TREE_OBJECTID 7ULL 92 93 /* holds quota configuration and tracking */ 94 #define BTRFS_QUOTA_TREE_OBJECTID 8ULL 95 96 /* for storing items that use the BTRFS_UUID_KEY* types */ 97 #define BTRFS_UUID_TREE_OBJECTID 9ULL 98 99 /* for storing balance parameters in the root tree */ 100 #define BTRFS_BALANCE_OBJECTID -4ULL 101 102 /* orhpan objectid for tracking unlinked/truncated files */ 103 #define BTRFS_ORPHAN_OBJECTID -5ULL 104 105 /* does write ahead logging to speed up fsyncs */ 106 #define BTRFS_TREE_LOG_OBJECTID -6ULL 107 #define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL 108 109 /* for space balancing */ 110 #define BTRFS_TREE_RELOC_OBJECTID -8ULL 111 #define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL 112 113 /* 114 * extent checksums all have this objectid 115 * this allows them to share the logging tree 116 * for fsyncs 117 */ 118 #define BTRFS_EXTENT_CSUM_OBJECTID -10ULL 119 120 /* For storing free space cache */ 121 #define BTRFS_FREE_SPACE_OBJECTID -11ULL 122 123 /* 124 * The inode number assigned to the special inode for storing 125 * free ino cache 126 */ 127 #define BTRFS_FREE_INO_OBJECTID -12ULL 128 129 /* dummy objectid represents multiple objectids */ 130 #define BTRFS_MULTIPLE_OBJECTIDS -255ULL 131 132 /* 133 * All files have objectids in this range. 134 */ 135 #define BTRFS_FIRST_FREE_OBJECTID 256ULL 136 #define BTRFS_LAST_FREE_OBJECTID -256ULL 137 #define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL 138 139 140 /* 141 * the device items go into the chunk tree. The key is in the form 142 * [ 1 BTRFS_DEV_ITEM_KEY device_id ] 143 */ 144 #define BTRFS_DEV_ITEMS_OBJECTID 1ULL 145 146 #define BTRFS_BTREE_INODE_OBJECTID 1 147 148 #define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2 149 150 #define BTRFS_DEV_REPLACE_DEVID 0ULL 151 152 /* 153 * the max metadata block size. This limit is somewhat artificial, 154 * but the memmove costs go through the roof for larger blocks. 155 */ 156 #define BTRFS_MAX_METADATA_BLOCKSIZE 65536 157 158 /* 159 * we can actually store much bigger names, but lets not confuse the rest 160 * of linux 161 */ 162 #define BTRFS_NAME_LEN 255 163 164 /* 165 * Theoretical limit is larger, but we keep this down to a sane 166 * value. That should limit greatly the possibility of collisions on 167 * inode ref items. 168 */ 169 #define BTRFS_LINK_MAX 65535U 170 171 /* 32 bytes in various csum fields */ 172 #define BTRFS_CSUM_SIZE 32 173 174 /* csum types */ 175 #define BTRFS_CSUM_TYPE_CRC32 0 176 177 static int btrfs_csum_sizes[] = { 4, 0 }; 178 179 /* four bytes for CRC32 */ 180 #define BTRFS_EMPTY_DIR_SIZE 0 181 182 /* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */ 183 #define REQ_GET_READ_MIRRORS (1 << 30) 184 185 #define BTRFS_FT_UNKNOWN 0 186 #define BTRFS_FT_REG_FILE 1 187 #define BTRFS_FT_DIR 2 188 #define BTRFS_FT_CHRDEV 3 189 #define BTRFS_FT_BLKDEV 4 190 #define BTRFS_FT_FIFO 5 191 #define BTRFS_FT_SOCK 6 192 #define BTRFS_FT_SYMLINK 7 193 #define BTRFS_FT_XATTR 8 194 #define BTRFS_FT_MAX 9 195 196 /* ioprio of readahead is set to idle */ 197 #define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)) 198 199 #define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024) 200 201 #define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024) 202 203 /* 204 * The key defines the order in the tree, and so it also defines (optimal) 205 * block layout. 206 * 207 * objectid corresponds to the inode number. 208 * 209 * type tells us things about the object, and is a kind of stream selector. 210 * so for a given inode, keys with type of 1 might refer to the inode data, 211 * type of 2 may point to file data in the btree and type == 3 may point to 212 * extents. 213 * 214 * offset is the starting byte offset for this key in the stream. 215 * 216 * btrfs_disk_key is in disk byte order. struct btrfs_key is always 217 * in cpu native order. Otherwise they are identical and their sizes 218 * should be the same (ie both packed) 219 */ 220 struct btrfs_disk_key { 221 __le64 objectid; 222 u8 type; 223 __le64 offset; 224 } __attribute__ ((__packed__)); 225 226 struct btrfs_key { 227 u64 objectid; 228 u8 type; 229 u64 offset; 230 } __attribute__ ((__packed__)); 231 232 struct btrfs_mapping_tree { 233 struct extent_map_tree map_tree; 234 }; 235 236 struct btrfs_dev_item { 237 /* the internal btrfs device id */ 238 __le64 devid; 239 240 /* size of the device */ 241 __le64 total_bytes; 242 243 /* bytes used */ 244 __le64 bytes_used; 245 246 /* optimal io alignment for this device */ 247 __le32 io_align; 248 249 /* optimal io width for this device */ 250 __le32 io_width; 251 252 /* minimal io size for this device */ 253 __le32 sector_size; 254 255 /* type and info about this device */ 256 __le64 type; 257 258 /* expected generation for this device */ 259 __le64 generation; 260 261 /* 262 * starting byte of this partition on the device, 263 * to allow for stripe alignment in the future 264 */ 265 __le64 start_offset; 266 267 /* grouping information for allocation decisions */ 268 __le32 dev_group; 269 270 /* seek speed 0-100 where 100 is fastest */ 271 u8 seek_speed; 272 273 /* bandwidth 0-100 where 100 is fastest */ 274 u8 bandwidth; 275 276 /* btrfs generated uuid for this device */ 277 u8 uuid[BTRFS_UUID_SIZE]; 278 279 /* uuid of FS who owns this device */ 280 u8 fsid[BTRFS_UUID_SIZE]; 281 } __attribute__ ((__packed__)); 282 283 struct btrfs_stripe { 284 __le64 devid; 285 __le64 offset; 286 u8 dev_uuid[BTRFS_UUID_SIZE]; 287 } __attribute__ ((__packed__)); 288 289 struct btrfs_chunk { 290 /* size of this chunk in bytes */ 291 __le64 length; 292 293 /* objectid of the root referencing this chunk */ 294 __le64 owner; 295 296 __le64 stripe_len; 297 __le64 type; 298 299 /* optimal io alignment for this chunk */ 300 __le32 io_align; 301 302 /* optimal io width for this chunk */ 303 __le32 io_width; 304 305 /* minimal io size for this chunk */ 306 __le32 sector_size; 307 308 /* 2^16 stripes is quite a lot, a second limit is the size of a single 309 * item in the btree 310 */ 311 __le16 num_stripes; 312 313 /* sub stripes only matter for raid10 */ 314 __le16 sub_stripes; 315 struct btrfs_stripe stripe; 316 /* additional stripes go here */ 317 } __attribute__ ((__packed__)); 318 319 #define BTRFS_FREE_SPACE_EXTENT 1 320 #define BTRFS_FREE_SPACE_BITMAP 2 321 322 struct btrfs_free_space_entry { 323 __le64 offset; 324 __le64 bytes; 325 u8 type; 326 } __attribute__ ((__packed__)); 327 328 struct btrfs_free_space_header { 329 struct btrfs_disk_key location; 330 __le64 generation; 331 __le64 num_entries; 332 __le64 num_bitmaps; 333 } __attribute__ ((__packed__)); 334 335 static inline unsigned long btrfs_chunk_item_size(int num_stripes) 336 { 337 BUG_ON(num_stripes == 0); 338 return sizeof(struct btrfs_chunk) + 339 sizeof(struct btrfs_stripe) * (num_stripes - 1); 340 } 341 342 #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) 343 #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) 344 345 /* 346 * File system states 347 */ 348 #define BTRFS_FS_STATE_ERROR 0 349 #define BTRFS_FS_STATE_REMOUNTING 1 350 #define BTRFS_FS_STATE_TRANS_ABORTED 2 351 #define BTRFS_FS_STATE_DEV_REPLACING 3 352 353 /* Super block flags */ 354 /* Errors detected */ 355 #define BTRFS_SUPER_FLAG_ERROR (1ULL << 2) 356 357 #define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) 358 #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) 359 360 #define BTRFS_BACKREF_REV_MAX 256 361 #define BTRFS_BACKREF_REV_SHIFT 56 362 #define BTRFS_BACKREF_REV_MASK (((u64)BTRFS_BACKREF_REV_MAX - 1) << \ 363 BTRFS_BACKREF_REV_SHIFT) 364 365 #define BTRFS_OLD_BACKREF_REV 0 366 #define BTRFS_MIXED_BACKREF_REV 1 367 368 /* 369 * every tree block (leaf or node) starts with this header. 370 */ 371 struct btrfs_header { 372 /* these first four must match the super block */ 373 u8 csum[BTRFS_CSUM_SIZE]; 374 u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */ 375 __le64 bytenr; /* which block this node is supposed to live in */ 376 __le64 flags; 377 378 /* allowed to be different from the super from here on down */ 379 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 380 __le64 generation; 381 __le64 owner; 382 __le32 nritems; 383 u8 level; 384 } __attribute__ ((__packed__)); 385 386 #define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \ 387 sizeof(struct btrfs_header)) / \ 388 sizeof(struct btrfs_key_ptr)) 389 #define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header)) 390 #define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->nodesize)) 391 #define BTRFS_FILE_EXTENT_INLINE_DATA_START \ 392 (offsetof(struct btrfs_file_extent_item, disk_bytenr)) 393 #define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ 394 sizeof(struct btrfs_item) - \ 395 BTRFS_FILE_EXTENT_INLINE_DATA_START) 396 #define BTRFS_MAX_XATTR_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ 397 sizeof(struct btrfs_item) -\ 398 sizeof(struct btrfs_dir_item)) 399 400 401 /* 402 * this is a very generous portion of the super block, giving us 403 * room to translate 14 chunks with 3 stripes each. 404 */ 405 #define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048 406 #define BTRFS_LABEL_SIZE 256 407 408 /* 409 * just in case we somehow lose the roots and are not able to mount, 410 * we store an array of the roots from previous transactions 411 * in the super. 412 */ 413 #define BTRFS_NUM_BACKUP_ROOTS 4 414 struct btrfs_root_backup { 415 __le64 tree_root; 416 __le64 tree_root_gen; 417 418 __le64 chunk_root; 419 __le64 chunk_root_gen; 420 421 __le64 extent_root; 422 __le64 extent_root_gen; 423 424 __le64 fs_root; 425 __le64 fs_root_gen; 426 427 __le64 dev_root; 428 __le64 dev_root_gen; 429 430 __le64 csum_root; 431 __le64 csum_root_gen; 432 433 __le64 total_bytes; 434 __le64 bytes_used; 435 __le64 num_devices; 436 /* future */ 437 __le64 unused_64[4]; 438 439 u8 tree_root_level; 440 u8 chunk_root_level; 441 u8 extent_root_level; 442 u8 fs_root_level; 443 u8 dev_root_level; 444 u8 csum_root_level; 445 /* future and to align */ 446 u8 unused_8[10]; 447 } __attribute__ ((__packed__)); 448 449 /* 450 * the super block basically lists the main trees of the FS 451 * it currently lacks any block count etc etc 452 */ 453 struct btrfs_super_block { 454 u8 csum[BTRFS_CSUM_SIZE]; 455 /* the first 4 fields must match struct btrfs_header */ 456 u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */ 457 __le64 bytenr; /* this block number */ 458 __le64 flags; 459 460 /* allowed to be different from the btrfs_header from here own down */ 461 __le64 magic; 462 __le64 generation; 463 __le64 root; 464 __le64 chunk_root; 465 __le64 log_root; 466 467 /* this will help find the new super based on the log root */ 468 __le64 log_root_transid; 469 __le64 total_bytes; 470 __le64 bytes_used; 471 __le64 root_dir_objectid; 472 __le64 num_devices; 473 __le32 sectorsize; 474 __le32 nodesize; 475 __le32 __unused_leafsize; 476 __le32 stripesize; 477 __le32 sys_chunk_array_size; 478 __le64 chunk_root_generation; 479 __le64 compat_flags; 480 __le64 compat_ro_flags; 481 __le64 incompat_flags; 482 __le16 csum_type; 483 u8 root_level; 484 u8 chunk_root_level; 485 u8 log_root_level; 486 struct btrfs_dev_item dev_item; 487 488 char label[BTRFS_LABEL_SIZE]; 489 490 __le64 cache_generation; 491 __le64 uuid_tree_generation; 492 493 /* future expansion */ 494 __le64 reserved[30]; 495 u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE]; 496 struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS]; 497 } __attribute__ ((__packed__)); 498 499 /* 500 * Compat flags that we support. If any incompat flags are set other than the 501 * ones specified below then we will fail to mount 502 */ 503 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) 504 #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) 505 #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) 506 #define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3) 507 /* 508 * some patches floated around with a second compression method 509 * lets save that incompat here for when they do get in 510 * Note we don't actually support it, we're just reserving the 511 * number 512 */ 513 #define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4) 514 515 /* 516 * older kernels tried to do bigger metadata blocks, but the 517 * code was pretty buggy. Lets not let them try anymore. 518 */ 519 #define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5) 520 521 #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6) 522 #define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7) 523 #define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8) 524 #define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9) 525 526 #define BTRFS_FEATURE_COMPAT_SUPP 0ULL 527 #define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL 528 #define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL 529 #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL 530 #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL 531 #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL 532 533 #define BTRFS_FEATURE_INCOMPAT_SUPP \ 534 (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ 535 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ 536 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ 537 BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \ 538 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \ 539 BTRFS_FEATURE_INCOMPAT_RAID56 | \ 540 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \ 541 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \ 542 BTRFS_FEATURE_INCOMPAT_NO_HOLES) 543 544 #define BTRFS_FEATURE_INCOMPAT_SAFE_SET \ 545 (BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF) 546 #define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL 547 548 /* 549 * A leaf is full of items. offset and size tell us where to find 550 * the item in the leaf (relative to the start of the data area) 551 */ 552 struct btrfs_item { 553 struct btrfs_disk_key key; 554 __le32 offset; 555 __le32 size; 556 } __attribute__ ((__packed__)); 557 558 /* 559 * leaves have an item area and a data area: 560 * [item0, item1....itemN] [free space] [dataN...data1, data0] 561 * 562 * The data is separate from the items to get the keys closer together 563 * during searches. 564 */ 565 struct btrfs_leaf { 566 struct btrfs_header header; 567 struct btrfs_item items[]; 568 } __attribute__ ((__packed__)); 569 570 /* 571 * all non-leaf blocks are nodes, they hold only keys and pointers to 572 * other blocks 573 */ 574 struct btrfs_key_ptr { 575 struct btrfs_disk_key key; 576 __le64 blockptr; 577 __le64 generation; 578 } __attribute__ ((__packed__)); 579 580 struct btrfs_node { 581 struct btrfs_header header; 582 struct btrfs_key_ptr ptrs[]; 583 } __attribute__ ((__packed__)); 584 585 /* 586 * btrfs_paths remember the path taken from the root down to the leaf. 587 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point 588 * to any other levels that are present. 589 * 590 * The slots array records the index of the item or block pointer 591 * used while walking the tree. 592 */ 593 struct btrfs_path { 594 struct extent_buffer *nodes[BTRFS_MAX_LEVEL]; 595 int slots[BTRFS_MAX_LEVEL]; 596 /* if there is real range locking, this locks field will change */ 597 int locks[BTRFS_MAX_LEVEL]; 598 int reada; 599 /* keep some upper locks as we walk down */ 600 int lowest_level; 601 602 /* 603 * set by btrfs_split_item, tells search_slot to keep all locks 604 * and to force calls to keep space in the nodes 605 */ 606 unsigned int search_for_split:1; 607 unsigned int keep_locks:1; 608 unsigned int skip_locking:1; 609 unsigned int leave_spinning:1; 610 unsigned int search_commit_root:1; 611 unsigned int need_commit_sem:1; 612 unsigned int skip_release_on_error:1; 613 }; 614 615 /* 616 * items in the extent btree are used to record the objectid of the 617 * owner of the block and the number of references 618 */ 619 620 struct btrfs_extent_item { 621 __le64 refs; 622 __le64 generation; 623 __le64 flags; 624 } __attribute__ ((__packed__)); 625 626 struct btrfs_extent_item_v0 { 627 __le32 refs; 628 } __attribute__ ((__packed__)); 629 630 #define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \ 631 sizeof(struct btrfs_item)) 632 633 #define BTRFS_EXTENT_FLAG_DATA (1ULL << 0) 634 #define BTRFS_EXTENT_FLAG_TREE_BLOCK (1ULL << 1) 635 636 /* following flags only apply to tree blocks */ 637 638 /* use full backrefs for extent pointers in the block */ 639 #define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) 640 641 /* 642 * this flag is only used internally by scrub and may be changed at any time 643 * it is only declared here to avoid collisions 644 */ 645 #define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48) 646 647 struct btrfs_tree_block_info { 648 struct btrfs_disk_key key; 649 u8 level; 650 } __attribute__ ((__packed__)); 651 652 struct btrfs_extent_data_ref { 653 __le64 root; 654 __le64 objectid; 655 __le64 offset; 656 __le32 count; 657 } __attribute__ ((__packed__)); 658 659 struct btrfs_shared_data_ref { 660 __le32 count; 661 } __attribute__ ((__packed__)); 662 663 struct btrfs_extent_inline_ref { 664 u8 type; 665 __le64 offset; 666 } __attribute__ ((__packed__)); 667 668 /* old style backrefs item */ 669 struct btrfs_extent_ref_v0 { 670 __le64 root; 671 __le64 generation; 672 __le64 objectid; 673 __le32 count; 674 } __attribute__ ((__packed__)); 675 676 677 /* dev extents record free space on individual devices. The owner 678 * field points back to the chunk allocation mapping tree that allocated 679 * the extent. The chunk tree uuid field is a way to double check the owner 680 */ 681 struct btrfs_dev_extent { 682 __le64 chunk_tree; 683 __le64 chunk_objectid; 684 __le64 chunk_offset; 685 __le64 length; 686 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 687 } __attribute__ ((__packed__)); 688 689 struct btrfs_inode_ref { 690 __le64 index; 691 __le16 name_len; 692 /* name goes here */ 693 } __attribute__ ((__packed__)); 694 695 struct btrfs_inode_extref { 696 __le64 parent_objectid; 697 __le64 index; 698 __le16 name_len; 699 __u8 name[0]; 700 /* name goes here */ 701 } __attribute__ ((__packed__)); 702 703 struct btrfs_timespec { 704 __le64 sec; 705 __le32 nsec; 706 } __attribute__ ((__packed__)); 707 708 enum btrfs_compression_type { 709 BTRFS_COMPRESS_NONE = 0, 710 BTRFS_COMPRESS_ZLIB = 1, 711 BTRFS_COMPRESS_LZO = 2, 712 BTRFS_COMPRESS_TYPES = 2, 713 BTRFS_COMPRESS_LAST = 3, 714 }; 715 716 struct btrfs_inode_item { 717 /* nfs style generation number */ 718 __le64 generation; 719 /* transid that last touched this inode */ 720 __le64 transid; 721 __le64 size; 722 __le64 nbytes; 723 __le64 block_group; 724 __le32 nlink; 725 __le32 uid; 726 __le32 gid; 727 __le32 mode; 728 __le64 rdev; 729 __le64 flags; 730 731 /* modification sequence number for NFS */ 732 __le64 sequence; 733 734 /* 735 * a little future expansion, for more than this we can 736 * just grow the inode item and version it 737 */ 738 __le64 reserved[4]; 739 struct btrfs_timespec atime; 740 struct btrfs_timespec ctime; 741 struct btrfs_timespec mtime; 742 struct btrfs_timespec otime; 743 } __attribute__ ((__packed__)); 744 745 struct btrfs_dir_log_item { 746 __le64 end; 747 } __attribute__ ((__packed__)); 748 749 struct btrfs_dir_item { 750 struct btrfs_disk_key location; 751 __le64 transid; 752 __le16 data_len; 753 __le16 name_len; 754 u8 type; 755 } __attribute__ ((__packed__)); 756 757 #define BTRFS_ROOT_SUBVOL_RDONLY (1ULL << 0) 758 759 /* 760 * Internal in-memory flag that a subvolume has been marked for deletion but 761 * still visible as a directory 762 */ 763 #define BTRFS_ROOT_SUBVOL_DEAD (1ULL << 48) 764 765 struct btrfs_root_item { 766 struct btrfs_inode_item inode; 767 __le64 generation; 768 __le64 root_dirid; 769 __le64 bytenr; 770 __le64 byte_limit; 771 __le64 bytes_used; 772 __le64 last_snapshot; 773 __le64 flags; 774 __le32 refs; 775 struct btrfs_disk_key drop_progress; 776 u8 drop_level; 777 u8 level; 778 779 /* 780 * The following fields appear after subvol_uuids+subvol_times 781 * were introduced. 782 */ 783 784 /* 785 * This generation number is used to test if the new fields are valid 786 * and up to date while reading the root item. Everytime the root item 787 * is written out, the "generation" field is copied into this field. If 788 * anyone ever mounted the fs with an older kernel, we will have 789 * mismatching generation values here and thus must invalidate the 790 * new fields. See btrfs_update_root and btrfs_find_last_root for 791 * details. 792 * the offset of generation_v2 is also used as the start for the memset 793 * when invalidating the fields. 794 */ 795 __le64 generation_v2; 796 u8 uuid[BTRFS_UUID_SIZE]; 797 u8 parent_uuid[BTRFS_UUID_SIZE]; 798 u8 received_uuid[BTRFS_UUID_SIZE]; 799 __le64 ctransid; /* updated when an inode changes */ 800 __le64 otransid; /* trans when created */ 801 __le64 stransid; /* trans when sent. non-zero for received subvol */ 802 __le64 rtransid; /* trans when received. non-zero for received subvol */ 803 struct btrfs_timespec ctime; 804 struct btrfs_timespec otime; 805 struct btrfs_timespec stime; 806 struct btrfs_timespec rtime; 807 __le64 reserved[8]; /* for future */ 808 } __attribute__ ((__packed__)); 809 810 /* 811 * this is used for both forward and backward root refs 812 */ 813 struct btrfs_root_ref { 814 __le64 dirid; 815 __le64 sequence; 816 __le16 name_len; 817 } __attribute__ ((__packed__)); 818 819 struct btrfs_disk_balance_args { 820 /* 821 * profiles to operate on, single is denoted by 822 * BTRFS_AVAIL_ALLOC_BIT_SINGLE 823 */ 824 __le64 profiles; 825 826 /* usage filter */ 827 __le64 usage; 828 829 /* devid filter */ 830 __le64 devid; 831 832 /* devid subset filter [pstart..pend) */ 833 __le64 pstart; 834 __le64 pend; 835 836 /* btrfs virtual address space subset filter [vstart..vend) */ 837 __le64 vstart; 838 __le64 vend; 839 840 /* 841 * profile to convert to, single is denoted by 842 * BTRFS_AVAIL_ALLOC_BIT_SINGLE 843 */ 844 __le64 target; 845 846 /* BTRFS_BALANCE_ARGS_* */ 847 __le64 flags; 848 849 /* BTRFS_BALANCE_ARGS_LIMIT value */ 850 __le64 limit; 851 852 __le64 unused[7]; 853 } __attribute__ ((__packed__)); 854 855 /* 856 * store balance parameters to disk so that balance can be properly 857 * resumed after crash or unmount 858 */ 859 struct btrfs_balance_item { 860 /* BTRFS_BALANCE_* */ 861 __le64 flags; 862 863 struct btrfs_disk_balance_args data; 864 struct btrfs_disk_balance_args meta; 865 struct btrfs_disk_balance_args sys; 866 867 __le64 unused[4]; 868 } __attribute__ ((__packed__)); 869 870 #define BTRFS_FILE_EXTENT_INLINE 0 871 #define BTRFS_FILE_EXTENT_REG 1 872 #define BTRFS_FILE_EXTENT_PREALLOC 2 873 874 struct btrfs_file_extent_item { 875 /* 876 * transaction id that created this extent 877 */ 878 __le64 generation; 879 /* 880 * max number of bytes to hold this extent in ram 881 * when we split a compressed extent we can't know how big 882 * each of the resulting pieces will be. So, this is 883 * an upper limit on the size of the extent in ram instead of 884 * an exact limit. 885 */ 886 __le64 ram_bytes; 887 888 /* 889 * 32 bits for the various ways we might encode the data, 890 * including compression and encryption. If any of these 891 * are set to something a given disk format doesn't understand 892 * it is treated like an incompat flag for reading and writing, 893 * but not for stat. 894 */ 895 u8 compression; 896 u8 encryption; 897 __le16 other_encoding; /* spare for later use */ 898 899 /* are we inline data or a real extent? */ 900 u8 type; 901 902 /* 903 * disk space consumed by the extent, checksum blocks are included 904 * in these numbers 905 * 906 * At this offset in the structure, the inline extent data start. 907 */ 908 __le64 disk_bytenr; 909 __le64 disk_num_bytes; 910 /* 911 * the logical offset in file blocks (no csums) 912 * this extent record is for. This allows a file extent to point 913 * into the middle of an existing extent on disk, sharing it 914 * between two snapshots (useful if some bytes in the middle of the 915 * extent have changed 916 */ 917 __le64 offset; 918 /* 919 * the logical number of file blocks (no csums included). This 920 * always reflects the size uncompressed and without encoding. 921 */ 922 __le64 num_bytes; 923 924 } __attribute__ ((__packed__)); 925 926 struct btrfs_csum_item { 927 u8 csum; 928 } __attribute__ ((__packed__)); 929 930 struct btrfs_dev_stats_item { 931 /* 932 * grow this item struct at the end for future enhancements and keep 933 * the existing values unchanged 934 */ 935 __le64 values[BTRFS_DEV_STAT_VALUES_MAX]; 936 } __attribute__ ((__packed__)); 937 938 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0 939 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1 940 #define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0 941 #define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1 942 #define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 943 #define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 944 #define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 945 946 struct btrfs_dev_replace { 947 u64 replace_state; /* see #define above */ 948 u64 time_started; /* seconds since 1-Jan-1970 */ 949 u64 time_stopped; /* seconds since 1-Jan-1970 */ 950 atomic64_t num_write_errors; 951 atomic64_t num_uncorrectable_read_errors; 952 953 u64 cursor_left; 954 u64 committed_cursor_left; 955 u64 cursor_left_last_write_of_item; 956 u64 cursor_right; 957 958 u64 cont_reading_from_srcdev_mode; /* see #define above */ 959 960 int is_valid; 961 int item_needs_writeback; 962 struct btrfs_device *srcdev; 963 struct btrfs_device *tgtdev; 964 965 pid_t lock_owner; 966 atomic_t nesting_level; 967 struct mutex lock_finishing_cancel_unmount; 968 struct mutex lock_management_lock; 969 struct mutex lock; 970 971 struct btrfs_scrub_progress scrub_progress; 972 }; 973 974 struct btrfs_dev_replace_item { 975 /* 976 * grow this item struct at the end for future enhancements and keep 977 * the existing values unchanged 978 */ 979 __le64 src_devid; 980 __le64 cursor_left; 981 __le64 cursor_right; 982 __le64 cont_reading_from_srcdev_mode; 983 984 __le64 replace_state; 985 __le64 time_started; 986 __le64 time_stopped; 987 __le64 num_write_errors; 988 __le64 num_uncorrectable_read_errors; 989 } __attribute__ ((__packed__)); 990 991 /* different types of block groups (and chunks) */ 992 #define BTRFS_BLOCK_GROUP_DATA (1ULL << 0) 993 #define BTRFS_BLOCK_GROUP_SYSTEM (1ULL << 1) 994 #define BTRFS_BLOCK_GROUP_METADATA (1ULL << 2) 995 #define BTRFS_BLOCK_GROUP_RAID0 (1ULL << 3) 996 #define BTRFS_BLOCK_GROUP_RAID1 (1ULL << 4) 997 #define BTRFS_BLOCK_GROUP_DUP (1ULL << 5) 998 #define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6) 999 #define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7) 1000 #define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8) 1001 #define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \ 1002 BTRFS_SPACE_INFO_GLOBAL_RSV) 1003 1004 enum btrfs_raid_types { 1005 BTRFS_RAID_RAID10, 1006 BTRFS_RAID_RAID1, 1007 BTRFS_RAID_DUP, 1008 BTRFS_RAID_RAID0, 1009 BTRFS_RAID_SINGLE, 1010 BTRFS_RAID_RAID5, 1011 BTRFS_RAID_RAID6, 1012 BTRFS_NR_RAID_TYPES 1013 }; 1014 1015 #define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \ 1016 BTRFS_BLOCK_GROUP_SYSTEM | \ 1017 BTRFS_BLOCK_GROUP_METADATA) 1018 1019 #define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ 1020 BTRFS_BLOCK_GROUP_RAID1 | \ 1021 BTRFS_BLOCK_GROUP_RAID5 | \ 1022 BTRFS_BLOCK_GROUP_RAID6 | \ 1023 BTRFS_BLOCK_GROUP_DUP | \ 1024 BTRFS_BLOCK_GROUP_RAID10) 1025 #define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \ 1026 BTRFS_BLOCK_GROUP_RAID6) 1027 1028 /* 1029 * We need a bit for restriper to be able to tell when chunks of type 1030 * SINGLE are available. This "extended" profile format is used in 1031 * fs_info->avail_*_alloc_bits (in-memory) and balance item fields 1032 * (on-disk). The corresponding on-disk bit in chunk.type is reserved 1033 * to avoid remappings between two formats in future. 1034 */ 1035 #define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48) 1036 1037 /* 1038 * A fake block group type that is used to communicate global block reserve 1039 * size to userspace via the SPACE_INFO ioctl. 1040 */ 1041 #define BTRFS_SPACE_INFO_GLOBAL_RSV (1ULL << 49) 1042 1043 #define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \ 1044 BTRFS_AVAIL_ALLOC_BIT_SINGLE) 1045 1046 static inline u64 chunk_to_extended(u64 flags) 1047 { 1048 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0) 1049 flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE; 1050 1051 return flags; 1052 } 1053 static inline u64 extended_to_chunk(u64 flags) 1054 { 1055 return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE; 1056 } 1057 1058 struct btrfs_block_group_item { 1059 __le64 used; 1060 __le64 chunk_objectid; 1061 __le64 flags; 1062 } __attribute__ ((__packed__)); 1063 1064 #define BTRFS_QGROUP_LEVEL_SHIFT 48 1065 static inline u64 btrfs_qgroup_level(u64 qgroupid) 1066 { 1067 return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT; 1068 } 1069 1070 /* 1071 * is subvolume quota turned on? 1072 */ 1073 #define BTRFS_QGROUP_STATUS_FLAG_ON (1ULL << 0) 1074 /* 1075 * RESCAN is set during the initialization phase 1076 */ 1077 #define BTRFS_QGROUP_STATUS_FLAG_RESCAN (1ULL << 1) 1078 /* 1079 * Some qgroup entries are known to be out of date, 1080 * either because the configuration has changed in a way that 1081 * makes a rescan necessary, or because the fs has been mounted 1082 * with a non-qgroup-aware version. 1083 * Turning qouta off and on again makes it inconsistent, too. 1084 */ 1085 #define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2) 1086 1087 #define BTRFS_QGROUP_STATUS_VERSION 1 1088 1089 struct btrfs_qgroup_status_item { 1090 __le64 version; 1091 /* 1092 * the generation is updated during every commit. As older 1093 * versions of btrfs are not aware of qgroups, it will be 1094 * possible to detect inconsistencies by checking the 1095 * generation on mount time 1096 */ 1097 __le64 generation; 1098 1099 /* flag definitions see above */ 1100 __le64 flags; 1101 1102 /* 1103 * only used during scanning to record the progress 1104 * of the scan. It contains a logical address 1105 */ 1106 __le64 rescan; 1107 } __attribute__ ((__packed__)); 1108 1109 struct btrfs_qgroup_info_item { 1110 __le64 generation; 1111 __le64 rfer; 1112 __le64 rfer_cmpr; 1113 __le64 excl; 1114 __le64 excl_cmpr; 1115 } __attribute__ ((__packed__)); 1116 1117 /* flags definition for qgroup limits */ 1118 #define BTRFS_QGROUP_LIMIT_MAX_RFER (1ULL << 0) 1119 #define BTRFS_QGROUP_LIMIT_MAX_EXCL (1ULL << 1) 1120 #define BTRFS_QGROUP_LIMIT_RSV_RFER (1ULL << 2) 1121 #define BTRFS_QGROUP_LIMIT_RSV_EXCL (1ULL << 3) 1122 #define BTRFS_QGROUP_LIMIT_RFER_CMPR (1ULL << 4) 1123 #define BTRFS_QGROUP_LIMIT_EXCL_CMPR (1ULL << 5) 1124 1125 struct btrfs_qgroup_limit_item { 1126 /* 1127 * only updated when any of the other values change 1128 */ 1129 __le64 flags; 1130 __le64 max_rfer; 1131 __le64 max_excl; 1132 __le64 rsv_rfer; 1133 __le64 rsv_excl; 1134 } __attribute__ ((__packed__)); 1135 1136 /* For raid type sysfs entries */ 1137 struct raid_kobject { 1138 int raid_type; 1139 struct kobject kobj; 1140 }; 1141 1142 struct btrfs_space_info { 1143 spinlock_t lock; 1144 1145 u64 total_bytes; /* total bytes in the space, 1146 this doesn't take mirrors into account */ 1147 u64 bytes_used; /* total bytes used, 1148 this doesn't take mirrors into account */ 1149 u64 bytes_pinned; /* total bytes pinned, will be freed when the 1150 transaction finishes */ 1151 u64 bytes_reserved; /* total bytes the allocator has reserved for 1152 current allocations */ 1153 u64 bytes_may_use; /* number of bytes that may be used for 1154 delalloc/allocations */ 1155 u64 bytes_readonly; /* total bytes that are read only */ 1156 1157 unsigned int full:1; /* indicates that we cannot allocate any more 1158 chunks for this space */ 1159 unsigned int chunk_alloc:1; /* set if we are allocating a chunk */ 1160 1161 unsigned int flush:1; /* set if we are trying to make space */ 1162 1163 unsigned int force_alloc; /* set if we need to force a chunk 1164 alloc for this space */ 1165 1166 u64 disk_used; /* total bytes used on disk */ 1167 u64 disk_total; /* total bytes on disk, takes mirrors into 1168 account */ 1169 1170 u64 flags; 1171 1172 /* 1173 * bytes_pinned is kept in line with what is actually pinned, as in 1174 * we've called update_block_group and dropped the bytes_used counter 1175 * and increased the bytes_pinned counter. However this means that 1176 * bytes_pinned does not reflect the bytes that will be pinned once the 1177 * delayed refs are flushed, so this counter is inc'ed everytime we call 1178 * btrfs_free_extent so it is a realtime count of what will be freed 1179 * once the transaction is committed. It will be zero'ed everytime the 1180 * transaction commits. 1181 */ 1182 struct percpu_counter total_bytes_pinned; 1183 1184 struct list_head list; 1185 /* Protected by the spinlock 'lock'. */ 1186 struct list_head ro_bgs; 1187 1188 struct rw_semaphore groups_sem; 1189 /* for block groups in our same type */ 1190 struct list_head block_groups[BTRFS_NR_RAID_TYPES]; 1191 wait_queue_head_t wait; 1192 1193 struct kobject kobj; 1194 struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES]; 1195 }; 1196 1197 #define BTRFS_BLOCK_RSV_GLOBAL 1 1198 #define BTRFS_BLOCK_RSV_DELALLOC 2 1199 #define BTRFS_BLOCK_RSV_TRANS 3 1200 #define BTRFS_BLOCK_RSV_CHUNK 4 1201 #define BTRFS_BLOCK_RSV_DELOPS 5 1202 #define BTRFS_BLOCK_RSV_EMPTY 6 1203 #define BTRFS_BLOCK_RSV_TEMP 7 1204 1205 struct btrfs_block_rsv { 1206 u64 size; 1207 u64 reserved; 1208 struct btrfs_space_info *space_info; 1209 spinlock_t lock; 1210 unsigned short full; 1211 unsigned short type; 1212 unsigned short failfast; 1213 }; 1214 1215 /* 1216 * free clusters are used to claim free space in relatively large chunks, 1217 * allowing us to do less seeky writes. They are used for all metadata 1218 * allocations and data allocations in ssd mode. 1219 */ 1220 struct btrfs_free_cluster { 1221 spinlock_t lock; 1222 spinlock_t refill_lock; 1223 struct rb_root root; 1224 1225 /* largest extent in this cluster */ 1226 u64 max_size; 1227 1228 /* first extent starting offset */ 1229 u64 window_start; 1230 1231 struct btrfs_block_group_cache *block_group; 1232 /* 1233 * when a cluster is allocated from a block group, we put the 1234 * cluster onto a list in the block group so that it can 1235 * be freed before the block group is freed. 1236 */ 1237 struct list_head block_group_list; 1238 }; 1239 1240 enum btrfs_caching_type { 1241 BTRFS_CACHE_NO = 0, 1242 BTRFS_CACHE_STARTED = 1, 1243 BTRFS_CACHE_FAST = 2, 1244 BTRFS_CACHE_FINISHED = 3, 1245 BTRFS_CACHE_ERROR = 4, 1246 }; 1247 1248 enum btrfs_disk_cache_state { 1249 BTRFS_DC_WRITTEN = 0, 1250 BTRFS_DC_ERROR = 1, 1251 BTRFS_DC_CLEAR = 2, 1252 BTRFS_DC_SETUP = 3, 1253 }; 1254 1255 struct btrfs_caching_control { 1256 struct list_head list; 1257 struct mutex mutex; 1258 wait_queue_head_t wait; 1259 struct btrfs_work work; 1260 struct btrfs_block_group_cache *block_group; 1261 u64 progress; 1262 atomic_t count; 1263 }; 1264 1265 struct btrfs_io_ctl { 1266 void *cur, *orig; 1267 struct page *page; 1268 struct page **pages; 1269 struct btrfs_root *root; 1270 struct inode *inode; 1271 unsigned long size; 1272 int index; 1273 int num_pages; 1274 int entries; 1275 int bitmaps; 1276 unsigned check_crcs:1; 1277 }; 1278 1279 struct btrfs_block_group_cache { 1280 struct btrfs_key key; 1281 struct btrfs_block_group_item item; 1282 struct btrfs_fs_info *fs_info; 1283 struct inode *inode; 1284 spinlock_t lock; 1285 u64 pinned; 1286 u64 reserved; 1287 u64 delalloc_bytes; 1288 u64 bytes_super; 1289 u64 flags; 1290 u64 sectorsize; 1291 u64 cache_generation; 1292 1293 /* 1294 * It is just used for the delayed data space allocation because 1295 * only the data space allocation and the relative metadata update 1296 * can be done cross the transaction. 1297 */ 1298 struct rw_semaphore data_rwsem; 1299 1300 /* for raid56, this is a full stripe, without parity */ 1301 unsigned long full_stripe_len; 1302 1303 unsigned int ro:1; 1304 unsigned int iref:1; 1305 unsigned int has_caching_ctl:1; 1306 unsigned int removed:1; 1307 1308 int disk_cache_state; 1309 1310 /* cache tracking stuff */ 1311 int cached; 1312 struct btrfs_caching_control *caching_ctl; 1313 u64 last_byte_to_unpin; 1314 1315 struct btrfs_space_info *space_info; 1316 1317 /* free space cache stuff */ 1318 struct btrfs_free_space_ctl *free_space_ctl; 1319 1320 /* block group cache stuff */ 1321 struct rb_node cache_node; 1322 1323 /* for block groups in the same raid type */ 1324 struct list_head list; 1325 1326 /* usage count */ 1327 atomic_t count; 1328 1329 /* List of struct btrfs_free_clusters for this block group. 1330 * Today it will only have one thing on it, but that may change 1331 */ 1332 struct list_head cluster_list; 1333 1334 /* For delayed block group creation or deletion of empty block groups */ 1335 struct list_head bg_list; 1336 1337 /* For read-only block groups */ 1338 struct list_head ro_list; 1339 1340 atomic_t trimming; 1341 1342 /* For dirty block groups */ 1343 struct list_head dirty_list; 1344 struct list_head io_list; 1345 1346 struct btrfs_io_ctl io_ctl; 1347 }; 1348 1349 /* delayed seq elem */ 1350 struct seq_list { 1351 struct list_head list; 1352 u64 seq; 1353 }; 1354 1355 #define SEQ_LIST_INIT(name) { .list = LIST_HEAD_INIT((name).list), .seq = 0 } 1356 1357 enum btrfs_orphan_cleanup_state { 1358 ORPHAN_CLEANUP_STARTED = 1, 1359 ORPHAN_CLEANUP_DONE = 2, 1360 }; 1361 1362 /* used by the raid56 code to lock stripes for read/modify/write */ 1363 struct btrfs_stripe_hash { 1364 struct list_head hash_list; 1365 wait_queue_head_t wait; 1366 spinlock_t lock; 1367 }; 1368 1369 /* used by the raid56 code to lock stripes for read/modify/write */ 1370 struct btrfs_stripe_hash_table { 1371 struct list_head stripe_cache; 1372 spinlock_t cache_lock; 1373 int cache_size; 1374 struct btrfs_stripe_hash table[]; 1375 }; 1376 1377 #define BTRFS_STRIPE_HASH_TABLE_BITS 11 1378 1379 void btrfs_init_async_reclaim_work(struct work_struct *work); 1380 1381 /* fs_info */ 1382 struct reloc_control; 1383 struct btrfs_device; 1384 struct btrfs_fs_devices; 1385 struct btrfs_balance_control; 1386 struct btrfs_delayed_root; 1387 struct btrfs_fs_info { 1388 u8 fsid[BTRFS_FSID_SIZE]; 1389 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 1390 struct btrfs_root *extent_root; 1391 struct btrfs_root *tree_root; 1392 struct btrfs_root *chunk_root; 1393 struct btrfs_root *dev_root; 1394 struct btrfs_root *fs_root; 1395 struct btrfs_root *csum_root; 1396 struct btrfs_root *quota_root; 1397 struct btrfs_root *uuid_root; 1398 1399 /* the log root tree is a directory of all the other log roots */ 1400 struct btrfs_root *log_root_tree; 1401 1402 spinlock_t fs_roots_radix_lock; 1403 struct radix_tree_root fs_roots_radix; 1404 1405 /* block group cache stuff */ 1406 spinlock_t block_group_cache_lock; 1407 u64 first_logical_byte; 1408 struct rb_root block_group_cache_tree; 1409 1410 /* keep track of unallocated space */ 1411 spinlock_t free_chunk_lock; 1412 u64 free_chunk_space; 1413 1414 struct extent_io_tree freed_extents[2]; 1415 struct extent_io_tree *pinned_extents; 1416 1417 /* logical->physical extent mapping */ 1418 struct btrfs_mapping_tree mapping_tree; 1419 1420 /* 1421 * block reservation for extent, checksum, root tree and 1422 * delayed dir index item 1423 */ 1424 struct btrfs_block_rsv global_block_rsv; 1425 /* block reservation for delay allocation */ 1426 struct btrfs_block_rsv delalloc_block_rsv; 1427 /* block reservation for metadata operations */ 1428 struct btrfs_block_rsv trans_block_rsv; 1429 /* block reservation for chunk tree */ 1430 struct btrfs_block_rsv chunk_block_rsv; 1431 /* block reservation for delayed operations */ 1432 struct btrfs_block_rsv delayed_block_rsv; 1433 1434 struct btrfs_block_rsv empty_block_rsv; 1435 1436 u64 generation; 1437 u64 last_trans_committed; 1438 u64 avg_delayed_ref_runtime; 1439 1440 /* 1441 * this is updated to the current trans every time a full commit 1442 * is required instead of the faster short fsync log commits 1443 */ 1444 u64 last_trans_log_full_commit; 1445 unsigned long mount_opt; 1446 /* 1447 * Track requests for actions that need to be done during transaction 1448 * commit (like for some mount options). 1449 */ 1450 unsigned long pending_changes; 1451 unsigned long compress_type:4; 1452 int commit_interval; 1453 /* 1454 * It is a suggestive number, the read side is safe even it gets a 1455 * wrong number because we will write out the data into a regular 1456 * extent. The write side(mount/remount) is under ->s_umount lock, 1457 * so it is also safe. 1458 */ 1459 u64 max_inline; 1460 /* 1461 * Protected by ->chunk_mutex and sb->s_umount. 1462 * 1463 * The reason that we use two lock to protect it is because only 1464 * remount and mount operations can change it and these two operations 1465 * are under sb->s_umount, but the read side (chunk allocation) can not 1466 * acquire sb->s_umount or the deadlock would happen. So we use two 1467 * locks to protect it. On the write side, we must acquire two locks, 1468 * and on the read side, we just need acquire one of them. 1469 */ 1470 u64 alloc_start; 1471 struct btrfs_transaction *running_transaction; 1472 wait_queue_head_t transaction_throttle; 1473 wait_queue_head_t transaction_wait; 1474 wait_queue_head_t transaction_blocked_wait; 1475 wait_queue_head_t async_submit_wait; 1476 1477 /* 1478 * Used to protect the incompat_flags, compat_flags, compat_ro_flags 1479 * when they are updated. 1480 * 1481 * Because we do not clear the flags for ever, so we needn't use 1482 * the lock on the read side. 1483 * 1484 * We also needn't use the lock when we mount the fs, because 1485 * there is no other task which will update the flag. 1486 */ 1487 spinlock_t super_lock; 1488 struct btrfs_super_block *super_copy; 1489 struct btrfs_super_block *super_for_commit; 1490 struct block_device *__bdev; 1491 struct super_block *sb; 1492 struct inode *btree_inode; 1493 struct backing_dev_info bdi; 1494 struct mutex tree_log_mutex; 1495 struct mutex transaction_kthread_mutex; 1496 struct mutex cleaner_mutex; 1497 struct mutex chunk_mutex; 1498 struct mutex volume_mutex; 1499 1500 /* 1501 * this is taken to make sure we don't set block groups ro after 1502 * the free space cache has been allocated on them 1503 */ 1504 struct mutex ro_block_group_mutex; 1505 1506 /* this is used during read/modify/write to make sure 1507 * no two ios are trying to mod the same stripe at the same 1508 * time 1509 */ 1510 struct btrfs_stripe_hash_table *stripe_hash_table; 1511 1512 /* 1513 * this protects the ordered operations list only while we are 1514 * processing all of the entries on it. This way we make 1515 * sure the commit code doesn't find the list temporarily empty 1516 * because another function happens to be doing non-waiting preflush 1517 * before jumping into the main commit. 1518 */ 1519 struct mutex ordered_operations_mutex; 1520 1521 /* 1522 * Same as ordered_operations_mutex except this is for ordered extents 1523 * and not the operations. 1524 */ 1525 struct mutex ordered_extent_flush_mutex; 1526 1527 struct rw_semaphore commit_root_sem; 1528 1529 struct rw_semaphore cleanup_work_sem; 1530 1531 struct rw_semaphore subvol_sem; 1532 struct srcu_struct subvol_srcu; 1533 1534 spinlock_t trans_lock; 1535 /* 1536 * the reloc mutex goes with the trans lock, it is taken 1537 * during commit to protect us from the relocation code 1538 */ 1539 struct mutex reloc_mutex; 1540 1541 struct list_head trans_list; 1542 struct list_head dead_roots; 1543 struct list_head caching_block_groups; 1544 1545 spinlock_t delayed_iput_lock; 1546 struct list_head delayed_iputs; 1547 struct rw_semaphore delayed_iput_sem; 1548 1549 /* this protects tree_mod_seq_list */ 1550 spinlock_t tree_mod_seq_lock; 1551 atomic64_t tree_mod_seq; 1552 struct list_head tree_mod_seq_list; 1553 1554 /* this protects tree_mod_log */ 1555 rwlock_t tree_mod_log_lock; 1556 struct rb_root tree_mod_log; 1557 1558 atomic_t nr_async_submits; 1559 atomic_t async_submit_draining; 1560 atomic_t nr_async_bios; 1561 atomic_t async_delalloc_pages; 1562 atomic_t open_ioctl_trans; 1563 1564 /* 1565 * this is used to protect the following list -- ordered_roots. 1566 */ 1567 spinlock_t ordered_root_lock; 1568 1569 /* 1570 * all fs/file tree roots in which there are data=ordered extents 1571 * pending writeback are added into this list. 1572 * 1573 * these can span multiple transactions and basically include 1574 * every dirty data page that isn't from nodatacow 1575 */ 1576 struct list_head ordered_roots; 1577 1578 struct mutex delalloc_root_mutex; 1579 spinlock_t delalloc_root_lock; 1580 /* all fs/file tree roots that have delalloc inodes. */ 1581 struct list_head delalloc_roots; 1582 1583 /* 1584 * there is a pool of worker threads for checksumming during writes 1585 * and a pool for checksumming after reads. This is because readers 1586 * can run with FS locks held, and the writers may be waiting for 1587 * those locks. We don't want ordering in the pending list to cause 1588 * deadlocks, and so the two are serviced separately. 1589 * 1590 * A third pool does submit_bio to avoid deadlocking with the other 1591 * two 1592 */ 1593 struct btrfs_workqueue *workers; 1594 struct btrfs_workqueue *delalloc_workers; 1595 struct btrfs_workqueue *flush_workers; 1596 struct btrfs_workqueue *endio_workers; 1597 struct btrfs_workqueue *endio_meta_workers; 1598 struct btrfs_workqueue *endio_raid56_workers; 1599 struct btrfs_workqueue *endio_repair_workers; 1600 struct btrfs_workqueue *rmw_workers; 1601 struct btrfs_workqueue *endio_meta_write_workers; 1602 struct btrfs_workqueue *endio_write_workers; 1603 struct btrfs_workqueue *endio_freespace_worker; 1604 struct btrfs_workqueue *submit_workers; 1605 struct btrfs_workqueue *caching_workers; 1606 struct btrfs_workqueue *readahead_workers; 1607 1608 /* 1609 * fixup workers take dirty pages that didn't properly go through 1610 * the cow mechanism and make them safe to write. It happens 1611 * for the sys_munmap function call path 1612 */ 1613 struct btrfs_workqueue *fixup_workers; 1614 struct btrfs_workqueue *delayed_workers; 1615 1616 /* the extent workers do delayed refs on the extent allocation tree */ 1617 struct btrfs_workqueue *extent_workers; 1618 struct task_struct *transaction_kthread; 1619 struct task_struct *cleaner_kthread; 1620 int thread_pool_size; 1621 1622 struct kobject super_kobj; 1623 struct kobject *space_info_kobj; 1624 struct kobject *device_dir_kobj; 1625 struct completion kobj_unregister; 1626 int do_barriers; 1627 int closing; 1628 int log_root_recovering; 1629 int open; 1630 1631 u64 total_pinned; 1632 1633 /* used to keep from writing metadata until there is a nice batch */ 1634 struct percpu_counter dirty_metadata_bytes; 1635 struct percpu_counter delalloc_bytes; 1636 s32 dirty_metadata_batch; 1637 s32 delalloc_batch; 1638 1639 struct list_head dirty_cowonly_roots; 1640 1641 struct btrfs_fs_devices *fs_devices; 1642 1643 /* 1644 * the space_info list is almost entirely read only. It only changes 1645 * when we add a new raid type to the FS, and that happens 1646 * very rarely. RCU is used to protect it. 1647 */ 1648 struct list_head space_info; 1649 1650 struct btrfs_space_info *data_sinfo; 1651 1652 struct reloc_control *reloc_ctl; 1653 1654 /* data_alloc_cluster is only used in ssd mode */ 1655 struct btrfs_free_cluster data_alloc_cluster; 1656 1657 /* all metadata allocations go through this cluster */ 1658 struct btrfs_free_cluster meta_alloc_cluster; 1659 1660 /* auto defrag inodes go here */ 1661 spinlock_t defrag_inodes_lock; 1662 struct rb_root defrag_inodes; 1663 atomic_t defrag_running; 1664 1665 /* Used to protect avail_{data, metadata, system}_alloc_bits */ 1666 seqlock_t profiles_lock; 1667 /* 1668 * these three are in extended format (availability of single 1669 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other 1670 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits) 1671 */ 1672 u64 avail_data_alloc_bits; 1673 u64 avail_metadata_alloc_bits; 1674 u64 avail_system_alloc_bits; 1675 1676 /* restriper state */ 1677 spinlock_t balance_lock; 1678 struct mutex balance_mutex; 1679 atomic_t balance_running; 1680 atomic_t balance_pause_req; 1681 atomic_t balance_cancel_req; 1682 struct btrfs_balance_control *balance_ctl; 1683 wait_queue_head_t balance_wait_q; 1684 1685 unsigned data_chunk_allocations; 1686 unsigned metadata_ratio; 1687 1688 void *bdev_holder; 1689 1690 /* private scrub information */ 1691 struct mutex scrub_lock; 1692 atomic_t scrubs_running; 1693 atomic_t scrub_pause_req; 1694 atomic_t scrubs_paused; 1695 atomic_t scrub_cancel_req; 1696 wait_queue_head_t scrub_pause_wait; 1697 int scrub_workers_refcnt; 1698 struct btrfs_workqueue *scrub_workers; 1699 struct btrfs_workqueue *scrub_wr_completion_workers; 1700 struct btrfs_workqueue *scrub_nocow_workers; 1701 1702 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 1703 u32 check_integrity_print_mask; 1704 #endif 1705 /* 1706 * quota information 1707 */ 1708 unsigned int quota_enabled:1; 1709 1710 /* 1711 * quota_enabled only changes state after a commit. This holds the 1712 * next state. 1713 */ 1714 unsigned int pending_quota_state:1; 1715 1716 /* is qgroup tracking in a consistent state? */ 1717 u64 qgroup_flags; 1718 1719 /* holds configuration and tracking. Protected by qgroup_lock */ 1720 struct rb_root qgroup_tree; 1721 struct rb_root qgroup_op_tree; 1722 spinlock_t qgroup_lock; 1723 spinlock_t qgroup_op_lock; 1724 atomic_t qgroup_op_seq; 1725 1726 /* 1727 * used to avoid frequently calling ulist_alloc()/ulist_free() 1728 * when doing qgroup accounting, it must be protected by qgroup_lock. 1729 */ 1730 struct ulist *qgroup_ulist; 1731 1732 /* protect user change for quota operations */ 1733 struct mutex qgroup_ioctl_lock; 1734 1735 /* list of dirty qgroups to be written at next commit */ 1736 struct list_head dirty_qgroups; 1737 1738 /* used by btrfs_qgroup_record_ref for an efficient tree traversal */ 1739 u64 qgroup_seq; 1740 1741 /* qgroup rescan items */ 1742 struct mutex qgroup_rescan_lock; /* protects the progress item */ 1743 struct btrfs_key qgroup_rescan_progress; 1744 struct btrfs_workqueue *qgroup_rescan_workers; 1745 struct completion qgroup_rescan_completion; 1746 struct btrfs_work qgroup_rescan_work; 1747 1748 /* filesystem state */ 1749 unsigned long fs_state; 1750 1751 struct btrfs_delayed_root *delayed_root; 1752 1753 /* readahead tree */ 1754 spinlock_t reada_lock; 1755 struct radix_tree_root reada_tree; 1756 1757 /* Extent buffer radix tree */ 1758 spinlock_t buffer_lock; 1759 struct radix_tree_root buffer_radix; 1760 1761 /* next backup root to be overwritten */ 1762 int backup_root_index; 1763 1764 int num_tolerated_disk_barrier_failures; 1765 1766 /* device replace state */ 1767 struct btrfs_dev_replace dev_replace; 1768 1769 atomic_t mutually_exclusive_operation_running; 1770 1771 struct percpu_counter bio_counter; 1772 wait_queue_head_t replace_wait; 1773 1774 struct semaphore uuid_tree_rescan_sem; 1775 unsigned int update_uuid_tree_gen:1; 1776 1777 /* Used to reclaim the metadata space in the background. */ 1778 struct work_struct async_reclaim_work; 1779 1780 spinlock_t unused_bgs_lock; 1781 struct list_head unused_bgs; 1782 struct mutex unused_bg_unpin_mutex; 1783 1784 /* For btrfs to record security options */ 1785 struct security_mnt_opts security_opts; 1786 1787 /* 1788 * Chunks that can't be freed yet (under a trim/discard operation) 1789 * and will be latter freed. Protected by fs_info->chunk_mutex. 1790 */ 1791 struct list_head pinned_chunks; 1792 }; 1793 1794 struct btrfs_subvolume_writers { 1795 struct percpu_counter counter; 1796 wait_queue_head_t wait; 1797 }; 1798 1799 /* 1800 * The state of btrfs root 1801 */ 1802 /* 1803 * btrfs_record_root_in_trans is a multi-step process, 1804 * and it can race with the balancing code. But the 1805 * race is very small, and only the first time the root 1806 * is added to each transaction. So IN_TRANS_SETUP 1807 * is used to tell us when more checks are required 1808 */ 1809 #define BTRFS_ROOT_IN_TRANS_SETUP 0 1810 #define BTRFS_ROOT_REF_COWS 1 1811 #define BTRFS_ROOT_TRACK_DIRTY 2 1812 #define BTRFS_ROOT_IN_RADIX 3 1813 #define BTRFS_ROOT_DUMMY_ROOT 4 1814 #define BTRFS_ROOT_ORPHAN_ITEM_INSERTED 5 1815 #define BTRFS_ROOT_DEFRAG_RUNNING 6 1816 #define BTRFS_ROOT_FORCE_COW 7 1817 #define BTRFS_ROOT_MULTI_LOG_TASKS 8 1818 #define BTRFS_ROOT_DIRTY 9 1819 1820 /* 1821 * in ram representation of the tree. extent_root is used for all allocations 1822 * and for the extent tree extent_root root. 1823 */ 1824 struct btrfs_root { 1825 struct extent_buffer *node; 1826 1827 struct extent_buffer *commit_root; 1828 struct btrfs_root *log_root; 1829 struct btrfs_root *reloc_root; 1830 1831 unsigned long state; 1832 struct btrfs_root_item root_item; 1833 struct btrfs_key root_key; 1834 struct btrfs_fs_info *fs_info; 1835 struct extent_io_tree dirty_log_pages; 1836 1837 struct mutex objectid_mutex; 1838 1839 spinlock_t accounting_lock; 1840 struct btrfs_block_rsv *block_rsv; 1841 1842 /* free ino cache stuff */ 1843 struct btrfs_free_space_ctl *free_ino_ctl; 1844 enum btrfs_caching_type ino_cache_state; 1845 spinlock_t ino_cache_lock; 1846 wait_queue_head_t ino_cache_wait; 1847 struct btrfs_free_space_ctl *free_ino_pinned; 1848 u64 ino_cache_progress; 1849 struct inode *ino_cache_inode; 1850 1851 struct mutex log_mutex; 1852 wait_queue_head_t log_writer_wait; 1853 wait_queue_head_t log_commit_wait[2]; 1854 struct list_head log_ctxs[2]; 1855 atomic_t log_writers; 1856 atomic_t log_commit[2]; 1857 atomic_t log_batch; 1858 int log_transid; 1859 /* No matter the commit succeeds or not*/ 1860 int log_transid_committed; 1861 /* Just be updated when the commit succeeds. */ 1862 int last_log_commit; 1863 pid_t log_start_pid; 1864 1865 u64 objectid; 1866 u64 last_trans; 1867 1868 /* data allocations are done in sectorsize units */ 1869 u32 sectorsize; 1870 1871 /* node allocations are done in nodesize units */ 1872 u32 nodesize; 1873 1874 u32 stripesize; 1875 1876 u32 type; 1877 1878 u64 highest_objectid; 1879 1880 /* only used with CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */ 1881 u64 alloc_bytenr; 1882 1883 u64 defrag_trans_start; 1884 struct btrfs_key defrag_progress; 1885 struct btrfs_key defrag_max; 1886 char *name; 1887 1888 /* the dirty list is only used by non-reference counted roots */ 1889 struct list_head dirty_list; 1890 1891 struct list_head root_list; 1892 1893 spinlock_t log_extents_lock[2]; 1894 struct list_head logged_list[2]; 1895 1896 spinlock_t orphan_lock; 1897 atomic_t orphan_inodes; 1898 struct btrfs_block_rsv *orphan_block_rsv; 1899 int orphan_cleanup_state; 1900 1901 spinlock_t inode_lock; 1902 /* red-black tree that keeps track of in-memory inodes */ 1903 struct rb_root inode_tree; 1904 1905 /* 1906 * radix tree that keeps track of delayed nodes of every inode, 1907 * protected by inode_lock 1908 */ 1909 struct radix_tree_root delayed_nodes_tree; 1910 /* 1911 * right now this just gets used so that a root has its own devid 1912 * for stat. It may be used for more later 1913 */ 1914 dev_t anon_dev; 1915 1916 spinlock_t root_item_lock; 1917 atomic_t refs; 1918 1919 struct mutex delalloc_mutex; 1920 spinlock_t delalloc_lock; 1921 /* 1922 * all of the inodes that have delalloc bytes. It is possible for 1923 * this list to be empty even when there is still dirty data=ordered 1924 * extents waiting to finish IO. 1925 */ 1926 struct list_head delalloc_inodes; 1927 struct list_head delalloc_root; 1928 u64 nr_delalloc_inodes; 1929 1930 struct mutex ordered_extent_mutex; 1931 /* 1932 * this is used by the balancing code to wait for all the pending 1933 * ordered extents 1934 */ 1935 spinlock_t ordered_extent_lock; 1936 1937 /* 1938 * all of the data=ordered extents pending writeback 1939 * these can span multiple transactions and basically include 1940 * every dirty data page that isn't from nodatacow 1941 */ 1942 struct list_head ordered_extents; 1943 struct list_head ordered_root; 1944 u64 nr_ordered_extents; 1945 1946 /* 1947 * Number of currently running SEND ioctls to prevent 1948 * manipulation with the read-only status via SUBVOL_SETFLAGS 1949 */ 1950 int send_in_progress; 1951 struct btrfs_subvolume_writers *subv_writers; 1952 atomic_t will_be_snapshoted; 1953 }; 1954 1955 struct btrfs_ioctl_defrag_range_args { 1956 /* start of the defrag operation */ 1957 __u64 start; 1958 1959 /* number of bytes to defrag, use (u64)-1 to say all */ 1960 __u64 len; 1961 1962 /* 1963 * flags for the operation, which can include turning 1964 * on compression for this one defrag 1965 */ 1966 __u64 flags; 1967 1968 /* 1969 * any extent bigger than this will be considered 1970 * already defragged. Use 0 to take the kernel default 1971 * Use 1 to say every single extent must be rewritten 1972 */ 1973 __u32 extent_thresh; 1974 1975 /* 1976 * which compression method to use if turning on compression 1977 * for this defrag operation. If unspecified, zlib will 1978 * be used 1979 */ 1980 __u32 compress_type; 1981 1982 /* spare for later */ 1983 __u32 unused[4]; 1984 }; 1985 1986 1987 /* 1988 * inode items have the data typically returned from stat and store other 1989 * info about object characteristics. There is one for every file and dir in 1990 * the FS 1991 */ 1992 #define BTRFS_INODE_ITEM_KEY 1 1993 #define BTRFS_INODE_REF_KEY 12 1994 #define BTRFS_INODE_EXTREF_KEY 13 1995 #define BTRFS_XATTR_ITEM_KEY 24 1996 #define BTRFS_ORPHAN_ITEM_KEY 48 1997 /* reserve 2-15 close to the inode for later flexibility */ 1998 1999 /* 2000 * dir items are the name -> inode pointers in a directory. There is one 2001 * for every name in a directory. 2002 */ 2003 #define BTRFS_DIR_LOG_ITEM_KEY 60 2004 #define BTRFS_DIR_LOG_INDEX_KEY 72 2005 #define BTRFS_DIR_ITEM_KEY 84 2006 #define BTRFS_DIR_INDEX_KEY 96 2007 /* 2008 * extent data is for file data 2009 */ 2010 #define BTRFS_EXTENT_DATA_KEY 108 2011 2012 /* 2013 * extent csums are stored in a separate tree and hold csums for 2014 * an entire extent on disk. 2015 */ 2016 #define BTRFS_EXTENT_CSUM_KEY 128 2017 2018 /* 2019 * root items point to tree roots. They are typically in the root 2020 * tree used by the super block to find all the other trees 2021 */ 2022 #define BTRFS_ROOT_ITEM_KEY 132 2023 2024 /* 2025 * root backrefs tie subvols and snapshots to the directory entries that 2026 * reference them 2027 */ 2028 #define BTRFS_ROOT_BACKREF_KEY 144 2029 2030 /* 2031 * root refs make a fast index for listing all of the snapshots and 2032 * subvolumes referenced by a given root. They point directly to the 2033 * directory item in the root that references the subvol 2034 */ 2035 #define BTRFS_ROOT_REF_KEY 156 2036 2037 /* 2038 * extent items are in the extent map tree. These record which blocks 2039 * are used, and how many references there are to each block 2040 */ 2041 #define BTRFS_EXTENT_ITEM_KEY 168 2042 2043 /* 2044 * The same as the BTRFS_EXTENT_ITEM_KEY, except it's metadata we already know 2045 * the length, so we save the level in key->offset instead of the length. 2046 */ 2047 #define BTRFS_METADATA_ITEM_KEY 169 2048 2049 #define BTRFS_TREE_BLOCK_REF_KEY 176 2050 2051 #define BTRFS_EXTENT_DATA_REF_KEY 178 2052 2053 #define BTRFS_EXTENT_REF_V0_KEY 180 2054 2055 #define BTRFS_SHARED_BLOCK_REF_KEY 182 2056 2057 #define BTRFS_SHARED_DATA_REF_KEY 184 2058 2059 /* 2060 * block groups give us hints into the extent allocation trees. Which 2061 * blocks are free etc etc 2062 */ 2063 #define BTRFS_BLOCK_GROUP_ITEM_KEY 192 2064 2065 #define BTRFS_DEV_EXTENT_KEY 204 2066 #define BTRFS_DEV_ITEM_KEY 216 2067 #define BTRFS_CHUNK_ITEM_KEY 228 2068 2069 /* 2070 * Records the overall state of the qgroups. 2071 * There's only one instance of this key present, 2072 * (0, BTRFS_QGROUP_STATUS_KEY, 0) 2073 */ 2074 #define BTRFS_QGROUP_STATUS_KEY 240 2075 /* 2076 * Records the currently used space of the qgroup. 2077 * One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid). 2078 */ 2079 #define BTRFS_QGROUP_INFO_KEY 242 2080 /* 2081 * Contains the user configured limits for the qgroup. 2082 * One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid). 2083 */ 2084 #define BTRFS_QGROUP_LIMIT_KEY 244 2085 /* 2086 * Records the child-parent relationship of qgroups. For 2087 * each relation, 2 keys are present: 2088 * (childid, BTRFS_QGROUP_RELATION_KEY, parentid) 2089 * (parentid, BTRFS_QGROUP_RELATION_KEY, childid) 2090 */ 2091 #define BTRFS_QGROUP_RELATION_KEY 246 2092 2093 #define BTRFS_BALANCE_ITEM_KEY 248 2094 2095 /* 2096 * Persistantly stores the io stats in the device tree. 2097 * One key for all stats, (0, BTRFS_DEV_STATS_KEY, devid). 2098 */ 2099 #define BTRFS_DEV_STATS_KEY 249 2100 2101 /* 2102 * Persistantly stores the device replace state in the device tree. 2103 * The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0). 2104 */ 2105 #define BTRFS_DEV_REPLACE_KEY 250 2106 2107 /* 2108 * Stores items that allow to quickly map UUIDs to something else. 2109 * These items are part of the filesystem UUID tree. 2110 * The key is built like this: 2111 * (UUID_upper_64_bits, BTRFS_UUID_KEY*, UUID_lower_64_bits). 2112 */ 2113 #if BTRFS_UUID_SIZE != 16 2114 #error "UUID items require BTRFS_UUID_SIZE == 16!" 2115 #endif 2116 #define BTRFS_UUID_KEY_SUBVOL 251 /* for UUIDs assigned to subvols */ 2117 #define BTRFS_UUID_KEY_RECEIVED_SUBVOL 252 /* for UUIDs assigned to 2118 * received subvols */ 2119 2120 /* 2121 * string items are for debugging. They just store a short string of 2122 * data in the FS 2123 */ 2124 #define BTRFS_STRING_ITEM_KEY 253 2125 2126 /* 2127 * Flags for mount options. 2128 * 2129 * Note: don't forget to add new options to btrfs_show_options() 2130 */ 2131 #define BTRFS_MOUNT_NODATASUM (1 << 0) 2132 #define BTRFS_MOUNT_NODATACOW (1 << 1) 2133 #define BTRFS_MOUNT_NOBARRIER (1 << 2) 2134 #define BTRFS_MOUNT_SSD (1 << 3) 2135 #define BTRFS_MOUNT_DEGRADED (1 << 4) 2136 #define BTRFS_MOUNT_COMPRESS (1 << 5) 2137 #define BTRFS_MOUNT_NOTREELOG (1 << 6) 2138 #define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7) 2139 #define BTRFS_MOUNT_SSD_SPREAD (1 << 8) 2140 #define BTRFS_MOUNT_NOSSD (1 << 9) 2141 #define BTRFS_MOUNT_DISCARD (1 << 10) 2142 #define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) 2143 #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) 2144 #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) 2145 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 2146 #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) 2147 #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) 2148 #define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) 2149 #define BTRFS_MOUNT_RECOVERY (1 << 18) 2150 #define BTRFS_MOUNT_SKIP_BALANCE (1 << 19) 2151 #define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20) 2152 #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21) 2153 #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22) 2154 #define BTRFS_MOUNT_RESCAN_UUID_TREE (1 << 23) 2155 2156 #define BTRFS_DEFAULT_COMMIT_INTERVAL (30) 2157 #define BTRFS_DEFAULT_MAX_INLINE (8192) 2158 2159 #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 2160 #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 2161 #define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt) 2162 #define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \ 2163 BTRFS_MOUNT_##opt) 2164 2165 #define btrfs_set_and_info(root, opt, fmt, args...) \ 2166 { \ 2167 if (!btrfs_test_opt(root, opt)) \ 2168 btrfs_info(root->fs_info, fmt, ##args); \ 2169 btrfs_set_opt(root->fs_info->mount_opt, opt); \ 2170 } 2171 2172 #define btrfs_clear_and_info(root, opt, fmt, args...) \ 2173 { \ 2174 if (btrfs_test_opt(root, opt)) \ 2175 btrfs_info(root->fs_info, fmt, ##args); \ 2176 btrfs_clear_opt(root->fs_info->mount_opt, opt); \ 2177 } 2178 2179 /* 2180 * Requests for changes that need to be done during transaction commit. 2181 * 2182 * Internal mount options that are used for special handling of the real 2183 * mount options (eg. cannot be set during remount and have to be set during 2184 * transaction commit) 2185 */ 2186 2187 #define BTRFS_PENDING_SET_INODE_MAP_CACHE (0) 2188 #define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE (1) 2189 #define BTRFS_PENDING_COMMIT (2) 2190 2191 #define btrfs_test_pending(info, opt) \ 2192 test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 2193 #define btrfs_set_pending(info, opt) \ 2194 set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 2195 #define btrfs_clear_pending(info, opt) \ 2196 clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 2197 2198 /* 2199 * Helpers for setting pending mount option changes. 2200 * 2201 * Expects corresponding macros 2202 * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name 2203 */ 2204 #define btrfs_set_pending_and_info(info, opt, fmt, args...) \ 2205 do { \ 2206 if (!btrfs_raw_test_opt((info)->mount_opt, opt)) { \ 2207 btrfs_info((info), fmt, ##args); \ 2208 btrfs_set_pending((info), SET_##opt); \ 2209 btrfs_clear_pending((info), CLEAR_##opt); \ 2210 } \ 2211 } while(0) 2212 2213 #define btrfs_clear_pending_and_info(info, opt, fmt, args...) \ 2214 do { \ 2215 if (btrfs_raw_test_opt((info)->mount_opt, opt)) { \ 2216 btrfs_info((info), fmt, ##args); \ 2217 btrfs_set_pending((info), CLEAR_##opt); \ 2218 btrfs_clear_pending((info), SET_##opt); \ 2219 } \ 2220 } while(0) 2221 2222 /* 2223 * Inode flags 2224 */ 2225 #define BTRFS_INODE_NODATASUM (1 << 0) 2226 #define BTRFS_INODE_NODATACOW (1 << 1) 2227 #define BTRFS_INODE_READONLY (1 << 2) 2228 #define BTRFS_INODE_NOCOMPRESS (1 << 3) 2229 #define BTRFS_INODE_PREALLOC (1 << 4) 2230 #define BTRFS_INODE_SYNC (1 << 5) 2231 #define BTRFS_INODE_IMMUTABLE (1 << 6) 2232 #define BTRFS_INODE_APPEND (1 << 7) 2233 #define BTRFS_INODE_NODUMP (1 << 8) 2234 #define BTRFS_INODE_NOATIME (1 << 9) 2235 #define BTRFS_INODE_DIRSYNC (1 << 10) 2236 #define BTRFS_INODE_COMPRESS (1 << 11) 2237 2238 #define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) 2239 2240 struct btrfs_map_token { 2241 struct extent_buffer *eb; 2242 char *kaddr; 2243 unsigned long offset; 2244 }; 2245 2246 static inline void btrfs_init_map_token (struct btrfs_map_token *token) 2247 { 2248 token->kaddr = NULL; 2249 } 2250 2251 /* some macros to generate set/get funcs for the struct fields. This 2252 * assumes there is a lefoo_to_cpu for every type, so lets make a simple 2253 * one for u8: 2254 */ 2255 #define le8_to_cpu(v) (v) 2256 #define cpu_to_le8(v) (v) 2257 #define __le8 u8 2258 2259 #define read_eb_member(eb, ptr, type, member, result) ( \ 2260 read_extent_buffer(eb, (char *)(result), \ 2261 ((unsigned long)(ptr)) + \ 2262 offsetof(type, member), \ 2263 sizeof(((type *)0)->member))) 2264 2265 #define write_eb_member(eb, ptr, type, member, result) ( \ 2266 write_extent_buffer(eb, (char *)(result), \ 2267 ((unsigned long)(ptr)) + \ 2268 offsetof(type, member), \ 2269 sizeof(((type *)0)->member))) 2270 2271 #define DECLARE_BTRFS_SETGET_BITS(bits) \ 2272 u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ 2273 unsigned long off, \ 2274 struct btrfs_map_token *token); \ 2275 void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \ 2276 unsigned long off, u##bits val, \ 2277 struct btrfs_map_token *token); \ 2278 static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \ 2279 unsigned long off) \ 2280 { \ 2281 return btrfs_get_token_##bits(eb, ptr, off, NULL); \ 2282 } \ 2283 static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ 2284 unsigned long off, u##bits val) \ 2285 { \ 2286 btrfs_set_token_##bits(eb, ptr, off, val, NULL); \ 2287 } 2288 2289 DECLARE_BTRFS_SETGET_BITS(8) 2290 DECLARE_BTRFS_SETGET_BITS(16) 2291 DECLARE_BTRFS_SETGET_BITS(32) 2292 DECLARE_BTRFS_SETGET_BITS(64) 2293 2294 #define BTRFS_SETGET_FUNCS(name, type, member, bits) \ 2295 static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \ 2296 { \ 2297 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2298 return btrfs_get_##bits(eb, s, offsetof(type, member)); \ 2299 } \ 2300 static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \ 2301 u##bits val) \ 2302 { \ 2303 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2304 btrfs_set_##bits(eb, s, offsetof(type, member), val); \ 2305 } \ 2306 static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \ 2307 struct btrfs_map_token *token) \ 2308 { \ 2309 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2310 return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \ 2311 } \ 2312 static inline void btrfs_set_token_##name(struct extent_buffer *eb, \ 2313 type *s, u##bits val, \ 2314 struct btrfs_map_token *token) \ 2315 { \ 2316 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2317 btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \ 2318 } 2319 2320 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ 2321 static inline u##bits btrfs_##name(struct extent_buffer *eb) \ 2322 { \ 2323 type *p = page_address(eb->pages[0]); \ 2324 u##bits res = le##bits##_to_cpu(p->member); \ 2325 return res; \ 2326 } \ 2327 static inline void btrfs_set_##name(struct extent_buffer *eb, \ 2328 u##bits val) \ 2329 { \ 2330 type *p = page_address(eb->pages[0]); \ 2331 p->member = cpu_to_le##bits(val); \ 2332 } 2333 2334 #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ 2335 static inline u##bits btrfs_##name(type *s) \ 2336 { \ 2337 return le##bits##_to_cpu(s->member); \ 2338 } \ 2339 static inline void btrfs_set_##name(type *s, u##bits val) \ 2340 { \ 2341 s->member = cpu_to_le##bits(val); \ 2342 } 2343 2344 BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64); 2345 BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64); 2346 BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64); 2347 BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32); 2348 BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32); 2349 BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item, 2350 start_offset, 64); 2351 BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32); 2352 BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64); 2353 BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32); 2354 BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8); 2355 BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8); 2356 BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64); 2357 2358 BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64); 2359 BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item, 2360 total_bytes, 64); 2361 BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item, 2362 bytes_used, 64); 2363 BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item, 2364 io_align, 32); 2365 BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item, 2366 io_width, 32); 2367 BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item, 2368 sector_size, 32); 2369 BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64); 2370 BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item, 2371 dev_group, 32); 2372 BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item, 2373 seek_speed, 8); 2374 BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item, 2375 bandwidth, 8); 2376 BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item, 2377 generation, 64); 2378 2379 static inline unsigned long btrfs_device_uuid(struct btrfs_dev_item *d) 2380 { 2381 return (unsigned long)d + offsetof(struct btrfs_dev_item, uuid); 2382 } 2383 2384 static inline unsigned long btrfs_device_fsid(struct btrfs_dev_item *d) 2385 { 2386 return (unsigned long)d + offsetof(struct btrfs_dev_item, fsid); 2387 } 2388 2389 BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64); 2390 BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64); 2391 BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64); 2392 BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32); 2393 BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32); 2394 BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32); 2395 BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64); 2396 BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16); 2397 BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16); 2398 BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64); 2399 BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64); 2400 2401 static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s) 2402 { 2403 return (char *)s + offsetof(struct btrfs_stripe, dev_uuid); 2404 } 2405 2406 BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64); 2407 BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64); 2408 BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk, 2409 stripe_len, 64); 2410 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk, 2411 io_align, 32); 2412 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk, 2413 io_width, 32); 2414 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk, 2415 sector_size, 32); 2416 BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64); 2417 BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk, 2418 num_stripes, 16); 2419 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk, 2420 sub_stripes, 16); 2421 BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64); 2422 BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64); 2423 2424 static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, 2425 int nr) 2426 { 2427 unsigned long offset = (unsigned long)c; 2428 offset += offsetof(struct btrfs_chunk, stripe); 2429 offset += nr * sizeof(struct btrfs_stripe); 2430 return (struct btrfs_stripe *)offset; 2431 } 2432 2433 static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr) 2434 { 2435 return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); 2436 } 2437 2438 static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, 2439 struct btrfs_chunk *c, int nr) 2440 { 2441 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); 2442 } 2443 2444 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, 2445 struct btrfs_chunk *c, int nr) 2446 { 2447 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); 2448 } 2449 2450 /* struct btrfs_block_group_item */ 2451 BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, 2452 used, 64); 2453 BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, 2454 used, 64); 2455 BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid, 2456 struct btrfs_block_group_item, chunk_objectid, 64); 2457 2458 BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, 2459 struct btrfs_block_group_item, chunk_objectid, 64); 2460 BTRFS_SETGET_FUNCS(disk_block_group_flags, 2461 struct btrfs_block_group_item, flags, 64); 2462 BTRFS_SETGET_STACK_FUNCS(block_group_flags, 2463 struct btrfs_block_group_item, flags, 64); 2464 2465 /* struct btrfs_inode_ref */ 2466 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); 2467 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64); 2468 2469 /* struct btrfs_inode_extref */ 2470 BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref, 2471 parent_objectid, 64); 2472 BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref, 2473 name_len, 16); 2474 BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64); 2475 2476 /* struct btrfs_inode_item */ 2477 BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64); 2478 BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64); 2479 BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64); 2480 BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64); 2481 BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64); 2482 BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64); 2483 BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); 2484 BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); 2485 BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); 2486 BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); 2487 BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); 2488 BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64); 2489 BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, 2490 generation, 64); 2491 BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, 2492 sequence, 64); 2493 BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, 2494 transid, 64); 2495 BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); 2496 BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, 2497 nbytes, 64); 2498 BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, 2499 block_group, 64); 2500 BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); 2501 BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); 2502 BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); 2503 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); 2504 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); 2505 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); 2506 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); 2507 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); 2508 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); 2509 BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); 2510 2511 /* struct btrfs_dev_extent */ 2512 BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, 2513 chunk_tree, 64); 2514 BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent, 2515 chunk_objectid, 64); 2516 BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, 2517 chunk_offset, 64); 2518 BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); 2519 2520 static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) 2521 { 2522 unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); 2523 return (unsigned long)dev + ptr; 2524 } 2525 2526 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); 2527 BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, 2528 generation, 64); 2529 BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); 2530 2531 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); 2532 2533 2534 BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); 2535 2536 static inline void btrfs_tree_block_key(struct extent_buffer *eb, 2537 struct btrfs_tree_block_info *item, 2538 struct btrfs_disk_key *key) 2539 { 2540 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 2541 } 2542 2543 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, 2544 struct btrfs_tree_block_info *item, 2545 struct btrfs_disk_key *key) 2546 { 2547 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 2548 } 2549 2550 BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref, 2551 root, 64); 2552 BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref, 2553 objectid, 64); 2554 BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref, 2555 offset, 64); 2556 BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, 2557 count, 32); 2558 2559 BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, 2560 count, 32); 2561 2562 BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref, 2563 type, 8); 2564 BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref, 2565 offset, 64); 2566 2567 static inline u32 btrfs_extent_inline_ref_size(int type) 2568 { 2569 if (type == BTRFS_TREE_BLOCK_REF_KEY || 2570 type == BTRFS_SHARED_BLOCK_REF_KEY) 2571 return sizeof(struct btrfs_extent_inline_ref); 2572 if (type == BTRFS_SHARED_DATA_REF_KEY) 2573 return sizeof(struct btrfs_shared_data_ref) + 2574 sizeof(struct btrfs_extent_inline_ref); 2575 if (type == BTRFS_EXTENT_DATA_REF_KEY) 2576 return sizeof(struct btrfs_extent_data_ref) + 2577 offsetof(struct btrfs_extent_inline_ref, offset); 2578 BUG(); 2579 return 0; 2580 } 2581 2582 BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); 2583 BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, 2584 generation, 64); 2585 BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); 2586 BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); 2587 2588 /* struct btrfs_node */ 2589 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); 2590 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64); 2591 BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, 2592 blockptr, 64); 2593 BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, 2594 generation, 64); 2595 2596 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) 2597 { 2598 unsigned long ptr; 2599 ptr = offsetof(struct btrfs_node, ptrs) + 2600 sizeof(struct btrfs_key_ptr) * nr; 2601 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); 2602 } 2603 2604 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, 2605 int nr, u64 val) 2606 { 2607 unsigned long ptr; 2608 ptr = offsetof(struct btrfs_node, ptrs) + 2609 sizeof(struct btrfs_key_ptr) * nr; 2610 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); 2611 } 2612 2613 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) 2614 { 2615 unsigned long ptr; 2616 ptr = offsetof(struct btrfs_node, ptrs) + 2617 sizeof(struct btrfs_key_ptr) * nr; 2618 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); 2619 } 2620 2621 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, 2622 int nr, u64 val) 2623 { 2624 unsigned long ptr; 2625 ptr = offsetof(struct btrfs_node, ptrs) + 2626 sizeof(struct btrfs_key_ptr) * nr; 2627 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); 2628 } 2629 2630 static inline unsigned long btrfs_node_key_ptr_offset(int nr) 2631 { 2632 return offsetof(struct btrfs_node, ptrs) + 2633 sizeof(struct btrfs_key_ptr) * nr; 2634 } 2635 2636 void btrfs_node_key(struct extent_buffer *eb, 2637 struct btrfs_disk_key *disk_key, int nr); 2638 2639 static inline void btrfs_set_node_key(struct extent_buffer *eb, 2640 struct btrfs_disk_key *disk_key, int nr) 2641 { 2642 unsigned long ptr; 2643 ptr = btrfs_node_key_ptr_offset(nr); 2644 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, 2645 struct btrfs_key_ptr, key, disk_key); 2646 } 2647 2648 /* struct btrfs_item */ 2649 BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32); 2650 BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32); 2651 BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32); 2652 BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32); 2653 2654 static inline unsigned long btrfs_item_nr_offset(int nr) 2655 { 2656 return offsetof(struct btrfs_leaf, items) + 2657 sizeof(struct btrfs_item) * nr; 2658 } 2659 2660 static inline struct btrfs_item *btrfs_item_nr(int nr) 2661 { 2662 return (struct btrfs_item *)btrfs_item_nr_offset(nr); 2663 } 2664 2665 static inline u32 btrfs_item_end(struct extent_buffer *eb, 2666 struct btrfs_item *item) 2667 { 2668 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); 2669 } 2670 2671 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) 2672 { 2673 return btrfs_item_end(eb, btrfs_item_nr(nr)); 2674 } 2675 2676 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) 2677 { 2678 return btrfs_item_offset(eb, btrfs_item_nr(nr)); 2679 } 2680 2681 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) 2682 { 2683 return btrfs_item_size(eb, btrfs_item_nr(nr)); 2684 } 2685 2686 static inline void btrfs_item_key(struct extent_buffer *eb, 2687 struct btrfs_disk_key *disk_key, int nr) 2688 { 2689 struct btrfs_item *item = btrfs_item_nr(nr); 2690 read_eb_member(eb, item, struct btrfs_item, key, disk_key); 2691 } 2692 2693 static inline void btrfs_set_item_key(struct extent_buffer *eb, 2694 struct btrfs_disk_key *disk_key, int nr) 2695 { 2696 struct btrfs_item *item = btrfs_item_nr(nr); 2697 write_eb_member(eb, item, struct btrfs_item, key, disk_key); 2698 } 2699 2700 BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64); 2701 2702 /* 2703 * struct btrfs_root_ref 2704 */ 2705 BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64); 2706 BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64); 2707 BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16); 2708 2709 /* struct btrfs_dir_item */ 2710 BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16); 2711 BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8); 2712 BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16); 2713 BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64); 2714 BTRFS_SETGET_STACK_FUNCS(stack_dir_type, struct btrfs_dir_item, type, 8); 2715 BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item, 2716 data_len, 16); 2717 BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, 2718 name_len, 16); 2719 BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, 2720 transid, 64); 2721 2722 static inline void btrfs_dir_item_key(struct extent_buffer *eb, 2723 struct btrfs_dir_item *item, 2724 struct btrfs_disk_key *key) 2725 { 2726 read_eb_member(eb, item, struct btrfs_dir_item, location, key); 2727 } 2728 2729 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, 2730 struct btrfs_dir_item *item, 2731 struct btrfs_disk_key *key) 2732 { 2733 write_eb_member(eb, item, struct btrfs_dir_item, location, key); 2734 } 2735 2736 BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header, 2737 num_entries, 64); 2738 BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, 2739 num_bitmaps, 64); 2740 BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, 2741 generation, 64); 2742 2743 static inline void btrfs_free_space_key(struct extent_buffer *eb, 2744 struct btrfs_free_space_header *h, 2745 struct btrfs_disk_key *key) 2746 { 2747 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); 2748 } 2749 2750 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, 2751 struct btrfs_free_space_header *h, 2752 struct btrfs_disk_key *key) 2753 { 2754 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); 2755 } 2756 2757 /* struct btrfs_disk_key */ 2758 BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, 2759 objectid, 64); 2760 BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); 2761 BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); 2762 2763 static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, 2764 struct btrfs_disk_key *disk) 2765 { 2766 cpu->offset = le64_to_cpu(disk->offset); 2767 cpu->type = disk->type; 2768 cpu->objectid = le64_to_cpu(disk->objectid); 2769 } 2770 2771 static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk, 2772 struct btrfs_key *cpu) 2773 { 2774 disk->offset = cpu_to_le64(cpu->offset); 2775 disk->type = cpu->type; 2776 disk->objectid = cpu_to_le64(cpu->objectid); 2777 } 2778 2779 static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, 2780 struct btrfs_key *key, int nr) 2781 { 2782 struct btrfs_disk_key disk_key; 2783 btrfs_node_key(eb, &disk_key, nr); 2784 btrfs_disk_key_to_cpu(key, &disk_key); 2785 } 2786 2787 static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, 2788 struct btrfs_key *key, int nr) 2789 { 2790 struct btrfs_disk_key disk_key; 2791 btrfs_item_key(eb, &disk_key, nr); 2792 btrfs_disk_key_to_cpu(key, &disk_key); 2793 } 2794 2795 static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, 2796 struct btrfs_dir_item *item, 2797 struct btrfs_key *key) 2798 { 2799 struct btrfs_disk_key disk_key; 2800 btrfs_dir_item_key(eb, item, &disk_key); 2801 btrfs_disk_key_to_cpu(key, &disk_key); 2802 } 2803 2804 2805 static inline u8 btrfs_key_type(struct btrfs_key *key) 2806 { 2807 return key->type; 2808 } 2809 2810 static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val) 2811 { 2812 key->type = val; 2813 } 2814 2815 /* struct btrfs_header */ 2816 BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); 2817 BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, 2818 generation, 64); 2819 BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64); 2820 BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32); 2821 BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64); 2822 BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8); 2823 BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header, 2824 generation, 64); 2825 BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64); 2826 BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, 2827 nritems, 32); 2828 BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64); 2829 2830 static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) 2831 { 2832 return (btrfs_header_flags(eb) & flag) == flag; 2833 } 2834 2835 static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) 2836 { 2837 u64 flags = btrfs_header_flags(eb); 2838 btrfs_set_header_flags(eb, flags | flag); 2839 return (flags & flag) == flag; 2840 } 2841 2842 static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) 2843 { 2844 u64 flags = btrfs_header_flags(eb); 2845 btrfs_set_header_flags(eb, flags & ~flag); 2846 return (flags & flag) == flag; 2847 } 2848 2849 static inline int btrfs_header_backref_rev(struct extent_buffer *eb) 2850 { 2851 u64 flags = btrfs_header_flags(eb); 2852 return flags >> BTRFS_BACKREF_REV_SHIFT; 2853 } 2854 2855 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, 2856 int rev) 2857 { 2858 u64 flags = btrfs_header_flags(eb); 2859 flags &= ~BTRFS_BACKREF_REV_MASK; 2860 flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT; 2861 btrfs_set_header_flags(eb, flags); 2862 } 2863 2864 static inline unsigned long btrfs_header_fsid(void) 2865 { 2866 return offsetof(struct btrfs_header, fsid); 2867 } 2868 2869 static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) 2870 { 2871 return offsetof(struct btrfs_header, chunk_tree_uuid); 2872 } 2873 2874 static inline int btrfs_is_leaf(struct extent_buffer *eb) 2875 { 2876 return btrfs_header_level(eb) == 0; 2877 } 2878 2879 /* struct btrfs_root_item */ 2880 BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item, 2881 generation, 64); 2882 BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32); 2883 BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64); 2884 BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8); 2885 2886 BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item, 2887 generation, 64); 2888 BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64); 2889 BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8); 2890 BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64); 2891 BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); 2892 BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64); 2893 BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64); 2894 BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); 2895 BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, 2896 last_snapshot, 64); 2897 BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item, 2898 generation_v2, 64); 2899 BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item, 2900 ctransid, 64); 2901 BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item, 2902 otransid, 64); 2903 BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, 2904 stransid, 64); 2905 BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, 2906 rtransid, 64); 2907 2908 static inline bool btrfs_root_readonly(struct btrfs_root *root) 2909 { 2910 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; 2911 } 2912 2913 static inline bool btrfs_root_dead(struct btrfs_root *root) 2914 { 2915 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; 2916 } 2917 2918 /* struct btrfs_root_backup */ 2919 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup, 2920 tree_root, 64); 2921 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup, 2922 tree_root_gen, 64); 2923 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup, 2924 tree_root_level, 8); 2925 2926 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup, 2927 chunk_root, 64); 2928 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup, 2929 chunk_root_gen, 64); 2930 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup, 2931 chunk_root_level, 8); 2932 2933 BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup, 2934 extent_root, 64); 2935 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup, 2936 extent_root_gen, 64); 2937 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup, 2938 extent_root_level, 8); 2939 2940 BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup, 2941 fs_root, 64); 2942 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup, 2943 fs_root_gen, 64); 2944 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup, 2945 fs_root_level, 8); 2946 2947 BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup, 2948 dev_root, 64); 2949 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup, 2950 dev_root_gen, 64); 2951 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup, 2952 dev_root_level, 8); 2953 2954 BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup, 2955 csum_root, 64); 2956 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup, 2957 csum_root_gen, 64); 2958 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup, 2959 csum_root_level, 8); 2960 BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup, 2961 total_bytes, 64); 2962 BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup, 2963 bytes_used, 64); 2964 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, 2965 num_devices, 64); 2966 2967 /* struct btrfs_balance_item */ 2968 BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); 2969 2970 static inline void btrfs_balance_data(struct extent_buffer *eb, 2971 struct btrfs_balance_item *bi, 2972 struct btrfs_disk_balance_args *ba) 2973 { 2974 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2975 } 2976 2977 static inline void btrfs_set_balance_data(struct extent_buffer *eb, 2978 struct btrfs_balance_item *bi, 2979 struct btrfs_disk_balance_args *ba) 2980 { 2981 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2982 } 2983 2984 static inline void btrfs_balance_meta(struct extent_buffer *eb, 2985 struct btrfs_balance_item *bi, 2986 struct btrfs_disk_balance_args *ba) 2987 { 2988 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2989 } 2990 2991 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, 2992 struct btrfs_balance_item *bi, 2993 struct btrfs_disk_balance_args *ba) 2994 { 2995 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2996 } 2997 2998 static inline void btrfs_balance_sys(struct extent_buffer *eb, 2999 struct btrfs_balance_item *bi, 3000 struct btrfs_disk_balance_args *ba) 3001 { 3002 read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 3003 } 3004 3005 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, 3006 struct btrfs_balance_item *bi, 3007 struct btrfs_disk_balance_args *ba) 3008 { 3009 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 3010 } 3011 3012 static inline void 3013 btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, 3014 struct btrfs_disk_balance_args *disk) 3015 { 3016 memset(cpu, 0, sizeof(*cpu)); 3017 3018 cpu->profiles = le64_to_cpu(disk->profiles); 3019 cpu->usage = le64_to_cpu(disk->usage); 3020 cpu->devid = le64_to_cpu(disk->devid); 3021 cpu->pstart = le64_to_cpu(disk->pstart); 3022 cpu->pend = le64_to_cpu(disk->pend); 3023 cpu->vstart = le64_to_cpu(disk->vstart); 3024 cpu->vend = le64_to_cpu(disk->vend); 3025 cpu->target = le64_to_cpu(disk->target); 3026 cpu->flags = le64_to_cpu(disk->flags); 3027 cpu->limit = le64_to_cpu(disk->limit); 3028 } 3029 3030 static inline void 3031 btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, 3032 struct btrfs_balance_args *cpu) 3033 { 3034 memset(disk, 0, sizeof(*disk)); 3035 3036 disk->profiles = cpu_to_le64(cpu->profiles); 3037 disk->usage = cpu_to_le64(cpu->usage); 3038 disk->devid = cpu_to_le64(cpu->devid); 3039 disk->pstart = cpu_to_le64(cpu->pstart); 3040 disk->pend = cpu_to_le64(cpu->pend); 3041 disk->vstart = cpu_to_le64(cpu->vstart); 3042 disk->vend = cpu_to_le64(cpu->vend); 3043 disk->target = cpu_to_le64(cpu->target); 3044 disk->flags = cpu_to_le64(cpu->flags); 3045 disk->limit = cpu_to_le64(cpu->limit); 3046 } 3047 3048 /* struct btrfs_super_block */ 3049 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); 3050 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64); 3051 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block, 3052 generation, 64); 3053 BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64); 3054 BTRFS_SETGET_STACK_FUNCS(super_sys_array_size, 3055 struct btrfs_super_block, sys_chunk_array_size, 32); 3056 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation, 3057 struct btrfs_super_block, chunk_root_generation, 64); 3058 BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block, 3059 root_level, 8); 3060 BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block, 3061 chunk_root, 64); 3062 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block, 3063 chunk_root_level, 8); 3064 BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block, 3065 log_root, 64); 3066 BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block, 3067 log_root_transid, 64); 3068 BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block, 3069 log_root_level, 8); 3070 BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block, 3071 total_bytes, 64); 3072 BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block, 3073 bytes_used, 64); 3074 BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block, 3075 sectorsize, 32); 3076 BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block, 3077 nodesize, 32); 3078 BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block, 3079 stripesize, 32); 3080 BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block, 3081 root_dir_objectid, 64); 3082 BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block, 3083 num_devices, 64); 3084 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, 3085 compat_flags, 64); 3086 BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, 3087 compat_ro_flags, 64); 3088 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, 3089 incompat_flags, 64); 3090 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, 3091 csum_type, 16); 3092 BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block, 3093 cache_generation, 64); 3094 BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); 3095 BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, 3096 uuid_tree_generation, 64); 3097 3098 static inline int btrfs_super_csum_size(struct btrfs_super_block *s) 3099 { 3100 u16 t = btrfs_super_csum_type(s); 3101 /* 3102 * csum type is validated at mount time 3103 */ 3104 return btrfs_csum_sizes[t]; 3105 } 3106 3107 static inline unsigned long btrfs_leaf_data(struct extent_buffer *l) 3108 { 3109 return offsetof(struct btrfs_leaf, items); 3110 } 3111 3112 /* struct btrfs_file_extent_item */ 3113 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); 3114 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, 3115 struct btrfs_file_extent_item, disk_bytenr, 64); 3116 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset, 3117 struct btrfs_file_extent_item, offset, 64); 3118 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation, 3119 struct btrfs_file_extent_item, generation, 64); 3120 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes, 3121 struct btrfs_file_extent_item, num_bytes, 64); 3122 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes, 3123 struct btrfs_file_extent_item, disk_num_bytes, 64); 3124 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression, 3125 struct btrfs_file_extent_item, compression, 8); 3126 3127 static inline unsigned long 3128 btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e) 3129 { 3130 return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START; 3131 } 3132 3133 static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) 3134 { 3135 return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize; 3136 } 3137 3138 BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, 3139 disk_bytenr, 64); 3140 BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, 3141 generation, 64); 3142 BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item, 3143 disk_num_bytes, 64); 3144 BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item, 3145 offset, 64); 3146 BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item, 3147 num_bytes, 64); 3148 BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item, 3149 ram_bytes, 64); 3150 BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item, 3151 compression, 8); 3152 BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item, 3153 encryption, 8); 3154 BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item, 3155 other_encoding, 16); 3156 3157 /* 3158 * this returns the number of bytes used by the item on disk, minus the 3159 * size of any extent headers. If a file is compressed on disk, this is 3160 * the compressed size 3161 */ 3162 static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, 3163 struct btrfs_item *e) 3164 { 3165 return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; 3166 } 3167 3168 /* this returns the number of file bytes represented by the inline item. 3169 * If an item is compressed, this is the uncompressed size 3170 */ 3171 static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, 3172 int slot, 3173 struct btrfs_file_extent_item *fi) 3174 { 3175 struct btrfs_map_token token; 3176 3177 btrfs_init_map_token(&token); 3178 /* 3179 * return the space used on disk if this item isn't 3180 * compressed or encoded 3181 */ 3182 if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 && 3183 btrfs_token_file_extent_encryption(eb, fi, &token) == 0 && 3184 btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) { 3185 return btrfs_file_extent_inline_item_len(eb, 3186 btrfs_item_nr(slot)); 3187 } 3188 3189 /* otherwise use the ram bytes field */ 3190 return btrfs_token_file_extent_ram_bytes(eb, fi, &token); 3191 } 3192 3193 3194 /* btrfs_dev_stats_item */ 3195 static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb, 3196 struct btrfs_dev_stats_item *ptr, 3197 int index) 3198 { 3199 u64 val; 3200 3201 read_extent_buffer(eb, &val, 3202 offsetof(struct btrfs_dev_stats_item, values) + 3203 ((unsigned long)ptr) + (index * sizeof(u64)), 3204 sizeof(val)); 3205 return val; 3206 } 3207 3208 static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb, 3209 struct btrfs_dev_stats_item *ptr, 3210 int index, u64 val) 3211 { 3212 write_extent_buffer(eb, &val, 3213 offsetof(struct btrfs_dev_stats_item, values) + 3214 ((unsigned long)ptr) + (index * sizeof(u64)), 3215 sizeof(val)); 3216 } 3217 3218 /* btrfs_qgroup_status_item */ 3219 BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item, 3220 generation, 64); 3221 BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item, 3222 version, 64); 3223 BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item, 3224 flags, 64); 3225 BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item, 3226 rescan, 64); 3227 3228 /* btrfs_qgroup_info_item */ 3229 BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item, 3230 generation, 64); 3231 BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64); 3232 BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item, 3233 rfer_cmpr, 64); 3234 BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64); 3235 BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item, 3236 excl_cmpr, 64); 3237 3238 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation, 3239 struct btrfs_qgroup_info_item, generation, 64); 3240 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item, 3241 rfer, 64); 3242 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr, 3243 struct btrfs_qgroup_info_item, rfer_cmpr, 64); 3244 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item, 3245 excl, 64); 3246 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr, 3247 struct btrfs_qgroup_info_item, excl_cmpr, 64); 3248 3249 /* btrfs_qgroup_limit_item */ 3250 BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item, 3251 flags, 64); 3252 BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item, 3253 max_rfer, 64); 3254 BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item, 3255 max_excl, 64); 3256 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item, 3257 rsv_rfer, 64); 3258 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item, 3259 rsv_excl, 64); 3260 3261 /* btrfs_dev_replace_item */ 3262 BTRFS_SETGET_FUNCS(dev_replace_src_devid, 3263 struct btrfs_dev_replace_item, src_devid, 64); 3264 BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode, 3265 struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode, 3266 64); 3267 BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item, 3268 replace_state, 64); 3269 BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item, 3270 time_started, 64); 3271 BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item, 3272 time_stopped, 64); 3273 BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item, 3274 num_write_errors, 64); 3275 BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors, 3276 struct btrfs_dev_replace_item, num_uncorrectable_read_errors, 3277 64); 3278 BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item, 3279 cursor_left, 64); 3280 BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item, 3281 cursor_right, 64); 3282 3283 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid, 3284 struct btrfs_dev_replace_item, src_devid, 64); 3285 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode, 3286 struct btrfs_dev_replace_item, 3287 cont_reading_from_srcdev_mode, 64); 3288 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state, 3289 struct btrfs_dev_replace_item, replace_state, 64); 3290 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started, 3291 struct btrfs_dev_replace_item, time_started, 64); 3292 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped, 3293 struct btrfs_dev_replace_item, time_stopped, 64); 3294 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors, 3295 struct btrfs_dev_replace_item, num_write_errors, 64); 3296 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors, 3297 struct btrfs_dev_replace_item, 3298 num_uncorrectable_read_errors, 64); 3299 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left, 3300 struct btrfs_dev_replace_item, cursor_left, 64); 3301 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right, 3302 struct btrfs_dev_replace_item, cursor_right, 64); 3303 3304 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) 3305 { 3306 return sb->s_fs_info; 3307 } 3308 3309 /* helper function to cast into the data area of the leaf. */ 3310 #define btrfs_item_ptr(leaf, slot, type) \ 3311 ((type *)(btrfs_leaf_data(leaf) + \ 3312 btrfs_item_offset_nr(leaf, slot))) 3313 3314 #define btrfs_item_ptr_offset(leaf, slot) \ 3315 ((unsigned long)(btrfs_leaf_data(leaf) + \ 3316 btrfs_item_offset_nr(leaf, slot))) 3317 3318 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) 3319 { 3320 return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && 3321 (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); 3322 } 3323 3324 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) 3325 { 3326 return mapping_gfp_mask(mapping) & ~__GFP_FS; 3327 } 3328 3329 /* extent-tree.c */ 3330 3331 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes); 3332 3333 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, 3334 unsigned num_items) 3335 { 3336 return (root->nodesize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * 3337 2 * num_items; 3338 } 3339 3340 /* 3341 * Doing a truncate won't result in new nodes or leaves, just what we need for 3342 * COW. 3343 */ 3344 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root, 3345 unsigned num_items) 3346 { 3347 return root->nodesize * BTRFS_MAX_LEVEL * num_items; 3348 } 3349 3350 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 3351 struct btrfs_root *root); 3352 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, 3353 struct btrfs_root *root); 3354 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3355 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 3356 struct btrfs_root *root, unsigned long count); 3357 int btrfs_async_run_delayed_refs(struct btrfs_root *root, 3358 unsigned long count, int wait); 3359 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); 3360 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 3361 struct btrfs_root *root, u64 bytenr, 3362 u64 offset, int metadata, u64 *refs, u64 *flags); 3363 int btrfs_pin_extent(struct btrfs_root *root, 3364 u64 bytenr, u64 num, int reserved); 3365 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root, 3366 u64 bytenr, u64 num_bytes); 3367 int btrfs_exclude_logged_extents(struct btrfs_root *root, 3368 struct extent_buffer *eb); 3369 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 3370 struct btrfs_root *root, 3371 u64 objectid, u64 offset, u64 bytenr); 3372 struct btrfs_block_group_cache *btrfs_lookup_block_group( 3373 struct btrfs_fs_info *info, 3374 u64 bytenr); 3375 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3376 int get_block_group_index(struct btrfs_block_group_cache *cache); 3377 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 3378 struct btrfs_root *root, u64 parent, 3379 u64 root_objectid, 3380 struct btrfs_disk_key *key, int level, 3381 u64 hint, u64 empty_size); 3382 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3383 struct btrfs_root *root, 3384 struct extent_buffer *buf, 3385 u64 parent, int last_ref); 3386 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 3387 struct btrfs_root *root, 3388 u64 root_objectid, u64 owner, 3389 u64 offset, struct btrfs_key *ins); 3390 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 3391 struct btrfs_root *root, 3392 u64 root_objectid, u64 owner, u64 offset, 3393 struct btrfs_key *ins); 3394 int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes, 3395 u64 min_alloc_size, u64 empty_size, u64 hint_byte, 3396 struct btrfs_key *ins, int is_data, int delalloc); 3397 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3398 struct extent_buffer *buf, int full_backref); 3399 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3400 struct extent_buffer *buf, int full_backref); 3401 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 3402 struct btrfs_root *root, 3403 u64 bytenr, u64 num_bytes, u64 flags, 3404 int level, int is_data); 3405 int btrfs_free_extent(struct btrfs_trans_handle *trans, 3406 struct btrfs_root *root, 3407 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 3408 u64 owner, u64 offset, int no_quota); 3409 3410 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len, 3411 int delalloc); 3412 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root, 3413 u64 start, u64 len); 3414 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans, 3415 struct btrfs_root *root); 3416 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, 3417 struct btrfs_root *root); 3418 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, 3419 struct btrfs_root *root, 3420 u64 bytenr, u64 num_bytes, u64 parent, 3421 u64 root_objectid, u64 owner, u64 offset, int no_quota); 3422 3423 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans, 3424 struct btrfs_root *root); 3425 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 3426 struct btrfs_root *root); 3427 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, 3428 struct btrfs_root *root); 3429 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr); 3430 int btrfs_free_block_groups(struct btrfs_fs_info *info); 3431 int btrfs_read_block_groups(struct btrfs_root *root); 3432 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr); 3433 int btrfs_make_block_group(struct btrfs_trans_handle *trans, 3434 struct btrfs_root *root, u64 bytes_used, 3435 u64 type, u64 chunk_objectid, u64 chunk_offset, 3436 u64 size); 3437 int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 3438 struct btrfs_root *root, u64 group_start, 3439 struct extent_map *em); 3440 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); 3441 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, 3442 struct btrfs_root *root); 3443 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data); 3444 void btrfs_clear_space_info_full(struct btrfs_fs_info *info); 3445 3446 enum btrfs_reserve_flush_enum { 3447 /* If we are in the transaction, we can't flush anything.*/ 3448 BTRFS_RESERVE_NO_FLUSH, 3449 /* 3450 * Flushing delalloc may cause deadlock somewhere, in this 3451 * case, use FLUSH LIMIT 3452 */ 3453 BTRFS_RESERVE_FLUSH_LIMIT, 3454 BTRFS_RESERVE_FLUSH_ALL, 3455 }; 3456 3457 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes); 3458 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes); 3459 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, 3460 struct btrfs_root *root); 3461 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, 3462 struct inode *inode); 3463 void btrfs_orphan_release_metadata(struct inode *inode); 3464 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, 3465 struct btrfs_block_rsv *rsv, 3466 int nitems, 3467 u64 *qgroup_reserved, bool use_global_rsv); 3468 void btrfs_subvolume_release_metadata(struct btrfs_root *root, 3469 struct btrfs_block_rsv *rsv, 3470 u64 qgroup_reserved); 3471 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes); 3472 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes); 3473 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes); 3474 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes); 3475 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type); 3476 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root, 3477 unsigned short type); 3478 void btrfs_free_block_rsv(struct btrfs_root *root, 3479 struct btrfs_block_rsv *rsv); 3480 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv); 3481 int btrfs_block_rsv_add(struct btrfs_root *root, 3482 struct btrfs_block_rsv *block_rsv, u64 num_bytes, 3483 enum btrfs_reserve_flush_enum flush); 3484 int btrfs_block_rsv_check(struct btrfs_root *root, 3485 struct btrfs_block_rsv *block_rsv, int min_factor); 3486 int btrfs_block_rsv_refill(struct btrfs_root *root, 3487 struct btrfs_block_rsv *block_rsv, u64 min_reserved, 3488 enum btrfs_reserve_flush_enum flush); 3489 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, 3490 struct btrfs_block_rsv *dst_rsv, 3491 u64 num_bytes); 3492 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info, 3493 struct btrfs_block_rsv *dest, u64 num_bytes, 3494 int min_factor); 3495 void btrfs_block_rsv_release(struct btrfs_root *root, 3496 struct btrfs_block_rsv *block_rsv, 3497 u64 num_bytes); 3498 int btrfs_set_block_group_ro(struct btrfs_root *root, 3499 struct btrfs_block_group_cache *cache); 3500 void btrfs_set_block_group_rw(struct btrfs_root *root, 3501 struct btrfs_block_group_cache *cache); 3502 void btrfs_put_block_group_cache(struct btrfs_fs_info *info); 3503 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); 3504 int btrfs_error_unpin_extent_range(struct btrfs_root *root, 3505 u64 start, u64 end); 3506 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, 3507 u64 num_bytes, u64 *actual_bytes); 3508 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, 3509 struct btrfs_root *root, u64 type); 3510 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range); 3511 3512 int btrfs_init_space_info(struct btrfs_fs_info *fs_info); 3513 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans, 3514 struct btrfs_fs_info *fs_info); 3515 int __get_raid_index(u64 flags); 3516 int btrfs_start_write_no_snapshoting(struct btrfs_root *root); 3517 void btrfs_end_write_no_snapshoting(struct btrfs_root *root); 3518 /* ctree.c */ 3519 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 3520 int level, int *slot); 3521 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2); 3522 int btrfs_previous_item(struct btrfs_root *root, 3523 struct btrfs_path *path, u64 min_objectid, 3524 int type); 3525 int btrfs_previous_extent_item(struct btrfs_root *root, 3526 struct btrfs_path *path, u64 min_objectid); 3527 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 3528 struct btrfs_path *path, 3529 struct btrfs_key *new_key); 3530 struct extent_buffer *btrfs_root_node(struct btrfs_root *root); 3531 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); 3532 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 3533 struct btrfs_key *key, int lowest_level, 3534 u64 min_trans); 3535 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 3536 struct btrfs_path *path, 3537 u64 min_trans); 3538 enum btrfs_compare_tree_result { 3539 BTRFS_COMPARE_TREE_NEW, 3540 BTRFS_COMPARE_TREE_DELETED, 3541 BTRFS_COMPARE_TREE_CHANGED, 3542 BTRFS_COMPARE_TREE_SAME, 3543 }; 3544 typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root, 3545 struct btrfs_root *right_root, 3546 struct btrfs_path *left_path, 3547 struct btrfs_path *right_path, 3548 struct btrfs_key *key, 3549 enum btrfs_compare_tree_result result, 3550 void *ctx); 3551 int btrfs_compare_trees(struct btrfs_root *left_root, 3552 struct btrfs_root *right_root, 3553 btrfs_changed_cb_t cb, void *ctx); 3554 int btrfs_cow_block(struct btrfs_trans_handle *trans, 3555 struct btrfs_root *root, struct extent_buffer *buf, 3556 struct extent_buffer *parent, int parent_slot, 3557 struct extent_buffer **cow_ret); 3558 int btrfs_copy_root(struct btrfs_trans_handle *trans, 3559 struct btrfs_root *root, 3560 struct extent_buffer *buf, 3561 struct extent_buffer **cow_ret, u64 new_root_objectid); 3562 int btrfs_block_can_be_shared(struct btrfs_root *root, 3563 struct extent_buffer *buf); 3564 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path, 3565 u32 data_size); 3566 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path, 3567 u32 new_size, int from_end); 3568 int btrfs_split_item(struct btrfs_trans_handle *trans, 3569 struct btrfs_root *root, 3570 struct btrfs_path *path, 3571 struct btrfs_key *new_key, 3572 unsigned long split_offset); 3573 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 3574 struct btrfs_root *root, 3575 struct btrfs_path *path, 3576 struct btrfs_key *new_key); 3577 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 3578 u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key); 3579 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root 3580 *root, struct btrfs_key *key, struct btrfs_path *p, int 3581 ins_len, int cow); 3582 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key, 3583 struct btrfs_path *p, u64 time_seq); 3584 int btrfs_search_slot_for_read(struct btrfs_root *root, 3585 struct btrfs_key *key, struct btrfs_path *p, 3586 int find_higher, int return_any); 3587 int btrfs_realloc_node(struct btrfs_trans_handle *trans, 3588 struct btrfs_root *root, struct extent_buffer *parent, 3589 int start_slot, u64 *last_ret, 3590 struct btrfs_key *progress); 3591 void btrfs_release_path(struct btrfs_path *p); 3592 struct btrfs_path *btrfs_alloc_path(void); 3593 void btrfs_free_path(struct btrfs_path *p); 3594 void btrfs_set_path_blocking(struct btrfs_path *p); 3595 void btrfs_clear_path_blocking(struct btrfs_path *p, 3596 struct extent_buffer *held, int held_rw); 3597 void btrfs_unlock_up_safe(struct btrfs_path *p, int level); 3598 3599 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3600 struct btrfs_path *path, int slot, int nr); 3601 static inline int btrfs_del_item(struct btrfs_trans_handle *trans, 3602 struct btrfs_root *root, 3603 struct btrfs_path *path) 3604 { 3605 return btrfs_del_items(trans, root, path, path->slots[0], 1); 3606 } 3607 3608 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 3609 struct btrfs_key *cpu_key, u32 *data_size, 3610 u32 total_data, u32 total_size, int nr); 3611 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root 3612 *root, struct btrfs_key *key, void *data, u32 data_size); 3613 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 3614 struct btrfs_root *root, 3615 struct btrfs_path *path, 3616 struct btrfs_key *cpu_key, u32 *data_size, int nr); 3617 3618 static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, 3619 struct btrfs_root *root, 3620 struct btrfs_path *path, 3621 struct btrfs_key *key, 3622 u32 data_size) 3623 { 3624 return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1); 3625 } 3626 3627 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); 3628 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 3629 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 3630 u64 time_seq); 3631 static inline int btrfs_next_old_item(struct btrfs_root *root, 3632 struct btrfs_path *p, u64 time_seq) 3633 { 3634 ++p->slots[0]; 3635 if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) 3636 return btrfs_next_old_leaf(root, p, time_seq); 3637 return 0; 3638 } 3639 static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) 3640 { 3641 return btrfs_next_old_item(root, p, 0); 3642 } 3643 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); 3644 int __must_check btrfs_drop_snapshot(struct btrfs_root *root, 3645 struct btrfs_block_rsv *block_rsv, 3646 int update_ref, int for_reloc); 3647 int btrfs_drop_subtree(struct btrfs_trans_handle *trans, 3648 struct btrfs_root *root, 3649 struct extent_buffer *node, 3650 struct extent_buffer *parent); 3651 static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info) 3652 { 3653 /* 3654 * Get synced with close_ctree() 3655 */ 3656 smp_mb(); 3657 return fs_info->closing; 3658 } 3659 3660 /* 3661 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do 3662 * anything except sleeping. This function is used to check the status of 3663 * the fs. 3664 */ 3665 static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root) 3666 { 3667 return (root->fs_info->sb->s_flags & MS_RDONLY || 3668 btrfs_fs_closing(root->fs_info)); 3669 } 3670 3671 static inline void free_fs_info(struct btrfs_fs_info *fs_info) 3672 { 3673 kfree(fs_info->balance_ctl); 3674 kfree(fs_info->delayed_root); 3675 kfree(fs_info->extent_root); 3676 kfree(fs_info->tree_root); 3677 kfree(fs_info->chunk_root); 3678 kfree(fs_info->dev_root); 3679 kfree(fs_info->csum_root); 3680 kfree(fs_info->quota_root); 3681 kfree(fs_info->uuid_root); 3682 kfree(fs_info->super_copy); 3683 kfree(fs_info->super_for_commit); 3684 security_free_mnt_opts(&fs_info->security_opts); 3685 kfree(fs_info); 3686 } 3687 3688 /* tree mod log functions from ctree.c */ 3689 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, 3690 struct seq_list *elem); 3691 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, 3692 struct seq_list *elem); 3693 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq); 3694 3695 /* root-item.c */ 3696 int btrfs_find_root_ref(struct btrfs_root *tree_root, 3697 struct btrfs_path *path, 3698 u64 root_id, u64 ref_id); 3699 int btrfs_add_root_ref(struct btrfs_trans_handle *trans, 3700 struct btrfs_root *tree_root, 3701 u64 root_id, u64 ref_id, u64 dirid, u64 sequence, 3702 const char *name, int name_len); 3703 int btrfs_del_root_ref(struct btrfs_trans_handle *trans, 3704 struct btrfs_root *tree_root, 3705 u64 root_id, u64 ref_id, u64 dirid, u64 *sequence, 3706 const char *name, int name_len); 3707 int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, 3708 struct btrfs_key *key); 3709 int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root 3710 *root, struct btrfs_key *key, struct btrfs_root_item 3711 *item); 3712 int __must_check btrfs_update_root(struct btrfs_trans_handle *trans, 3713 struct btrfs_root *root, 3714 struct btrfs_key *key, 3715 struct btrfs_root_item *item); 3716 int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key, 3717 struct btrfs_path *path, struct btrfs_root_item *root_item, 3718 struct btrfs_key *root_key); 3719 int btrfs_find_orphan_roots(struct btrfs_root *tree_root); 3720 void btrfs_set_root_node(struct btrfs_root_item *item, 3721 struct extent_buffer *node); 3722 void btrfs_check_and_init_root_item(struct btrfs_root_item *item); 3723 void btrfs_update_root_times(struct btrfs_trans_handle *trans, 3724 struct btrfs_root *root); 3725 3726 /* uuid-tree.c */ 3727 int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, 3728 struct btrfs_root *uuid_root, u8 *uuid, u8 type, 3729 u64 subid); 3730 int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans, 3731 struct btrfs_root *uuid_root, u8 *uuid, u8 type, 3732 u64 subid); 3733 int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info, 3734 int (*check_func)(struct btrfs_fs_info *, u8 *, u8, 3735 u64)); 3736 3737 /* dir-item.c */ 3738 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, 3739 const char *name, int name_len); 3740 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, 3741 struct btrfs_root *root, const char *name, 3742 int name_len, struct inode *dir, 3743 struct btrfs_key *location, u8 type, u64 index); 3744 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, 3745 struct btrfs_root *root, 3746 struct btrfs_path *path, u64 dir, 3747 const char *name, int name_len, 3748 int mod); 3749 struct btrfs_dir_item * 3750 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, 3751 struct btrfs_root *root, 3752 struct btrfs_path *path, u64 dir, 3753 u64 objectid, const char *name, int name_len, 3754 int mod); 3755 struct btrfs_dir_item * 3756 btrfs_search_dir_index_item(struct btrfs_root *root, 3757 struct btrfs_path *path, u64 dirid, 3758 const char *name, int name_len); 3759 int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, 3760 struct btrfs_root *root, 3761 struct btrfs_path *path, 3762 struct btrfs_dir_item *di); 3763 int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, 3764 struct btrfs_root *root, 3765 struct btrfs_path *path, u64 objectid, 3766 const char *name, u16 name_len, 3767 const void *data, u16 data_len); 3768 struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, 3769 struct btrfs_root *root, 3770 struct btrfs_path *path, u64 dir, 3771 const char *name, u16 name_len, 3772 int mod); 3773 int verify_dir_item(struct btrfs_root *root, 3774 struct extent_buffer *leaf, 3775 struct btrfs_dir_item *dir_item); 3776 struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, 3777 struct btrfs_path *path, 3778 const char *name, 3779 int name_len); 3780 3781 /* orphan.c */ 3782 int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, 3783 struct btrfs_root *root, u64 offset); 3784 int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, 3785 struct btrfs_root *root, u64 offset); 3786 int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); 3787 3788 /* inode-item.c */ 3789 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, 3790 struct btrfs_root *root, 3791 const char *name, int name_len, 3792 u64 inode_objectid, u64 ref_objectid, u64 index); 3793 int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, 3794 struct btrfs_root *root, 3795 const char *name, int name_len, 3796 u64 inode_objectid, u64 ref_objectid, u64 *index); 3797 int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, 3798 struct btrfs_root *root, 3799 struct btrfs_path *path, u64 objectid); 3800 int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root 3801 *root, struct btrfs_path *path, 3802 struct btrfs_key *location, int mod); 3803 3804 struct btrfs_inode_extref * 3805 btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, 3806 struct btrfs_root *root, 3807 struct btrfs_path *path, 3808 const char *name, int name_len, 3809 u64 inode_objectid, u64 ref_objectid, int ins_len, 3810 int cow); 3811 3812 int btrfs_find_name_in_ext_backref(struct btrfs_path *path, 3813 u64 ref_objectid, const char *name, 3814 int name_len, 3815 struct btrfs_inode_extref **extref_ret); 3816 3817 /* file-item.c */ 3818 struct btrfs_dio_private; 3819 int btrfs_del_csums(struct btrfs_trans_handle *trans, 3820 struct btrfs_root *root, u64 bytenr, u64 len); 3821 int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, 3822 struct bio *bio, u32 *dst); 3823 int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, 3824 struct bio *bio, u64 logical_offset); 3825 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, 3826 struct btrfs_root *root, 3827 u64 objectid, u64 pos, 3828 u64 disk_offset, u64 disk_num_bytes, 3829 u64 num_bytes, u64 offset, u64 ram_bytes, 3830 u8 compression, u8 encryption, u16 other_encoding); 3831 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, 3832 struct btrfs_root *root, 3833 struct btrfs_path *path, u64 objectid, 3834 u64 bytenr, int mod); 3835 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, 3836 struct btrfs_root *root, 3837 struct btrfs_ordered_sum *sums); 3838 int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, 3839 struct bio *bio, u64 file_start, int contig); 3840 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, 3841 struct list_head *list, int search_commit); 3842 void btrfs_extent_item_to_extent_map(struct inode *inode, 3843 const struct btrfs_path *path, 3844 struct btrfs_file_extent_item *fi, 3845 const bool new_inline, 3846 struct extent_map *em); 3847 3848 /* inode.c */ 3849 struct btrfs_delalloc_work { 3850 struct inode *inode; 3851 int wait; 3852 int delay_iput; 3853 struct completion completion; 3854 struct list_head list; 3855 struct btrfs_work work; 3856 }; 3857 3858 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, 3859 int wait, int delay_iput); 3860 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work); 3861 3862 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, 3863 size_t pg_offset, u64 start, u64 len, 3864 int create); 3865 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, 3866 u64 *orig_start, u64 *orig_block_len, 3867 u64 *ram_bytes); 3868 3869 /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ 3870 #if defined(ClearPageFsMisc) && !defined(ClearPageChecked) 3871 #define ClearPageChecked ClearPageFsMisc 3872 #define SetPageChecked SetPageFsMisc 3873 #define PageChecked PageFsMisc 3874 #endif 3875 3876 /* This forces readahead on a given range of bytes in an inode */ 3877 static inline void btrfs_force_ra(struct address_space *mapping, 3878 struct file_ra_state *ra, struct file *file, 3879 pgoff_t offset, unsigned long req_size) 3880 { 3881 page_cache_sync_readahead(mapping, ra, file, offset, req_size); 3882 } 3883 3884 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); 3885 int btrfs_set_inode_index(struct inode *dir, u64 *index); 3886 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, 3887 struct btrfs_root *root, 3888 struct inode *dir, struct inode *inode, 3889 const char *name, int name_len); 3890 int btrfs_add_link(struct btrfs_trans_handle *trans, 3891 struct inode *parent_inode, struct inode *inode, 3892 const char *name, int name_len, int add_backref, u64 index); 3893 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, 3894 struct btrfs_root *root, 3895 struct inode *dir, u64 objectid, 3896 const char *name, int name_len); 3897 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len, 3898 int front); 3899 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, 3900 struct btrfs_root *root, 3901 struct inode *inode, u64 new_size, 3902 u32 min_type); 3903 3904 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); 3905 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, 3906 int nr); 3907 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, 3908 struct extent_state **cached_state); 3909 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 3910 struct btrfs_root *new_root, 3911 struct btrfs_root *parent_root, 3912 u64 new_dirid); 3913 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset, 3914 size_t size, struct bio *bio, 3915 unsigned long bio_flags); 3916 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); 3917 int btrfs_readpage(struct file *file, struct page *page); 3918 void btrfs_evict_inode(struct inode *inode); 3919 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); 3920 struct inode *btrfs_alloc_inode(struct super_block *sb); 3921 void btrfs_destroy_inode(struct inode *inode); 3922 int btrfs_drop_inode(struct inode *inode); 3923 int btrfs_init_cachep(void); 3924 void btrfs_destroy_cachep(void); 3925 long btrfs_ioctl_trans_end(struct file *file); 3926 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 3927 struct btrfs_root *root, int *was_new); 3928 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, 3929 size_t pg_offset, u64 start, u64 end, 3930 int create); 3931 int btrfs_update_inode(struct btrfs_trans_handle *trans, 3932 struct btrfs_root *root, 3933 struct inode *inode); 3934 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, 3935 struct btrfs_root *root, struct inode *inode); 3936 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); 3937 int btrfs_orphan_cleanup(struct btrfs_root *root); 3938 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, 3939 struct btrfs_root *root); 3940 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); 3941 void btrfs_invalidate_inodes(struct btrfs_root *root); 3942 void btrfs_add_delayed_iput(struct inode *inode); 3943 void btrfs_run_delayed_iputs(struct btrfs_root *root); 3944 int btrfs_prealloc_file_range(struct inode *inode, int mode, 3945 u64 start, u64 num_bytes, u64 min_size, 3946 loff_t actual_len, u64 *alloc_hint); 3947 int btrfs_prealloc_file_range_trans(struct inode *inode, 3948 struct btrfs_trans_handle *trans, int mode, 3949 u64 start, u64 num_bytes, u64 min_size, 3950 loff_t actual_len, u64 *alloc_hint); 3951 int btrfs_inode_check_errors(struct inode *inode); 3952 extern const struct dentry_operations btrfs_dentry_operations; 3953 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 3954 void btrfs_test_inode_set_ops(struct inode *inode); 3955 #endif 3956 3957 /* ioctl.c */ 3958 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 3959 void btrfs_update_iflags(struct inode *inode); 3960 void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); 3961 int btrfs_is_empty_uuid(u8 *uuid); 3962 int btrfs_defrag_file(struct inode *inode, struct file *file, 3963 struct btrfs_ioctl_defrag_range_args *range, 3964 u64 newer_than, unsigned long max_pages); 3965 void btrfs_get_block_group_info(struct list_head *groups_list, 3966 struct btrfs_ioctl_space_info *space); 3967 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock, 3968 struct btrfs_ioctl_balance_args *bargs); 3969 3970 3971 /* file.c */ 3972 int btrfs_auto_defrag_init(void); 3973 void btrfs_auto_defrag_exit(void); 3974 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, 3975 struct inode *inode); 3976 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); 3977 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info); 3978 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); 3979 void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, 3980 int skip_pinned); 3981 extern const struct file_operations btrfs_file_operations; 3982 int __btrfs_drop_extents(struct btrfs_trans_handle *trans, 3983 struct btrfs_root *root, struct inode *inode, 3984 struct btrfs_path *path, u64 start, u64 end, 3985 u64 *drop_end, int drop_cache, 3986 int replace_extent, 3987 u32 extent_item_size, 3988 int *key_inserted); 3989 int btrfs_drop_extents(struct btrfs_trans_handle *trans, 3990 struct btrfs_root *root, struct inode *inode, u64 start, 3991 u64 end, int drop_cache); 3992 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, 3993 struct inode *inode, u64 start, u64 end); 3994 int btrfs_release_file(struct inode *inode, struct file *file); 3995 int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, 3996 struct page **pages, size_t num_pages, 3997 loff_t pos, size_t write_bytes, 3998 struct extent_state **cached); 3999 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end); 4000 4001 /* tree-defrag.c */ 4002 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, 4003 struct btrfs_root *root); 4004 4005 /* sysfs.c */ 4006 int btrfs_init_sysfs(void); 4007 void btrfs_exit_sysfs(void); 4008 int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info); 4009 void btrfs_sysfs_remove_one(struct btrfs_fs_info *fs_info); 4010 4011 /* xattr.c */ 4012 ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); 4013 4014 /* super.c */ 4015 int btrfs_parse_options(struct btrfs_root *root, char *options); 4016 int btrfs_sync_fs(struct super_block *sb, int wait); 4017 4018 #ifdef CONFIG_PRINTK 4019 __printf(2, 3) 4020 void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...); 4021 #else 4022 static inline __printf(2, 3) 4023 void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) 4024 { 4025 } 4026 #endif 4027 4028 #define btrfs_emerg(fs_info, fmt, args...) \ 4029 btrfs_printk(fs_info, KERN_EMERG fmt, ##args) 4030 #define btrfs_alert(fs_info, fmt, args...) \ 4031 btrfs_printk(fs_info, KERN_ALERT fmt, ##args) 4032 #define btrfs_crit(fs_info, fmt, args...) \ 4033 btrfs_printk(fs_info, KERN_CRIT fmt, ##args) 4034 #define btrfs_err(fs_info, fmt, args...) \ 4035 btrfs_printk(fs_info, KERN_ERR fmt, ##args) 4036 #define btrfs_warn(fs_info, fmt, args...) \ 4037 btrfs_printk(fs_info, KERN_WARNING fmt, ##args) 4038 #define btrfs_notice(fs_info, fmt, args...) \ 4039 btrfs_printk(fs_info, KERN_NOTICE fmt, ##args) 4040 #define btrfs_info(fs_info, fmt, args...) \ 4041 btrfs_printk(fs_info, KERN_INFO fmt, ##args) 4042 4043 #ifdef DEBUG 4044 #define btrfs_debug(fs_info, fmt, args...) \ 4045 btrfs_printk(fs_info, KERN_DEBUG fmt, ##args) 4046 #else 4047 #define btrfs_debug(fs_info, fmt, args...) \ 4048 no_printk(KERN_DEBUG fmt, ##args) 4049 #endif 4050 4051 #ifdef CONFIG_BTRFS_ASSERT 4052 4053 static inline void assfail(char *expr, char *file, int line) 4054 { 4055 pr_err("BTRFS: assertion failed: %s, file: %s, line: %d", 4056 expr, file, line); 4057 BUG(); 4058 } 4059 4060 #define ASSERT(expr) \ 4061 (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 4062 #else 4063 #define ASSERT(expr) ((void)0) 4064 #endif 4065 4066 #define btrfs_assert() 4067 __printf(5, 6) 4068 void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, 4069 unsigned int line, int errno, const char *fmt, ...); 4070 4071 4072 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, 4073 struct btrfs_root *root, const char *function, 4074 unsigned int line, int errno); 4075 4076 #define btrfs_set_fs_incompat(__fs_info, opt) \ 4077 __btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt) 4078 4079 static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, 4080 u64 flag) 4081 { 4082 struct btrfs_super_block *disk_super; 4083 u64 features; 4084 4085 disk_super = fs_info->super_copy; 4086 features = btrfs_super_incompat_flags(disk_super); 4087 if (!(features & flag)) { 4088 spin_lock(&fs_info->super_lock); 4089 features = btrfs_super_incompat_flags(disk_super); 4090 if (!(features & flag)) { 4091 features |= flag; 4092 btrfs_set_super_incompat_flags(disk_super, features); 4093 btrfs_info(fs_info, "setting %llu feature flag", 4094 flag); 4095 } 4096 spin_unlock(&fs_info->super_lock); 4097 } 4098 } 4099 4100 #define btrfs_fs_incompat(fs_info, opt) \ 4101 __btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt) 4102 4103 static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag) 4104 { 4105 struct btrfs_super_block *disk_super; 4106 disk_super = fs_info->super_copy; 4107 return !!(btrfs_super_incompat_flags(disk_super) & flag); 4108 } 4109 4110 /* 4111 * Call btrfs_abort_transaction as early as possible when an error condition is 4112 * detected, that way the exact line number is reported. 4113 */ 4114 4115 #define btrfs_abort_transaction(trans, root, errno) \ 4116 do { \ 4117 __btrfs_abort_transaction(trans, root, __func__, \ 4118 __LINE__, errno); \ 4119 } while (0) 4120 4121 #define btrfs_std_error(fs_info, errno) \ 4122 do { \ 4123 if ((errno)) \ 4124 __btrfs_std_error((fs_info), __func__, \ 4125 __LINE__, (errno), NULL); \ 4126 } while (0) 4127 4128 #define btrfs_error(fs_info, errno, fmt, args...) \ 4129 do { \ 4130 __btrfs_std_error((fs_info), __func__, __LINE__, \ 4131 (errno), fmt, ##args); \ 4132 } while (0) 4133 4134 __printf(5, 6) 4135 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, 4136 unsigned int line, int errno, const char *fmt, ...); 4137 4138 /* 4139 * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic 4140 * will panic(). Otherwise we BUG() here. 4141 */ 4142 #define btrfs_panic(fs_info, errno, fmt, args...) \ 4143 do { \ 4144 __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \ 4145 BUG(); \ 4146 } while (0) 4147 4148 /* acl.c */ 4149 #ifdef CONFIG_BTRFS_FS_POSIX_ACL 4150 struct posix_acl *btrfs_get_acl(struct inode *inode, int type); 4151 int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type); 4152 int btrfs_init_acl(struct btrfs_trans_handle *trans, 4153 struct inode *inode, struct inode *dir); 4154 #else 4155 #define btrfs_get_acl NULL 4156 #define btrfs_set_acl NULL 4157 static inline int btrfs_init_acl(struct btrfs_trans_handle *trans, 4158 struct inode *inode, struct inode *dir) 4159 { 4160 return 0; 4161 } 4162 #endif 4163 4164 /* relocation.c */ 4165 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start); 4166 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, 4167 struct btrfs_root *root); 4168 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, 4169 struct btrfs_root *root); 4170 int btrfs_recover_relocation(struct btrfs_root *root); 4171 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); 4172 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, 4173 struct btrfs_root *root, struct extent_buffer *buf, 4174 struct extent_buffer *cow); 4175 void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, 4176 struct btrfs_pending_snapshot *pending, 4177 u64 *bytes_to_reserve); 4178 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, 4179 struct btrfs_pending_snapshot *pending); 4180 4181 /* scrub.c */ 4182 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, 4183 u64 end, struct btrfs_scrub_progress *progress, 4184 int readonly, int is_dev_replace); 4185 void btrfs_scrub_pause(struct btrfs_root *root); 4186 void btrfs_scrub_continue(struct btrfs_root *root); 4187 int btrfs_scrub_cancel(struct btrfs_fs_info *info); 4188 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info, 4189 struct btrfs_device *dev); 4190 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, 4191 struct btrfs_scrub_progress *progress); 4192 4193 /* dev-replace.c */ 4194 void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info); 4195 void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info); 4196 void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount); 4197 4198 static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info) 4199 { 4200 btrfs_bio_counter_sub(fs_info, 1); 4201 } 4202 4203 /* reada.c */ 4204 struct reada_control { 4205 struct btrfs_root *root; /* tree to prefetch */ 4206 struct btrfs_key key_start; 4207 struct btrfs_key key_end; /* exclusive */ 4208 atomic_t elems; 4209 struct kref refcnt; 4210 wait_queue_head_t wait; 4211 }; 4212 struct reada_control *btrfs_reada_add(struct btrfs_root *root, 4213 struct btrfs_key *start, struct btrfs_key *end); 4214 int btrfs_reada_wait(void *handle); 4215 void btrfs_reada_detach(void *handle); 4216 int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb, 4217 u64 start, int err); 4218 4219 static inline int is_fstree(u64 rootid) 4220 { 4221 if (rootid == BTRFS_FS_TREE_OBJECTID || 4222 ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID && 4223 !btrfs_qgroup_level(rootid))) 4224 return 1; 4225 return 0; 4226 } 4227 4228 static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info) 4229 { 4230 return signal_pending(current); 4231 } 4232 4233 /* Sanity test specific functions */ 4234 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4235 void btrfs_test_destroy_inode(struct inode *inode); 4236 #endif 4237 4238 static inline int btrfs_test_is_dummy_root(struct btrfs_root *root) 4239 { 4240 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 4241 if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state))) 4242 return 1; 4243 #endif 4244 return 0; 4245 } 4246 4247 #endif 4248