1 /* 2 * Copyright (C) 2007 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #ifndef __BTRFS_CTREE__ 20 #define __BTRFS_CTREE__ 21 22 #include <linux/mm.h> 23 #include <linux/highmem.h> 24 #include <linux/fs.h> 25 #include <linux/rwsem.h> 26 #include <linux/semaphore.h> 27 #include <linux/completion.h> 28 #include <linux/backing-dev.h> 29 #include <linux/wait.h> 30 #include <linux/slab.h> 31 #include <linux/kobject.h> 32 #include <trace/events/btrfs.h> 33 #include <asm/kmap_types.h> 34 #include <linux/pagemap.h> 35 #include <linux/btrfs.h> 36 #include <linux/workqueue.h> 37 #include <linux/security.h> 38 #include "extent_io.h" 39 #include "extent_map.h" 40 #include "async-thread.h" 41 42 struct btrfs_trans_handle; 43 struct btrfs_transaction; 44 struct btrfs_pending_snapshot; 45 extern struct kmem_cache *btrfs_trans_handle_cachep; 46 extern struct kmem_cache *btrfs_transaction_cachep; 47 extern struct kmem_cache *btrfs_bit_radix_cachep; 48 extern struct kmem_cache *btrfs_path_cachep; 49 extern struct kmem_cache *btrfs_free_space_cachep; 50 struct btrfs_ordered_sum; 51 52 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 53 #define STATIC noinline 54 #else 55 #define STATIC static noinline 56 #endif 57 58 #define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */ 59 60 #define BTRFS_MAX_MIRRORS 3 61 62 #define BTRFS_MAX_LEVEL 8 63 64 #define BTRFS_COMPAT_EXTENT_TREE_V0 65 66 /* holds pointers to all of the tree roots */ 67 #define BTRFS_ROOT_TREE_OBJECTID 1ULL 68 69 /* stores information about which extents are in use, and reference counts */ 70 #define BTRFS_EXTENT_TREE_OBJECTID 2ULL 71 72 /* 73 * chunk tree stores translations from logical -> physical block numbering 74 * the super block points to the chunk tree 75 */ 76 #define BTRFS_CHUNK_TREE_OBJECTID 3ULL 77 78 /* 79 * stores information about which areas of a given device are in use. 80 * one per device. 
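 */

/*
 * A minimal illustrative sketch (the helper name is invented here and is not
 * part of btrfs): BTRFS_MAGIC above is the ascii string "_BHRfS_M" read as a
 * little-endian u64, so an on-disk magic field can be checked roughly like
 * this.
 */
static inline int btrfs_example_super_magic_valid(__le64 disk_magic)
{
	return le64_to_cpu(disk_magic) == BTRFS_MAGIC;
}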
/*
 * The tree of tree roots points to the device tree
 */
#define BTRFS_DEV_TREE_OBJECTID 4ULL

/* one per subvolume, storing files and directories */
#define BTRFS_FS_TREE_OBJECTID 5ULL

/* directory objectid inside the root tree */
#define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL

/* holds checksums of all the data extents */
#define BTRFS_CSUM_TREE_OBJECTID 7ULL

/* holds quota configuration and tracking */
#define BTRFS_QUOTA_TREE_OBJECTID 8ULL

/* for storing items that use the BTRFS_UUID_KEY* types */
#define BTRFS_UUID_TREE_OBJECTID 9ULL

/* for storing balance parameters in the root tree */
#define BTRFS_BALANCE_OBJECTID -4ULL

/* orphan objectid for tracking unlinked/truncated files */
#define BTRFS_ORPHAN_OBJECTID -5ULL

/* does write ahead logging to speed up fsyncs */
#define BTRFS_TREE_LOG_OBJECTID -6ULL
#define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL

/* for space balancing */
#define BTRFS_TREE_RELOC_OBJECTID -8ULL
#define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL

/*
 * extent checksums all have this objectid
 * this allows them to share the logging tree
 * for fsyncs
 */
#define BTRFS_EXTENT_CSUM_OBJECTID -10ULL

/* For storing free space cache */
#define BTRFS_FREE_SPACE_OBJECTID -11ULL

/*
 * The inode number assigned to the special inode for storing
 * free ino cache
 */
#define BTRFS_FREE_INO_OBJECTID -12ULL

/* dummy objectid represents multiple objectids */
#define BTRFS_MULTIPLE_OBJECTIDS -255ULL

/*
 * All files have objectids in this range.
 */
#define BTRFS_FIRST_FREE_OBJECTID 256ULL
#define BTRFS_LAST_FREE_OBJECTID -256ULL
#define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL


/*
 * the device items go into the chunk tree. The key is in the form
 * [ 1 BTRFS_DEV_ITEM_KEY device_id ]
 */
#define BTRFS_DEV_ITEMS_OBJECTID 1ULL

#define BTRFS_BTREE_INODE_OBJECTID 1

#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2

#define BTRFS_DEV_REPLACE_DEVID 0ULL

/*
 * the max metadata block size. This limit is somewhat artificial,
 * but the memmove costs go through the roof for larger blocks.
 */
#define BTRFS_MAX_METADATA_BLOCKSIZE 65536

/*
 * we can actually store much bigger names, but let's not confuse the rest
 * of linux
 */
#define BTRFS_NAME_LEN 255
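
/*
 * A small sketch of how the objectid ranges above are typically used (this
 * helper is invented for illustration and does not exist in btrfs): regular
 * file and directory inodes get objectids in
 * [BTRFS_FIRST_FREE_OBJECTID, BTRFS_LAST_FREE_OBJECTID]; objectids below or
 * above that range are reserved for the special purposes defined above.
 */
static inline int btrfs_example_is_fs_inode_objectid(u64 objectid)
{
	return objectid >= BTRFS_FIRST_FREE_OBJECTID &&
	       objectid <= BTRFS_LAST_FREE_OBJECTID;
}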
/*
 * Theoretical limit is larger, but we keep this down to a sane
 * value. That should limit greatly the possibility of collisions on
 * inode ref items.
 */
#define BTRFS_LINK_MAX 65535U

/* 32 bytes in various csum fields */
#define BTRFS_CSUM_SIZE 32

/* csum types */
#define BTRFS_CSUM_TYPE_CRC32 0

static int btrfs_csum_sizes[] = { 4, 0 };

/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0

/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
#define REQ_GET_READ_MIRRORS (1 << 30)

#define BTRFS_FT_UNKNOWN 0
#define BTRFS_FT_REG_FILE 1
#define BTRFS_FT_DIR 2
#define BTRFS_FT_CHRDEV 3
#define BTRFS_FT_BLKDEV 4
#define BTRFS_FT_FIFO 5
#define BTRFS_FT_SOCK 6
#define BTRFS_FT_SYMLINK 7
#define BTRFS_FT_XATTR 8
#define BTRFS_FT_MAX 9

/* ioprio of readahead is set to idle */
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))

#define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024)

/*
 * The key defines the order in the tree, and so it also defines (optimal)
 * block layout.
 *
 * objectid corresponds to the inode number.
 *
 * type tells us things about the object, and is a kind of stream selector.
 * so for a given inode, keys with type of 1 might refer to the inode data,
 * type of 2 may point to file data in the btree and type == 3 may point to
 * extents.
 *
 * offset is the starting byte offset for this key in the stream.
 *
 * btrfs_disk_key is in disk byte order. struct btrfs_key is always
 * in cpu native order. Otherwise they are identical and their sizes
 * should be the same (ie both packed)
 */
struct btrfs_disk_key {
	__le64 objectid;
	u8 type;
	__le64 offset;
} __attribute__ ((__packed__));

struct btrfs_key {
	u64 objectid;
	u8 type;
	u64 offset;
} __attribute__ ((__packed__));

struct btrfs_mapping_tree {
	struct extent_map_tree map_tree;
};

struct btrfs_dev_item {
	/* the internal btrfs device id */
	__le64 devid;

	/* size of the device */
	__le64 total_bytes;

	/* bytes used */
	__le64 bytes_used;

	/* optimal io alignment for this device */
	__le32 io_align;

	/* optimal io width for this device */
	__le32 io_width;

	/* minimal io size for this device */
	__le32 sector_size;

	/* type and info about this device */
	__le64 type;

	/* expected generation for this device */
	__le64 generation;

	/*
	 * starting byte of this partition on the device,
	 * to allow for stripe alignment in the future
	 */
	__le64 start_offset;

	/* grouping information for allocation decisions */
	__le32 dev_group;

	/* seek speed 0-100 where 100 is fastest */
	u8 seek_speed;

	/* bandwidth 0-100 where 100 is fastest */
	u8 bandwidth;

	/* btrfs generated uuid for this device */
	u8 uuid[BTRFS_UUID_SIZE];

	/* uuid of FS who owns this device */
	u8 fsid[BTRFS_UUID_SIZE];
} __attribute__ ((__packed__));

struct btrfs_stripe {
	__le64 devid;
	__le64 offset;
	u8 dev_uuid[BTRFS_UUID_SIZE];
} __attribute__ ((__packed__));

struct btrfs_chunk {
	/* size of this chunk in bytes */
	__le64 length;

	/* objectid of the root referencing this chunk */
	__le64 owner;

	__le64 stripe_len;
	__le64 type;

	/* optimal io alignment for this chunk */
	__le32 io_align;

	/* optimal io width for this chunk */
	__le32 io_width;

	/* minimal io size for this chunk */
	__le32
sector_size; 305 306 /* 2^16 stripes is quite a lot, a second limit is the size of a single 307 * item in the btree 308 */ 309 __le16 num_stripes; 310 311 /* sub stripes only matter for raid10 */ 312 __le16 sub_stripes; 313 struct btrfs_stripe stripe; 314 /* additional stripes go here */ 315 } __attribute__ ((__packed__)); 316 317 #define BTRFS_FREE_SPACE_EXTENT 1 318 #define BTRFS_FREE_SPACE_BITMAP 2 319 320 struct btrfs_free_space_entry { 321 __le64 offset; 322 __le64 bytes; 323 u8 type; 324 } __attribute__ ((__packed__)); 325 326 struct btrfs_free_space_header { 327 struct btrfs_disk_key location; 328 __le64 generation; 329 __le64 num_entries; 330 __le64 num_bitmaps; 331 } __attribute__ ((__packed__)); 332 333 static inline unsigned long btrfs_chunk_item_size(int num_stripes) 334 { 335 BUG_ON(num_stripes == 0); 336 return sizeof(struct btrfs_chunk) + 337 sizeof(struct btrfs_stripe) * (num_stripes - 1); 338 } 339 340 #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) 341 #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) 342 343 /* 344 * File system states 345 */ 346 #define BTRFS_FS_STATE_ERROR 0 347 #define BTRFS_FS_STATE_REMOUNTING 1 348 #define BTRFS_FS_STATE_TRANS_ABORTED 2 349 #define BTRFS_FS_STATE_DEV_REPLACING 3 350 351 /* Super block flags */ 352 /* Errors detected */ 353 #define BTRFS_SUPER_FLAG_ERROR (1ULL << 2) 354 355 #define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) 356 #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) 357 358 #define BTRFS_BACKREF_REV_MAX 256 359 #define BTRFS_BACKREF_REV_SHIFT 56 360 #define BTRFS_BACKREF_REV_MASK (((u64)BTRFS_BACKREF_REV_MAX - 1) << \ 361 BTRFS_BACKREF_REV_SHIFT) 362 363 #define BTRFS_OLD_BACKREF_REV 0 364 #define BTRFS_MIXED_BACKREF_REV 1 365 366 /* 367 * every tree block (leaf or node) starts with this header. 368 */ 369 struct btrfs_header { 370 /* these first four must match the super block */ 371 u8 csum[BTRFS_CSUM_SIZE]; 372 u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */ 373 __le64 bytenr; /* which block this node is supposed to live in */ 374 __le64 flags; 375 376 /* allowed to be different from the super from here on down */ 377 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 378 __le64 generation; 379 __le64 owner; 380 __le32 nritems; 381 u8 level; 382 } __attribute__ ((__packed__)); 383 384 #define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \ 385 sizeof(struct btrfs_header)) / \ 386 sizeof(struct btrfs_key_ptr)) 387 #define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header)) 388 #define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->nodesize)) 389 #define BTRFS_FILE_EXTENT_INLINE_DATA_START \ 390 (offsetof(struct btrfs_file_extent_item, disk_bytenr)) 391 #define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ 392 sizeof(struct btrfs_item) - \ 393 BTRFS_FILE_EXTENT_INLINE_DATA_START) 394 #define BTRFS_MAX_XATTR_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ 395 sizeof(struct btrfs_item) -\ 396 sizeof(struct btrfs_dir_item)) 397 398 399 /* 400 * this is a very generous portion of the super block, giving us 401 * room to translate 14 chunks with 3 stripes each. 402 */ 403 #define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048 404 #define BTRFS_LABEL_SIZE 256 405 406 /* 407 * just in case we somehow lose the roots and are not able to mount, 408 * we store an array of the roots from previous transactions 409 * in the super. 
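 */

/*
 * A minimal sketch of the disk key / cpu key conversion described next to
 * struct btrfs_disk_key and struct btrfs_key above (btrfs has its own helpers
 * for this elsewhere; this one is illustrative only): the two structs share
 * the same layout, only the byte order of the integer fields differs.
 */
static inline void btrfs_example_disk_key_to_cpu(struct btrfs_key *cpu,
						 const struct btrfs_disk_key *disk)
{
	cpu->objectid = le64_to_cpu(disk->objectid);
	cpu->type = disk->type;
	cpu->offset = le64_to_cpu(disk->offset);
}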
#define BTRFS_NUM_BACKUP_ROOTS 4
struct btrfs_root_backup {
	__le64 tree_root;
	__le64 tree_root_gen;

	__le64 chunk_root;
	__le64 chunk_root_gen;

	__le64 extent_root;
	__le64 extent_root_gen;

	__le64 fs_root;
	__le64 fs_root_gen;

	__le64 dev_root;
	__le64 dev_root_gen;

	__le64 csum_root;
	__le64 csum_root_gen;

	__le64 total_bytes;
	__le64 bytes_used;
	__le64 num_devices;
	/* future */
	__le64 unused_64[4];

	u8 tree_root_level;
	u8 chunk_root_level;
	u8 extent_root_level;
	u8 fs_root_level;
	u8 dev_root_level;
	u8 csum_root_level;
	/* future and to align */
	u8 unused_8[10];
} __attribute__ ((__packed__));

/*
 * the super block basically lists the main trees of the FS
 * it currently lacks any block count etc etc
 */
struct btrfs_super_block {
	u8 csum[BTRFS_CSUM_SIZE];
	/* the first 4 fields must match struct btrfs_header */
	u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
	__le64 bytenr; /* this block number */
	__le64 flags;

	/* allowed to be different from the btrfs_header from here on down */
	__le64 magic;
	__le64 generation;
	__le64 root;
	__le64 chunk_root;
	__le64 log_root;

	/* this will help find the new super based on the log root */
	__le64 log_root_transid;
	__le64 total_bytes;
	__le64 bytes_used;
	__le64 root_dir_objectid;
	__le64 num_devices;
	__le32 sectorsize;
	__le32 nodesize;
	__le32 __unused_leafsize;
	__le32 stripesize;
	__le32 sys_chunk_array_size;
	__le64 chunk_root_generation;
	__le64 compat_flags;
	__le64 compat_ro_flags;
	__le64 incompat_flags;
	__le16 csum_type;
	u8 root_level;
	u8 chunk_root_level;
	u8 log_root_level;
	struct btrfs_dev_item dev_item;

	char label[BTRFS_LABEL_SIZE];

	__le64 cache_generation;
	__le64 uuid_tree_generation;

	/* future expansion */
	__le64 reserved[30];
	u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
	struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
} __attribute__ ((__packed__));

/*
 * Compat flags that we support. If any incompat flags are set other than the
 * ones specified below then we will fail to mount
 */
#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2)
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3)
/*
 * some patches floated around with a second compression method
 * let's save that incompat here for when they do get in
 * Note we don't actually support it, we're just reserving the
 * number
 */
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4)

/*
 * older kernels tried to do bigger metadata blocks, but the
 * code was pretty buggy. Let's not let them try anymore.
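 */

/*
 * A sketch of how the incompat bits above are read out of the on-disk super
 * block (the helper name is invented for illustration; real feature checks
 * go through dedicated accessors in the btrfs code).
 */
static inline int btrfs_example_super_uses_lzo(const struct btrfs_super_block *sb)
{
	return (le64_to_cpu(sb->incompat_flags) &
		BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO) != 0;
}

/*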
516 */ 517 #define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5) 518 519 #define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6) 520 #define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7) 521 #define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8) 522 #define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9) 523 524 #define BTRFS_FEATURE_COMPAT_SUPP 0ULL 525 #define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL 526 #define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL 527 #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL 528 #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL 529 #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL 530 531 #define BTRFS_FEATURE_INCOMPAT_SUPP \ 532 (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ 533 BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ 534 BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ 535 BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \ 536 BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \ 537 BTRFS_FEATURE_INCOMPAT_RAID56 | \ 538 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \ 539 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \ 540 BTRFS_FEATURE_INCOMPAT_NO_HOLES) 541 542 #define BTRFS_FEATURE_INCOMPAT_SAFE_SET \ 543 (BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF) 544 #define BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR 0ULL 545 546 /* 547 * A leaf is full of items. offset and size tell us where to find 548 * the item in the leaf (relative to the start of the data area) 549 */ 550 struct btrfs_item { 551 struct btrfs_disk_key key; 552 __le32 offset; 553 __le32 size; 554 } __attribute__ ((__packed__)); 555 556 /* 557 * leaves have an item area and a data area: 558 * [item0, item1....itemN] [free space] [dataN...data1, data0] 559 * 560 * The data is separate from the items to get the keys closer together 561 * during searches. 562 */ 563 struct btrfs_leaf { 564 struct btrfs_header header; 565 struct btrfs_item items[]; 566 } __attribute__ ((__packed__)); 567 568 /* 569 * all non-leaf blocks are nodes, they hold only keys and pointers to 570 * other blocks 571 */ 572 struct btrfs_key_ptr { 573 struct btrfs_disk_key key; 574 __le64 blockptr; 575 __le64 generation; 576 } __attribute__ ((__packed__)); 577 578 struct btrfs_node { 579 struct btrfs_header header; 580 struct btrfs_key_ptr ptrs[]; 581 } __attribute__ ((__packed__)); 582 583 /* 584 * btrfs_paths remember the path taken from the root down to the leaf. 585 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point 586 * to any other levels that are present. 587 * 588 * The slots array records the index of the item or block pointer 589 * used while walking the tree. 
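 */

/*
 * A sketch of how the leaf layout described above is addressed (illustrative
 * helper only; in the running kernel items live inside an extent_buffer and
 * are read through accessor helpers): btrfs_item::offset is relative to the
 * start of the leaf data area, which begins right after struct btrfs_header,
 * so the absolute byte offset of an item's data inside the block is the
 * header size plus the item offset.
 */
static inline u32 btrfs_example_item_data_start(const struct btrfs_item *item)
{
	return sizeof(struct btrfs_header) + le32_to_cpu(item->offset);
}

/*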
590 */ 591 struct btrfs_path { 592 struct extent_buffer *nodes[BTRFS_MAX_LEVEL]; 593 int slots[BTRFS_MAX_LEVEL]; 594 /* if there is real range locking, this locks field will change */ 595 int locks[BTRFS_MAX_LEVEL]; 596 int reada; 597 /* keep some upper locks as we walk down */ 598 int lowest_level; 599 600 /* 601 * set by btrfs_split_item, tells search_slot to keep all locks 602 * and to force calls to keep space in the nodes 603 */ 604 unsigned int search_for_split:1; 605 unsigned int keep_locks:1; 606 unsigned int skip_locking:1; 607 unsigned int leave_spinning:1; 608 unsigned int search_commit_root:1; 609 unsigned int need_commit_sem:1; 610 unsigned int skip_release_on_error:1; 611 }; 612 613 /* 614 * items in the extent btree are used to record the objectid of the 615 * owner of the block and the number of references 616 */ 617 618 struct btrfs_extent_item { 619 __le64 refs; 620 __le64 generation; 621 __le64 flags; 622 } __attribute__ ((__packed__)); 623 624 struct btrfs_extent_item_v0 { 625 __le32 refs; 626 } __attribute__ ((__packed__)); 627 628 #define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \ 629 sizeof(struct btrfs_item)) 630 631 #define BTRFS_EXTENT_FLAG_DATA (1ULL << 0) 632 #define BTRFS_EXTENT_FLAG_TREE_BLOCK (1ULL << 1) 633 634 /* following flags only apply to tree blocks */ 635 636 /* use full backrefs for extent pointers in the block */ 637 #define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) 638 639 /* 640 * this flag is only used internally by scrub and may be changed at any time 641 * it is only declared here to avoid collisions 642 */ 643 #define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48) 644 645 struct btrfs_tree_block_info { 646 struct btrfs_disk_key key; 647 u8 level; 648 } __attribute__ ((__packed__)); 649 650 struct btrfs_extent_data_ref { 651 __le64 root; 652 __le64 objectid; 653 __le64 offset; 654 __le32 count; 655 } __attribute__ ((__packed__)); 656 657 struct btrfs_shared_data_ref { 658 __le32 count; 659 } __attribute__ ((__packed__)); 660 661 struct btrfs_extent_inline_ref { 662 u8 type; 663 __le64 offset; 664 } __attribute__ ((__packed__)); 665 666 /* old style backrefs item */ 667 struct btrfs_extent_ref_v0 { 668 __le64 root; 669 __le64 generation; 670 __le64 objectid; 671 __le32 count; 672 } __attribute__ ((__packed__)); 673 674 675 /* dev extents record free space on individual devices. The owner 676 * field points back to the chunk allocation mapping tree that allocated 677 * the extent. 
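 */

/*
 * A sketch of the usual way a btrfs_path (defined above) is consumed after a
 * successful tree search (the helper is invented for illustration): level 0
 * holds the leaf and slots[0] the item index within it.
 */
static inline struct extent_buffer *btrfs_example_path_leaf(struct btrfs_path *path,
							    int *slot)
{
	*slot = path->slots[0];
	return path->nodes[0];
}

/*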
The chunk tree uuid field is a way to double check the owner 678 */ 679 struct btrfs_dev_extent { 680 __le64 chunk_tree; 681 __le64 chunk_objectid; 682 __le64 chunk_offset; 683 __le64 length; 684 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 685 } __attribute__ ((__packed__)); 686 687 struct btrfs_inode_ref { 688 __le64 index; 689 __le16 name_len; 690 /* name goes here */ 691 } __attribute__ ((__packed__)); 692 693 struct btrfs_inode_extref { 694 __le64 parent_objectid; 695 __le64 index; 696 __le16 name_len; 697 __u8 name[0]; 698 /* name goes here */ 699 } __attribute__ ((__packed__)); 700 701 struct btrfs_timespec { 702 __le64 sec; 703 __le32 nsec; 704 } __attribute__ ((__packed__)); 705 706 enum btrfs_compression_type { 707 BTRFS_COMPRESS_NONE = 0, 708 BTRFS_COMPRESS_ZLIB = 1, 709 BTRFS_COMPRESS_LZO = 2, 710 BTRFS_COMPRESS_TYPES = 2, 711 BTRFS_COMPRESS_LAST = 3, 712 }; 713 714 struct btrfs_inode_item { 715 /* nfs style generation number */ 716 __le64 generation; 717 /* transid that last touched this inode */ 718 __le64 transid; 719 __le64 size; 720 __le64 nbytes; 721 __le64 block_group; 722 __le32 nlink; 723 __le32 uid; 724 __le32 gid; 725 __le32 mode; 726 __le64 rdev; 727 __le64 flags; 728 729 /* modification sequence number for NFS */ 730 __le64 sequence; 731 732 /* 733 * a little future expansion, for more than this we can 734 * just grow the inode item and version it 735 */ 736 __le64 reserved[4]; 737 struct btrfs_timespec atime; 738 struct btrfs_timespec ctime; 739 struct btrfs_timespec mtime; 740 struct btrfs_timespec otime; 741 } __attribute__ ((__packed__)); 742 743 struct btrfs_dir_log_item { 744 __le64 end; 745 } __attribute__ ((__packed__)); 746 747 struct btrfs_dir_item { 748 struct btrfs_disk_key location; 749 __le64 transid; 750 __le16 data_len; 751 __le16 name_len; 752 u8 type; 753 } __attribute__ ((__packed__)); 754 755 #define BTRFS_ROOT_SUBVOL_RDONLY (1ULL << 0) 756 757 /* 758 * Internal in-memory flag that a subvolume has been marked for deletion but 759 * still visible as a directory 760 */ 761 #define BTRFS_ROOT_SUBVOL_DEAD (1ULL << 48) 762 763 struct btrfs_root_item { 764 struct btrfs_inode_item inode; 765 __le64 generation; 766 __le64 root_dirid; 767 __le64 bytenr; 768 __le64 byte_limit; 769 __le64 bytes_used; 770 __le64 last_snapshot; 771 __le64 flags; 772 __le32 refs; 773 struct btrfs_disk_key drop_progress; 774 u8 drop_level; 775 u8 level; 776 777 /* 778 * The following fields appear after subvol_uuids+subvol_times 779 * were introduced. 780 */ 781 782 /* 783 * This generation number is used to test if the new fields are valid 784 * and up to date while reading the root item. Everytime the root item 785 * is written out, the "generation" field is copied into this field. If 786 * anyone ever mounted the fs with an older kernel, we will have 787 * mismatching generation values here and thus must invalidate the 788 * new fields. See btrfs_update_root and btrfs_find_last_root for 789 * details. 790 * the offset of generation_v2 is also used as the start for the memset 791 * when invalidating the fields. 792 */ 793 __le64 generation_v2; 794 u8 uuid[BTRFS_UUID_SIZE]; 795 u8 parent_uuid[BTRFS_UUID_SIZE]; 796 u8 received_uuid[BTRFS_UUID_SIZE]; 797 __le64 ctransid; /* updated when an inode changes */ 798 __le64 otransid; /* trans when created */ 799 __le64 stransid; /* trans when sent. non-zero for received subvol */ 800 __le64 rtransid; /* trans when received. 
non-zero for received subvol */ 801 struct btrfs_timespec ctime; 802 struct btrfs_timespec otime; 803 struct btrfs_timespec stime; 804 struct btrfs_timespec rtime; 805 __le64 reserved[8]; /* for future */ 806 } __attribute__ ((__packed__)); 807 808 /* 809 * this is used for both forward and backward root refs 810 */ 811 struct btrfs_root_ref { 812 __le64 dirid; 813 __le64 sequence; 814 __le16 name_len; 815 } __attribute__ ((__packed__)); 816 817 struct btrfs_disk_balance_args { 818 /* 819 * profiles to operate on, single is denoted by 820 * BTRFS_AVAIL_ALLOC_BIT_SINGLE 821 */ 822 __le64 profiles; 823 824 /* usage filter */ 825 __le64 usage; 826 827 /* devid filter */ 828 __le64 devid; 829 830 /* devid subset filter [pstart..pend) */ 831 __le64 pstart; 832 __le64 pend; 833 834 /* btrfs virtual address space subset filter [vstart..vend) */ 835 __le64 vstart; 836 __le64 vend; 837 838 /* 839 * profile to convert to, single is denoted by 840 * BTRFS_AVAIL_ALLOC_BIT_SINGLE 841 */ 842 __le64 target; 843 844 /* BTRFS_BALANCE_ARGS_* */ 845 __le64 flags; 846 847 /* BTRFS_BALANCE_ARGS_LIMIT value */ 848 __le64 limit; 849 850 __le64 unused[7]; 851 } __attribute__ ((__packed__)); 852 853 /* 854 * store balance parameters to disk so that balance can be properly 855 * resumed after crash or unmount 856 */ 857 struct btrfs_balance_item { 858 /* BTRFS_BALANCE_* */ 859 __le64 flags; 860 861 struct btrfs_disk_balance_args data; 862 struct btrfs_disk_balance_args meta; 863 struct btrfs_disk_balance_args sys; 864 865 __le64 unused[4]; 866 } __attribute__ ((__packed__)); 867 868 #define BTRFS_FILE_EXTENT_INLINE 0 869 #define BTRFS_FILE_EXTENT_REG 1 870 #define BTRFS_FILE_EXTENT_PREALLOC 2 871 872 struct btrfs_file_extent_item { 873 /* 874 * transaction id that created this extent 875 */ 876 __le64 generation; 877 /* 878 * max number of bytes to hold this extent in ram 879 * when we split a compressed extent we can't know how big 880 * each of the resulting pieces will be. So, this is 881 * an upper limit on the size of the extent in ram instead of 882 * an exact limit. 883 */ 884 __le64 ram_bytes; 885 886 /* 887 * 32 bits for the various ways we might encode the data, 888 * including compression and encryption. If any of these 889 * are set to something a given disk format doesn't understand 890 * it is treated like an incompat flag for reading and writing, 891 * but not for stat. 892 */ 893 u8 compression; 894 u8 encryption; 895 __le16 other_encoding; /* spare for later use */ 896 897 /* are we inline data or a real extent? */ 898 u8 type; 899 900 /* 901 * disk space consumed by the extent, checksum blocks are included 902 * in these numbers 903 * 904 * At this offset in the structure, the inline extent data start. 905 */ 906 __le64 disk_bytenr; 907 __le64 disk_num_bytes; 908 /* 909 * the logical offset in file blocks (no csums) 910 * this extent record is for. This allows a file extent to point 911 * into the middle of an existing extent on disk, sharing it 912 * between two snapshots (useful if some bytes in the middle of the 913 * extent have changed 914 */ 915 __le64 offset; 916 /* 917 * the logical number of file blocks (no csums included). This 918 * always reflects the size uncompressed and without encoding. 
919 */ 920 __le64 num_bytes; 921 922 } __attribute__ ((__packed__)); 923 924 struct btrfs_csum_item { 925 u8 csum; 926 } __attribute__ ((__packed__)); 927 928 struct btrfs_dev_stats_item { 929 /* 930 * grow this item struct at the end for future enhancements and keep 931 * the existing values unchanged 932 */ 933 __le64 values[BTRFS_DEV_STAT_VALUES_MAX]; 934 } __attribute__ ((__packed__)); 935 936 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0 937 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1 938 #define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0 939 #define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1 940 #define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 941 #define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 942 #define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 943 944 struct btrfs_dev_replace { 945 u64 replace_state; /* see #define above */ 946 u64 time_started; /* seconds since 1-Jan-1970 */ 947 u64 time_stopped; /* seconds since 1-Jan-1970 */ 948 atomic64_t num_write_errors; 949 atomic64_t num_uncorrectable_read_errors; 950 951 u64 cursor_left; 952 u64 committed_cursor_left; 953 u64 cursor_left_last_write_of_item; 954 u64 cursor_right; 955 956 u64 cont_reading_from_srcdev_mode; /* see #define above */ 957 958 int is_valid; 959 int item_needs_writeback; 960 struct btrfs_device *srcdev; 961 struct btrfs_device *tgtdev; 962 963 pid_t lock_owner; 964 atomic_t nesting_level; 965 struct mutex lock_finishing_cancel_unmount; 966 struct mutex lock_management_lock; 967 struct mutex lock; 968 969 struct btrfs_scrub_progress scrub_progress; 970 }; 971 972 struct btrfs_dev_replace_item { 973 /* 974 * grow this item struct at the end for future enhancements and keep 975 * the existing values unchanged 976 */ 977 __le64 src_devid; 978 __le64 cursor_left; 979 __le64 cursor_right; 980 __le64 cont_reading_from_srcdev_mode; 981 982 __le64 replace_state; 983 __le64 time_started; 984 __le64 time_stopped; 985 __le64 num_write_errors; 986 __le64 num_uncorrectable_read_errors; 987 } __attribute__ ((__packed__)); 988 989 /* different types of block groups (and chunks) */ 990 #define BTRFS_BLOCK_GROUP_DATA (1ULL << 0) 991 #define BTRFS_BLOCK_GROUP_SYSTEM (1ULL << 1) 992 #define BTRFS_BLOCK_GROUP_METADATA (1ULL << 2) 993 #define BTRFS_BLOCK_GROUP_RAID0 (1ULL << 3) 994 #define BTRFS_BLOCK_GROUP_RAID1 (1ULL << 4) 995 #define BTRFS_BLOCK_GROUP_DUP (1ULL << 5) 996 #define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6) 997 #define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7) 998 #define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8) 999 #define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \ 1000 BTRFS_SPACE_INFO_GLOBAL_RSV) 1001 1002 enum btrfs_raid_types { 1003 BTRFS_RAID_RAID10, 1004 BTRFS_RAID_RAID1, 1005 BTRFS_RAID_DUP, 1006 BTRFS_RAID_RAID0, 1007 BTRFS_RAID_SINGLE, 1008 BTRFS_RAID_RAID5, 1009 BTRFS_RAID_RAID6, 1010 BTRFS_NR_RAID_TYPES 1011 }; 1012 1013 #define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \ 1014 BTRFS_BLOCK_GROUP_SYSTEM | \ 1015 BTRFS_BLOCK_GROUP_METADATA) 1016 1017 #define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ 1018 BTRFS_BLOCK_GROUP_RAID1 | \ 1019 BTRFS_BLOCK_GROUP_RAID5 | \ 1020 BTRFS_BLOCK_GROUP_RAID6 | \ 1021 BTRFS_BLOCK_GROUP_DUP | \ 1022 BTRFS_BLOCK_GROUP_RAID10) 1023 /* 1024 * We need a bit for restriper to be able to tell when chunks of type 1025 * SINGLE are available. This "extended" profile format is used in 1026 * fs_info->avail_*_alloc_bits (in-memory) and balance item fields 1027 * (on-disk). 
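 */

/*
 * Illustrative split of a block group "flags" value using the masks above
 * (helper names invented here): the type bits say what the group stores
 * (data, metadata, system) and the profile bits say how it is replicated.
 * A profile of 0 means "single", which is exactly the case the
 * BTRFS_AVAIL_ALLOC_BIT_SINGLE bit below makes explicit.
 */
static inline u64 btrfs_example_bg_type(u64 flags)
{
	return flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
}

static inline u64 btrfs_example_bg_profile(u64 flags)
{
	return flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
}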
/*
 * The corresponding on-disk bit in chunk.type is reserved
 * to avoid remappings between two formats in future.
 */
#define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48)

/*
 * A fake block group type that is used to communicate global block reserve
 * size to userspace via the SPACE_INFO ioctl.
 */
#define BTRFS_SPACE_INFO_GLOBAL_RSV (1ULL << 49)

#define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \
				     BTRFS_AVAIL_ALLOC_BIT_SINGLE)

static inline u64 chunk_to_extended(u64 flags)
{
	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
		flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;

	return flags;
}
static inline u64 extended_to_chunk(u64 flags)
{
	return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
}

struct btrfs_block_group_item {
	__le64 used;
	__le64 chunk_objectid;
	__le64 flags;
} __attribute__ ((__packed__));

/*
 * is subvolume quota turned on?
 */
#define BTRFS_QGROUP_STATUS_FLAG_ON (1ULL << 0)
/*
 * RESCAN is set during the initialization phase
 */
#define BTRFS_QGROUP_STATUS_FLAG_RESCAN (1ULL << 1)
/*
 * Some qgroup entries are known to be out of date,
 * either because the configuration has changed in a way that
 * makes a rescan necessary, or because the fs has been mounted
 * with a non-qgroup-aware version.
 * Turning quota off and on again makes it inconsistent, too.
 */
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)

#define BTRFS_QGROUP_STATUS_VERSION 1

struct btrfs_qgroup_status_item {
	__le64 version;
	/*
	 * the generation is updated during every commit. As older
	 * versions of btrfs are not aware of qgroups, it will be
	 * possible to detect inconsistencies by checking the
	 * generation on mount time
	 */
	__le64 generation;

	/* flag definitions see above */
	__le64 flags;

	/*
	 * only used during scanning to record the progress
	 * of the scan.
It contains a logical address 1094 */ 1095 __le64 rescan; 1096 } __attribute__ ((__packed__)); 1097 1098 struct btrfs_qgroup_info_item { 1099 __le64 generation; 1100 __le64 rfer; 1101 __le64 rfer_cmpr; 1102 __le64 excl; 1103 __le64 excl_cmpr; 1104 } __attribute__ ((__packed__)); 1105 1106 /* flags definition for qgroup limits */ 1107 #define BTRFS_QGROUP_LIMIT_MAX_RFER (1ULL << 0) 1108 #define BTRFS_QGROUP_LIMIT_MAX_EXCL (1ULL << 1) 1109 #define BTRFS_QGROUP_LIMIT_RSV_RFER (1ULL << 2) 1110 #define BTRFS_QGROUP_LIMIT_RSV_EXCL (1ULL << 3) 1111 #define BTRFS_QGROUP_LIMIT_RFER_CMPR (1ULL << 4) 1112 #define BTRFS_QGROUP_LIMIT_EXCL_CMPR (1ULL << 5) 1113 1114 struct btrfs_qgroup_limit_item { 1115 /* 1116 * only updated when any of the other values change 1117 */ 1118 __le64 flags; 1119 __le64 max_rfer; 1120 __le64 max_excl; 1121 __le64 rsv_rfer; 1122 __le64 rsv_excl; 1123 } __attribute__ ((__packed__)); 1124 1125 /* For raid type sysfs entries */ 1126 struct raid_kobject { 1127 int raid_type; 1128 struct kobject kobj; 1129 }; 1130 1131 struct btrfs_space_info { 1132 spinlock_t lock; 1133 1134 u64 total_bytes; /* total bytes in the space, 1135 this doesn't take mirrors into account */ 1136 u64 bytes_used; /* total bytes used, 1137 this doesn't take mirrors into account */ 1138 u64 bytes_pinned; /* total bytes pinned, will be freed when the 1139 transaction finishes */ 1140 u64 bytes_reserved; /* total bytes the allocator has reserved for 1141 current allocations */ 1142 u64 bytes_may_use; /* number of bytes that may be used for 1143 delalloc/allocations */ 1144 u64 bytes_readonly; /* total bytes that are read only */ 1145 1146 unsigned int full:1; /* indicates that we cannot allocate any more 1147 chunks for this space */ 1148 unsigned int chunk_alloc:1; /* set if we are allocating a chunk */ 1149 1150 unsigned int flush:1; /* set if we are trying to make space */ 1151 1152 unsigned int force_alloc; /* set if we need to force a chunk 1153 alloc for this space */ 1154 1155 u64 disk_used; /* total bytes used on disk */ 1156 u64 disk_total; /* total bytes on disk, takes mirrors into 1157 account */ 1158 1159 u64 flags; 1160 1161 /* 1162 * bytes_pinned is kept in line with what is actually pinned, as in 1163 * we've called update_block_group and dropped the bytes_used counter 1164 * and increased the bytes_pinned counter. However this means that 1165 * bytes_pinned does not reflect the bytes that will be pinned once the 1166 * delayed refs are flushed, so this counter is inc'ed everytime we call 1167 * btrfs_free_extent so it is a realtime count of what will be freed 1168 * once the transaction is committed. It will be zero'ed everytime the 1169 * transaction commits. 1170 */ 1171 struct percpu_counter total_bytes_pinned; 1172 1173 struct list_head list; 1174 /* Protected by the spinlock 'lock'. 
*/ 1175 struct list_head ro_bgs; 1176 1177 struct rw_semaphore groups_sem; 1178 /* for block groups in our same type */ 1179 struct list_head block_groups[BTRFS_NR_RAID_TYPES]; 1180 wait_queue_head_t wait; 1181 1182 struct kobject kobj; 1183 struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES]; 1184 }; 1185 1186 #define BTRFS_BLOCK_RSV_GLOBAL 1 1187 #define BTRFS_BLOCK_RSV_DELALLOC 2 1188 #define BTRFS_BLOCK_RSV_TRANS 3 1189 #define BTRFS_BLOCK_RSV_CHUNK 4 1190 #define BTRFS_BLOCK_RSV_DELOPS 5 1191 #define BTRFS_BLOCK_RSV_EMPTY 6 1192 #define BTRFS_BLOCK_RSV_TEMP 7 1193 1194 struct btrfs_block_rsv { 1195 u64 size; 1196 u64 reserved; 1197 struct btrfs_space_info *space_info; 1198 spinlock_t lock; 1199 unsigned short full; 1200 unsigned short type; 1201 unsigned short failfast; 1202 }; 1203 1204 /* 1205 * free clusters are used to claim free space in relatively large chunks, 1206 * allowing us to do less seeky writes. They are used for all metadata 1207 * allocations and data allocations in ssd mode. 1208 */ 1209 struct btrfs_free_cluster { 1210 spinlock_t lock; 1211 spinlock_t refill_lock; 1212 struct rb_root root; 1213 1214 /* largest extent in this cluster */ 1215 u64 max_size; 1216 1217 /* first extent starting offset */ 1218 u64 window_start; 1219 1220 struct btrfs_block_group_cache *block_group; 1221 /* 1222 * when a cluster is allocated from a block group, we put the 1223 * cluster onto a list in the block group so that it can 1224 * be freed before the block group is freed. 1225 */ 1226 struct list_head block_group_list; 1227 }; 1228 1229 enum btrfs_caching_type { 1230 BTRFS_CACHE_NO = 0, 1231 BTRFS_CACHE_STARTED = 1, 1232 BTRFS_CACHE_FAST = 2, 1233 BTRFS_CACHE_FINISHED = 3, 1234 BTRFS_CACHE_ERROR = 4, 1235 }; 1236 1237 enum btrfs_disk_cache_state { 1238 BTRFS_DC_WRITTEN = 0, 1239 BTRFS_DC_ERROR = 1, 1240 BTRFS_DC_CLEAR = 2, 1241 BTRFS_DC_SETUP = 3, 1242 BTRFS_DC_NEED_WRITE = 4, 1243 }; 1244 1245 struct btrfs_caching_control { 1246 struct list_head list; 1247 struct mutex mutex; 1248 wait_queue_head_t wait; 1249 struct btrfs_work work; 1250 struct btrfs_block_group_cache *block_group; 1251 u64 progress; 1252 atomic_t count; 1253 }; 1254 1255 struct btrfs_block_group_cache { 1256 struct btrfs_key key; 1257 struct btrfs_block_group_item item; 1258 struct btrfs_fs_info *fs_info; 1259 struct inode *inode; 1260 spinlock_t lock; 1261 u64 pinned; 1262 u64 reserved; 1263 u64 delalloc_bytes; 1264 u64 bytes_super; 1265 u64 flags; 1266 u64 sectorsize; 1267 u64 cache_generation; 1268 1269 /* 1270 * It is just used for the delayed data space allocation because 1271 * only the data space allocation and the relative metadata update 1272 * can be done cross the transaction. 
1273 */ 1274 struct rw_semaphore data_rwsem; 1275 1276 /* for raid56, this is a full stripe, without parity */ 1277 unsigned long full_stripe_len; 1278 1279 unsigned int ro:1; 1280 unsigned int dirty:1; 1281 unsigned int iref:1; 1282 unsigned int has_caching_ctl:1; 1283 unsigned int removed:1; 1284 1285 int disk_cache_state; 1286 1287 /* cache tracking stuff */ 1288 int cached; 1289 struct btrfs_caching_control *caching_ctl; 1290 u64 last_byte_to_unpin; 1291 1292 struct btrfs_space_info *space_info; 1293 1294 /* free space cache stuff */ 1295 struct btrfs_free_space_ctl *free_space_ctl; 1296 1297 /* block group cache stuff */ 1298 struct rb_node cache_node; 1299 1300 /* for block groups in the same raid type */ 1301 struct list_head list; 1302 1303 /* usage count */ 1304 atomic_t count; 1305 1306 /* List of struct btrfs_free_clusters for this block group. 1307 * Today it will only have one thing on it, but that may change 1308 */ 1309 struct list_head cluster_list; 1310 1311 /* For delayed block group creation or deletion of empty block groups */ 1312 struct list_head bg_list; 1313 1314 /* For read-only block groups */ 1315 struct list_head ro_list; 1316 1317 atomic_t trimming; 1318 }; 1319 1320 /* delayed seq elem */ 1321 struct seq_list { 1322 struct list_head list; 1323 u64 seq; 1324 }; 1325 1326 enum btrfs_orphan_cleanup_state { 1327 ORPHAN_CLEANUP_STARTED = 1, 1328 ORPHAN_CLEANUP_DONE = 2, 1329 }; 1330 1331 /* used by the raid56 code to lock stripes for read/modify/write */ 1332 struct btrfs_stripe_hash { 1333 struct list_head hash_list; 1334 wait_queue_head_t wait; 1335 spinlock_t lock; 1336 }; 1337 1338 /* used by the raid56 code to lock stripes for read/modify/write */ 1339 struct btrfs_stripe_hash_table { 1340 struct list_head stripe_cache; 1341 spinlock_t cache_lock; 1342 int cache_size; 1343 struct btrfs_stripe_hash table[]; 1344 }; 1345 1346 #define BTRFS_STRIPE_HASH_TABLE_BITS 11 1347 1348 void btrfs_init_async_reclaim_work(struct work_struct *work); 1349 1350 /* fs_info */ 1351 struct reloc_control; 1352 struct btrfs_device; 1353 struct btrfs_fs_devices; 1354 struct btrfs_balance_control; 1355 struct btrfs_delayed_root; 1356 struct btrfs_fs_info { 1357 u8 fsid[BTRFS_FSID_SIZE]; 1358 u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; 1359 struct btrfs_root *extent_root; 1360 struct btrfs_root *tree_root; 1361 struct btrfs_root *chunk_root; 1362 struct btrfs_root *dev_root; 1363 struct btrfs_root *fs_root; 1364 struct btrfs_root *csum_root; 1365 struct btrfs_root *quota_root; 1366 struct btrfs_root *uuid_root; 1367 1368 /* the log root tree is a directory of all the other log roots */ 1369 struct btrfs_root *log_root_tree; 1370 1371 spinlock_t fs_roots_radix_lock; 1372 struct radix_tree_root fs_roots_radix; 1373 1374 /* block group cache stuff */ 1375 spinlock_t block_group_cache_lock; 1376 u64 first_logical_byte; 1377 struct rb_root block_group_cache_tree; 1378 1379 /* keep track of unallocated space */ 1380 spinlock_t free_chunk_lock; 1381 u64 free_chunk_space; 1382 1383 struct extent_io_tree freed_extents[2]; 1384 struct extent_io_tree *pinned_extents; 1385 1386 /* logical->physical extent mapping */ 1387 struct btrfs_mapping_tree mapping_tree; 1388 1389 /* 1390 * block reservation for extent, checksum, root tree and 1391 * delayed dir index item 1392 */ 1393 struct btrfs_block_rsv global_block_rsv; 1394 /* block reservation for delay allocation */ 1395 struct btrfs_block_rsv delalloc_block_rsv; 1396 /* block reservation for metadata operations */ 1397 struct btrfs_block_rsv 
trans_block_rsv; 1398 /* block reservation for chunk tree */ 1399 struct btrfs_block_rsv chunk_block_rsv; 1400 /* block reservation for delayed operations */ 1401 struct btrfs_block_rsv delayed_block_rsv; 1402 1403 struct btrfs_block_rsv empty_block_rsv; 1404 1405 u64 generation; 1406 u64 last_trans_committed; 1407 u64 avg_delayed_ref_runtime; 1408 1409 /* 1410 * this is updated to the current trans every time a full commit 1411 * is required instead of the faster short fsync log commits 1412 */ 1413 u64 last_trans_log_full_commit; 1414 unsigned long mount_opt; 1415 /* 1416 * Track requests for actions that need to be done during transaction 1417 * commit (like for some mount options). 1418 */ 1419 unsigned long pending_changes; 1420 unsigned long compress_type:4; 1421 int commit_interval; 1422 /* 1423 * It is a suggestive number, the read side is safe even it gets a 1424 * wrong number because we will write out the data into a regular 1425 * extent. The write side(mount/remount) is under ->s_umount lock, 1426 * so it is also safe. 1427 */ 1428 u64 max_inline; 1429 /* 1430 * Protected by ->chunk_mutex and sb->s_umount. 1431 * 1432 * The reason that we use two lock to protect it is because only 1433 * remount and mount operations can change it and these two operations 1434 * are under sb->s_umount, but the read side (chunk allocation) can not 1435 * acquire sb->s_umount or the deadlock would happen. So we use two 1436 * locks to protect it. On the write side, we must acquire two locks, 1437 * and on the read side, we just need acquire one of them. 1438 */ 1439 u64 alloc_start; 1440 struct btrfs_transaction *running_transaction; 1441 wait_queue_head_t transaction_throttle; 1442 wait_queue_head_t transaction_wait; 1443 wait_queue_head_t transaction_blocked_wait; 1444 wait_queue_head_t async_submit_wait; 1445 1446 /* 1447 * Used to protect the incompat_flags, compat_flags, compat_ro_flags 1448 * when they are updated. 1449 * 1450 * Because we do not clear the flags for ever, so we needn't use 1451 * the lock on the read side. 1452 * 1453 * We also needn't use the lock when we mount the fs, because 1454 * there is no other task which will update the flag. 1455 */ 1456 spinlock_t super_lock; 1457 struct btrfs_super_block *super_copy; 1458 struct btrfs_super_block *super_for_commit; 1459 struct block_device *__bdev; 1460 struct super_block *sb; 1461 struct inode *btree_inode; 1462 struct backing_dev_info bdi; 1463 struct mutex tree_log_mutex; 1464 struct mutex transaction_kthread_mutex; 1465 struct mutex cleaner_mutex; 1466 struct mutex chunk_mutex; 1467 struct mutex volume_mutex; 1468 1469 /* this is used during read/modify/write to make sure 1470 * no two ios are trying to mod the same stripe at the same 1471 * time 1472 */ 1473 struct btrfs_stripe_hash_table *stripe_hash_table; 1474 1475 /* 1476 * this protects the ordered operations list only while we are 1477 * processing all of the entries on it. This way we make 1478 * sure the commit code doesn't find the list temporarily empty 1479 * because another function happens to be doing non-waiting preflush 1480 * before jumping into the main commit. 1481 */ 1482 struct mutex ordered_operations_mutex; 1483 1484 /* 1485 * Same as ordered_operations_mutex except this is for ordered extents 1486 * and not the operations. 
1487 */ 1488 struct mutex ordered_extent_flush_mutex; 1489 1490 struct rw_semaphore commit_root_sem; 1491 1492 struct rw_semaphore cleanup_work_sem; 1493 1494 struct rw_semaphore subvol_sem; 1495 struct srcu_struct subvol_srcu; 1496 1497 spinlock_t trans_lock; 1498 /* 1499 * the reloc mutex goes with the trans lock, it is taken 1500 * during commit to protect us from the relocation code 1501 */ 1502 struct mutex reloc_mutex; 1503 1504 struct list_head trans_list; 1505 struct list_head dead_roots; 1506 struct list_head caching_block_groups; 1507 1508 spinlock_t delayed_iput_lock; 1509 struct list_head delayed_iputs; 1510 1511 /* this protects tree_mod_seq_list */ 1512 spinlock_t tree_mod_seq_lock; 1513 atomic64_t tree_mod_seq; 1514 struct list_head tree_mod_seq_list; 1515 1516 /* this protects tree_mod_log */ 1517 rwlock_t tree_mod_log_lock; 1518 struct rb_root tree_mod_log; 1519 1520 atomic_t nr_async_submits; 1521 atomic_t async_submit_draining; 1522 atomic_t nr_async_bios; 1523 atomic_t async_delalloc_pages; 1524 atomic_t open_ioctl_trans; 1525 1526 /* 1527 * this is used to protect the following list -- ordered_roots. 1528 */ 1529 spinlock_t ordered_root_lock; 1530 1531 /* 1532 * all fs/file tree roots in which there are data=ordered extents 1533 * pending writeback are added into this list. 1534 * 1535 * these can span multiple transactions and basically include 1536 * every dirty data page that isn't from nodatacow 1537 */ 1538 struct list_head ordered_roots; 1539 1540 struct mutex delalloc_root_mutex; 1541 spinlock_t delalloc_root_lock; 1542 /* all fs/file tree roots that have delalloc inodes. */ 1543 struct list_head delalloc_roots; 1544 1545 /* 1546 * there is a pool of worker threads for checksumming during writes 1547 * and a pool for checksumming after reads. This is because readers 1548 * can run with FS locks held, and the writers may be waiting for 1549 * those locks. We don't want ordering in the pending list to cause 1550 * deadlocks, and so the two are serviced separately. 1551 * 1552 * A third pool does submit_bio to avoid deadlocking with the other 1553 * two 1554 */ 1555 struct btrfs_workqueue *workers; 1556 struct btrfs_workqueue *delalloc_workers; 1557 struct btrfs_workqueue *flush_workers; 1558 struct btrfs_workqueue *endio_workers; 1559 struct btrfs_workqueue *endio_meta_workers; 1560 struct btrfs_workqueue *endio_raid56_workers; 1561 struct btrfs_workqueue *endio_repair_workers; 1562 struct btrfs_workqueue *rmw_workers; 1563 struct btrfs_workqueue *endio_meta_write_workers; 1564 struct btrfs_workqueue *endio_write_workers; 1565 struct btrfs_workqueue *endio_freespace_worker; 1566 struct btrfs_workqueue *submit_workers; 1567 struct btrfs_workqueue *caching_workers; 1568 struct btrfs_workqueue *readahead_workers; 1569 1570 /* 1571 * fixup workers take dirty pages that didn't properly go through 1572 * the cow mechanism and make them safe to write. 
It happens 1573 * for the sys_munmap function call path 1574 */ 1575 struct btrfs_workqueue *fixup_workers; 1576 struct btrfs_workqueue *delayed_workers; 1577 1578 /* the extent workers do delayed refs on the extent allocation tree */ 1579 struct btrfs_workqueue *extent_workers; 1580 struct task_struct *transaction_kthread; 1581 struct task_struct *cleaner_kthread; 1582 int thread_pool_size; 1583 1584 struct kobject super_kobj; 1585 struct kobject *space_info_kobj; 1586 struct kobject *device_dir_kobj; 1587 struct completion kobj_unregister; 1588 int do_barriers; 1589 int closing; 1590 int log_root_recovering; 1591 int open; 1592 1593 u64 total_pinned; 1594 1595 /* used to keep from writing metadata until there is a nice batch */ 1596 struct percpu_counter dirty_metadata_bytes; 1597 struct percpu_counter delalloc_bytes; 1598 s32 dirty_metadata_batch; 1599 s32 delalloc_batch; 1600 1601 struct list_head dirty_cowonly_roots; 1602 1603 struct btrfs_fs_devices *fs_devices; 1604 1605 /* 1606 * the space_info list is almost entirely read only. It only changes 1607 * when we add a new raid type to the FS, and that happens 1608 * very rarely. RCU is used to protect it. 1609 */ 1610 struct list_head space_info; 1611 1612 struct btrfs_space_info *data_sinfo; 1613 1614 struct reloc_control *reloc_ctl; 1615 1616 /* data_alloc_cluster is only used in ssd mode */ 1617 struct btrfs_free_cluster data_alloc_cluster; 1618 1619 /* all metadata allocations go through this cluster */ 1620 struct btrfs_free_cluster meta_alloc_cluster; 1621 1622 /* auto defrag inodes go here */ 1623 spinlock_t defrag_inodes_lock; 1624 struct rb_root defrag_inodes; 1625 atomic_t defrag_running; 1626 1627 /* Used to protect avail_{data, metadata, system}_alloc_bits */ 1628 seqlock_t profiles_lock; 1629 /* 1630 * these three are in extended format (availability of single 1631 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other 1632 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits) 1633 */ 1634 u64 avail_data_alloc_bits; 1635 u64 avail_metadata_alloc_bits; 1636 u64 avail_system_alloc_bits; 1637 1638 /* restriper state */ 1639 spinlock_t balance_lock; 1640 struct mutex balance_mutex; 1641 atomic_t balance_running; 1642 atomic_t balance_pause_req; 1643 atomic_t balance_cancel_req; 1644 struct btrfs_balance_control *balance_ctl; 1645 wait_queue_head_t balance_wait_q; 1646 1647 unsigned data_chunk_allocations; 1648 unsigned metadata_ratio; 1649 1650 void *bdev_holder; 1651 1652 /* private scrub information */ 1653 struct mutex scrub_lock; 1654 atomic_t scrubs_running; 1655 atomic_t scrub_pause_req; 1656 atomic_t scrubs_paused; 1657 atomic_t scrub_cancel_req; 1658 wait_queue_head_t scrub_pause_wait; 1659 int scrub_workers_refcnt; 1660 struct btrfs_workqueue *scrub_workers; 1661 struct btrfs_workqueue *scrub_wr_completion_workers; 1662 struct btrfs_workqueue *scrub_nocow_workers; 1663 1664 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY 1665 u32 check_integrity_print_mask; 1666 #endif 1667 /* 1668 * quota information 1669 */ 1670 unsigned int quota_enabled:1; 1671 1672 /* 1673 * quota_enabled only changes state after a commit. This holds the 1674 * next state. 1675 */ 1676 unsigned int pending_quota_state:1; 1677 1678 /* is qgroup tracking in a consistent state? */ 1679 u64 qgroup_flags; 1680 1681 /* holds configuration and tracking. 
Protected by qgroup_lock */ 1682 struct rb_root qgroup_tree; 1683 struct rb_root qgroup_op_tree; 1684 spinlock_t qgroup_lock; 1685 spinlock_t qgroup_op_lock; 1686 atomic_t qgroup_op_seq; 1687 1688 /* 1689 * used to avoid frequently calling ulist_alloc()/ulist_free() 1690 * when doing qgroup accounting, it must be protected by qgroup_lock. 1691 */ 1692 struct ulist *qgroup_ulist; 1693 1694 /* protect user change for quota operations */ 1695 struct mutex qgroup_ioctl_lock; 1696 1697 /* list of dirty qgroups to be written at next commit */ 1698 struct list_head dirty_qgroups; 1699 1700 /* used by btrfs_qgroup_record_ref for an efficient tree traversal */ 1701 u64 qgroup_seq; 1702 1703 /* qgroup rescan items */ 1704 struct mutex qgroup_rescan_lock; /* protects the progress item */ 1705 struct btrfs_key qgroup_rescan_progress; 1706 struct btrfs_workqueue *qgroup_rescan_workers; 1707 struct completion qgroup_rescan_completion; 1708 struct btrfs_work qgroup_rescan_work; 1709 1710 /* filesystem state */ 1711 unsigned long fs_state; 1712 1713 struct btrfs_delayed_root *delayed_root; 1714 1715 /* readahead tree */ 1716 spinlock_t reada_lock; 1717 struct radix_tree_root reada_tree; 1718 1719 /* Extent buffer radix tree */ 1720 spinlock_t buffer_lock; 1721 struct radix_tree_root buffer_radix; 1722 1723 /* next backup root to be overwritten */ 1724 int backup_root_index; 1725 1726 int num_tolerated_disk_barrier_failures; 1727 1728 /* device replace state */ 1729 struct btrfs_dev_replace dev_replace; 1730 1731 atomic_t mutually_exclusive_operation_running; 1732 1733 struct percpu_counter bio_counter; 1734 wait_queue_head_t replace_wait; 1735 1736 struct semaphore uuid_tree_rescan_sem; 1737 unsigned int update_uuid_tree_gen:1; 1738 1739 /* Used to reclaim the metadata space in the background. */ 1740 struct work_struct async_reclaim_work; 1741 1742 spinlock_t unused_bgs_lock; 1743 struct list_head unused_bgs; 1744 1745 /* For btrfs to record security options */ 1746 struct security_mnt_opts security_opts; 1747 1748 /* 1749 * Chunks that can't be freed yet (under a trim/discard operation) 1750 * and will be latter freed. Protected by fs_info->chunk_mutex. 1751 */ 1752 struct list_head pinned_chunks; 1753 }; 1754 1755 struct btrfs_subvolume_writers { 1756 struct percpu_counter counter; 1757 wait_queue_head_t wait; 1758 }; 1759 1760 /* 1761 * The state of btrfs root 1762 */ 1763 /* 1764 * btrfs_record_root_in_trans is a multi-step process, 1765 * and it can race with the balancing code. But the 1766 * race is very small, and only the first time the root 1767 * is added to each transaction. So IN_TRANS_SETUP 1768 * is used to tell us when more checks are required 1769 */ 1770 #define BTRFS_ROOT_IN_TRANS_SETUP 0 1771 #define BTRFS_ROOT_REF_COWS 1 1772 #define BTRFS_ROOT_TRACK_DIRTY 2 1773 #define BTRFS_ROOT_IN_RADIX 3 1774 #define BTRFS_ROOT_DUMMY_ROOT 4 1775 #define BTRFS_ROOT_ORPHAN_ITEM_INSERTED 5 1776 #define BTRFS_ROOT_DEFRAG_RUNNING 6 1777 #define BTRFS_ROOT_FORCE_COW 7 1778 #define BTRFS_ROOT_MULTI_LOG_TASKS 8 1779 1780 /* 1781 * in ram representation of the tree. extent_root is used for all allocations 1782 * and for the extent tree extent_root root. 
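 */

/*
 * The BTRFS_ROOT_* values above are bit numbers, used with the generic
 * set_bit()/test_bit() bitops on the "state" word kept in struct btrfs_root
 * (defined just below). A sketch of the usual pattern, with an invented
 * helper name; the state word is passed directly since the struct is only
 * declared further down.
 */
static inline int btrfs_example_root_ref_cows(unsigned long *state)
{
	return test_bit(BTRFS_ROOT_REF_COWS, state);
}

/*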
1783 */ 1784 struct btrfs_root { 1785 struct extent_buffer *node; 1786 1787 struct extent_buffer *commit_root; 1788 struct btrfs_root *log_root; 1789 struct btrfs_root *reloc_root; 1790 1791 unsigned long state; 1792 struct btrfs_root_item root_item; 1793 struct btrfs_key root_key; 1794 struct btrfs_fs_info *fs_info; 1795 struct extent_io_tree dirty_log_pages; 1796 1797 struct kobject root_kobj; 1798 struct completion kobj_unregister; 1799 struct mutex objectid_mutex; 1800 1801 spinlock_t accounting_lock; 1802 struct btrfs_block_rsv *block_rsv; 1803 1804 /* free ino cache stuff */ 1805 struct btrfs_free_space_ctl *free_ino_ctl; 1806 enum btrfs_caching_type ino_cache_state; 1807 spinlock_t ino_cache_lock; 1808 wait_queue_head_t ino_cache_wait; 1809 struct btrfs_free_space_ctl *free_ino_pinned; 1810 u64 ino_cache_progress; 1811 struct inode *ino_cache_inode; 1812 1813 struct mutex log_mutex; 1814 wait_queue_head_t log_writer_wait; 1815 wait_queue_head_t log_commit_wait[2]; 1816 struct list_head log_ctxs[2]; 1817 atomic_t log_writers; 1818 atomic_t log_commit[2]; 1819 atomic_t log_batch; 1820 int log_transid; 1821 /* No matter the commit succeeds or not*/ 1822 int log_transid_committed; 1823 /* Just be updated when the commit succeeds. */ 1824 int last_log_commit; 1825 pid_t log_start_pid; 1826 1827 u64 objectid; 1828 u64 last_trans; 1829 1830 /* data allocations are done in sectorsize units */ 1831 u32 sectorsize; 1832 1833 /* node allocations are done in nodesize units */ 1834 u32 nodesize; 1835 1836 u32 stripesize; 1837 1838 u32 type; 1839 1840 u64 highest_objectid; 1841 1842 /* only used with CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */ 1843 u64 alloc_bytenr; 1844 1845 u64 defrag_trans_start; 1846 struct btrfs_key defrag_progress; 1847 struct btrfs_key defrag_max; 1848 char *name; 1849 1850 /* the dirty list is only used by non-reference counted roots */ 1851 struct list_head dirty_list; 1852 1853 struct list_head root_list; 1854 1855 spinlock_t log_extents_lock[2]; 1856 struct list_head logged_list[2]; 1857 1858 spinlock_t orphan_lock; 1859 atomic_t orphan_inodes; 1860 struct btrfs_block_rsv *orphan_block_rsv; 1861 int orphan_cleanup_state; 1862 1863 spinlock_t inode_lock; 1864 /* red-black tree that keeps track of in-memory inodes */ 1865 struct rb_root inode_tree; 1866 1867 /* 1868 * radix tree that keeps track of delayed nodes of every inode, 1869 * protected by inode_lock 1870 */ 1871 struct radix_tree_root delayed_nodes_tree; 1872 /* 1873 * right now this just gets used so that a root has its own devid 1874 * for stat. It may be used for more later 1875 */ 1876 dev_t anon_dev; 1877 1878 spinlock_t root_item_lock; 1879 atomic_t refs; 1880 1881 struct mutex delalloc_mutex; 1882 spinlock_t delalloc_lock; 1883 /* 1884 * all of the inodes that have delalloc bytes. It is possible for 1885 * this list to be empty even when there is still dirty data=ordered 1886 * extents waiting to finish IO. 
1887 */ 1888 struct list_head delalloc_inodes; 1889 struct list_head delalloc_root; 1890 u64 nr_delalloc_inodes; 1891 1892 struct mutex ordered_extent_mutex; 1893 /* 1894 * this is used by the balancing code to wait for all the pending 1895 * ordered extents 1896 */ 1897 spinlock_t ordered_extent_lock; 1898 1899 /* 1900 * all of the data=ordered extents pending writeback 1901 * these can span multiple transactions and basically include 1902 * every dirty data page that isn't from nodatacow 1903 */ 1904 struct list_head ordered_extents; 1905 struct list_head ordered_root; 1906 u64 nr_ordered_extents; 1907 1908 /* 1909 * Number of currently running SEND ioctls to prevent 1910 * manipulation with the read-only status via SUBVOL_SETFLAGS 1911 */ 1912 int send_in_progress; 1913 struct btrfs_subvolume_writers *subv_writers; 1914 atomic_t will_be_snapshoted; 1915 }; 1916 1917 struct btrfs_ioctl_defrag_range_args { 1918 /* start of the defrag operation */ 1919 __u64 start; 1920 1921 /* number of bytes to defrag, use (u64)-1 to say all */ 1922 __u64 len; 1923 1924 /* 1925 * flags for the operation, which can include turning 1926 * on compression for this one defrag 1927 */ 1928 __u64 flags; 1929 1930 /* 1931 * any extent bigger than this will be considered 1932 * already defragged. Use 0 to take the kernel default 1933 * Use 1 to say every single extent must be rewritten 1934 */ 1935 __u32 extent_thresh; 1936 1937 /* 1938 * which compression method to use if turning on compression 1939 * for this defrag operation. If unspecified, zlib will 1940 * be used 1941 */ 1942 __u32 compress_type; 1943 1944 /* spare for later */ 1945 __u32 unused[4]; 1946 }; 1947 1948 1949 /* 1950 * inode items have the data typically returned from stat and store other 1951 * info about object characteristics. There is one for every file and dir in 1952 * the FS 1953 */ 1954 #define BTRFS_INODE_ITEM_KEY 1 1955 #define BTRFS_INODE_REF_KEY 12 1956 #define BTRFS_INODE_EXTREF_KEY 13 1957 #define BTRFS_XATTR_ITEM_KEY 24 1958 #define BTRFS_ORPHAN_ITEM_KEY 48 1959 /* reserve 2-15 close to the inode for later flexibility */ 1960 1961 /* 1962 * dir items are the name -> inode pointers in a directory. There is one 1963 * for every name in a directory. 1964 */ 1965 #define BTRFS_DIR_LOG_ITEM_KEY 60 1966 #define BTRFS_DIR_LOG_INDEX_KEY 72 1967 #define BTRFS_DIR_ITEM_KEY 84 1968 #define BTRFS_DIR_INDEX_KEY 96 1969 /* 1970 * extent data is for file data 1971 */ 1972 #define BTRFS_EXTENT_DATA_KEY 108 1973 1974 /* 1975 * extent csums are stored in a separate tree and hold csums for 1976 * an entire extent on disk. 1977 */ 1978 #define BTRFS_EXTENT_CSUM_KEY 128 1979 1980 /* 1981 * root items point to tree roots. They are typically in the root 1982 * tree used by the super block to find all the other trees 1983 */ 1984 #define BTRFS_ROOT_ITEM_KEY 132 1985 1986 /* 1987 * root backrefs tie subvols and snapshots to the directory entries that 1988 * reference them 1989 */ 1990 #define BTRFS_ROOT_BACKREF_KEY 144 1991 1992 /* 1993 * root refs make a fast index for listing all of the snapshots and 1994 * subvolumes referenced by a given root. They point directly to the 1995 * directory item in the root that references the subvol 1996 */ 1997 #define BTRFS_ROOT_REF_KEY 156 1998 1999 /* 2000 * extent items are in the extent map tree. 
These record which blocks 2001 * are used, and how many references there are to each block 2002 */ 2003 #define BTRFS_EXTENT_ITEM_KEY 168 2004 2005 /* 2006 * The same as the BTRFS_EXTENT_ITEM_KEY, except it's metadata we already know 2007 * the length, so we save the level in key->offset instead of the length. 2008 */ 2009 #define BTRFS_METADATA_ITEM_KEY 169 2010 2011 #define BTRFS_TREE_BLOCK_REF_KEY 176 2012 2013 #define BTRFS_EXTENT_DATA_REF_KEY 178 2014 2015 #define BTRFS_EXTENT_REF_V0_KEY 180 2016 2017 #define BTRFS_SHARED_BLOCK_REF_KEY 182 2018 2019 #define BTRFS_SHARED_DATA_REF_KEY 184 2020 2021 /* 2022 * block groups give us hints into the extent allocation trees. Which 2023 * blocks are free etc etc 2024 */ 2025 #define BTRFS_BLOCK_GROUP_ITEM_KEY 192 2026 2027 #define BTRFS_DEV_EXTENT_KEY 204 2028 #define BTRFS_DEV_ITEM_KEY 216 2029 #define BTRFS_CHUNK_ITEM_KEY 228 2030 2031 /* 2032 * Records the overall state of the qgroups. 2033 * There's only one instance of this key present, 2034 * (0, BTRFS_QGROUP_STATUS_KEY, 0) 2035 */ 2036 #define BTRFS_QGROUP_STATUS_KEY 240 2037 /* 2038 * Records the currently used space of the qgroup. 2039 * One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid). 2040 */ 2041 #define BTRFS_QGROUP_INFO_KEY 242 2042 /* 2043 * Contains the user configured limits for the qgroup. 2044 * One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid). 2045 */ 2046 #define BTRFS_QGROUP_LIMIT_KEY 244 2047 /* 2048 * Records the child-parent relationship of qgroups. For 2049 * each relation, 2 keys are present: 2050 * (childid, BTRFS_QGROUP_RELATION_KEY, parentid) 2051 * (parentid, BTRFS_QGROUP_RELATION_KEY, childid) 2052 */ 2053 #define BTRFS_QGROUP_RELATION_KEY 246 2054 2055 #define BTRFS_BALANCE_ITEM_KEY 248 2056 2057 /* 2058 * Persistantly stores the io stats in the device tree. 2059 * One key for all stats, (0, BTRFS_DEV_STATS_KEY, devid). 2060 */ 2061 #define BTRFS_DEV_STATS_KEY 249 2062 2063 /* 2064 * Persistantly stores the device replace state in the device tree. 2065 * The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0). 2066 */ 2067 #define BTRFS_DEV_REPLACE_KEY 250 2068 2069 /* 2070 * Stores items that allow to quickly map UUIDs to something else. 2071 * These items are part of the filesystem UUID tree. 2072 * The key is built like this: 2073 * (UUID_upper_64_bits, BTRFS_UUID_KEY*, UUID_lower_64_bits). 2074 */ 2075 #if BTRFS_UUID_SIZE != 16 2076 #error "UUID items require BTRFS_UUID_SIZE == 16!" 2077 #endif 2078 #define BTRFS_UUID_KEY_SUBVOL 251 /* for UUIDs assigned to subvols */ 2079 #define BTRFS_UUID_KEY_RECEIVED_SUBVOL 252 /* for UUIDs assigned to 2080 * received subvols */ 2081 2082 /* 2083 * string items are for debugging. They just store a short string of 2084 * data in the FS 2085 */ 2086 #define BTRFS_STRING_ITEM_KEY 253 2087 2088 /* 2089 * Flags for mount options. 
2090 * 2091 * Note: don't forget to add new options to btrfs_show_options() 2092 */ 2093 #define BTRFS_MOUNT_NODATASUM (1 << 0) 2094 #define BTRFS_MOUNT_NODATACOW (1 << 1) 2095 #define BTRFS_MOUNT_NOBARRIER (1 << 2) 2096 #define BTRFS_MOUNT_SSD (1 << 3) 2097 #define BTRFS_MOUNT_DEGRADED (1 << 4) 2098 #define BTRFS_MOUNT_COMPRESS (1 << 5) 2099 #define BTRFS_MOUNT_NOTREELOG (1 << 6) 2100 #define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7) 2101 #define BTRFS_MOUNT_SSD_SPREAD (1 << 8) 2102 #define BTRFS_MOUNT_NOSSD (1 << 9) 2103 #define BTRFS_MOUNT_DISCARD (1 << 10) 2104 #define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) 2105 #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) 2106 #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) 2107 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 2108 #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) 2109 #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) 2110 #define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) 2111 #define BTRFS_MOUNT_RECOVERY (1 << 18) 2112 #define BTRFS_MOUNT_SKIP_BALANCE (1 << 19) 2113 #define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20) 2114 #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21) 2115 #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22) 2116 #define BTRFS_MOUNT_RESCAN_UUID_TREE (1 << 23) 2117 2118 #define BTRFS_DEFAULT_COMMIT_INTERVAL (30) 2119 #define BTRFS_DEFAULT_MAX_INLINE (8192) 2120 2121 #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 2122 #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 2123 #define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt) 2124 #define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \ 2125 BTRFS_MOUNT_##opt) 2126 2127 #define btrfs_set_and_info(root, opt, fmt, args...) \ 2128 { \ 2129 if (!btrfs_test_opt(root, opt)) \ 2130 btrfs_info(root->fs_info, fmt, ##args); \ 2131 btrfs_set_opt(root->fs_info->mount_opt, opt); \ 2132 } 2133 2134 #define btrfs_clear_and_info(root, opt, fmt, args...) \ 2135 { \ 2136 if (btrfs_test_opt(root, opt)) \ 2137 btrfs_info(root->fs_info, fmt, ##args); \ 2138 btrfs_clear_opt(root->fs_info->mount_opt, opt); \ 2139 } 2140 2141 /* 2142 * Requests for changes that need to be done during transaction commit. 2143 * 2144 * Internal mount options that are used for special handling of the real 2145 * mount options (eg. cannot be set during remount and have to be set during 2146 * transaction commit) 2147 */ 2148 2149 #define BTRFS_PENDING_SET_INODE_MAP_CACHE (0) 2150 #define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE (1) 2151 #define BTRFS_PENDING_COMMIT (2) 2152 2153 #define btrfs_test_pending(info, opt) \ 2154 test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 2155 #define btrfs_set_pending(info, opt) \ 2156 set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 2157 #define btrfs_clear_pending(info, opt) \ 2158 clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) 2159 2160 /* 2161 * Helpers for setting pending mount option changes. 2162 * 2163 * Expects corresponding macros 2164 * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name 2165 */ 2166 #define btrfs_set_pending_and_info(info, opt, fmt, args...) \ 2167 do { \ 2168 if (!btrfs_raw_test_opt((info)->mount_opt, opt)) { \ 2169 btrfs_info((info), fmt, ##args); \ 2170 btrfs_set_pending((info), SET_##opt); \ 2171 btrfs_clear_pending((info), CLEAR_##opt); \ 2172 } \ 2173 } while(0) 2174 2175 #define btrfs_clear_pending_and_info(info, opt, fmt, args...) 
\ 2176 do { \ 2177 if (btrfs_raw_test_opt((info)->mount_opt, opt)) { \ 2178 btrfs_info((info), fmt, ##args); \ 2179 btrfs_set_pending((info), CLEAR_##opt); \ 2180 btrfs_clear_pending((info), SET_##opt); \ 2181 } \ 2182 } while(0) 2183 2184 /* 2185 * Inode flags 2186 */ 2187 #define BTRFS_INODE_NODATASUM (1 << 0) 2188 #define BTRFS_INODE_NODATACOW (1 << 1) 2189 #define BTRFS_INODE_READONLY (1 << 2) 2190 #define BTRFS_INODE_NOCOMPRESS (1 << 3) 2191 #define BTRFS_INODE_PREALLOC (1 << 4) 2192 #define BTRFS_INODE_SYNC (1 << 5) 2193 #define BTRFS_INODE_IMMUTABLE (1 << 6) 2194 #define BTRFS_INODE_APPEND (1 << 7) 2195 #define BTRFS_INODE_NODUMP (1 << 8) 2196 #define BTRFS_INODE_NOATIME (1 << 9) 2197 #define BTRFS_INODE_DIRSYNC (1 << 10) 2198 #define BTRFS_INODE_COMPRESS (1 << 11) 2199 2200 #define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) 2201 2202 struct btrfs_map_token { 2203 struct extent_buffer *eb; 2204 char *kaddr; 2205 unsigned long offset; 2206 }; 2207 2208 static inline void btrfs_init_map_token (struct btrfs_map_token *token) 2209 { 2210 token->kaddr = NULL; 2211 } 2212 2213 /* some macros to generate set/get funcs for the struct fields. This 2214 * assumes there is a lefoo_to_cpu for every type, so lets make a simple 2215 * one for u8: 2216 */ 2217 #define le8_to_cpu(v) (v) 2218 #define cpu_to_le8(v) (v) 2219 #define __le8 u8 2220 2221 #define read_eb_member(eb, ptr, type, member, result) ( \ 2222 read_extent_buffer(eb, (char *)(result), \ 2223 ((unsigned long)(ptr)) + \ 2224 offsetof(type, member), \ 2225 sizeof(((type *)0)->member))) 2226 2227 #define write_eb_member(eb, ptr, type, member, result) ( \ 2228 write_extent_buffer(eb, (char *)(result), \ 2229 ((unsigned long)(ptr)) + \ 2230 offsetof(type, member), \ 2231 sizeof(((type *)0)->member))) 2232 2233 #define DECLARE_BTRFS_SETGET_BITS(bits) \ 2234 u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ 2235 unsigned long off, \ 2236 struct btrfs_map_token *token); \ 2237 void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \ 2238 unsigned long off, u##bits val, \ 2239 struct btrfs_map_token *token); \ 2240 static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \ 2241 unsigned long off) \ 2242 { \ 2243 return btrfs_get_token_##bits(eb, ptr, off, NULL); \ 2244 } \ 2245 static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ 2246 unsigned long off, u##bits val) \ 2247 { \ 2248 btrfs_set_token_##bits(eb, ptr, off, val, NULL); \ 2249 } 2250 2251 DECLARE_BTRFS_SETGET_BITS(8) 2252 DECLARE_BTRFS_SETGET_BITS(16) 2253 DECLARE_BTRFS_SETGET_BITS(32) 2254 DECLARE_BTRFS_SETGET_BITS(64) 2255 2256 #define BTRFS_SETGET_FUNCS(name, type, member, bits) \ 2257 static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \ 2258 { \ 2259 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2260 return btrfs_get_##bits(eb, s, offsetof(type, member)); \ 2261 } \ 2262 static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \ 2263 u##bits val) \ 2264 { \ 2265 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2266 btrfs_set_##bits(eb, s, offsetof(type, member), val); \ 2267 } \ 2268 static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \ 2269 struct btrfs_map_token *token) \ 2270 { \ 2271 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2272 return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \ 2273 } \ 2274 static inline void btrfs_set_token_##name(struct extent_buffer *eb, \ 2275 type *s, 
u##bits val, \ 2276 struct btrfs_map_token *token) \ 2277 { \ 2278 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ 2279 btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \ 2280 } 2281 2282 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ 2283 static inline u##bits btrfs_##name(struct extent_buffer *eb) \ 2284 { \ 2285 type *p = page_address(eb->pages[0]); \ 2286 u##bits res = le##bits##_to_cpu(p->member); \ 2287 return res; \ 2288 } \ 2289 static inline void btrfs_set_##name(struct extent_buffer *eb, \ 2290 u##bits val) \ 2291 { \ 2292 type *p = page_address(eb->pages[0]); \ 2293 p->member = cpu_to_le##bits(val); \ 2294 } 2295 2296 #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ 2297 static inline u##bits btrfs_##name(type *s) \ 2298 { \ 2299 return le##bits##_to_cpu(s->member); \ 2300 } \ 2301 static inline void btrfs_set_##name(type *s, u##bits val) \ 2302 { \ 2303 s->member = cpu_to_le##bits(val); \ 2304 } 2305 2306 BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64); 2307 BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64); 2308 BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64); 2309 BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32); 2310 BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32); 2311 BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item, 2312 start_offset, 64); 2313 BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32); 2314 BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64); 2315 BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32); 2316 BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8); 2317 BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8); 2318 BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64); 2319 2320 BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64); 2321 BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item, 2322 total_bytes, 64); 2323 BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item, 2324 bytes_used, 64); 2325 BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item, 2326 io_align, 32); 2327 BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item, 2328 io_width, 32); 2329 BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item, 2330 sector_size, 32); 2331 BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64); 2332 BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item, 2333 dev_group, 32); 2334 BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item, 2335 seek_speed, 8); 2336 BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item, 2337 bandwidth, 8); 2338 BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item, 2339 generation, 64); 2340 2341 static inline unsigned long btrfs_device_uuid(struct btrfs_dev_item *d) 2342 { 2343 return (unsigned long)d + offsetof(struct btrfs_dev_item, uuid); 2344 } 2345 2346 static inline unsigned long btrfs_device_fsid(struct btrfs_dev_item *d) 2347 { 2348 return (unsigned long)d + offsetof(struct btrfs_dev_item, fsid); 2349 } 2350 2351 BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64); 2352 BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64); 2353 BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, 
stripe_len, 64); 2354 BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32); 2355 BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32); 2356 BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32); 2357 BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64); 2358 BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16); 2359 BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16); 2360 BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64); 2361 BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64); 2362 2363 static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s) 2364 { 2365 return (char *)s + offsetof(struct btrfs_stripe, dev_uuid); 2366 } 2367 2368 BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64); 2369 BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64); 2370 BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk, 2371 stripe_len, 64); 2372 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk, 2373 io_align, 32); 2374 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk, 2375 io_width, 32); 2376 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk, 2377 sector_size, 32); 2378 BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64); 2379 BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk, 2380 num_stripes, 16); 2381 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk, 2382 sub_stripes, 16); 2383 BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64); 2384 BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64); 2385 2386 static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, 2387 int nr) 2388 { 2389 unsigned long offset = (unsigned long)c; 2390 offset += offsetof(struct btrfs_chunk, stripe); 2391 offset += nr * sizeof(struct btrfs_stripe); 2392 return (struct btrfs_stripe *)offset; 2393 } 2394 2395 static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr) 2396 { 2397 return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); 2398 } 2399 2400 static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, 2401 struct btrfs_chunk *c, int nr) 2402 { 2403 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); 2404 } 2405 2406 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, 2407 struct btrfs_chunk *c, int nr) 2408 { 2409 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); 2410 } 2411 2412 /* struct btrfs_block_group_item */ 2413 BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, 2414 used, 64); 2415 BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, 2416 used, 64); 2417 BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid, 2418 struct btrfs_block_group_item, chunk_objectid, 64); 2419 2420 BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, 2421 struct btrfs_block_group_item, chunk_objectid, 64); 2422 BTRFS_SETGET_FUNCS(disk_block_group_flags, 2423 struct btrfs_block_group_item, flags, 64); 2424 BTRFS_SETGET_STACK_FUNCS(block_group_flags, 2425 struct btrfs_block_group_item, flags, 64); 2426 2427 /* struct btrfs_inode_ref */ 2428 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); 2429 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64); 2430 2431 /* struct btrfs_inode_extref */ 2432 BTRFS_SETGET_FUNCS(inode_extref_parent, 
struct btrfs_inode_extref, 2433 parent_objectid, 64); 2434 BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref, 2435 name_len, 16); 2436 BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64); 2437 2438 /* struct btrfs_inode_item */ 2439 BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64); 2440 BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64); 2441 BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64); 2442 BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64); 2443 BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64); 2444 BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64); 2445 BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); 2446 BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); 2447 BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); 2448 BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); 2449 BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); 2450 BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64); 2451 BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, 2452 generation, 64); 2453 BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, 2454 sequence, 64); 2455 BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, 2456 transid, 64); 2457 BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); 2458 BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, 2459 nbytes, 64); 2460 BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, 2461 block_group, 64); 2462 BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); 2463 BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); 2464 BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); 2465 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); 2466 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); 2467 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); 2468 2469 static inline struct btrfs_timespec * 2470 btrfs_inode_atime(struct btrfs_inode_item *inode_item) 2471 { 2472 unsigned long ptr = (unsigned long)inode_item; 2473 ptr += offsetof(struct btrfs_inode_item, atime); 2474 return (struct btrfs_timespec *)ptr; 2475 } 2476 2477 static inline struct btrfs_timespec * 2478 btrfs_inode_mtime(struct btrfs_inode_item *inode_item) 2479 { 2480 unsigned long ptr = (unsigned long)inode_item; 2481 ptr += offsetof(struct btrfs_inode_item, mtime); 2482 return (struct btrfs_timespec *)ptr; 2483 } 2484 2485 static inline struct btrfs_timespec * 2486 btrfs_inode_ctime(struct btrfs_inode_item *inode_item) 2487 { 2488 unsigned long ptr = (unsigned long)inode_item; 2489 ptr += offsetof(struct btrfs_inode_item, ctime); 2490 return (struct btrfs_timespec *)ptr; 2491 } 2492 2493 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); 2494 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); 2495 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); 2496 BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); 2497 2498 /* struct btrfs_dev_extent */ 2499 BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, 2500 chunk_tree, 64); 2501 
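/*
 * Illustrative sketch, not part of the original header: how the accessors
 * generated by BTRFS_SETGET_FUNCS() above are meant to be used on an inode
 * item that lives inside an extent buffer. The token variants are intended
 * to reuse one cached mapping (struct btrfs_map_token) across several
 * accesses to the same item, while the plain accessors do the lookup on
 * every call. Helpers such as btrfs_inode_mtime() return a pointer into the
 * buffer, which is then dereferenced with the timespec accessors. The
 * function name below is hypothetical and only for illustration.
 */
#if 0
static inline void btrfs_example_touch_inode_item(struct extent_buffer *leaf,
						  struct btrfs_inode_item *ii,
						  u64 new_size, u64 now_sec)
{
	struct btrfs_map_token token;
	struct btrfs_timespec *mtime = btrfs_inode_mtime(ii);

	/* token variants: several fields of the same item, one mapping */
	btrfs_init_map_token(&token);
	btrfs_set_token_inode_size(leaf, ii, new_size, &token);
	btrfs_set_token_inode_sequence(leaf, ii,
			btrfs_token_inode_sequence(leaf, ii, &token) + 1,
			&token);

	/* plain accessors: each call stands on its own */
	btrfs_set_timespec_sec(leaf, mtime, now_sec);
	btrfs_set_timespec_nsec(leaf, mtime, 0);
}
#endif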
BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent, 2502 chunk_objectid, 64); 2503 BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, 2504 chunk_offset, 64); 2505 BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); 2506 2507 static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) 2508 { 2509 unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); 2510 return (unsigned long)dev + ptr; 2511 } 2512 2513 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); 2514 BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, 2515 generation, 64); 2516 BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); 2517 2518 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); 2519 2520 2521 BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); 2522 2523 static inline void btrfs_tree_block_key(struct extent_buffer *eb, 2524 struct btrfs_tree_block_info *item, 2525 struct btrfs_disk_key *key) 2526 { 2527 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 2528 } 2529 2530 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, 2531 struct btrfs_tree_block_info *item, 2532 struct btrfs_disk_key *key) 2533 { 2534 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); 2535 } 2536 2537 BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref, 2538 root, 64); 2539 BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref, 2540 objectid, 64); 2541 BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref, 2542 offset, 64); 2543 BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, 2544 count, 32); 2545 2546 BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, 2547 count, 32); 2548 2549 BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref, 2550 type, 8); 2551 BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref, 2552 offset, 64); 2553 2554 static inline u32 btrfs_extent_inline_ref_size(int type) 2555 { 2556 if (type == BTRFS_TREE_BLOCK_REF_KEY || 2557 type == BTRFS_SHARED_BLOCK_REF_KEY) 2558 return sizeof(struct btrfs_extent_inline_ref); 2559 if (type == BTRFS_SHARED_DATA_REF_KEY) 2560 return sizeof(struct btrfs_shared_data_ref) + 2561 sizeof(struct btrfs_extent_inline_ref); 2562 if (type == BTRFS_EXTENT_DATA_REF_KEY) 2563 return sizeof(struct btrfs_extent_data_ref) + 2564 offsetof(struct btrfs_extent_inline_ref, offset); 2565 BUG(); 2566 return 0; 2567 } 2568 2569 BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); 2570 BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, 2571 generation, 64); 2572 BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); 2573 BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); 2574 2575 /* struct btrfs_node */ 2576 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); 2577 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64); 2578 BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, 2579 blockptr, 64); 2580 BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, 2581 generation, 64); 2582 2583 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) 2584 { 2585 unsigned long ptr; 2586 ptr = offsetof(struct btrfs_node, ptrs) + 2587 sizeof(struct btrfs_key_ptr) * nr; 2588 return 
btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); 2589 } 2590 2591 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, 2592 int nr, u64 val) 2593 { 2594 unsigned long ptr; 2595 ptr = offsetof(struct btrfs_node, ptrs) + 2596 sizeof(struct btrfs_key_ptr) * nr; 2597 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); 2598 } 2599 2600 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) 2601 { 2602 unsigned long ptr; 2603 ptr = offsetof(struct btrfs_node, ptrs) + 2604 sizeof(struct btrfs_key_ptr) * nr; 2605 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); 2606 } 2607 2608 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, 2609 int nr, u64 val) 2610 { 2611 unsigned long ptr; 2612 ptr = offsetof(struct btrfs_node, ptrs) + 2613 sizeof(struct btrfs_key_ptr) * nr; 2614 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); 2615 } 2616 2617 static inline unsigned long btrfs_node_key_ptr_offset(int nr) 2618 { 2619 return offsetof(struct btrfs_node, ptrs) + 2620 sizeof(struct btrfs_key_ptr) * nr; 2621 } 2622 2623 void btrfs_node_key(struct extent_buffer *eb, 2624 struct btrfs_disk_key *disk_key, int nr); 2625 2626 static inline void btrfs_set_node_key(struct extent_buffer *eb, 2627 struct btrfs_disk_key *disk_key, int nr) 2628 { 2629 unsigned long ptr; 2630 ptr = btrfs_node_key_ptr_offset(nr); 2631 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, 2632 struct btrfs_key_ptr, key, disk_key); 2633 } 2634 2635 /* struct btrfs_item */ 2636 BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32); 2637 BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32); 2638 BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32); 2639 BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32); 2640 2641 static inline unsigned long btrfs_item_nr_offset(int nr) 2642 { 2643 return offsetof(struct btrfs_leaf, items) + 2644 sizeof(struct btrfs_item) * nr; 2645 } 2646 2647 static inline struct btrfs_item *btrfs_item_nr(int nr) 2648 { 2649 return (struct btrfs_item *)btrfs_item_nr_offset(nr); 2650 } 2651 2652 static inline u32 btrfs_item_end(struct extent_buffer *eb, 2653 struct btrfs_item *item) 2654 { 2655 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); 2656 } 2657 2658 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) 2659 { 2660 return btrfs_item_end(eb, btrfs_item_nr(nr)); 2661 } 2662 2663 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) 2664 { 2665 return btrfs_item_offset(eb, btrfs_item_nr(nr)); 2666 } 2667 2668 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) 2669 { 2670 return btrfs_item_size(eb, btrfs_item_nr(nr)); 2671 } 2672 2673 static inline void btrfs_item_key(struct extent_buffer *eb, 2674 struct btrfs_disk_key *disk_key, int nr) 2675 { 2676 struct btrfs_item *item = btrfs_item_nr(nr); 2677 read_eb_member(eb, item, struct btrfs_item, key, disk_key); 2678 } 2679 2680 static inline void btrfs_set_item_key(struct extent_buffer *eb, 2681 struct btrfs_disk_key *disk_key, int nr) 2682 { 2683 struct btrfs_item *item = btrfs_item_nr(nr); 2684 write_eb_member(eb, item, struct btrfs_item, key, disk_key); 2685 } 2686 2687 BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64); 2688 2689 /* 2690 * struct btrfs_root_ref 2691 */ 2692 BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64); 2693 BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64); 2694 
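/*
 * Illustrative sketch, not part of the original header: pulling the slot
 * helpers above together. In an internal node a slot holds a key plus the
 * block pointer and generation of a child block; in a leaf a slot's item
 * header records where its payload starts (relative to the leaf data area)
 * and how long it is. The function names below are hypothetical.
 */
#if 0
static inline u64 btrfs_example_child_blockptr(struct extent_buffer *node,
					       int slot, u64 *gen)
{
	*gen = btrfs_node_ptr_generation(node, slot);
	return btrfs_node_blockptr(node, slot);
}

static inline u32 btrfs_example_item_bytes(struct extent_buffer *leaf,
					   int slot,
					   struct btrfs_disk_key *key)
{
	/* the key is copied out in on-disk (little-endian) form */
	btrfs_item_key(leaf, key, slot);
	/* payload occupies [offset, offset + size) in the leaf data area */
	return btrfs_item_size_nr(leaf, slot);
}
#endif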
BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16); 2695 2696 /* struct btrfs_dir_item */ 2697 BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16); 2698 BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8); 2699 BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16); 2700 BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64); 2701 BTRFS_SETGET_STACK_FUNCS(stack_dir_type, struct btrfs_dir_item, type, 8); 2702 BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item, 2703 data_len, 16); 2704 BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, 2705 name_len, 16); 2706 BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, 2707 transid, 64); 2708 2709 static inline void btrfs_dir_item_key(struct extent_buffer *eb, 2710 struct btrfs_dir_item *item, 2711 struct btrfs_disk_key *key) 2712 { 2713 read_eb_member(eb, item, struct btrfs_dir_item, location, key); 2714 } 2715 2716 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, 2717 struct btrfs_dir_item *item, 2718 struct btrfs_disk_key *key) 2719 { 2720 write_eb_member(eb, item, struct btrfs_dir_item, location, key); 2721 } 2722 2723 BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header, 2724 num_entries, 64); 2725 BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, 2726 num_bitmaps, 64); 2727 BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, 2728 generation, 64); 2729 2730 static inline void btrfs_free_space_key(struct extent_buffer *eb, 2731 struct btrfs_free_space_header *h, 2732 struct btrfs_disk_key *key) 2733 { 2734 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); 2735 } 2736 2737 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, 2738 struct btrfs_free_space_header *h, 2739 struct btrfs_disk_key *key) 2740 { 2741 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); 2742 } 2743 2744 /* struct btrfs_disk_key */ 2745 BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, 2746 objectid, 64); 2747 BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); 2748 BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); 2749 2750 static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, 2751 struct btrfs_disk_key *disk) 2752 { 2753 cpu->offset = le64_to_cpu(disk->offset); 2754 cpu->type = disk->type; 2755 cpu->objectid = le64_to_cpu(disk->objectid); 2756 } 2757 2758 static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk, 2759 struct btrfs_key *cpu) 2760 { 2761 disk->offset = cpu_to_le64(cpu->offset); 2762 disk->type = cpu->type; 2763 disk->objectid = cpu_to_le64(cpu->objectid); 2764 } 2765 2766 static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, 2767 struct btrfs_key *key, int nr) 2768 { 2769 struct btrfs_disk_key disk_key; 2770 btrfs_node_key(eb, &disk_key, nr); 2771 btrfs_disk_key_to_cpu(key, &disk_key); 2772 } 2773 2774 static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, 2775 struct btrfs_key *key, int nr) 2776 { 2777 struct btrfs_disk_key disk_key; 2778 btrfs_item_key(eb, &disk_key, nr); 2779 btrfs_disk_key_to_cpu(key, &disk_key); 2780 } 2781 2782 static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, 2783 struct btrfs_dir_item *item, 2784 struct btrfs_key *key) 2785 { 2786 struct btrfs_disk_key disk_key; 2787 btrfs_dir_item_key(eb, item, &disk_key); 2788 btrfs_disk_key_to_cpu(key, 
&disk_key); 2789 } 2790 2791 2792 static inline u8 btrfs_key_type(struct btrfs_key *key) 2793 { 2794 return key->type; 2795 } 2796 2797 static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val) 2798 { 2799 key->type = val; 2800 } 2801 2802 /* struct btrfs_header */ 2803 BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); 2804 BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, 2805 generation, 64); 2806 BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64); 2807 BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32); 2808 BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64); 2809 BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8); 2810 BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header, 2811 generation, 64); 2812 BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64); 2813 BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, 2814 nritems, 32); 2815 BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64); 2816 2817 static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) 2818 { 2819 return (btrfs_header_flags(eb) & flag) == flag; 2820 } 2821 2822 static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) 2823 { 2824 u64 flags = btrfs_header_flags(eb); 2825 btrfs_set_header_flags(eb, flags | flag); 2826 return (flags & flag) == flag; 2827 } 2828 2829 static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) 2830 { 2831 u64 flags = btrfs_header_flags(eb); 2832 btrfs_set_header_flags(eb, flags & ~flag); 2833 return (flags & flag) == flag; 2834 } 2835 2836 static inline int btrfs_header_backref_rev(struct extent_buffer *eb) 2837 { 2838 u64 flags = btrfs_header_flags(eb); 2839 return flags >> BTRFS_BACKREF_REV_SHIFT; 2840 } 2841 2842 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, 2843 int rev) 2844 { 2845 u64 flags = btrfs_header_flags(eb); 2846 flags &= ~BTRFS_BACKREF_REV_MASK; 2847 flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT; 2848 btrfs_set_header_flags(eb, flags); 2849 } 2850 2851 static inline unsigned long btrfs_header_fsid(void) 2852 { 2853 return offsetof(struct btrfs_header, fsid); 2854 } 2855 2856 static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) 2857 { 2858 return offsetof(struct btrfs_header, chunk_tree_uuid); 2859 } 2860 2861 static inline int btrfs_is_leaf(struct extent_buffer *eb) 2862 { 2863 return btrfs_header_level(eb) == 0; 2864 } 2865 2866 /* struct btrfs_root_item */ 2867 BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item, 2868 generation, 64); 2869 BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32); 2870 BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64); 2871 BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8); 2872 2873 BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item, 2874 generation, 64); 2875 BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64); 2876 BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8); 2877 BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64); 2878 BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); 2879 BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64); 2880 BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64); 2881 
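/*
 * Illustrative sketch, not part of the original header: a typical scan over
 * every item in a leaf using the helpers defined above. Each slot's on-disk
 * key is converted to CPU byte order with btrfs_item_key_to_cpu() before its
 * type is examined. The function name is hypothetical.
 */
#if 0
static inline int btrfs_example_count_items_of_type(struct extent_buffer *eb,
						    u8 wanted)
{
	struct btrfs_key key;
	u32 nritems, slot;
	int count = 0;

	if (!btrfs_is_leaf(eb))
		return 0;

	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; slot++) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (btrfs_key_type(&key) == wanted)
			count++;
	}
	return count;
}
#endif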
BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); 2882 BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, 2883 last_snapshot, 64); 2884 BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item, 2885 generation_v2, 64); 2886 BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item, 2887 ctransid, 64); 2888 BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item, 2889 otransid, 64); 2890 BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, 2891 stransid, 64); 2892 BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, 2893 rtransid, 64); 2894 2895 static inline bool btrfs_root_readonly(struct btrfs_root *root) 2896 { 2897 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; 2898 } 2899 2900 static inline bool btrfs_root_dead(struct btrfs_root *root) 2901 { 2902 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; 2903 } 2904 2905 /* struct btrfs_root_backup */ 2906 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup, 2907 tree_root, 64); 2908 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup, 2909 tree_root_gen, 64); 2910 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup, 2911 tree_root_level, 8); 2912 2913 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup, 2914 chunk_root, 64); 2915 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup, 2916 chunk_root_gen, 64); 2917 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup, 2918 chunk_root_level, 8); 2919 2920 BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup, 2921 extent_root, 64); 2922 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup, 2923 extent_root_gen, 64); 2924 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup, 2925 extent_root_level, 8); 2926 2927 BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup, 2928 fs_root, 64); 2929 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup, 2930 fs_root_gen, 64); 2931 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup, 2932 fs_root_level, 8); 2933 2934 BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup, 2935 dev_root, 64); 2936 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup, 2937 dev_root_gen, 64); 2938 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup, 2939 dev_root_level, 8); 2940 2941 BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup, 2942 csum_root, 64); 2943 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup, 2944 csum_root_gen, 64); 2945 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup, 2946 csum_root_level, 8); 2947 BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup, 2948 total_bytes, 64); 2949 BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup, 2950 bytes_used, 64); 2951 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, 2952 num_devices, 64); 2953 2954 /* struct btrfs_balance_item */ 2955 BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); 2956 2957 static inline void btrfs_balance_data(struct extent_buffer *eb, 2958 struct btrfs_balance_item *bi, 2959 struct btrfs_disk_balance_args *ba) 2960 { 2961 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2962 } 2963 2964 static inline void 
btrfs_set_balance_data(struct extent_buffer *eb, 2965 struct btrfs_balance_item *bi, 2966 struct btrfs_disk_balance_args *ba) 2967 { 2968 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); 2969 } 2970 2971 static inline void btrfs_balance_meta(struct extent_buffer *eb, 2972 struct btrfs_balance_item *bi, 2973 struct btrfs_disk_balance_args *ba) 2974 { 2975 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2976 } 2977 2978 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, 2979 struct btrfs_balance_item *bi, 2980 struct btrfs_disk_balance_args *ba) 2981 { 2982 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); 2983 } 2984 2985 static inline void btrfs_balance_sys(struct extent_buffer *eb, 2986 struct btrfs_balance_item *bi, 2987 struct btrfs_disk_balance_args *ba) 2988 { 2989 read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 2990 } 2991 2992 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, 2993 struct btrfs_balance_item *bi, 2994 struct btrfs_disk_balance_args *ba) 2995 { 2996 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); 2997 } 2998 2999 static inline void 3000 btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, 3001 struct btrfs_disk_balance_args *disk) 3002 { 3003 memset(cpu, 0, sizeof(*cpu)); 3004 3005 cpu->profiles = le64_to_cpu(disk->profiles); 3006 cpu->usage = le64_to_cpu(disk->usage); 3007 cpu->devid = le64_to_cpu(disk->devid); 3008 cpu->pstart = le64_to_cpu(disk->pstart); 3009 cpu->pend = le64_to_cpu(disk->pend); 3010 cpu->vstart = le64_to_cpu(disk->vstart); 3011 cpu->vend = le64_to_cpu(disk->vend); 3012 cpu->target = le64_to_cpu(disk->target); 3013 cpu->flags = le64_to_cpu(disk->flags); 3014 cpu->limit = le64_to_cpu(disk->limit); 3015 } 3016 3017 static inline void 3018 btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, 3019 struct btrfs_balance_args *cpu) 3020 { 3021 memset(disk, 0, sizeof(*disk)); 3022 3023 disk->profiles = cpu_to_le64(cpu->profiles); 3024 disk->usage = cpu_to_le64(cpu->usage); 3025 disk->devid = cpu_to_le64(cpu->devid); 3026 disk->pstart = cpu_to_le64(cpu->pstart); 3027 disk->pend = cpu_to_le64(cpu->pend); 3028 disk->vstart = cpu_to_le64(cpu->vstart); 3029 disk->vend = cpu_to_le64(cpu->vend); 3030 disk->target = cpu_to_le64(cpu->target); 3031 disk->flags = cpu_to_le64(cpu->flags); 3032 disk->limit = cpu_to_le64(cpu->limit); 3033 } 3034 3035 /* struct btrfs_super_block */ 3036 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); 3037 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64); 3038 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block, 3039 generation, 64); 3040 BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64); 3041 BTRFS_SETGET_STACK_FUNCS(super_sys_array_size, 3042 struct btrfs_super_block, sys_chunk_array_size, 32); 3043 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation, 3044 struct btrfs_super_block, chunk_root_generation, 64); 3045 BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block, 3046 root_level, 8); 3047 BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block, 3048 chunk_root, 64); 3049 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block, 3050 chunk_root_level, 8); 3051 BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block, 3052 log_root, 64); 3053 BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block, 3054 log_root_transid, 64); 3055 
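/*
 * Illustrative sketch, not part of the original header: reading the
 * data-chunk balance arguments out of a balance item stored in a leaf.
 * btrfs_balance_data() copies the packed on-disk struct out of the extent
 * buffer and btrfs_disk_balance_args_to_cpu() converts every field from
 * little endian. The function name is hypothetical.
 */
#if 0
static inline void btrfs_example_read_balance_data(struct extent_buffer *leaf,
						struct btrfs_balance_item *bi,
						struct btrfs_balance_args *out)
{
	struct btrfs_disk_balance_args disk;

	btrfs_balance_data(leaf, bi, &disk);
	btrfs_disk_balance_args_to_cpu(out, &disk);
}
#endif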
BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block, 3056 log_root_level, 8); 3057 BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block, 3058 total_bytes, 64); 3059 BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block, 3060 bytes_used, 64); 3061 BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block, 3062 sectorsize, 32); 3063 BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block, 3064 nodesize, 32); 3065 BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block, 3066 stripesize, 32); 3067 BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block, 3068 root_dir_objectid, 64); 3069 BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block, 3070 num_devices, 64); 3071 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, 3072 compat_flags, 64); 3073 BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, 3074 compat_ro_flags, 64); 3075 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, 3076 incompat_flags, 64); 3077 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, 3078 csum_type, 16); 3079 BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block, 3080 cache_generation, 64); 3081 BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); 3082 BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, 3083 uuid_tree_generation, 64); 3084 3085 static inline int btrfs_super_csum_size(struct btrfs_super_block *s) 3086 { 3087 u16 t = btrfs_super_csum_type(s); 3088 /* 3089 * csum type is validated at mount time 3090 */ 3091 return btrfs_csum_sizes[t]; 3092 } 3093 3094 static inline unsigned long btrfs_leaf_data(struct extent_buffer *l) 3095 { 3096 return offsetof(struct btrfs_leaf, items); 3097 } 3098 3099 /* struct btrfs_file_extent_item */ 3100 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); 3101 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, 3102 struct btrfs_file_extent_item, disk_bytenr, 64); 3103 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset, 3104 struct btrfs_file_extent_item, offset, 64); 3105 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation, 3106 struct btrfs_file_extent_item, generation, 64); 3107 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes, 3108 struct btrfs_file_extent_item, num_bytes, 64); 3109 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes, 3110 struct btrfs_file_extent_item, disk_num_bytes, 64); 3111 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression, 3112 struct btrfs_file_extent_item, compression, 8); 3113 3114 static inline unsigned long 3115 btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e) 3116 { 3117 return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START; 3118 } 3119 3120 static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) 3121 { 3122 return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize; 3123 } 3124 3125 BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, 3126 disk_bytenr, 64); 3127 BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, 3128 generation, 64); 3129 BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item, 3130 disk_num_bytes, 64); 3131 BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item, 3132 offset, 64); 3133 BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item, 3134 num_bytes, 64); 3135 
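/*
 * Illustrative sketch, not part of the original header: the "stack" variants
 * generated by BTRFS_SETGET_STACK_FUNCS() operate on a structure that
 * already sits in ordinary memory (for example a superblock copy read off
 * disk), so no extent buffer is involved; they only do the endian
 * conversion. This is not the kernel's real superblock validation, just a
 * demonstration of the accessors; the function name is hypothetical.
 */
#if 0
static inline int btrfs_example_super_looks_sane(struct btrfs_super_block *sb)
{
	if (btrfs_super_magic(sb) != BTRFS_MAGIC)
		return 0;
	/* node and sector sizes are stored little endian on disk */
	if (btrfs_super_nodesize(sb) > BTRFS_MAX_METADATA_BLOCKSIZE)
		return 0;
	return btrfs_super_sectorsize(sb) != 0;
}
#endif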
BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item, 3136 ram_bytes, 64); 3137 BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item, 3138 compression, 8); 3139 BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item, 3140 encryption, 8); 3141 BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item, 3142 other_encoding, 16); 3143 3144 /* 3145 * this returns the number of bytes used by the item on disk, minus the 3146 * size of any extent headers. If a file is compressed on disk, this is 3147 * the compressed size 3148 */ 3149 static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, 3150 struct btrfs_item *e) 3151 { 3152 return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; 3153 } 3154 3155 /* this returns the number of file bytes represented by the inline item. 3156 * If an item is compressed, this is the uncompressed size 3157 */ 3158 static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, 3159 int slot, 3160 struct btrfs_file_extent_item *fi) 3161 { 3162 struct btrfs_map_token token; 3163 3164 btrfs_init_map_token(&token); 3165 /* 3166 * return the space used on disk if this item isn't 3167 * compressed or encoded 3168 */ 3169 if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 && 3170 btrfs_token_file_extent_encryption(eb, fi, &token) == 0 && 3171 btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) { 3172 return btrfs_file_extent_inline_item_len(eb, 3173 btrfs_item_nr(slot)); 3174 } 3175 3176 /* otherwise use the ram bytes field */ 3177 return btrfs_token_file_extent_ram_bytes(eb, fi, &token); 3178 } 3179 3180 3181 /* btrfs_dev_stats_item */ 3182 static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb, 3183 struct btrfs_dev_stats_item *ptr, 3184 int index) 3185 { 3186 u64 val; 3187 3188 read_extent_buffer(eb, &val, 3189 offsetof(struct btrfs_dev_stats_item, values) + 3190 ((unsigned long)ptr) + (index * sizeof(u64)), 3191 sizeof(val)); 3192 return val; 3193 } 3194 3195 static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb, 3196 struct btrfs_dev_stats_item *ptr, 3197 int index, u64 val) 3198 { 3199 write_extent_buffer(eb, &val, 3200 offsetof(struct btrfs_dev_stats_item, values) + 3201 ((unsigned long)ptr) + (index * sizeof(u64)), 3202 sizeof(val)); 3203 } 3204 3205 /* btrfs_qgroup_status_item */ 3206 BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item, 3207 generation, 64); 3208 BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item, 3209 version, 64); 3210 BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item, 3211 flags, 64); 3212 BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item, 3213 rescan, 64); 3214 3215 /* btrfs_qgroup_info_item */ 3216 BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item, 3217 generation, 64); 3218 BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64); 3219 BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item, 3220 rfer_cmpr, 64); 3221 BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64); 3222 BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item, 3223 excl_cmpr, 64); 3224 3225 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation, 3226 struct btrfs_qgroup_info_item, generation, 64); 3227 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item, 3228 rfer, 64); 3229 
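/*
 * Illustrative sketch, not part of the original header: the device
 * statistics item holds an array of u64 counters, and
 * btrfs_dev_stats_value()/btrfs_set_dev_stats_value() above read or write
 * one counter by index directly in the extent buffer. The number of
 * counters and the meaning of each index are defined elsewhere, so the
 * count is passed in here. The function name is hypothetical.
 */
#if 0
static inline u64 btrfs_example_sum_dev_stats(struct extent_buffer *leaf,
					      struct btrfs_dev_stats_item *item,
					      int nr_values)
{
	u64 total = 0;
	int i;

	for (i = 0; i < nr_values; i++)
		total += btrfs_dev_stats_value(leaf, item, i);
	return total;
}
#endif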
BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr, 3230 struct btrfs_qgroup_info_item, rfer_cmpr, 64); 3231 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item, 3232 excl, 64); 3233 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr, 3234 struct btrfs_qgroup_info_item, excl_cmpr, 64); 3235 3236 /* btrfs_qgroup_limit_item */ 3237 BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item, 3238 flags, 64); 3239 BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item, 3240 max_rfer, 64); 3241 BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item, 3242 max_excl, 64); 3243 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item, 3244 rsv_rfer, 64); 3245 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item, 3246 rsv_excl, 64); 3247 3248 /* btrfs_dev_replace_item */ 3249 BTRFS_SETGET_FUNCS(dev_replace_src_devid, 3250 struct btrfs_dev_replace_item, src_devid, 64); 3251 BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode, 3252 struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode, 3253 64); 3254 BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item, 3255 replace_state, 64); 3256 BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item, 3257 time_started, 64); 3258 BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item, 3259 time_stopped, 64); 3260 BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item, 3261 num_write_errors, 64); 3262 BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors, 3263 struct btrfs_dev_replace_item, num_uncorrectable_read_errors, 3264 64); 3265 BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item, 3266 cursor_left, 64); 3267 BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item, 3268 cursor_right, 64); 3269 3270 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid, 3271 struct btrfs_dev_replace_item, src_devid, 64); 3272 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode, 3273 struct btrfs_dev_replace_item, 3274 cont_reading_from_srcdev_mode, 64); 3275 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state, 3276 struct btrfs_dev_replace_item, replace_state, 64); 3277 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started, 3278 struct btrfs_dev_replace_item, time_started, 64); 3279 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped, 3280 struct btrfs_dev_replace_item, time_stopped, 64); 3281 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors, 3282 struct btrfs_dev_replace_item, num_write_errors, 64); 3283 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors, 3284 struct btrfs_dev_replace_item, 3285 num_uncorrectable_read_errors, 64); 3286 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left, 3287 struct btrfs_dev_replace_item, cursor_left, 64); 3288 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right, 3289 struct btrfs_dev_replace_item, cursor_right, 64); 3290 3291 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) 3292 { 3293 return sb->s_fs_info; 3294 } 3295 3296 /* helper function to cast into the data area of the leaf. 
*/ 3297 #define btrfs_item_ptr(leaf, slot, type) \ 3298 ((type *)(btrfs_leaf_data(leaf) + \ 3299 btrfs_item_offset_nr(leaf, slot))) 3300 3301 #define btrfs_item_ptr_offset(leaf, slot) \ 3302 ((unsigned long)(btrfs_leaf_data(leaf) + \ 3303 btrfs_item_offset_nr(leaf, slot))) 3304 3305 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) 3306 { 3307 return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && 3308 (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); 3309 } 3310 3311 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) 3312 { 3313 return mapping_gfp_mask(mapping) & ~__GFP_FS; 3314 } 3315 3316 /* extent-tree.c */ 3317 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, 3318 unsigned num_items) 3319 { 3320 return (root->nodesize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * 3321 2 * num_items; 3322 } 3323 3324 /* 3325 * Doing a truncate won't result in new nodes or leaves, just what we need for 3326 * COW. 3327 */ 3328 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root, 3329 unsigned num_items) 3330 { 3331 return root->nodesize * BTRFS_MAX_LEVEL * num_items; 3332 } 3333 3334 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 3335 struct btrfs_root *root); 3336 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, 3337 struct btrfs_root *root); 3338 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3339 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, 3340 struct btrfs_root *root, unsigned long count); 3341 int btrfs_async_run_delayed_refs(struct btrfs_root *root, 3342 unsigned long count, int wait); 3343 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); 3344 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 3345 struct btrfs_root *root, u64 bytenr, 3346 u64 offset, int metadata, u64 *refs, u64 *flags); 3347 int btrfs_pin_extent(struct btrfs_root *root, 3348 u64 bytenr, u64 num, int reserved); 3349 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root, 3350 u64 bytenr, u64 num_bytes); 3351 int btrfs_exclude_logged_extents(struct btrfs_root *root, 3352 struct extent_buffer *eb); 3353 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, 3354 struct btrfs_root *root, 3355 u64 objectid, u64 offset, u64 bytenr); 3356 struct btrfs_block_group_cache *btrfs_lookup_block_group( 3357 struct btrfs_fs_info *info, 3358 u64 bytenr); 3359 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); 3360 int get_block_group_index(struct btrfs_block_group_cache *cache); 3361 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, 3362 struct btrfs_root *root, u64 parent, 3363 u64 root_objectid, 3364 struct btrfs_disk_key *key, int level, 3365 u64 hint, u64 empty_size); 3366 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 3367 struct btrfs_root *root, 3368 struct extent_buffer *buf, 3369 u64 parent, int last_ref); 3370 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, 3371 struct btrfs_root *root, 3372 u64 root_objectid, u64 owner, 3373 u64 offset, struct btrfs_key *ins); 3374 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, 3375 struct btrfs_root *root, 3376 u64 root_objectid, u64 owner, u64 offset, 3377 struct btrfs_key *ins); 3378 int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes, 3379 u64 min_alloc_size, u64 empty_size, u64 hint_byte, 3380 struct btrfs_key *ins, int is_data, int delalloc); 3381 int 
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
					struct btrfs_root *root);
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
					struct btrfs_root *root);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_root *root,
				 unsigned long count, int wait);
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num, int reserved);
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes);
int btrfs_exclude_logged_extents(struct btrfs_root *root,
				 struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr);
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int get_block_group_index(struct btrfs_block_group_cache *cache);
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 parent,
					u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size);
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
			 u64 min_alloc_size, u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, int is_data, int delalloc);
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref);
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref);
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int level, int is_data);
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset, int no_quota);

int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
			       int delalloc);
int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
				       u64 start, u64 len);
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset,
			 int no_quota);

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root);
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_root *root);
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start,
			     struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);

enum btrfs_reserve_flush_enum {
	/* If we are in the transaction, we can't flush anything. */
	BTRFS_RESERVE_NO_FLUSH,
	/*
	 * Flushing delalloc may cause a deadlock somewhere; in that case,
	 * use FLUSH_LIMIT.
	 */
	BTRFS_RESERVE_FLUSH_LIMIT,
	BTRFS_RESERVE_FLUSH_ALL,
};

int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root);
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode);
void btrfs_orphan_release_metadata(struct inode *inode);
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int nitems,
				     u64 *qgroup_reserved,
				     bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv,
				      u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes);
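/*
 * Illustrative sketch of typical usage (an assumption, not from the original
 * header): callers pick the flush mode that matches how much reclaim work is
 * safe in their context, e.g.
 *
 *	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *
 * Code already running inside a transaction would pass
 * BTRFS_RESERVE_NO_FLUSH, and paths where flushing delalloc could deadlock
 * use BTRFS_RESERVE_FLUSH_LIMIT instead.
 */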
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
					      unsigned short type);
void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes);
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor);
void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes);
int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache);
void btrfs_set_block_group_rw(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
				   u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type);
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);

int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info);
int __get_raid_index(u64 flags);
int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
void btrfs_end_write_no_snapshoting(struct btrfs_root *root);

/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot);
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans);
enum btrfs_compare_tree_result {
	BTRFS_COMPARE_TREE_NEW,
	BTRFS_COMPARE_TREE_DELETED,
	BTRFS_COMPARE_TREE_CHANGED,
	BTRFS_COMPARE_TREE_SAME,
};
typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root,
				  struct btrfs_root *right_root,
				  struct btrfs_path *left_path,
				  struct btrfs_path *right_path,
				  struct btrfs_key *key,
				  enum btrfs_compare_tree_result result,
				  void *ctx);
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t cb, void *ctx);
int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf);
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size);
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
			 u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 inum, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       struct btrfs_key *key, struct btrfs_path *p,
			       int find_higher, int return_any);
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_clear_path_blocking(struct btrfs_path *p,
			       struct extent_buffer *held, int held_rw);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}

void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_key *key,
					  u32 data_size)
{
	return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1);
}

int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
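/*
 * Illustrative read-only lookup sketch (not part of the original header),
 * showing the usual btrfs_search_slot()/btrfs_item_ptr() pattern:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_inode_item *ii;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_inode_item);
 *	btrfs_free_path(path);
 *
 * A NULL trans handle with ins_len == 0 and cow == 0 makes the search
 * read-only; ret > 0 means the key was not found and the path points at the
 * slot where it would be inserted.
 */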
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);
static inline int btrfs_next_old_item(struct btrfs_root *root,
				      struct btrfs_path *p, u64 time_seq)
{
	++p->slots[0];
	if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
		return btrfs_next_old_leaf(root, p, time_seq);
	return 0;
}
static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
				     struct btrfs_block_rsv *block_rsv,
				     int update_ref, int for_reloc);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent);
static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
{
	/*
	 * Get synced with close_ctree()
	 */
	smp_mb();
	return fs_info->closing;
}

/*
 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
 * anything except sleep. This function is used to check the status of
 * the fs.
 */
static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
{
	return (root->fs_info->sb->s_flags & MS_RDONLY ||
		btrfs_fs_closing(root->fs_info));
}
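/*
 * Illustrative sketch (not from the original header): a background worker
 * checks this before each pass and just sleeps when the filesystem is
 * read-only or being unmounted:
 *
 *	if (btrfs_need_cleaner_sleep(root))
 *		goto sleep;
 *	... do cleanup work ...
 */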
static inline void free_fs_info(struct btrfs_fs_info *fs_info)
{
	kfree(fs_info->balance_ctl);
	kfree(fs_info->delayed_root);
	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
	kfree(fs_info->csum_root);
	kfree(fs_info->quota_root);
	kfree(fs_info->uuid_root);
	kfree(fs_info->super_copy);
	kfree(fs_info->super_for_commit);
	security_free_mnt_opts(&fs_info->security_opts);
	kfree(fs_info);
}

/* tree mod log functions from ctree.c */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem);
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem);
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);

/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
			struct btrfs_path *path,
			u64 root_id, u64 ref_id);
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
		       struct btrfs_root *tree_root,
		       u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
		       const char *name, int name_len);
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
		       struct btrfs_root *tree_root,
		       u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
		       const char *name, int name_len);
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_key *key);
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_root_item
		      *item);
int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key,
				   struct btrfs_root_item *item);
int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key,
		    struct btrfs_path *path, struct btrfs_root_item *root_item,
		    struct btrfs_key *root_key);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
void btrfs_set_root_node(struct btrfs_root_item *item,
			 struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
void btrfs_update_root_times(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root);

/* uuid-tree.c */
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
			u64 subid);
int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
			u64 subid);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
			    int (*check_func)(struct btrfs_fs_info *, u8 *, u8,
					      u64));

/* dir-item.c */
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
				   const char *name, int name_len);
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, const char *name,
			  int name_len, struct inode *dir,
			  struct btrfs_key *location, u8 type, u64 index);
struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path, u64 dir,
					     const char *name, int name_len,
					     int mod);
struct btrfs_dir_item *
btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path, u64 dir,
			    u64 objectid, const char *name, int name_len,
			    int mod);
struct btrfs_dir_item *
btrfs_search_dir_index_item(struct btrfs_root *root,
			    struct btrfs_path *path, u64 dirid,
			    const char *name, int name_len);
int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_path *path,
			      struct btrfs_dir_item *di);
int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path, u64 objectid,
			    const char *name, u16 name_len,
			    const void *data, u16 data_len);
struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, u64 dir,
					  const char *name, u16 name_len,
					  int mod);
int verify_dir_item(struct btrfs_root *root,
		    struct extent_buffer *leaf,
		    struct btrfs_dir_item *dir_item);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
						 struct btrfs_path *path,
						 const char *name,
						 int name_len);

/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 offset);
int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 offset);
int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);

/* inode-item.c */
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   const char *name, int name_len,
			   u64 inode_objectid, u64 ref_objectid, u64 index);
int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			const char *name, int name_len,
			u64 inode_objectid, u64 ref_objectid, u64 *index);
int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
		       *root, struct btrfs_path *path,
		       struct btrfs_key *location, int mod);
struct btrfs_inode_extref *
btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path,
			  const char *name, int name_len,
			  u64 inode_objectid, u64 ref_objectid, int ins_len,
			  int cow);

int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
				   u64 ref_objectid, const char *name,
				   int name_len,
				   struct btrfs_inode_extref **extref_ret);

/* file-item.c */
struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len);
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
			  struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
			      struct bio *bio, u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption,
			     u16 other_encoding);
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 bytenr, int mod);
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit);
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em);

/* inode.c */
struct btrfs_delalloc_work {
	struct inode *inode;
	int wait;
	int delay_iput;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};

struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
						       int wait,
						       int delay_iput);
void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);

struct extent_map *btrfs_get_extent_fiemap(struct inode *inode,
					   struct page *page,
					   size_t pg_offset, u64 start,
					   u64 len, int create);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes);

/* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
#if defined(ClearPageFsMisc) && !defined(ClearPageChecked)
#define ClearPageChecked ClearPageFsMisc
#define SetPageChecked SetPageFsMisc
#define PageChecked PageFsMisc
#endif

/* This forces readahead on a given range of bytes in an inode */
static inline void btrfs_force_ra(struct address_space *mapping,
				  struct file_ra_state *ra, struct file *file,
				  pgoff_t offset, unsigned long req_size)
{
	page_cache_sync_readahead(mapping, ra, file, offset, req_size);
}
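/*
 * Illustrative sketch (not from the original header): the offset argument
 * above is a page index, so forcing readahead over a byte range means
 * converting with PAGE_CACHE_SHIFT first, e.g.
 *
 *	pgoff_t index = start >> PAGE_CACHE_SHIFT;
 *	pgoff_t last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
 *
 *	btrfs_force_ra(inode->i_mapping, &file->f_ra, file, index,
 *		       last_index - index + 1);
 */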
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len);
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct inode *parent_inode, struct inode *inode,
		   const char *name, int name_len, int add_backref, u64 index);
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len);
int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
			int front);
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct inode *inode, u64 new_size,
			       u32 min_type);

int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
			       int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root,
			     struct btrfs_root *parent_root,
			     u64 new_dirid);
int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int btrfs_init_cachep(void);
void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
				    size_t pg_offset, u64 start, u64 end,
				    int create);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root);
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
void btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint);
int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint);
int btrfs_inode_check_errors(struct inode *inode);
extern const struct dentry_operations btrfs_dentry_operations;

/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
void btrfs_update_iflags(struct inode *inode);
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
int btrfs_is_empty_uuid(u8 *uuid);
int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_pages);
void btrfs_get_block_group_info(struct list_head *groups_list,
				struct btrfs_ioctl_space_info *space);
void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
			       struct btrfs_ioctl_balance_args *bargs);

/* file.c */
int btrfs_auto_defrag_init(void);
void btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned);
extern const struct file_operations btrfs_file_operations;
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted);
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);

/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
			struct btrfs_root *root);

/* sysfs.c */
int btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_one(struct btrfs_fs_info *fs_info);

/* xattr.c */
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);

/* super.c */
int btrfs_parse_options(struct btrfs_root *root, char *options);
int btrfs_sync_fs(struct super_block *sb, int wait);

#ifdef CONFIG_PRINTK
__printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...);
#else
static inline __printf(2, 3)
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
}
#endif

#define btrfs_emerg(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_EMERG fmt, ##args)
#define btrfs_alert(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_ALERT fmt, ##args)
#define btrfs_crit(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_CRIT fmt, ##args)
#define btrfs_err(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_ERR fmt, ##args)
#define btrfs_warn(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_WARNING fmt, ##args)
#define btrfs_notice(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_NOTICE fmt, ##args)
#define btrfs_info(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_INFO fmt, ##args)
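/*
 * Illustrative usage (not part of the original header): these wrappers take
 * the same format strings as printk() but tag the message with the
 * filesystem it refers to, e.g.
 *
 *	btrfs_info(fs_info, "disk space caching is enabled");
 *	btrfs_err(fs_info, "failed to read block group %llu", objectid);
 */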
#ifdef DEBUG
#define btrfs_debug(fs_info, fmt, args...) \
	btrfs_printk(fs_info, KERN_DEBUG fmt, ##args)
#else
#define btrfs_debug(fs_info, fmt, args...) \
	no_printk(KERN_DEBUG fmt, ##args)
#endif

#ifdef CONFIG_BTRFS_ASSERT

static inline void assfail(char *expr, char *file, int line)
{
	pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
	       expr, file, line);
	BUG();
}

#define ASSERT(expr)	\
	(likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#else
#define ASSERT(expr)	((void)0)
#endif

#define btrfs_assert()
__printf(5, 6)
void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
		       unsigned int line, int errno, const char *fmt, ...);

void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, const char *function,
			       unsigned int line, int errno);

#define btrfs_set_fs_incompat(__fs_info, opt) \
	__btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
					   u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (!(features & flag)) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_incompat_flags(disk_super);
		if (!(features & flag)) {
			features |= flag;
			btrfs_set_super_incompat_flags(disk_super, features);
			btrfs_info(fs_info, "setting %llu feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_fs_incompat(fs_info, opt) \
	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
{
	struct btrfs_super_block *disk_super;
	disk_super = fs_info->super_copy;
	return !!(btrfs_super_incompat_flags(disk_super) & flag);
}

/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, that way the exact line number is reported.
 */
#define btrfs_abort_transaction(trans, root, errno)		\
do {								\
	__btrfs_abort_transaction(trans, root, __func__,	\
				  __LINE__, errno);		\
} while (0)

#define btrfs_std_error(fs_info, errno)				\
do {								\
	if ((errno))						\
		__btrfs_std_error((fs_info), __func__,		\
				  __LINE__, (errno), NULL);	\
} while (0)

#define btrfs_error(fs_info, errno, fmt, args...)		\
do {								\
	__btrfs_std_error((fs_info), __func__, __LINE__,	\
			  (errno), fmt, ##args);		\
} while (0)

__printf(5, 6)
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
		   unsigned int line, int errno, const char *fmt, ...);

/*
 * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
 * will panic(). Otherwise we BUG() here.
 */
#define btrfs_panic(fs_info, errno, fmt, args...)			\
do {									\
	__btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \
	BUG();								\
} while (0)
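/*
 * Illustrative sketch (not from the original header): abort right where the
 * failure is detected rather than at a common exit label, so the logged
 * __LINE__ points at the real error site:
 *
 *	ret = btrfs_update_inode(trans, root, inode);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, root, ret);
 *		goto out;
 *	}
 */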
/* acl.c */
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
struct posix_acl *btrfs_get_acl(struct inode *inode, int type);
int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
int btrfs_init_acl(struct btrfs_trans_handle *trans,
		   struct inode *inode, struct inode *dir);
#else
#define btrfs_get_acl NULL
#define btrfs_set_acl NULL
static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
				 struct inode *inode, struct inode *dir)
{
	return 0;
}
#endif

/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
int btrfs_recover_relocation(struct btrfs_root *root);
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len);
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve);
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending);

/* scrub.c */
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace);
void btrfs_scrub_pause(struct btrfs_root *root);
void btrfs_scrub_continue(struct btrfs_root *root);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
			   struct btrfs_device *dev);
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
			 struct btrfs_scrub_progress *progress);

/* dev-replace.c */
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);

static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
{
	btrfs_bio_counter_sub(fs_info, 1);
}

/* reada.c */
struct reada_control {
	struct btrfs_root	*root;		/* tree to prefetch */
	struct btrfs_key	key_start;
	struct btrfs_key	key_end;	/* exclusive */
	atomic_t		elems;
	struct kref		refcnt;
	wait_queue_head_t	wait;
};
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
				      struct btrfs_key *start,
				      struct btrfs_key *end);
int btrfs_reada_wait(void *handle);
void btrfs_reada_detach(void *handle);
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			 u64 start, int err);
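/*
 * Illustrative sketch (not part of the original header): readahead is
 * started over a key range and then waited on, e.g. prefetching a whole
 * tree before walking it:
 *
 *	struct btrfs_key start = { .objectid = 0, .type = 0, .offset = 0 };
 *	struct btrfs_key end = { .objectid = (u64)-1, .type = (u8)-1,
 *				 .offset = (u64)-1 };
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &start, &end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);
 */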
static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    (s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
		return 1;
	return 0;
}

static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
{
	return signal_pending(current);
}

/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode);
#endif

static inline int btrfs_test_is_dummy_root(struct btrfs_root *root)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
		return 1;
#endif
	return 0;
}

#endif