// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "bio.h"
#include "print-tree.h"
#include "compression.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "super.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

/*
 * Set inode's size according to filesystem options.
 *
 * @inode:      inode we want to update the disk_i_size for
 * @new_i_size: i_size we want to set to, 0 if we use i_size
 *
 * With NO_HOLES set this simply sets the disk_i_size to whatever i_size_read()
 * returns as it is perfectly fine with a file that has holes without hole file
 * extent items.
 *
 * However without NO_HOLES we need to only return the area that is contiguous
 * from the 0 offset of the file. Otherwise we could end up adjusting i_size up
 * to an extent that has a gap in between.
 *
 * Finally new_i_size should only be set in the case of truncate where we're not
 * ready to use i_size_read() as the limiter yet.
 */
void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start, end, i_size;
	int ret;

	i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
		inode->disk_i_size = i_size;
		return;
	}

	spin_lock(&inode->lock);
	ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
					 &end, EXTENT_DIRTY);
	if (!ret && start == 0)
		i_size = min(i_size, end + 1);
	else
		i_size = 0;
	inode->disk_i_size = i_size;
	spin_unlock(&inode->lock);
}

/*
 * Mark range within a file as having a new extent inserted.
 *
 * @inode: inode being modified
 * @start: start file offset of the file extent we've inserted
 * @len:   logical length of the file extent item
 *
 * Call when we are inserting a new file extent where there was none before.
 * Does not need to be called in the case where we're replacing an existing file
 * extent, however if not sure it's fine to call this multiple times.
 *
 * The start and len must match the file extent item, and thus must be
 * sectorsize aligned.
 */
int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
				      u64 len)
{
	if (len == 0)
		return 0;

	ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize));

	if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
		return 0;
	return set_extent_bits(&inode->file_extent_tree, start, start + len - 1,
			       EXTENT_DIRTY);
}
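
/*
 * Illustrative example (not from the original source): without NO_HOLES, if
 * the file_extent_tree has EXTENT_DIRTY set for [0, 64K) and [128K, 192K) and
 * i_size is 192K, btrfs_inode_safe_disk_i_size_write() caps disk_i_size at
 * 64K, because only [0, 64K) is contiguous from offset 0. Once the hole at
 * [64K, 128K) gets a file extent item and the range is marked with
 * btrfs_inode_set_file_extent_range(), a later call can raise disk_i_size to
 * 192K.
 */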

/*
 * Mark an inode range as not having a backing extent.
 *
 * @inode: inode being modified
 * @start: start file offset of the file extent we've inserted
 * @len:   logical length of the file extent item
 *
 * Called when we drop a file extent, for example when we truncate. Doesn't
 * need to be called for cases where we're replacing a file extent, like when
 * we've COWed a file extent.
 *
 * The start and len must match the file extent item, and thus must be
 * sectorsize aligned.
 */
int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
					u64 len)
{
	if (len == 0)
		return 0;

	ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) ||
	       len == (u64)-1);

	if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
		return 0;
	return clear_extent_bit(&inode->file_extent_tree, start,
				start + len - 1, EXTENT_DIRTY, NULL);
}

static size_t bytes_to_csum_size(const struct btrfs_fs_info *fs_info, u32 bytes)
{
	ASSERT(IS_ALIGNED(bytes, fs_info->sectorsize));

	return (bytes >> fs_info->sectorsize_bits) * fs_info->csum_size;
}

static size_t csum_size_to_bytes(const struct btrfs_fs_info *fs_info, u32 csum_size)
{
	ASSERT(IS_ALIGNED(csum_size, fs_info->csum_size));

	return (csum_size / fs_info->csum_size) << fs_info->sectorsize_bits;
}

static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info)
{
	u32 max_csum_size = round_down(PAGE_SIZE - sizeof(struct btrfs_ordered_sum),
				       fs_info->csum_size);

	return csum_size_to_bytes(fs_info, max_csum_size);
}

/*
 * Calculate the total size needed to allocate for an ordered sum structure
 * spanning @bytes in the file.
 */
static int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info, unsigned long bytes)
{
	return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes);
}
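
/*
 * Worked example (illustrative, assuming 4KiB sectorsize and crc32c where
 * fs_info->csum_size is 4 bytes): bytes_to_csum_size() maps 64KiB of data to
 * (64K >> 12) * 4 = 64 checksum bytes, and csum_size_to_bytes() maps those 64
 * bytes back to 64KiB. btrfs_ordered_sum_size() for a 1MiB range is then
 * sizeof(struct btrfs_ordered_sum) + 1024 bytes of checksums.
 */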

int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos, u64 num_bytes)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, 0);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, 0);
	btrfs_set_file_extent_offset(leaf, item, 0);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, 0);
	btrfs_set_file_extent_encryption(leaf, item, 0);
	btrfs_set_file_extent_other_encoding(leaf, item, 0);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	const u32 csum_size = fs_info->csum_size;
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sectorsize_bits;
		csums_in_item = btrfs_item_size(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
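
/*
 * Note on btrfs_lookup_csum() above (illustrative, assuming 4KiB sectorsize
 * and 4 byte crc32c checksums): if the previous item has key offset 1MiB and
 * item size 64 bytes, it covers the logical range [1MiB, 1MiB + 64KiB). A
 * lookup for bytenr 1MiB + 8KiB computes csum_offset = 2 and returns a pointer
 * 8 bytes into the item. A lookup for exactly 1MiB + 64KiB hits
 * csum_offset == csums_in_item and returns ERR_PTR(-EFBIG) (the item exists
 * but ends right before bytenr), while anything past that returns
 * ERR_PTR(-ENOENT).
 */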

int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
}

/*
 * Find checksums for logical bytenr range [disk_bytenr, disk_bytenr + len) and
 * store the result to @dst.
 *
 * Return >0 for the number of sectors we found.
 * Return 0 if the range [disk_bytenr, disk_bytenr + sectorsize) has no csum;
 * the caller may want to try the next sector until one range is hit.
 * Return <0 for fatal error.
 */
static int search_csum_tree(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 disk_bytenr,
			    u64 len, u8 *dst)
{
	struct btrfs_root *csum_root;
	struct btrfs_csum_item *item = NULL;
	struct btrfs_key key;
	const u32 sectorsize = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	u32 itemsize;
	int ret;
	u64 csum_start;
	u64 csum_len;

	ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) &&
	       IS_ALIGNED(len, sectorsize));

	/* Check if the current csum item covers disk_bytenr */
	if (path->nodes[0]) {
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);

		csum_start = key.offset;
		csum_len = (itemsize / csum_size) * sectorsize;

		if (in_range(disk_bytenr, csum_start, csum_len))
			goto found;
	}

	/* Current item doesn't contain the desired range, search again */
	btrfs_release_path(path);
	csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	item = btrfs_lookup_csum(NULL, csum_root, path, disk_bytenr, 0);
	if (IS_ERR(item)) {
		ret = PTR_ERR(item);
		goto out;
	}
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);

	csum_start = key.offset;
	csum_len = (itemsize / csum_size) * sectorsize;
	ASSERT(in_range(disk_bytenr, csum_start, csum_len));

found:
	ret = (min(csum_start + csum_len, disk_bytenr + len) -
	       disk_bytenr) >> fs_info->sectorsize_bits;
	read_extent_buffer(path->nodes[0], dst, (unsigned long)item,
			   ret * csum_size);
out:
	if (ret == -ENOENT || ret == -EFBIG)
		ret = 0;
	return ret;
}

/*
 * Lookup the checksum for the read bio in csum tree.
 *
 * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise.
 */
blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio *bio = &bbio->bio;
	struct btrfs_path *path;
	const u32 sectorsize = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	u32 orig_len = bio->bi_iter.bi_size;
	u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 cur_disk_bytenr;
	const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
	int count = 0;
	blk_status_t ret = BLK_STS_OK;

	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
		return BLK_STS_OK;

	/*
	 * This function is only called for read bio.
	 *
	 * This means two things:
	 * - All our csums should only be in csum tree
	 *   No ordered extents csums, as ordered extents are only for write
	 *   path.
	 * - No need to bother any other info from bvec
	 *   Since we're looking up csums, the only important info is the
	 *   disk_bytenr and the length, which can be extracted from bi_iter
	 *   directly.
	 */
	ASSERT(bio_op(bio) == REQ_OP_READ);
	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
		bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
		if (!bbio->csum) {
			btrfs_free_path(path);
			return BLK_STS_RESOURCE;
		}
	} else {
		bbio->csum = bbio->csum_inline;
	}

	/*
	 * If requested number of sectors is larger than one leaf can contain,
	 * kick the readahead for csum tree.
	 */
	if (nblocks > fs_info->csums_per_leaf)
		path->reada = READA_FORWARD;

	/*
	 * The free space stuff is only read when it hasn't been
	 * updated in the current transaction. So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	for (cur_disk_bytenr = orig_disk_bytenr;
	     cur_disk_bytenr < orig_disk_bytenr + orig_len;
	     cur_disk_bytenr += (count * sectorsize)) {
		u64 search_len = orig_disk_bytenr + orig_len - cur_disk_bytenr;
		unsigned int sector_offset;
		u8 *csum_dst;

		/*
		 * Although both cur_disk_bytenr and orig_disk_bytenr are u64,
		 * we're calculating the offset to the bio start.
		 *
		 * Bio size is limited to UINT_MAX, thus unsigned int is large
		 * enough to contain the raw result, not to mention the right
		 * shifted result.
		 */
		ASSERT(cur_disk_bytenr - orig_disk_bytenr < UINT_MAX);
		sector_offset = (cur_disk_bytenr - orig_disk_bytenr) >>
				fs_info->sectorsize_bits;
		csum_dst = bbio->csum + sector_offset * csum_size;

		count = search_csum_tree(fs_info, path, cur_disk_bytenr,
					 search_len, csum_dst);
		if (count < 0) {
			ret = errno_to_blk_status(count);
			if (bbio->csum != bbio->csum_inline)
				kfree(bbio->csum);
			bbio->csum = NULL;
			break;
		}

		/*
		 * We didn't find a csum for this range. We need to make sure
		 * we complain loudly about this, because we are not NODATASUM.
		 *
		 * However for the DATA_RELOC inode we could potentially be
		 * relocating data extents for a NODATASUM inode, so the inode
		 * itself won't be marked with NODATASUM, but the extent we're
		 * copying is in fact NODATASUM. If we don't find a csum we
		 * assume this is the case.
		 */
		if (count == 0) {
			memset(csum_dst, 0, csum_size);
			count = 1;

			if (inode->root->root_key.objectid ==
			    BTRFS_DATA_RELOC_TREE_OBJECTID) {
				u64 file_offset = bbio->file_offset +
					cur_disk_bytenr - orig_disk_bytenr;

				set_extent_bits(&inode->io_tree, file_offset,
						file_offset + sectorsize - 1,
						EXTENT_NODATASUM);
			} else {
				btrfs_warn_rl(fs_info,
			"csum hole found for disk bytenr range [%llu, %llu)",
				cur_disk_bytenr, cur_disk_bytenr + sectorsize);
			}
		}
	}

	btrfs_free_path(path);
	return ret;
}
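
/*
 * Sizing note for btrfs_lookup_bio_sums() above (illustrative, assuming 4KiB
 * sectorsize, 4 byte crc32c checksums and a 64 byte inline csum area): a 64KiB
 * read bio needs 16 * 4 = 64 bytes of checksums, which still fits
 * bbio->csum_inline, while a 256KiB bio needs 256 bytes and therefore takes
 * the kmalloc_array() path.
 */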

int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
			    struct list_head *list, int search_commit,
			    bool nowait)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	int ret;

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->nowait = nowait;
	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);

		/*
		 * There are two cases we can hit here for the previous csum
		 * item:
		 *
		 *		|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Or
		 *				|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Check if the previous csum item covers the leading part of
		 * the search range. If so we have to start from previous csum
		 * item.
		 */
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			if (bytes_to_csum_size(fs_info, start - key.offset) <
			    btrfs_item_size(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		u64 csum_end;

		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		csum_end = key.offset + csum_size_to_bytes(fs_info,
					btrfs_item_size(leaf, path->slots[0]));
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			unsigned long offset;
			size_t size;

			size = min_t(size_t, csum_end - start,
				     max_ordered_sum_bytes(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = bytes_to_csum_size(fs_info, start - key.offset);

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   bytes_to_csum_size(fs_info, size));

			start += size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

/*
 * Do the same work as btrfs_lookup_csums_list(); the difference is in how we
 * return the result.
 *
 * This version will set the corresponding bits in @csum_bitmap to represent
 * that there is a csum found.
 * Each bit represents a sector. Thus caller should ensure @csum_buf passed
 * in is large enough to contain all csums.
 */
int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
			      u8 *csum_buf, unsigned long *csum_bitmap)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_csum_item *item;
	const u64 orig_start = start;
	int ret;

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);

		/*
		 * There are two cases we can hit here for the previous csum
		 * item:
		 *
		 *		|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Or
		 *				|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Check if the previous csum item covers the leading part of
		 * the search range. If so we have to start from previous csum
		 * item.
		 */
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			if (bytes_to_csum_size(fs_info, start - key.offset) <
			    btrfs_item_size(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		u64 csum_end;

		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		csum_end = key.offset + csum_size_to_bytes(fs_info,
					btrfs_item_size(leaf, path->slots[0]));
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			unsigned long offset;
			size_t size;
			u8 *csum_dest = csum_buf + bytes_to_csum_size(fs_info,
						start - orig_start);

			size = min_t(size_t, csum_end - start, end + 1 - start);

			offset = bytes_to_csum_size(fs_info, start - key.offset);

			read_extent_buffer(path->nodes[0], csum_dest,
					   ((unsigned long)item) + offset,
					   bytes_to_csum_size(fs_info, size));

			bitmap_set(csum_bitmap,
				(start - orig_start) >> fs_info->sectorsize_bits,
				size >> fs_info->sectorsize_bits);

			start += size;
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	btrfs_free_path(path);
	return ret;
}

/*
 * Calculate checksums of the data contained inside a bio.
 */
blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	struct bio *bio = &bbio->bio;
	u64 offset = bbio->file_offset;
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	unsigned int blockcount;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	unsigned nofs_flag;

	nofs_flag = memalloc_nofs_save();
	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
			GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	sums->bytenr = bio->bi_iter.bi_sector << 9;
	index = 0;

	shash->tfm = fs_info->csum_shash;

	bio_for_each_segment(bvec, bio, iter) {
		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			/*
			 * The bio range is not covered by any ordered extent,
			 * must be a code logic error.
			 */
			if (unlikely(!ordered)) {
				WARN(1, KERN_WARNING
			"no ordered extent for root %llu ino %llu offset %llu\n",
				     inode->root->root_key.objectid,
				     btrfs_ino(inode), offset);
				kvfree(sums);
				return BLK_STS_IOERR;
			}
		}

		blockcount = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < blockcount; i++) {
			if (!(bio->bi_opf & REQ_BTRFS_ONE_ORDERED) &&
			    !in_range(offset, ordered->file_offset,
				      ordered->num_bytes)) {
				unsigned long bytes_left;

				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				nofs_flag = memalloc_nofs_save();
				sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
						      bytes_left), GFP_KERNEL);
				memalloc_nofs_restore(nofs_flag);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = (bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;
			}

			data = bvec_kmap_local(&bvec);
			crypto_shash_digest(shash,
					    data + (i * fs_info->sectorsize),
					    fs_info->sectorsize,
					    sums->sums + index);
			kunmap_local(data);
			index += fs_info->csum_size;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * Remove one checksum overlapping a range.
 *
 * This expects the key to describe the csum pointed to by the path, and it
 * expects the csum to overlap the range [bytenr, bytenr + len).
 *
 * The csum should not be entirely contained in the range and the range should
 * not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the overlap,
 * and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	const u32 csum_size = fs_info->csum_size;
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sectorsize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
	csum_end <<= blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}
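
/*
 * Worked example for truncate_one_csum() above (illustrative, assuming 4KiB
 * sectorsize and 4 byte checksums): for a csum item at key offset 1MiB with
 * 64 bytes of checksums (covering [1MiB, 1MiB + 64KiB)), deleting the range
 * [1MiB + 48KiB, 1MiB + 96KiB) takes the first branch and truncates the item
 * to 12 * 4 = 48 bytes, while deleting [1MiB - 32KiB, 1MiB + 16KiB) takes the
 * second branch, keeps the trailing 48KiB worth of checksums and moves the
 * item key offset forward to 1MiB + 16KiB.
 */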

/*
 * Delete the csum items from the csum tree for a given range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret = 0;
	const u32 csum_size = fs_info->csum_size;
	u32 blocksize_bits = fs_info->sectorsize_bits;

	ASSERT(root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
	       root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				break;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum              ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header. So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes. Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			ret = 0;

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	btrfs_free_path(path);
	return ret;
}

static int find_next_csum_offset(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 *next_offset)
{
	const u32 nritems = btrfs_header_nritems(path->nodes[0]);
	struct btrfs_key found_key;
	int slot = path->slots[0] + 1;
	int ret;

	if (nritems == 0 || slot >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0) {
			return ret;
		} else if (ret > 0) {
			*next_offset = (u64)-1;
			return 0;
		}
		slot = path->slots[0];
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);

	if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    found_key.type != BTRFS_EXTENT_CSUM_KEY)
		*next_offset = (u64)-1;
	else
		*next_offset = found_key.offset;

	return 0;
}
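
/*
 * Illustrative behaviour of find_next_csum_offset() above: if the item after
 * path->slots[0] (possibly in the next leaf) is another EXTENT_CSUM item with
 * key offset 1MiB, *next_offset is set to 1MiB; if the next item has a
 * different key type/objectid or there is no next leaf, *next_offset becomes
 * (u64)-1, which callers treat as "no upper bound from a following csum item".
 */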

int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	const u32 csum_size = fs_info->csum_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto out;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		/* We didn't find a csum item, insert one. */
		ret = find_next_csum_offset(root, path, &next_offset);
		if (ret < 0)
			goto out;
		found_next = 1;
		goto insert;
	}

	/*
	 * At this point, we know the tree has a checksum item that ends at an
	 * offset matching the start of the checksum range we want to insert.
	 * We try to extend that item as much as possible and then add as many
	 * checksums to it as they fit.
	 *
	 * First check if the leaf has enough free space for at least one
	 * checksum. If it does, go directly to the item extension code,
	 * otherwise release the path and do a search for insertion before
	 * the extension.
	 */
	if (btrfs_leaf_free_space(leaf) >= csum_size) {
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		csum_offset = (bytenr - found_key.offset) >>
			fs_info->sectorsize_bits;
		goto extend_csum;
	}

	btrfs_release_path(path);
	path->search_for_extension = 1;
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	path->search_for_extension = 0;
	if (ret < 0)
		goto out;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >> fs_info->sectorsize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

extend_csum:
	if (csum_offset == btrfs_item_size(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sectorsize_bits;
		WARN_ON(tmp < 1);
		extend_nr = max_t(int, 1, tmp);

		/*
		 * A log tree can already have checksum items with a subset of
		 * the checksums we are trying to log. This can happen after
		 * doing a sequence of partial writes into prealloc extents and
		 * fsyncs in between, with a full fsync logging a larger subrange
		 * of an extent for which a previous fast fsync logged a smaller
		 * subrange. And this happens in particular due to merging file
		 * extent items when we complete an ordered extent for a range
		 * covered by a prealloc extent - this is done at
		 * btrfs_mark_extent_written().
		 *
		 * So if we try to extend the previous checksum item, which has
		 * a range that ends at the start of the range we want to insert,
		 * make sure we don't extend beyond the start offset of the next
		 * checksum item. If we are at the last item in the leaf, then
		 * forget the optimization of extending and add a new checksum
		 * item - it is not worth the complexity of releasing the path,
		 * getting the first key for the next leaf, repeat the btree
		 * search, etc, because log trees are temporary anyway and it
		 * would only save a few bytes of leaf space.
		 */
		if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
			if (path->slots[0] + 1 >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = find_next_csum_offset(root, path, &next_offset);
				if (ret < 0)
					goto out;
				found_next = 1;
				goto insert;
			}

			ret = find_next_csum_offset(root, path, &next_offset);
			if (ret < 0)
				goto out;

			tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
			if (tmp <= INT_MAX)
				extend_nr = min_t(int, extend_nr, tmp);
		}

		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size(leaf, path->slots[0]);
		diff = min_t(u32, btrfs_leaf_free_space(leaf), diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sectorsize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
				fs_info->sectorsize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	if (ret < 0)
		goto out;
	if (WARN_ON(ret != 0))
		goto out;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >> fs_info->sectorsize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			 ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	index += ins_size;
	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;
}
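
/*
 * Worked example for the extend_csum path above (illustrative, assuming 4KiB
 * sectorsize and 4 byte crc32c checksums): extending an existing 64 byte csum
 * item (csum_offset == 16) by 32 more sectors gives diff = (16 + 32) * 4 = 192
 * bytes, clamped to MAX_CSUM_ITEMS * 4, then reduced by the current 64 byte
 * item size to 128, further clamped to the leaf free space and rounded down to
 * a csum_size multiple, so btrfs_extend_item() grows the item by up to 128
 * bytes (32 more checksums).
 */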

void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;
	extent_end = btrfs_file_extent_end(path);
	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	em->generation = btrfs_file_extent_generation(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		em->compress_type = compress_type;
		if (compress_type != BTRFS_COMPRESS_NONE)
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}

/*
 * Returns the end offset (non-inclusive) of the file extent item the given
 * path points to. If it points to an inline extent, the returned offset is
 * rounded up to the sector size.
 */
u64 btrfs_file_extent_end(const struct btrfs_path *path)
{
	const struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 end;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
		end = btrfs_file_extent_ram_bytes(leaf, fi);
		end = ALIGN(key.offset + end, leaf->fs_info->sectorsize);
	} else {
		end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	}

	return end;
}
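
/*
 * Example for btrfs_file_extent_end() above (illustrative, assuming 4KiB
 * sectorsize): an inline extent at file offset 0 with ram_bytes 100 yields an
 * end offset of 4096 (rounded up to the sector size), while a regular extent
 * at offset 1MiB with num_bytes 128KiB yields 1MiB + 128KiB.
 */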