// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "bio.h"
#include "print-tree.h"
#include "compression.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "super.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))
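/*
 * A rough worked example of the limits above, assuming the default 16K
 * nodesize and crc32c checksums (4 bytes each): BTRFS_LEAF_DATA_SIZE is
 * 16384 - 101 = 16283 bytes, so __MAX_CSUM_ITEMS comes out to
 * (16283 - 2 * 25) / 4 - 1 = 4057 checksums per item, and the min_t()
 * against PAGE_SIZE does not kick in. The exact numbers shift with the
 * nodesize and the checksum type.
 */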
/*
 * Set inode's size according to filesystem options.
 *
 * @inode: inode we want to update the disk_i_size for
 * @new_i_size: i_size we want to set to, 0 if we use i_size
 *
 * With NO_HOLES set this simply sets the disk_i_size to whatever
 * i_size_read() returns as it is perfectly fine with a file that has holes
 * without hole file extent items.
 *
 * However without NO_HOLES we need to only return the area that is
 * contiguous from the 0 offset of the file. Otherwise we could end up
 * adjusting i_size up to an extent that has a gap in between.
 *
 * Finally new_i_size should only be set in the case of truncate where we're
 * not ready to use i_size_read() as the limiter yet.
 */
void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 start, end, i_size;
	int ret;

	i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
		inode->disk_i_size = i_size;
		return;
	}

	spin_lock(&inode->lock);
	ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
					 &end, EXTENT_DIRTY);
	if (!ret && start == 0)
		i_size = min(i_size, end + 1);
	else
		i_size = 0;
	inode->disk_i_size = i_size;
	spin_unlock(&inode->lock);
}

/*
 * Mark range within a file as having a new extent inserted.
 *
 * @inode: inode being modified
 * @start: start file offset of the file extent we've inserted
 * @len:   logical length of the file extent item
 *
 * Call when we are inserting a new file extent where there was none before.
 * There is no need to call this in the case where we're replacing an
 * existing file extent, however if not sure it's fine to call this multiple
 * times.
 *
 * The start and len must match the file extent item, and thus must be
 * sectorsize aligned.
 */
int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
				      u64 len)
{
	if (len == 0)
		return 0;

	ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize));

	if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
		return 0;
	return set_extent_bits(&inode->file_extent_tree, start, start + len - 1,
			       EXTENT_DIRTY);
}

/*
 * Mark an inode range as not having a backing extent.
 *
 * @inode: inode being modified
 * @start: start file offset of the file extent we've dropped
 * @len:   logical length of the file extent item
 *
 * Called when we drop a file extent, for example when we truncate. Doesn't
 * need to be called for cases where we're replacing a file extent, like
 * when we've COWed a file extent.
 *
 * The start and len must match the file extent item, and thus must be
 * sectorsize aligned.
 */
int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
					u64 len)
{
	if (len == 0)
		return 0;

	ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) ||
	       len == (u64)-1);

	if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
		return 0;
	return clear_extent_bit(&inode->file_extent_tree, start,
				start + len - 1, EXTENT_DIRTY, NULL);
}

/* Convert a number of file data bytes to the space its checksums occupy. */
static size_t bytes_to_csum_size(const struct btrfs_fs_info *fs_info, u32 bytes)
{
	ASSERT(IS_ALIGNED(bytes, fs_info->sectorsize));

	return (bytes >> fs_info->sectorsize_bits) * fs_info->csum_size;
}

/* The inverse of bytes_to_csum_size(). */
static size_t csum_size_to_bytes(const struct btrfs_fs_info *fs_info, u32 csum_size)
{
	ASSERT(IS_ALIGNED(csum_size, fs_info->csum_size));

	return (csum_size / fs_info->csum_size) << fs_info->sectorsize_bits;
}
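/*
 * Worked example for the two conversion helpers above, assuming a 4K
 * sectorsize and crc32c (4 byte checksums): 16K of file data covers 4
 * sectors, so bytes_to_csum_size() returns 16, and csum_size_to_bytes(16)
 * maps back to 16K. The helpers are exact inverses as long as the
 * alignment the ASSERT()s demand holds.
 */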
/*
 * Calculate the largest number of file data bytes a single ordered sum
 * structure can cover without its allocation exceeding one page.
 */
static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info)
{
	u32 max_csum_size = round_down(PAGE_SIZE - sizeof(struct btrfs_ordered_sum),
				       fs_info->csum_size);

	return csum_size_to_bytes(fs_info, max_csum_size);
}

/*
 * Calculate the total size needed to allocate for an ordered sum structure
 * spanning @bytes in the file.
 */
static int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info, unsigned long bytes)
{
	return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes);
}

int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos, u64 num_bytes)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, 0);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, 0);
	btrfs_set_file_extent_offset(leaf, item, 0);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, 0);
	btrfs_set_file_extent_encryption(leaf, item, 0);
	btrfs_set_file_extent_other_encoding(leaf, item, 0);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Find the checksum item covering @bytenr and return a pointer to the
 * checksum for that byte number inside it. Returns ERR_PTR(-EFBIG) if
 * @bytenr lands exactly one past the end of an existing item (callers may
 * then extend that item), and ERR_PTR(-ENOENT) if no item covers @bytenr.
 */
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	const u32 csum_size = fs_info->csum_size;
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
			      fs_info->sectorsize_bits;
		csums_in_item = btrfs_item_size(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

/*
 * Search the tree for the file extent item at (@objectid, @offset).
 * @mod selects the search mode: 0 for a read-only lookup, > 0 to COW the
 * path for modification, < 0 to also reserve room for a deletion.
 */
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	return btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
}
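/*
 * Hypothetical example of the above: find the extent item for file offset
 * 1M of inode 257 without COWing anything, so no transaction is needed:
 *
 *	ret = btrfs_lookup_file_extent(NULL, root, path, 257, SZ_1M, 0);
 *	if (ret > 0)
 *		... key at path->slots[0] is the first one past 1M ...
 */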
/*
 * Find checksums for the logical bytenr range [disk_bytenr, disk_bytenr +
 * len) and store the result in @dst.
 *
 * Return >0 for the number of sectors we found.
 * Return 0 if the range [disk_bytenr, disk_bytenr + sectorsize) has no
 * csum for it. The caller may want to try the next sector until one range
 * is hit.
 * Return <0 for a fatal error.
 */
static int search_csum_tree(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 disk_bytenr,
			    u64 len, u8 *dst)
{
	struct btrfs_root *csum_root;
	struct btrfs_csum_item *item = NULL;
	struct btrfs_key key;
	const u32 sectorsize = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	u32 itemsize;
	int ret;
	u64 csum_start;
	u64 csum_len;

	ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) &&
	       IS_ALIGNED(len, sectorsize));

	/* Check if the current csum item covers disk_bytenr. */
	if (path->nodes[0]) {
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);

		csum_start = key.offset;
		csum_len = (itemsize / csum_size) * sectorsize;

		if (in_range(disk_bytenr, csum_start, csum_len))
			goto found;
	}

	/* Current item doesn't contain the desired range, search again. */
	btrfs_release_path(path);
	csum_root = btrfs_csum_root(fs_info, disk_bytenr);
	item = btrfs_lookup_csum(NULL, csum_root, path, disk_bytenr, 0);
	if (IS_ERR(item)) {
		ret = PTR_ERR(item);
		goto out;
	}
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	itemsize = btrfs_item_size(path->nodes[0], path->slots[0]);

	csum_start = key.offset;
	csum_len = (itemsize / csum_size) * sectorsize;
	ASSERT(in_range(disk_bytenr, csum_start, csum_len));

found:
	ret = (min(csum_start + csum_len, disk_bytenr + len) -
	       disk_bytenr) >> fs_info->sectorsize_bits;
	read_extent_buffer(path->nodes[0], dst, (unsigned long)item,
			   ret * csum_size);
out:
	if (ret == -ENOENT || ret == -EFBIG)
		ret = 0;
	return ret;
}
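/*
 * Worked example for the clamping at the "found" label above, assuming 4K
 * sectors and crc32c: a csum item at csum_start == 1M covering 1M of data
 * ends at 2M. For disk_bytenr == 1M + 64K and len == 256K the copied range
 * ends at min(2M, 1M + 320K) == 1M + 320K, i.e. 64 sectors, so 64 * 4
 * bytes of checksums are copied into @dst and 64 is returned.
 */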
/*
 * Lookup the checksums for a read bio in the csum tree.
 *
 * Return: BLK_STS_RESOURCE if allocating memory fails, another block
 * status if the csum tree lookup fails, BLK_STS_OK otherwise.
 */
blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio *bio = &bbio->bio;
	struct btrfs_path *path;
	const u32 sectorsize = fs_info->sectorsize;
	const u32 csum_size = fs_info->csum_size;
	u32 orig_len = bio->bi_iter.bi_size;
	u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
	blk_status_t ret = BLK_STS_OK;
	u32 bio_offset = 0;

	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
		return BLK_STS_OK;

	/*
	 * This function is only called for a read bio.
	 *
	 * This means two things:
	 * - All our csums should only be in the csum tree
	 *   No ordered extent csums, as ordered extents are only for the
	 *   write path.
	 * - No need to bother with any other info from the bvecs
	 *   Since we're looking up csums, the only important info is the
	 *   disk_bytenr and the length, which can be extracted from bi_iter
	 *   directly.
	 */
	ASSERT(bio_op(bio) == REQ_OP_READ);
	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
		bbio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
		if (!bbio->csum) {
			btrfs_free_path(path);
			return BLK_STS_RESOURCE;
		}
	} else {
		bbio->csum = bbio->csum_inline;
	}

	/*
	 * If the requested number of sectors is larger than one leaf can
	 * contain, kick the readahead for the csum tree.
	 */
	if (nblocks > fs_info->csums_per_leaf)
		path->reada = READA_FORWARD;

	/*
	 * The free space stuff is only read when it hasn't been
	 * updated in the current transaction. So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	while (bio_offset < orig_len) {
		int count;
		u64 cur_disk_bytenr = orig_disk_bytenr + bio_offset;
		u8 *csum_dst = bbio->csum +
			(bio_offset >> fs_info->sectorsize_bits) * csum_size;

		count = search_csum_tree(fs_info, path, cur_disk_bytenr,
					 orig_len - bio_offset, csum_dst);
		if (count < 0) {
			ret = errno_to_blk_status(count);
			if (bbio->csum != bbio->csum_inline)
				kfree(bbio->csum);
			bbio->csum = NULL;
			break;
		}

		/*
		 * We didn't find a csum for this range. We need to make sure
		 * we complain loudly about this, because we are not NODATASUM.
		 *
		 * However for the DATA_RELOC inode we could potentially be
		 * relocating data extents for a NODATASUM inode, so the inode
		 * itself won't be marked with NODATASUM, but the extent we're
		 * copying is in fact NODATASUM. If we don't find a csum we
		 * assume this is the case.
		 */
		if (count == 0) {
			memset(csum_dst, 0, csum_size);
			count = 1;

			if (inode->root->root_key.objectid ==
			    BTRFS_DATA_RELOC_TREE_OBJECTID) {
				u64 file_offset = bbio->file_offset + bio_offset;

				set_extent_bits(&inode->io_tree, file_offset,
						file_offset + sectorsize - 1,
						EXTENT_NODATASUM);
			} else {
				btrfs_warn_rl(fs_info,
			"csum hole found for disk bytenr range [%llu, %llu)",
				cur_disk_bytenr, cur_disk_bytenr + sectorsize);
			}
		}
		bio_offset += count * sectorsize;
	}

	btrfs_free_path(path);
	return ret;
}
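/*
 * Sizing example for the inline vs. allocated csum buffer above, assuming
 * 4K sectors, crc32c and the usual BTRFS_BIO_INLINE_CSUM_SIZE of 64: a 64K
 * read needs 16 * 4 = 64 bytes and fits in bbio->csum_inline, while a 128K
 * read needs 128 bytes and takes the kmalloc_array() path.
 */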
/*
 * Collect the checksums for the logical range [start, end] into a list of
 * btrfs_ordered_sum structures appended to @list. @search_commit makes the
 * search use the commit root without locking, @nowait makes the tree
 * search fail with -EAGAIN instead of blocking.
 */
int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
			    struct list_head *list, int search_commit,
			    bool nowait)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	int ret;

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->nowait = nowait;
	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);

		/*
		 * There are two cases we can hit here for the previous csum
		 * item:
		 *
		 *		|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Or
		 *				|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Check if the previous csum item covers the leading part of
		 * the search range. If so we have to start from the previous
		 * csum item.
		 */
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			if (bytes_to_csum_size(fs_info, start - key.offset) <
			    btrfs_item_size(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		u64 csum_end;

		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		csum_end = key.offset + csum_size_to_bytes(fs_info,
					btrfs_item_size(leaf, path->slots[0]));
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			unsigned long offset;
			size_t size;

			size = min_t(size_t, csum_end - start,
				     max_ordered_sum_bytes(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = bytes_to_csum_size(fs_info, start - key.offset);

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   bytes_to_csum_size(fs_info, size));

			start += size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}
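/*
 * Illustrative usage sketch for btrfs_lookup_csums_list() (not a caller in
 * this file): callers own the returned entries and must free them, roughly
 * like this:
 *
 *	LIST_HEAD(list);
 *	struct btrfs_ordered_sum *sums, *tmp;
 *
 *	ret = btrfs_lookup_csums_list(csum_root, start, end, &list, 1, false);
 *	list_for_each_entry_safe(sums, tmp, &list, list) {
 *		... consume sums->bytenr / sums->len / sums->sums ...
 *		list_del(&sums->list);
 *		kfree(sums);
 *	}
 */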
/*
 * Do the same work as btrfs_lookup_csums_list(), the difference is in how
 * we return the result.
 *
 * This version will set the corresponding bits in @csum_bitmap to represent
 * that there is a csum found.
 * Each bit represents a sector. Thus the caller should ensure the @csum_buf
 * passed in is large enough to contain all the csums.
 */
int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
			      u8 *csum_buf, unsigned long *csum_bitmap,
			      bool search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_csum_item *item;
	const u64 orig_start = start;
	int ret;

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.type = BTRFS_EXTENT_CSUM_KEY;
	key.offset = start;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);

		/*
		 * There are two cases we can hit here for the previous csum
		 * item:
		 *
		 *		|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Or
		 *				|<- search range ->|
		 *	|<- csum item ->|
		 *
		 * Check if the previous csum item covers the leading part of
		 * the search range. If so we have to start from the previous
		 * csum item.
		 */
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			if (bytes_to_csum_size(fs_info, start - key.offset) <
			    btrfs_item_size(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		u64 csum_end;

		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		csum_end = key.offset + csum_size_to_bytes(fs_info,
					btrfs_item_size(leaf, path->slots[0]));
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			unsigned long offset;
			size_t size;
			u8 *csum_dest = csum_buf + bytes_to_csum_size(fs_info,
						start - orig_start);

			size = min_t(size_t, csum_end - start, end + 1 - start);

			offset = bytes_to_csum_size(fs_info, start - key.offset);

			read_extent_buffer(path->nodes[0], csum_dest,
					   ((unsigned long)item) + offset,
					   bytes_to_csum_size(fs_info, size));

			bitmap_set(csum_bitmap,
				(start - orig_start) >> fs_info->sectorsize_bits,
				size >> fs_info->sectorsize_bits);

			start += size;
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	btrfs_free_path(path);
	return ret;
}
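/*
 * Indexing example for the bitmap variant above, assuming 4K sectors and
 * crc32c: when start == orig_start + 24K, the csums land at byte offset
 * bytes_to_csum_size(24K) == 24 in @csum_buf, and bitmap_set() marks bits
 * starting at bit 6, one bit per 4K sector copied.
 */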
/*
 * Calculate checksums of the data contained inside a bio.
 */
blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	struct bio *bio = &bbio->bio;
	u64 offset = bbio->file_offset;
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	unsigned int blockcount;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	unsigned nofs_flag;

	nofs_flag = memalloc_nofs_save();
	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
			GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	sums->bytenr = bio->bi_iter.bi_sector << 9;
	index = 0;

	shash->tfm = fs_info->csum_shash;

	bio_for_each_segment(bvec, bio, iter) {
		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			/*
			 * The bio range is not covered by any ordered extent,
			 * this must be a code logic error.
			 */
			if (unlikely(!ordered)) {
				WARN(1, KERN_WARNING
			"no ordered extent for root %llu ino %llu offset %llu\n",
				     inode->root->root_key.objectid,
				     btrfs_ino(inode), offset);
				kvfree(sums);
				return BLK_STS_IOERR;
			}
		}

		blockcount = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < blockcount; i++) {
			if (!(bio->bi_opf & REQ_BTRFS_ONE_ORDERED) &&
			    !in_range(offset, ordered->file_offset,
				      ordered->num_bytes)) {
				unsigned long bytes_left;

				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				nofs_flag = memalloc_nofs_save();
				sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
						      bytes_left), GFP_KERNEL);
				memalloc_nofs_restore(nofs_flag);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = (bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;
			}

			data = bvec_kmap_local(&bvec);
			crypto_shash_digest(shash,
					    data + (i * fs_info->sectorsize),
					    fs_info->sectorsize,
					    sums->sums + index);
			kunmap_local(data);
			index += fs_info->csum_size;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}
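/*
 * Worked example for the per-bvec loop above, assuming 4K sectors: a bvec
 * with bv_len == 12K yields blockcount == 3, and each iteration hashes one
 * 4K block, advancing the output cursor by csum_size and the logical file
 * cursor by sectorsize.
 */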
/*
 * Remove one checksum overlapping a range.
 *
 * This expects the key to describe the csum pointed to by the path, and it
 * expects the csum to overlap the range [bytenr, bytenr + len).
 *
 * The csum should not be entirely contained in the range and the range
 * should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	const u32 csum_size = fs_info->csum_size;
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sectorsize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
	csum_end <<= blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [              ]
		 *   [csum               ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;

		new_size *= csum_size;
		btrfs_truncate_item(path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *         [              ]
		 *                 [csum          ]
		 *   we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;

		new_size *= csum_size;

		btrfs_truncate_item(path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}
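/*
 * Worked example for truncate_one_csum(), assuming 4K sectors and crc32c:
 * a csum item at key->offset == 1M covering [1M, 1M + 64K) truncated
 * against bytenr == 1M + 48K, len == 32K hits the first branch; new_size
 * becomes 12 * 4 = 48 bytes and the item now covers [1M, 1M + 48K).
 */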
/*
 * Delete the csum items from the csum tree for a given range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret = 0;
	const u32 csum_size = fs_info->csum_size;
	u32 blocksize_bits = fs_info->sectorsize_bits;

	ASSERT(root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
	       root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* This csum ends before we start, we're done. */
		if (csum_end <= bytenr)
			break;

		/* Delete the entire item, it is inside our range. */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				break;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *  [csum                    ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header. So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes. Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key.
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				break;
			}
			ret = 0;

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	btrfs_free_path(path);
	return ret;
}
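/*
 * Worked example for the middle-split case in btrfs_del_csums(), assuming
 * 4K sectors and crc32c: deleting [1M + 16K, 1M + 32K) from a csum item at
 * key.offset == 1M covering [1M, 1M + 64K) zeroes 16 bytes of csums at
 * byte offset 16 inside the item, splits the item at that offset, and the
 * next loop iteration truncates the now well-formed tail item.
 */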
/*
 * Find the offset of the checksum item following the one the path points
 * at and store it in @next_offset, or (u64)-1 if there is no next
 * checksum item.
 */
static int find_next_csum_offset(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 *next_offset)
{
	const u32 nritems = btrfs_header_nritems(path->nodes[0]);
	struct btrfs_key found_key;
	int slot = path->slots[0] + 1;
	int ret;

	if (nritems == 0 || slot >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0) {
			return ret;
		} else if (ret > 0) {
			*next_offset = (u64)-1;
			return 0;
		}
		slot = path->slots[0];
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);

	if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    found_key.type != BTRFS_EXTENT_CSUM_KEY)
		*next_offset = (u64)-1;
	else
		*next_offset = found_key.offset;

	return 0;
}

/*
 * Insert the checksums in @sums into the csum tree, extending or splitting
 * existing checksum items as needed.
 */
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	const u32 csum_size = fs_info->csum_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto out;

	if (ret == -EFBIG) {
		u32 item_size;
		/* We found one, but it isn't big enough yet. */
		leaf = path->nodes[0];
		item_size = btrfs_item_size(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* Already at max size, make a new one. */
			goto insert;
		}
	} else {
		/* We didn't find a csum item, insert one. */
		ret = find_next_csum_offset(root, path, &next_offset);
		if (ret < 0)
			goto out;
		found_next = 1;
		goto insert;
	}

	/*
	 * At this point, we know the tree has a checksum item that ends at an
	 * offset matching the start of the checksum range we want to insert.
	 * We try to extend that item as much as possible and then add as many
	 * checksums to it as they fit.
	 *
	 * First check if the leaf has enough free space for at least one
	 * checksum. If it has, go directly to the item extension code,
	 * otherwise release the path and do a search for insertion before
	 * the extension.
	 */
	if (btrfs_leaf_free_space(leaf) >= csum_size) {
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		csum_offset = (bytenr - found_key.offset) >>
			      fs_info->sectorsize_bits;
		goto extend_csum;
	}

	btrfs_release_path(path);
	path->search_for_extension = 1;
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	path->search_for_extension = 0;
	if (ret < 0)
		goto out;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >> fs_info->sectorsize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

extend_csum:
	if (csum_offset == btrfs_item_size(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sectorsize_bits;
		WARN_ON(tmp < 1);
		extend_nr = max_t(int, 1, tmp);

		/*
		 * A log tree can already have checksum items with a subset of
		 * the checksums we are trying to log. This can happen after
		 * doing a sequence of partial writes into prealloc extents and
		 * fsyncs in between, with a full fsync logging a larger subrange
		 * of an extent for which a previous fast fsync logged a smaller
		 * subrange. And this happens in particular due to merging file
		 * extent items when we complete an ordered extent for a range
		 * covered by a prealloc extent - this is done at
		 * btrfs_mark_extent_written().
		 *
		 * So if we try to extend the previous checksum item, which has
		 * a range that ends at the start of the range we want to insert,
		 * make sure we don't extend beyond the start offset of the next
		 * checksum item. If we are at the last item in the leaf, then
		 * forget the optimization of extending and add a new checksum
		 * item - it is not worth the complexity of releasing the path,
		 * getting the first key for the next leaf, repeating the btree
		 * search, etc, because log trees are temporary anyway and it
		 * would only save a few bytes of leaf space.
		 */
		if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
			if (path->slots[0] + 1 >=
			    btrfs_header_nritems(path->nodes[0])) {
				ret = find_next_csum_offset(root, path, &next_offset);
				if (ret < 0)
					goto out;
				found_next = 1;
				goto insert;
			}

			ret = find_next_csum_offset(root, path, &next_offset);
			if (ret < 0)
				goto out;

			tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
			if (tmp <= INT_MAX)
				extend_nr = min_t(int, extend_nr, tmp);
		}

		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size(leaf, path->slots[0]);
		diff = min_t(u32, btrfs_leaf_free_space(leaf), diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sectorsize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sectorsize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	if (ret < 0)
		goto out;
	if (WARN_ON(ret != 0))
		goto out;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >> fs_info->sectorsize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	index += ins_size;
	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Fill an extent_map structure from the file extent item that @path points
 * to.
 */
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;
	extent_end = btrfs_file_extent_end(path);
	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	em->generation = btrfs_file_extent_generation(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		em->compress_type = compress_type;
		if (compress_type != BTRFS_COMPRESS_NONE)
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}
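/*
 * Worked example for the uncompressed REG branch above: a file extent item
 * at file offset 64K with disk_bytenr == 128M, offset == 4K and
 * num_bytes == 8K yields em->start == 64K, em->len == 8K,
 * em->orig_start == 60K and em->block_start == 128M + 4K; a zero
 * disk_bytenr would instead mark the mapping as a hole.
 */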
/*
 * Returns the end offset (non inclusive) of the file extent item the given
 * path points to. If it points to an inline extent, the returned offset is
 * rounded up to the sector size.
 */
u64 btrfs_file_extent_end(const struct btrfs_path *path)
{
	const struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 end;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
		end = btrfs_file_extent_ram_bytes(leaf, fi);
		end = ALIGN(key.offset + end, leaf->fs_info->sectorsize);
	} else {
		end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	}

	return end;
}
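/*
 * Rounding example for btrfs_file_extent_end(), assuming a 4K sectorsize:
 * an inline extent at key.offset == 0 with ram_bytes == 1000 ends at
 * ALIGN(1000, 4096) == 4096, while a regular extent at offset 64K with
 * num_bytes == 8K ends at 72K exactly.
 */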