// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

static inline u32 max_ordered_sum_bytes(struct btrfs_fs_info *fs_info,
					u16 csum_size)
{
	u32 ncsums = (PAGE_SIZE - sizeof(struct btrfs_ordered_sum)) / csum_size;

	return ncsums * fs_info->sectorsize;
}

int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

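		/*
		 * csum_offset == csums_in_item means bytenr is the block
		 * right after the last one this item covers: the item
		 * exists but has no entry for bytenr, so return -EFBIG and
		 * let the caller grow it.  A larger csum_offset means there
		 * is a gap, and the lookup fails with -ENOENT below.
		 */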
		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
					    u64 logical_offset, u8 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum = kmalloc_array(nblocks, csum_size,
							GFP_NOFS);
			if (!btrfs_bio->csum) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction. So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

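	/*
	 * bi_sector is always in units of 512-byte sectors, independent of
	 * the filesystem sector size, so shift by 9 to get the byte offset
	 * of this bio on disk.
	 */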
	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment(bvec, bio, iter) {
		page_bytes_left = bvec.bv_len;
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count--) {
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}

blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u8 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}

int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

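	/*
	 * Checksums all live under the single objectid
	 * BTRFS_EXTENT_CSUM_OBJECTID, keyed by the logical bytenr of the
	 * first block they cover.  The search below finds the item at
	 * @start or the first one after it; if @start is covered by the
	 * tail of the previous item we step back one slot.
	 */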
	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     max_ordered_sum_bytes(fs_info, csum_size));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

/*
 * btrfs_csum_one_bio - Calculates checksums of the data contained inside a bio
 * @inode:	 Owner of the data inside the bio
 * @bio:	 Contains the data to be checksummed
 * @file_start:  offset in file this bio begins to describe
 * @contig:	 Boolean. If true/1 means all bio vecs in this bio are
 *		 contiguous and they begin at @file_start in the file. False/0
 *		 means this bio can contain potentially discontiguous bio vecs
 *		 so the logical offset of each should be calculated separately.
 */
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
				u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	int nr_sectors;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	u64 offset;
	unsigned nofs_flag;
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	nofs_flag = memalloc_nofs_save();
	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	shash->tfm = fs_info->csum_shash;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

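		/*
		 * Checksum one filesystem block at a time.  A bio may span
		 * more than one ordered extent; when the file offset walks
		 * past the current ordered extent, the sums accumulated so
		 * far are attached to it and a fresh btrfs_ordered_sum is
		 * started for the rest of the bio.
		 */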
		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				nofs_flag = memalloc_nofs_save();
				sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
						      bytes_left), GFP_KERNEL);
				memalloc_nofs_restore(nofs_flag);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;
			}

			crypto_shash_init(shash);
			data = kmap_atomic(bvec.bv_page);
			crypto_shash_update(shash, data + bvec.bv_offset
					    + (i * fs_info->sectorsize),
					    fs_info->sectorsize);
			kunmap_atomic(data);
			crypto_shash_final(shash, (char *)(sums->sums + index));
			index += csum_size;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 * [ bytenr - len ]
		 * [ ]
		 * [csum ]
		 * A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 * [ bytenr - len ]
		 * [ ]
		 * [csum ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	ASSERT(root == fs_info->csum_root ||
	       root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

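	/*
	 * Each iteration searches for the last csum item that starts below
	 * end_byte and resolves its overlap with [bytenr, bytenr + len):
	 * items fully inside the range are deleted (in batches where
	 * possible), items overlapping only one edge are truncated, and an
	 * item that spans the whole range is split so a later pass can
	 * truncate the two halves.
	 */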
	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 * [ bytenr - len ]
			 * [csum ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header. So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes. Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

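	/*
	 * Three outcomes are possible: an existing item already covers
	 * bytenr and we jump straight to 'found' to write into it; the
	 * lookup returns -EFBIG, meaning an item ends right at bytenr, so
	 * we try to extend it unless it is already at the maximum item
	 * size; or it returns -ENOENT and we insert a brand new item,
	 * sized to stop at the next existing csum item if one follows.
	 */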
	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in. Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
		      fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
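	/*
	 * Copy as many checksum bytes from sums->sums as fit between item
	 * and item_end.  If the ordered sum covers more blocks than that,
	 * loop back to 'again' and place the remainder in the next item.
	 */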
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	index += ins_size;
	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}

void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, "
			  "root %llu", type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}