/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_CACHE_SIZE))

#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				  sizeof(u32) * (r)->sectorsize)

/*
 * Insert a new EXTENT_DATA item describing a regular (non-inline) file
 * extent for @objectid at file offset @pos.
 */
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Find the csum item covering @bytenr in the csum tree and return a pointer
 * to the checksum for that block inside the item.  Returns ERR_PTR(-EFBIG)
 * when an item ends exactly at @bytenr (it exists but would need to grow),
 * or ERR_PTR(-ENOENT) when no covering item is found.
 */
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
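	/*
	 * ret > 0 means there is no csum item keyed exactly at @bytenr; back
	 * up one slot below and check whether the previous item is large
	 * enough to cover @bytenr.  (Illustrative: with 4K blocks and 4 byte
	 * crc32c sums, an item keyed at X with item size 64 covers
	 * [X, X + 64K).)
	 */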
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

/*
 * Search for the EXTENT_DATA item of @objectid at file offset @offset.
 * @mod < 0 prepares the path for a deletion, any non-zero @mod cows the
 * path on the way down.
 */
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}

/*
 * Look up the data checksum for every sector of @bio, first in the ordered
 * sums attached to the inode, then in the csum tree, and store the results
 * either in @dst or in the btrfs_io_bio attached to the bio.
 */
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
				   struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int bio_index = 0;
	int count;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return -ENOMEM;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
		path->reada = READA_FORWARD;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	page_bytes_left = bvec->bv_len;
	while (bio_index < bio->bi_vcnt) {
		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + root->sectorsize - 1,
						EXTENT_NODATASUM, GFP_NOFS);
				} else {
					btrfs_info(BTRFS_I(inode)->root->fs_info,
						   "no csum found for inode %llu start %llu",
						   btrfs_ino(inode), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				root->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / root->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;

		while (count--) {
			disk_bytenr += root->sectorsize;
			offset += root->sectorsize;
			page_bytes_left -= root->sectorsize;
			if (!page_bytes_left) {
				bio_index++;
				bvec++;
				page_bytes_left = bvec->bv_len;
			}
		}
	}
	btrfs_free_path(path);
	return 0;
}

int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
			  struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
			      struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
}

/*
 * Collect all checksums covering the byte range [start, end] from the csum
 * tree into btrfs_ordered_sum entries appended to @list.
 */
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, root->sectorsize) &&
	       IS_ALIGNED(end + 1, root->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 root->fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * root->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(root));
			sums = kzalloc(btrfs_ordered_sum_size(root, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= root->fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += root->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

/*
 * Checksum every sector of @bio and attach the resulting sums to the ordered
 * extent(s) covering the bio, so they can be written into the csum tree when
 * the ordered extent completes.
 */
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	char *data;
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	int index;
	int nr_sectors;
	int i;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = page_offset(bvec->bv_page) + bvec->bv_offset;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	BUG_ON(!ordered); /* Logic error */
	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	while (bio_index < bio->bi_vcnt) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		data = kmap_atomic(bvec->bv_page);

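		/*
		 * A single bio_vec may span several fs blocks; round its
		 * length up to whole sectors and checksum each sector in the
		 * loop below (e.g. a 16K bvec with 4K sectors gives
		 * nr_sectors == 4).
		 */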
		nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
						 bvec->bv_len + root->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec->bv_page);
			}

			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec->bv_offset
						  + (i * root->sectorsize),
						  sums->sums[index],
						  root->sectorsize);
			btrfs_csum_final(sums->sums[index],
					 (char *)(sums->sums + index));
			index++;
			offset += root->sectorsize;
			this_sum_bytes += root->sectorsize;
			total_bytes += root->sectorsize;
		}

		kunmap_atomic(data);

		bio_index++;
		bvec++;
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, bytenr + len)
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= root->fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(root, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(root, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(root->fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
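 *
 * Items fully inside [bytenr, bytenr + len) are deleted; items that only
 * partially overlap the range are truncated or split instead.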
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	root = root->fs_info->csum_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum                ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
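			 *
			 * (Illustrative: if this item covers [0, 64K) and we
			 * are removing [16K, 32K), the csums for [16K, 32K)
			 * are zeroed, the item is split at 16K, and the next
			 * pass of the loop truncates the front of the new
			 * [16K, 64K) item so it starts at 32K.)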
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memset_extent_buffer(leaf, 0, item_offset + offset,
					     shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(root, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Insert the checksums described by @sums into the csum tree, extending or
 * reusing an existing csum item when there is room, otherwise inserting a
 * new one.  Loops until every checksum in @sums has been written.
 */
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(root, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.
	 * Grow it.
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			root->fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(root, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(root, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(root, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 root->fs_info->sb->s_blocksize_bits);

		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   root->fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			 ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * root->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}

/*
 * Convert the on-disk file extent item @fi, located at @path, into the
 * in-memory extent map @em.
 */
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = root->fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size, root->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(root->fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}