/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)

int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
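	/*
	 * An exact key match lands directly on the csum item starting at
	 * @bytenr.  Otherwise step back one slot and check whether the
	 * previous item still covers @bytenr: -EFBIG means the item ends
	 * exactly at @bytenr (the caller may extend it), -ENOENT means no
	 * csum covers the address at all.
	 */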
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}

static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec *bvec;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0, i;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return -ENOMEM;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}
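	/*
	 * Checksums are indexed by the disk byte number the data was written
	 * to, so start from the bio's first sector and walk every segment,
	 * checking the in-memory ordered sums before falling back to the
	 * csum tree.
	 */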
	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment_all(bvec, bio, i) {
		page_bytes_left = bvec->bv_len;
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count--) {
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}

int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}

int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}
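	/*
	 * Position the search at the csum item keyed at @start.  If there is
	 * no exact match, the previous item may still cover @start, so peek
	 * at it before walking forward.
	 */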
	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bio_vec *bvec;
	int index;
	int nr_sectors;
	int i, j;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment_all(bvec, bio, j) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec->bv_page);
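		/*
		 * A segment can span several sectorsize blocks; round its
		 * length up and checksum each block separately.
		 */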
		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec->bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec->bv_page);
			}

			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec->bv_offset
						+ (i * fs_info->sectorsize),
						sums->sums[index],
						fs_info->sectorsize);
			btrfs_csum_final(sums->sums[index],
					(char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(fs_info, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum      ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(fs_info, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
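 *
 * Items that only partially overlap the range are truncated with
 * truncate_one_csum(); an item that extends past both ends of the range
 * is split in place first so that only the covered checksums are dropped.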
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
	struct btrfs_root *root = fs_info->csum_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum        ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
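			/*
			 * offset is the byte offset within the item of the
			 * first csum covering @bytenr; shift_len covers the
			 * csums for the deleted range, which are zeroed
			 * before the split.
			 */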
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
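	/*
	 * Drop the path and search again with ins_len set to csum_size so
	 * the leaf gets COWed (and split if needed) with enough free space
	 * to extend the existing item.
	 */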
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(fs_info, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(fs_info, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(fs_info, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}
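
/*
 * Fill an extent_map with the information stored in a file extent item:
 * the logical file range, disk location and length, and any compression
 * or prealloc state.  Inline extents map to EXTENT_MAP_INLINE and holes
 * (disk_bytenr == 0) to EXTENT_MAP_HOLE.
 */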
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(BTRFS_I(inode)), extent_start,
			  root->root_key.objectid);
	}
}