/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"

#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(struct btrfs_sector_sum) * \
				   (r)->sectorsize - (r)->sectorsize)

int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret);
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, int cow)
{
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
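		/*
		 * The exact bytenr was not found; step back to the
		 * previous item and check whether its run of checksums
		 * still covers bytenr.
		 */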
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset >= csums_in_item) {
			ret = -EFBIG;
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}


int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}


static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
				   struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	u32 sum;
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u32 diff;
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);
	int ret;
	struct btrfs_path *path;
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (bio->bi_size > PAGE_CACHE_SIZE * 8)
		path->reada = 2;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(root, inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_sector << 9;
	if (dio)
		offset = logical_offset;
	while (bio_index < bio->bi_vcnt) {
		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum);
		if (ret == 0)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				ret = PTR_ERR(item);
				if (ret == -ENOENT || ret == -EFBIG)
					ret = 0;
				sum = 0;
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + bvec->bv_len - 1,
						EXTENT_NODATASUM, GFP_NOFS);
				} else {
					printk(KERN_INFO "btrfs no csum found "
					       "for inode %llu start %llu\n",
					       (unsigned long long)
					       btrfs_ino(inode),
					       (unsigned long long)offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				root->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / root->sectorsize;
		diff = diff * csum_size;

		read_extent_buffer(path->nodes[0], &sum,
				   ((unsigned long)item) + diff,
				   csum_size);
found:
		if (dst)
			*dst++ = sum;
		else
			set_state_private(io_tree, offset, sum);
		disk_bytenr += bvec->bv_len;
		offset += bvec->bv_len;
		bio_index++;
		bvec++;
	}
	btrfs_free_path(path);
	return 0;
}

int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
			  struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
			      struct bio *bio, u64 offset, u32 *dst)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1);
}

int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_sector_sum *sector_sum;
	struct btrfs_csum_item *item;
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = 2;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
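		/*
		 * No exact match: see if the previous item is a csum
		 * item whose run of checksums still covers 'start',
		 * and step back to it if so.
		 */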
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 root->fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY)
			break;

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * root->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(root));
			sums = kzalloc(btrfs_ordered_sum_size(root, size),
				       GFP_NOFS);
			BUG_ON(!sums);

			sector_sum = sums->sums;
			sums->bytenr = start;
			sums->len = size;

			offset = (start - key.offset) >>
				 root->fs_info->sb->s_blocksize_bits;
			offset *= csum_size;

			while (size > 0) {
				read_extent_buffer(path->nodes[0],
						   &sector_sum->sum,
						   ((unsigned long)item) +
						   offset, csum_size);
				sector_sum->bytenr = start;

				size -= root->sectorsize;
				start += root->sectorsize;
				offset += csum_size;
				sector_sum++;
			}
			list_add_tail(&sums->list, list);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	btrfs_free_path(path);
	return ret;
}

int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_sector_sum *sector_sum;
	struct btrfs_ordered_extent *ordered;
	char *data;
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;
	u64 disk_bytenr;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sector_sum = sums->sums;
	disk_bytenr = (u64)bio->bi_sector << 9;
	sums->len = bio->bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = page_offset(bvec->bv_page) + bvec->bv_offset;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	BUG_ON(!ordered);
	sums->bytenr = ordered->start;

	while (bio_index < bio->bi_vcnt) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		if (!contig && (offset >= ordered->file_offset + ordered->len ||
		    offset < ordered->file_offset)) {
			unsigned long bytes_left;
			sums->len = this_sum_bytes;
			this_sum_bytes = 0;
			btrfs_add_ordered_sum(inode, ordered, sums);
			btrfs_put_ordered_extent(ordered);

			bytes_left = bio->bi_size - total_bytes;

			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
				       GFP_NOFS);
			BUG_ON(!sums);
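			/*
			 * The sums gathered so far were handed to the
			 * previous ordered extent above; start a fresh
			 * ordered_sum for the extent covering 'offset'.
			 */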
			sector_sum = sums->sums;
			sums->len = bytes_left;
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered);
			sums->bytenr = ordered->start;
		}

		data = kmap_atomic(bvec->bv_page, KM_USER0);
		sector_sum->sum = ~(u32)0;
		sector_sum->sum = btrfs_csum_data(root,
						  data + bvec->bv_offset,
						  sector_sum->sum,
						  bvec->bv_len);
		kunmap_atomic(data, KM_USER0);
		btrfs_csum_final(sector_sum->sum,
				 (char *)&sector_sum->sum);
		sector_sum->bytenr = disk_bytenr;

		sector_sum++;
		bio_index++;
		total_bytes += bvec->bv_len;
		this_sum_bytes += bvec->bv_len;
		disk_bytenr += bvec->bv_len;
		offset += bvec->bv_len;
		bvec++;
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *key,
				      u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
	int ret;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= root->fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 * A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		ret = btrfs_truncate_item(trans, root, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		ret = btrfs_truncate_item(trans, root, path, new_size, 0);

		key->offset = end_byte;
		ret = btrfs_set_item_key_safe(trans, root, path, key);
		BUG_ON(ret);
	} else {
		BUG();
	}
	return 0;
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
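 *
 * Items entirely inside the range are deleted; items that only
 * partially overlap are truncated; when the range falls in the middle
 * of a single item, that item is split in place first and then
 * truncated on a later pass.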
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);
	int blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	root = root->fs_info->csum_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum              ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
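			/*
			 * Zero out the checksums for the blocks being
			 * removed, then split the item at 'bytenr' so a
			 * later pass can simply truncate the front of
			 * the new item.
			 */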
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memset_extent_buffer(leaf, 0, item_offset + offset,
					     shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			BUG_ON(ret && ret != -EAGAIN);

			key.offset = end_byte - 1;
		} else {
			ret = truncate_one_csum(trans, root, path,
						&key, bytenr, len);
			BUG_ON(ret);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	u64 bytenr;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	u64 next_offset;
	u64 total_bytes = 0;
	int found_next;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 csum_offset;
	struct btrfs_sector_sum *sector_sum;
	u32 nritems;
	u32 ins_size;
	u16 csum_size =
		btrfs_super_csum_size(&root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	sector_sum = sums->sums;
again:
	next_offset = (u64)-1;
	found_next = 0;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = sector_sum->bytenr;
	bytenr = sector_sum->bytenr;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);

	item = btrfs_lookup_csum(trans, root, path, sector_sum->bytenr, 1);
	if (!IS_ERR(item)) {
		leaf = path->nodes[0];
		ret = 0;
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(root, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems - 1) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = 0;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
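	/*
	 * Drop the path and search again, asking btrfs_search_slot to
	 * make sure the leaf has csum_size bytes of free space so the
	 * existing item can be extended.
	 */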
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			root->fs_info->sb->s_blocksize_bits;

	if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
		goto insert;
	}

	if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		u32 diff = (csum_offset + 1) * csum_size;

		/*
		 * is the item big enough already?  we dropped our lock
		 * before and need to recheck
		 */
		if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
			goto csum;

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		if (diff != csum_size)
			goto insert;

		ret = btrfs_extend_item(trans, root, path, diff);
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp = total_bytes + root->sectorsize;
		u64 next_sector = sector_sum->bytenr;
		struct btrfs_sector_sum *next = sector_sum + 1;

		while (tmp < sums->len) {
			if (next_sector + root->sectorsize != next->bytenr)
				break;
			tmp += root->sectorsize;
			next_sector = next->bytenr;
			next++;
		}
		tmp = min(tmp, next_offset - file_key.offset);
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (ret != 0) {
		WARN_ON(1);
		goto fail_unlock;
	}
csum:
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	ret = 0;
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
				      btrfs_item_size_nr(leaf, path->slots[0]));
next_sector:

	write_extent_buffer(leaf, &sector_sum->sum, (unsigned long)item, csum_size);

	total_bytes += root->sectorsize;
	sector_sum++;
	if (total_bytes < sums->len) {
		item = (struct btrfs_csum_item *)((char *)item +
						  csum_size);
		if (item < item_end && bytenr + PAGE_CACHE_SIZE ==
		    sector_sum->bytenr) {
			bytenr = sector_sum->bytenr;
			goto next_sector;
		}
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}