/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"

#include <linux/bitops.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map common"

/*----------------------------------------------------------------*/

/*
 * Index validator.
 */
#define INDEX_CSUM_XOR 160478

static void index_prepare_for_write(struct dm_block_validator *v,
				    struct dm_block *b,
				    size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);

	mi_le->blocknr = cpu_to_le64(dm_block_location(b));
	mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
						 block_size - sizeof(__le32),
						 INDEX_CSUM_XOR));
}

static int index_check(struct dm_block_validator *v,
		       struct dm_block *b,
		       size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
		DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(mi_le->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
					       block_size - sizeof(__le32),
					       INDEX_CSUM_XOR));
	if (csum_disk != mi_le->csum) {
		DMERR_LIMIT("index_check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};

/*----------------------------------------------------------------*/

/*
 * Bitmap validator
 */
#define BITMAP_CSUM_XOR 240779

static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
					struct dm_block *b,
					size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
						       block_size - sizeof(__le32),
						       BITMAP_CSUM_XOR));
}

static int dm_bitmap_check(struct dm_block_validator *v,
			   struct dm_block *b,
			   size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
					       block_size - sizeof(__le32),
					       BITMAP_CSUM_XOR));
	if (csum_disk != disk_header->csum) {
		DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = dm_bitmap_prepare_for_write,
	.check = dm_bitmap_check,
};

/*----------------------------------------------------------------*/

#define ENTRIES_PER_WORD 32
#define ENTRIES_SHIFT 5

static void *dm_bitmap_data(struct dm_block *b)
{
	return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}

#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL

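/*
 * Each reference count entry in a bitmap block is 2 bits wide, so one
 * 64-bit word holds ENTRIES_PER_WORD (32) entries.  A value of 0, 1 or
 * 2 is the count itself; 3 means the real count has overflowed into
 * the separate ref count btree (see sm_ll_lookup() and sm_ll_mutate()
 * below).
 *
 * dm_bitmap_word_used() is a bit-twiddling shortcut that returns
 * non-zero when every 2-bit entry in the word containing entry 'b' is
 * non-zero, i.e. the word has no free entries; sm_find_free() uses it
 * to skip a whole word at a time.
 */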
static unsigned dm_bitmap_word_used(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}

static unsigned sm_lookup_bitmap(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	unsigned hi, lo;

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
	hi = !!test_bit_le(b, (void *) w_le);
	lo = !!test_bit_le(b + 1, (void *) w_le);
	return (hi << 1) | lo;
}

static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}

static int sm_find_free(void *addr, unsigned begin, unsigned end,
			unsigned *result)
{
	while (begin < end) {
		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
		    dm_bitmap_word_used(addr, begin)) {
			begin += ENTRIES_PER_WORD;
			continue;
		}

		if (!sm_lookup_bitmap(addr, begin)) {
			*result = begin;
			return 0;
		}

		begin++;
	}

	return -ENOSPC;
}

/*----------------------------------------------------------------*/

static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	memset(ll, 0, sizeof(struct ll_disk));

	ll->tm = tm;

	ll->bitmap_info.tm = tm;
	ll->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	ll->bitmap_info.value_type.inc = NULL;
	ll->bitmap_info.value_type.dec = NULL;
	ll->bitmap_info.value_type.equal = NULL;

	ll->ref_count_info.tm = tm;
	ll->ref_count_info.levels = 1;
	ll->ref_count_info.value_type.size = sizeof(uint32_t);
	ll->ref_count_info.value_type.inc = NULL;
	ll->ref_count_info.value_type.dec = NULL;
	ll->ref_count_info.value_type.equal = NULL;

	ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (ll->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
				ENTRIES_PER_BYTE;
	ll->nr_blocks = 0;
	ll->bitmap_root = 0;
	ll->ref_count_root = 0;
	ll->bitmap_index_changed = false;

	return 0;
}

int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks, nr_indexes;
	unsigned old_blocks, blocks;

	nr_blocks = ll->nr_blocks + extra_blocks;
	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);

	nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
	if (nr_indexes > ll->max_entries(ll)) {
		DMERR("space map too large");
		return -EINVAL;
	}

	/*
	 * We need to set this before the dm_tm_new_block() call below.
	 */
	ll->nr_blocks = nr_blocks;
	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;

		idx.blocknr = cpu_to_le64(dm_block_location(b));

		dm_tm_unlock(ll->tm, b);

		idx.nr_free = cpu_to_le32(ll->entries_per_block);
		idx.none_free_before = 0;

		r = ll->save_ie(ll, i, &idx);
		if (r < 0)
			return r;
	}

	return 0;
}

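/*
 * The lookup and mutate paths below all map an absolute block number
 * onto an index entry plus an offset within that entry's bitmap block:
 *
 *	index = b / ll->entries_per_block;	which disk_index_entry
 *	bit   = b % ll->entries_per_block;	which 2-bit entry within it
 *
 * do_div() does both at once: it divides its first argument in place
 * and returns the remainder.
 */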
int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	dm_tm_unlock(ll->tm, blk);

	return 0;
}

static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
				      uint32_t *result)
{
	__le32 le_rc;
	int r;

	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
	if (r < 0)
		return r;

	*result = le32_to_cpu(le_rc);

	return r;
}

int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r = sm_ll_lookup_bitmap(ll, b, result);

	if (r)
		return r;

	if (*result != 3)
		return r;

	return sm_ll_lookup_big_ref_count(ll, b, result);
}

int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	/*
	 * FIXME: Use shifts
	 */
	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;
		}

		dm_tm_unlock(ll->tm, blk);

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}

int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
				 dm_block_t begin, dm_block_t end, dm_block_t *b)
{
	int r;
	uint32_t count;

	do {
		r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
		if (r)
			break;

		/* double check this block wasn't used in the old transaction */
		if (*b >= old_ll->nr_blocks)
			count = 0;
		else {
			r = sm_ll_lookup(old_ll, *b, &count);
			if (r)
				break;

			if (count)
				begin = *b + 1;
		}
	} while (count);

	return r;
}

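/*
 * sm_ll_mutate() is the common implementation behind
 * sm_ll_insert/inc/dec.  It shadows the relevant bitmap block so the
 * update belongs to the current transaction, fetches the old count
 * (falling back to the ref count btree when the 2-bit entry holds the
 * overflow value 3), asks the supplied mutator callback for the new
 * count, then stores the result: counts 0-2 go straight into the
 * bitmap, anything larger goes into the btree with the bitmap entry
 * pinned at 3.  Finally the index entry's nr_free and none_free_before
 * hints are adjusted and saved.
 */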
static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
			int (*mutator)(void *context, uint32_t old, uint32_t *new),
			void *context, enum allocation_event *ev)
{
	int r;
	uint32_t bit, old, ref_count;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));

	bm_le = dm_bitmap_data(nb);
	old = sm_lookup_bitmap(bm_le, bit);

	if (old > 2) {
		r = sm_ll_lookup_big_ref_count(ll, b, &old);
		if (r < 0) {
			dm_tm_unlock(ll->tm, nb);
			return r;
		}
	}

	r = mutator(context, old, &ref_count);
	if (r) {
		dm_tm_unlock(ll->tm, nb);
		return r;
	}

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);

		dm_tm_unlock(ll->tm, nb);

		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}

	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		sm_set_bitmap(bm_le, bit, 3);
		dm_tm_unlock(ll->tm, nb);

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	if (ref_count && !old) {
		*ev = SM_ALLOC;
		ll->nr_allocated++;
		le32_add_cpu(&ie_disk.nr_free, -1);
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*ev = SM_FREE;
		ll->nr_allocated--;
		le32_add_cpu(&ie_disk.nr_free, 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	} else
		*ev = SM_NONE;

	return ll->save_ie(ll, index, &ie_disk);
}

static int set_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = *((uint32_t *) context);
	return 0;
}

int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}

static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = old + 1;
	return 0;
}

int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
}

static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
{
	if (!old) {
		DMERR_LIMIT("unable to decrement a reference count below 0");
		return -EINVAL;
	}

	*new = old - 1;
	return 0;
}

int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
}

int sm_ll_commit(struct ll_disk *ll)
{
	int r = 0;

	if (ll->bitmap_index_changed) {
		r = ll->commit(ll);
		if (!r)
			ll->bitmap_index_changed = false;
	}

	return r;
}

/*----------------------------------------------------------------*/

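/*
 * Metadata space map: the whole array of index entries lives in a
 * single index block that is kept in core in ll->mi_le and validated
 * by index_validator above.  load_ie/save_ie just copy entries in and
 * out of that copy, commit shadows the index block and writes the copy
 * back, and the number of bitmap blocks is therefore capped at
 * MAX_METADATA_BITMAPS.
 */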
static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
	return 0;
}

static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	ll->bitmap_index_changed = true;
	memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
	return 0;
}

static int metadata_ll_init_index(struct ll_disk *ll)
{
	int r;
	struct dm_block *b;

	r = dm_tm_new_block(ll->tm, &index_validator, &b);
	if (r < 0)
		return r;

	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

static int metadata_ll_open(struct ll_disk *ll)
{
	int r;
	struct dm_block *block;

	r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
			    &index_validator, &block);
	if (r)
		return r;

	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
	dm_tm_unlock(ll->tm, block);

	return 0;
}

static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}

static int metadata_ll_commit(struct ll_disk *ll)
{
	int r, inc;
	struct dm_block *b;

	r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
	if (r)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct disk_sm_root smr;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	/*
	 * We don't know the alignment of the root_le buffer, so need to
	 * copy into a new structure.
	 */
	memcpy(&smr, root_le, sizeof(smr));

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr.nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr.nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr.bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr.ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/

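/*
 * Disk space map: index entries are kept in a btree rooted at
 * ll->bitmap_root, so there is no fixed limit on the number of bitmap
 * blocks (disk_ll_max_entries() returns -1ULL) and the commit hook is
 * a no-op because every save_ie already updates the btree.
 */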
static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
}

static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	__dm_bless_for_disk(ie);
	return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
			       &index, ie, &ll->bitmap_root);
}

static int disk_ll_init_index(struct ll_disk *ll)
{
	return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}

static int disk_ll_open(struct ll_disk *ll)
{
	/* nothing to do */
	return 0;
}

static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
	return -1ULL;
}

static int disk_ll_commit(struct ll_disk *ll)
{
	return 0;
}

int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
		    void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/