/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"

#include <linux/bitops.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map common"

/*----------------------------------------------------------------*/

/*
 * Index validator.
 */
#define INDEX_CSUM_XOR 160478

static void index_prepare_for_write(struct dm_block_validator *v,
				    struct dm_block *b,
				    size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);

	mi_le->blocknr = cpu_to_le64(dm_block_location(b));
	mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
						 block_size - sizeof(__le32),
						 INDEX_CSUM_XOR));
}

static int index_check(struct dm_block_validator *v,
		       struct dm_block *b,
		       size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
		DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(mi_le->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
					       block_size - sizeof(__le32),
					       INDEX_CSUM_XOR));
	if (csum_disk != mi_le->csum) {
		DMERR_LIMIT("index_check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};

/*----------------------------------------------------------------*/

/*
 * Bitmap validator
 */
#define BITMAP_CSUM_XOR 240779

static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
					struct dm_block *b,
					size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
						       block_size - sizeof(__le32),
						       BITMAP_CSUM_XOR));
}

static int dm_bitmap_check(struct dm_block_validator *v,
			   struct dm_block *b,
			   size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
					       block_size - sizeof(__le32),
					       BITMAP_CSUM_XOR));
	if (csum_disk != disk_header->csum) {
		DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = dm_bitmap_prepare_for_write,
	.check = dm_bitmap_check,
};

/*----------------------------------------------------------------*/

#define ENTRIES_PER_WORD 32
#define ENTRIES_SHIFT	5

static void *dm_bitmap_data(struct dm_block *b)
{
	return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}

#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL

static unsigned dm_bitmap_word_used(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}

static unsigned sm_lookup_bitmap(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	unsigned hi, lo;

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
	hi = !!test_bit_le(b, (void *) w_le);
	lo = !!test_bit_le(b + 1, (void *) w_le);
	return (hi << 1) | lo;
}

static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}

static int sm_find_free(void *addr, unsigned begin, unsigned end,
			unsigned *result)
{
	while (begin < end) {
		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
		    dm_bitmap_word_used(addr, begin)) {
			begin += ENTRIES_PER_WORD;
			continue;
		}

		if (!sm_lookup_bitmap(addr, begin)) {
			*result = begin;
			return 0;
		}

		begin++;
	}

	return -ENOSPC;
}

/*----------------------------------------------------------------*/

static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	memset(ll, 0, sizeof(struct ll_disk));

	ll->tm = tm;

	ll->bitmap_info.tm = tm;
	ll->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	ll->bitmap_info.value_type.inc = NULL;
	ll->bitmap_info.value_type.dec = NULL;
	ll->bitmap_info.value_type.equal = NULL;

	ll->ref_count_info.tm = tm;
	ll->ref_count_info.levels = 1;
	ll->ref_count_info.value_type.size = sizeof(uint32_t);
	ll->ref_count_info.value_type.inc = NULL;
	ll->ref_count_info.value_type.dec = NULL;
	ll->ref_count_info.value_type.equal = NULL;

	ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (ll->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
		ENTRIES_PER_BYTE;
	ll->nr_blocks = 0;
	ll->bitmap_root = 0;
	ll->ref_count_root = 0;
	ll->bitmap_index_changed = false;

	return 0;
}

int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks, nr_indexes;
	unsigned old_blocks, blocks;

	nr_blocks = ll->nr_blocks + extra_blocks;
	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);

	nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
	if (nr_indexes > ll->max_entries(ll)) {
		DMERR("space map too large");
		return -EINVAL;
	}

	/*
	 * We need to set this before the dm_tm_new_block() call below.
	 */
	ll->nr_blocks = nr_blocks;
	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;

		idx.blocknr = cpu_to_le64(dm_block_location(b));

		dm_tm_unlock(ll->tm, b);

		idx.nr_free = cpu_to_le32(ll->entries_per_block);
		idx.none_free_before = 0;

		r = ll->save_ie(ll, i, &idx);
		if (r < 0)
			return r;
	}

	return 0;
}

int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	dm_tm_unlock(ll->tm, blk);

	return 0;
}

static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
				      uint32_t *result)
{
	__le32 le_rc;
	int r;

	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
	if (r < 0)
		return r;

	*result = le32_to_cpu(le_rc);

	return r;
}

int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r = sm_ll_lookup_bitmap(ll, b, result);

	if (r)
		return r;

	if (*result != 3)
		return r;

	return sm_ll_lookup_big_ref_count(ll, b, result);
}

int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	/*
	 * FIXME: Use shifts
	 */
	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);
	if (end == 0)
		end = ll->entries_per_block;

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;
		}

		dm_tm_unlock(ll->tm, blk);

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}

int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
				 dm_block_t begin, dm_block_t end, dm_block_t *b)
{
	int r;
	uint32_t count;

	do {
		r = sm_ll_find_free_block(new_ll, begin, new_ll->nr_blocks, b);
		if (r)
			break;

		/* double check this block wasn't used in the old transaction */
		if (*b >= old_ll->nr_blocks)
			count = 0;
		else {
			r = sm_ll_lookup(old_ll, *b, &count);
			if (r)
				break;

			if (count)
				begin = *b + 1;
		}
	} while (count);

	return r;
}

static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
			int (*mutator)(void *context, uint32_t old, uint32_t *new),
			void *context, enum allocation_event *ev)
{
	int r;
	uint32_t bit, old, ref_count;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));

	bm_le = dm_bitmap_data(nb);
	old = sm_lookup_bitmap(bm_le, bit);

	if (old > 2) {
		r = sm_ll_lookup_big_ref_count(ll, b, &old);
		if (r < 0) {
			dm_tm_unlock(ll->tm, nb);
			return r;
		}
	}

	r = mutator(context, old, &ref_count);
	if (r) {
		dm_tm_unlock(ll->tm, nb);
		return r;
	}

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);

		dm_tm_unlock(ll->tm, nb);

		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}

	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		sm_set_bitmap(bm_le, bit, 3);
		dm_tm_unlock(ll->tm, nb);

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	if (ref_count && !old) {
		*ev = SM_ALLOC;
		ll->nr_allocated++;
		le32_add_cpu(&ie_disk.nr_free, -1);
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*ev = SM_FREE;
		ll->nr_allocated--;
		le32_add_cpu(&ie_disk.nr_free, 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	} else
		*ev = SM_NONE;

	return ll->save_ie(ll, index, &ie_disk);
}

static int set_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = *((uint32_t *) context);
	return 0;
}

int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}

static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = old + 1;
	return 0;
}

int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
}

static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
{
	if (!old) {
		DMERR_LIMIT("unable to decrement a reference count below 0");
		return -EINVAL;
	}

	*new = old - 1;
	return 0;
}

int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
}

int sm_ll_commit(struct ll_disk *ll)
{
	int r = 0;

	if (ll->bitmap_index_changed) {
		r = ll->commit(ll);
		if (!r)
			ll->bitmap_index_changed = false;
	}

	return r;
}

/*----------------------------------------------------------------*/

static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
	return 0;
}

static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	ll->bitmap_index_changed = true;
	memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
	return 0;
}

static int metadata_ll_init_index(struct ll_disk *ll)
{
	int r;
	struct dm_block *b;

	r = dm_tm_new_block(ll->tm, &index_validator, &b);
	if (r < 0)
		return r;

	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

static int metadata_ll_open(struct ll_disk *ll)
{
	int r;
	struct dm_block *block;

	r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
			    &index_validator, &block);
	if (r)
		return r;

	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
	dm_tm_unlock(ll->tm, block);

	return 0;
}

static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}

static int metadata_ll_commit(struct ll_disk *ll)
{
	int r, inc;
	struct dm_block *b;

	r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
	if (r)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct disk_sm_root smr;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	/*
	 * We don't know the alignment of the root_le buffer, so need to
	 * copy into a new structure.
	 */
	memcpy(&smr, root_le, sizeof(smr));

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr.nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr.nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr.bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr.ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/

static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
}

static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	__dm_bless_for_disk(ie);
	return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
			       &index, ie, &ll->bitmap_root);
}

static int disk_ll_init_index(struct ll_disk *ll)
{
	return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}

static int disk_ll_open(struct ll_disk *ll)
{
	/* nothing to do */
	return 0;
}

static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
	return -1ULL;
}

static int disk_ll_commit(struct ll_disk *ll)
{
	return 0;
}

int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
		    void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/