1 /* -*- mode: c; c-basic-offset: 8; -*- 2 * vim: noexpandtab sw=8 ts=8 sts=0: 3 * 4 * extent_map.c 5 * 6 * Block/Cluster mapping functions 7 * 8 * Copyright (C) 2004 Oracle. All rights reserved. 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public 12 * License, version 2, as published by the Free Software Foundation. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * General Public License for more details. 18 * 19 * You should have received a copy of the GNU General Public 20 * License along with this program; if not, write to the 21 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 22 * Boston, MA 021110-1307, USA. 23 */ 24 25 #include <linux/fs.h> 26 #include <linux/init.h> 27 #include <linux/types.h> 28 #include <linux/fiemap.h> 29 30 #define MLOG_MASK_PREFIX ML_EXTENT_MAP 31 #include <cluster/masklog.h> 32 33 #include "ocfs2.h" 34 35 #include "alloc.h" 36 #include "dlmglue.h" 37 #include "extent_map.h" 38 #include "inode.h" 39 #include "super.h" 40 41 #include "buffer_head_io.h" 42 43 /* 44 * The extent caching implementation is intentionally trivial. 45 * 46 * We only cache a small number of extents stored directly on the 47 * inode, so linear order operations are acceptable. If we ever want 48 * to increase the size of the extent map, then these algorithms must 49 * get smarter. 50 */ 51 52 void ocfs2_extent_map_init(struct inode *inode) 53 { 54 struct ocfs2_inode_info *oi = OCFS2_I(inode); 55 56 oi->ip_extent_map.em_num_items = 0; 57 INIT_LIST_HEAD(&oi->ip_extent_map.em_list); 58 } 59 60 static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em, 61 unsigned int cpos, 62 struct ocfs2_extent_map_item **ret_emi) 63 { 64 unsigned int range; 65 struct ocfs2_extent_map_item *emi; 66 67 *ret_emi = NULL; 68 69 list_for_each_entry(emi, &em->em_list, ei_list) { 70 range = emi->ei_cpos + emi->ei_clusters; 71 72 if (cpos >= emi->ei_cpos && cpos < range) { 73 list_move(&emi->ei_list, &em->em_list); 74 75 *ret_emi = emi; 76 break; 77 } 78 } 79 } 80 81 static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos, 82 unsigned int *phys, unsigned int *len, 83 unsigned int *flags) 84 { 85 unsigned int coff; 86 struct ocfs2_inode_info *oi = OCFS2_I(inode); 87 struct ocfs2_extent_map_item *emi; 88 89 spin_lock(&oi->ip_lock); 90 91 __ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi); 92 if (emi) { 93 coff = cpos - emi->ei_cpos; 94 *phys = emi->ei_phys + coff; 95 if (len) 96 *len = emi->ei_clusters - coff; 97 if (flags) 98 *flags = emi->ei_flags; 99 } 100 101 spin_unlock(&oi->ip_lock); 102 103 if (emi == NULL) 104 return -ENOENT; 105 106 return 0; 107 } 108 109 /* 110 * Forget about all clusters equal to or greater than cpos. 111 */ 112 void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos) 113 { 114 struct ocfs2_extent_map_item *emi, *n; 115 struct ocfs2_inode_info *oi = OCFS2_I(inode); 116 struct ocfs2_extent_map *em = &oi->ip_extent_map; 117 LIST_HEAD(tmp_list); 118 unsigned int range; 119 120 spin_lock(&oi->ip_lock); 121 list_for_each_entry_safe(emi, n, &em->em_list, ei_list) { 122 if (emi->ei_cpos >= cpos) { 123 /* Full truncate of this record. */ 124 list_move(&emi->ei_list, &tmp_list); 125 BUG_ON(em->em_num_items == 0); 126 em->em_num_items--; 127 continue; 128 } 129 130 range = emi->ei_cpos + emi->ei_clusters; 131 if (range > cpos) { 132 /* Partial truncate */ 133 emi->ei_clusters = cpos - emi->ei_cpos; 134 } 135 } 136 spin_unlock(&oi->ip_lock); 137 138 list_for_each_entry_safe(emi, n, &tmp_list, ei_list) { 139 list_del(&emi->ei_list); 140 kfree(emi); 141 } 142 } 143 144 /* 145 * Is any part of emi2 contained within emi1 146 */ 147 static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1, 148 struct ocfs2_extent_map_item *emi2) 149 { 150 unsigned int range1, range2; 151 152 /* 153 * Check if logical start of emi2 is inside emi1 154 */ 155 range1 = emi1->ei_cpos + emi1->ei_clusters; 156 if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1) 157 return 1; 158 159 /* 160 * Check if logical end of emi2 is inside emi1 161 */ 162 range2 = emi2->ei_cpos + emi2->ei_clusters; 163 if (range2 > emi1->ei_cpos && range2 <= range1) 164 return 1; 165 166 return 0; 167 } 168 169 static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest, 170 struct ocfs2_extent_map_item *src) 171 { 172 dest->ei_cpos = src->ei_cpos; 173 dest->ei_phys = src->ei_phys; 174 dest->ei_clusters = src->ei_clusters; 175 dest->ei_flags = src->ei_flags; 176 } 177 178 /* 179 * Try to merge emi with ins. Returns 1 if merge succeeds, zero 180 * otherwise. 181 */ 182 static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi, 183 struct ocfs2_extent_map_item *ins) 184 { 185 /* 186 * Handle contiguousness 187 */ 188 if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) && 189 ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) && 190 ins->ei_flags == emi->ei_flags) { 191 emi->ei_clusters += ins->ei_clusters; 192 return 1; 193 } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys && 194 (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys && 195 ins->ei_flags == emi->ei_flags) { 196 emi->ei_phys = ins->ei_phys; 197 emi->ei_cpos = ins->ei_cpos; 198 emi->ei_clusters += ins->ei_clusters; 199 return 1; 200 } 201 202 /* 203 * Overlapping extents - this shouldn't happen unless we've 204 * split an extent to change it's flags. That is exceedingly 205 * rare, so there's no sense in trying to optimize it yet. 206 */ 207 if (ocfs2_ei_is_contained(emi, ins) || 208 ocfs2_ei_is_contained(ins, emi)) { 209 ocfs2_copy_emi_fields(emi, ins); 210 return 1; 211 } 212 213 /* No merge was possible. */ 214 return 0; 215 } 216 217 /* 218 * In order to reduce complexity on the caller, this insert function 219 * is intentionally liberal in what it will accept. 220 * 221 * The only rule is that the truncate call *must* be used whenever 222 * records have been deleted. This avoids inserting overlapping 223 * records with different physical mappings. 224 */ 225 void ocfs2_extent_map_insert_rec(struct inode *inode, 226 struct ocfs2_extent_rec *rec) 227 { 228 struct ocfs2_inode_info *oi = OCFS2_I(inode); 229 struct ocfs2_extent_map *em = &oi->ip_extent_map; 230 struct ocfs2_extent_map_item *emi, *new_emi = NULL; 231 struct ocfs2_extent_map_item ins; 232 233 ins.ei_cpos = le32_to_cpu(rec->e_cpos); 234 ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb, 235 le64_to_cpu(rec->e_blkno)); 236 ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters); 237 ins.ei_flags = rec->e_flags; 238 239 search: 240 spin_lock(&oi->ip_lock); 241 242 list_for_each_entry(emi, &em->em_list, ei_list) { 243 if (ocfs2_try_to_merge_extent_map(emi, &ins)) { 244 list_move(&emi->ei_list, &em->em_list); 245 spin_unlock(&oi->ip_lock); 246 goto out; 247 } 248 } 249 250 /* 251 * No item could be merged. 252 * 253 * Either allocate and add a new item, or overwrite the last recently 254 * inserted. 255 */ 256 257 if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) { 258 if (new_emi == NULL) { 259 spin_unlock(&oi->ip_lock); 260 261 new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS); 262 if (new_emi == NULL) 263 goto out; 264 265 goto search; 266 } 267 268 ocfs2_copy_emi_fields(new_emi, &ins); 269 list_add(&new_emi->ei_list, &em->em_list); 270 em->em_num_items++; 271 new_emi = NULL; 272 } else { 273 BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0); 274 emi = list_entry(em->em_list.prev, 275 struct ocfs2_extent_map_item, ei_list); 276 list_move(&emi->ei_list, &em->em_list); 277 ocfs2_copy_emi_fields(emi, &ins); 278 } 279 280 spin_unlock(&oi->ip_lock); 281 282 out: 283 if (new_emi) 284 kfree(new_emi); 285 } 286 287 static int ocfs2_last_eb_is_empty(struct inode *inode, 288 struct ocfs2_dinode *di) 289 { 290 int ret, next_free; 291 u64 last_eb_blk = le64_to_cpu(di->i_last_eb_blk); 292 struct buffer_head *eb_bh = NULL; 293 struct ocfs2_extent_block *eb; 294 struct ocfs2_extent_list *el; 295 296 ret = ocfs2_read_block(inode, last_eb_blk, &eb_bh); 297 if (ret) { 298 mlog_errno(ret); 299 goto out; 300 } 301 302 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 303 el = &eb->h_list; 304 305 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { 306 ret = -EROFS; 307 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); 308 goto out; 309 } 310 311 if (el->l_tree_depth) { 312 ocfs2_error(inode->i_sb, 313 "Inode %lu has non zero tree depth in " 314 "leaf block %llu\n", inode->i_ino, 315 (unsigned long long)eb_bh->b_blocknr); 316 ret = -EROFS; 317 goto out; 318 } 319 320 next_free = le16_to_cpu(el->l_next_free_rec); 321 322 if (next_free == 0 || 323 (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) 324 ret = 1; 325 326 out: 327 brelse(eb_bh); 328 return ret; 329 } 330 331 /* 332 * Return the 1st index within el which contains an extent start 333 * larger than v_cluster. 334 */ 335 static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el, 336 u32 v_cluster) 337 { 338 int i; 339 struct ocfs2_extent_rec *rec; 340 341 for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { 342 rec = &el->l_recs[i]; 343 344 if (v_cluster < le32_to_cpu(rec->e_cpos)) 345 break; 346 } 347 348 return i; 349 } 350 351 /* 352 * Figure out the size of a hole which starts at v_cluster within the given 353 * extent list. 354 * 355 * If there is no more allocation past v_cluster, we return the maximum 356 * cluster size minus v_cluster. 357 * 358 * If we have in-inode extents, then el points to the dinode list and 359 * eb_bh is NULL. Otherwise, eb_bh should point to the extent block 360 * containing el. 361 */ 362 static int ocfs2_figure_hole_clusters(struct inode *inode, 363 struct ocfs2_extent_list *el, 364 struct buffer_head *eb_bh, 365 u32 v_cluster, 366 u32 *num_clusters) 367 { 368 int ret, i; 369 struct buffer_head *next_eb_bh = NULL; 370 struct ocfs2_extent_block *eb, *next_eb; 371 372 i = ocfs2_search_for_hole_index(el, v_cluster); 373 374 if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) { 375 eb = (struct ocfs2_extent_block *)eb_bh->b_data; 376 377 /* 378 * Check the next leaf for any extents. 379 */ 380 381 if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL) 382 goto no_more_extents; 383 384 ret = ocfs2_read_block(inode, 385 le64_to_cpu(eb->h_next_leaf_blk), 386 &next_eb_bh); 387 if (ret) { 388 mlog_errno(ret); 389 goto out; 390 } 391 next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data; 392 393 if (!OCFS2_IS_VALID_EXTENT_BLOCK(next_eb)) { 394 ret = -EROFS; 395 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, next_eb); 396 goto out; 397 } 398 399 el = &next_eb->h_list; 400 401 i = ocfs2_search_for_hole_index(el, v_cluster); 402 } 403 404 no_more_extents: 405 if (i == le16_to_cpu(el->l_next_free_rec)) { 406 /* 407 * We're at the end of our existing allocation. Just 408 * return the maximum number of clusters we could 409 * possibly allocate. 410 */ 411 *num_clusters = UINT_MAX - v_cluster; 412 } else { 413 *num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster; 414 } 415 416 ret = 0; 417 out: 418 brelse(next_eb_bh); 419 return ret; 420 } 421 422 static int ocfs2_get_clusters_nocache(struct inode *inode, 423 struct buffer_head *di_bh, 424 u32 v_cluster, unsigned int *hole_len, 425 struct ocfs2_extent_rec *ret_rec, 426 unsigned int *is_last) 427 { 428 int i, ret, tree_height, len; 429 struct ocfs2_dinode *di; 430 struct ocfs2_extent_block *uninitialized_var(eb); 431 struct ocfs2_extent_list *el; 432 struct ocfs2_extent_rec *rec; 433 struct buffer_head *eb_bh = NULL; 434 435 memset(ret_rec, 0, sizeof(*ret_rec)); 436 if (is_last) 437 *is_last = 0; 438 439 di = (struct ocfs2_dinode *) di_bh->b_data; 440 el = &di->id2.i_list; 441 tree_height = le16_to_cpu(el->l_tree_depth); 442 443 if (tree_height > 0) { 444 ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh); 445 if (ret) { 446 mlog_errno(ret); 447 goto out; 448 } 449 450 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 451 el = &eb->h_list; 452 453 if (el->l_tree_depth) { 454 ocfs2_error(inode->i_sb, 455 "Inode %lu has non zero tree depth in " 456 "leaf block %llu\n", inode->i_ino, 457 (unsigned long long)eb_bh->b_blocknr); 458 ret = -EROFS; 459 goto out; 460 } 461 } 462 463 i = ocfs2_search_extent_list(el, v_cluster); 464 if (i == -1) { 465 /* 466 * Holes can be larger than the maximum size of an 467 * extent, so we return their lengths in a seperate 468 * field. 469 */ 470 if (hole_len) { 471 ret = ocfs2_figure_hole_clusters(inode, el, eb_bh, 472 v_cluster, &len); 473 if (ret) { 474 mlog_errno(ret); 475 goto out; 476 } 477 478 *hole_len = len; 479 } 480 goto out_hole; 481 } 482 483 rec = &el->l_recs[i]; 484 485 BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos)); 486 487 if (!rec->e_blkno) { 488 ocfs2_error(inode->i_sb, "Inode %lu has bad extent " 489 "record (%u, %u, 0)", inode->i_ino, 490 le32_to_cpu(rec->e_cpos), 491 ocfs2_rec_clusters(el, rec)); 492 ret = -EROFS; 493 goto out; 494 } 495 496 *ret_rec = *rec; 497 498 /* 499 * Checking for last extent is potentially expensive - we 500 * might have to look at the next leaf over to see if it's 501 * empty. 502 * 503 * The first two checks are to see whether the caller even 504 * cares for this information, and if the extent is at least 505 * the last in it's list. 506 * 507 * If those hold true, then the extent is last if any of the 508 * additional conditions hold true: 509 * - Extent list is in-inode 510 * - Extent list is right-most 511 * - Extent list is 2nd to rightmost, with empty right-most 512 */ 513 if (is_last) { 514 if (i == (le16_to_cpu(el->l_next_free_rec) - 1)) { 515 if (tree_height == 0) 516 *is_last = 1; 517 else if (eb->h_blkno == di->i_last_eb_blk) 518 *is_last = 1; 519 else if (eb->h_next_leaf_blk == di->i_last_eb_blk) { 520 ret = ocfs2_last_eb_is_empty(inode, di); 521 if (ret < 0) { 522 mlog_errno(ret); 523 goto out; 524 } 525 if (ret == 1) 526 *is_last = 1; 527 } 528 } 529 } 530 531 out_hole: 532 ret = 0; 533 out: 534 brelse(eb_bh); 535 return ret; 536 } 537 538 static void ocfs2_relative_extent_offsets(struct super_block *sb, 539 u32 v_cluster, 540 struct ocfs2_extent_rec *rec, 541 u32 *p_cluster, u32 *num_clusters) 542 543 { 544 u32 coff = v_cluster - le32_to_cpu(rec->e_cpos); 545 546 *p_cluster = ocfs2_blocks_to_clusters(sb, le64_to_cpu(rec->e_blkno)); 547 *p_cluster = *p_cluster + coff; 548 549 if (num_clusters) 550 *num_clusters = le16_to_cpu(rec->e_leaf_clusters) - coff; 551 } 552 553 int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, 554 u32 *p_cluster, u32 *num_clusters, 555 struct ocfs2_extent_list *el) 556 { 557 int ret = 0, i; 558 struct buffer_head *eb_bh = NULL; 559 struct ocfs2_extent_block *eb; 560 struct ocfs2_extent_rec *rec; 561 u32 coff; 562 563 if (el->l_tree_depth) { 564 ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh); 565 if (ret) { 566 mlog_errno(ret); 567 goto out; 568 } 569 570 eb = (struct ocfs2_extent_block *) eb_bh->b_data; 571 el = &eb->h_list; 572 573 if (el->l_tree_depth) { 574 ocfs2_error(inode->i_sb, 575 "Inode %lu has non zero tree depth in " 576 "xattr leaf block %llu\n", inode->i_ino, 577 (unsigned long long)eb_bh->b_blocknr); 578 ret = -EROFS; 579 goto out; 580 } 581 } 582 583 i = ocfs2_search_extent_list(el, v_cluster); 584 if (i == -1) { 585 ret = -EROFS; 586 mlog_errno(ret); 587 goto out; 588 } else { 589 rec = &el->l_recs[i]; 590 BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos)); 591 592 if (!rec->e_blkno) { 593 ocfs2_error(inode->i_sb, "Inode %lu has bad extent " 594 "record (%u, %u, 0) in xattr", inode->i_ino, 595 le32_to_cpu(rec->e_cpos), 596 ocfs2_rec_clusters(el, rec)); 597 ret = -EROFS; 598 goto out; 599 } 600 coff = v_cluster - le32_to_cpu(rec->e_cpos); 601 *p_cluster = ocfs2_blocks_to_clusters(inode->i_sb, 602 le64_to_cpu(rec->e_blkno)); 603 *p_cluster = *p_cluster + coff; 604 if (num_clusters) 605 *num_clusters = ocfs2_rec_clusters(el, rec) - coff; 606 } 607 out: 608 if (eb_bh) 609 brelse(eb_bh); 610 return ret; 611 } 612 613 int ocfs2_get_clusters(struct inode *inode, u32 v_cluster, 614 u32 *p_cluster, u32 *num_clusters, 615 unsigned int *extent_flags) 616 { 617 int ret; 618 unsigned int uninitialized_var(hole_len), flags = 0; 619 struct buffer_head *di_bh = NULL; 620 struct ocfs2_extent_rec rec; 621 622 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { 623 ret = -ERANGE; 624 mlog_errno(ret); 625 goto out; 626 } 627 628 ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster, 629 num_clusters, extent_flags); 630 if (ret == 0) 631 goto out; 632 633 ret = ocfs2_read_block(inode, OCFS2_I(inode)->ip_blkno, &di_bh); 634 if (ret) { 635 mlog_errno(ret); 636 goto out; 637 } 638 639 ret = ocfs2_get_clusters_nocache(inode, di_bh, v_cluster, &hole_len, 640 &rec, NULL); 641 if (ret) { 642 mlog_errno(ret); 643 goto out; 644 } 645 646 if (rec.e_blkno == 0ULL) { 647 /* 648 * A hole was found. Return some canned values that 649 * callers can key on. If asked for, num_clusters will 650 * be populated with the size of the hole. 651 */ 652 *p_cluster = 0; 653 if (num_clusters) { 654 *num_clusters = hole_len; 655 } 656 } else { 657 ocfs2_relative_extent_offsets(inode->i_sb, v_cluster, &rec, 658 p_cluster, num_clusters); 659 flags = rec.e_flags; 660 661 ocfs2_extent_map_insert_rec(inode, &rec); 662 } 663 664 if (extent_flags) 665 *extent_flags = flags; 666 667 out: 668 brelse(di_bh); 669 return ret; 670 } 671 672 /* 673 * This expects alloc_sem to be held. The allocation cannot change at 674 * all while the map is in the process of being updated. 675 */ 676 int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno, 677 u64 *ret_count, unsigned int *extent_flags) 678 { 679 int ret; 680 int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); 681 u32 cpos, num_clusters, p_cluster; 682 u64 boff = 0; 683 684 cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno); 685 686 ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters, 687 extent_flags); 688 if (ret) { 689 mlog_errno(ret); 690 goto out; 691 } 692 693 /* 694 * p_cluster == 0 indicates a hole. 695 */ 696 if (p_cluster) { 697 boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); 698 boff += (v_blkno & (u64)(bpc - 1)); 699 } 700 701 *p_blkno = boff; 702 703 if (ret_count) { 704 *ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters); 705 *ret_count -= v_blkno & (u64)(bpc - 1); 706 } 707 708 out: 709 return ret; 710 } 711 712 static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh, 713 struct fiemap_extent_info *fieinfo, 714 u64 map_start) 715 { 716 int ret; 717 unsigned int id_count; 718 struct ocfs2_dinode *di; 719 u64 phys; 720 u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST; 721 struct ocfs2_inode_info *oi = OCFS2_I(inode); 722 723 di = (struct ocfs2_dinode *)di_bh->b_data; 724 id_count = le16_to_cpu(di->id2.i_data.id_count); 725 726 if (map_start < id_count) { 727 phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits; 728 phys += offsetof(struct ocfs2_dinode, id2.i_data.id_data); 729 730 ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count, 731 flags); 732 if (ret < 0) 733 return ret; 734 } 735 736 return 0; 737 } 738 739 #define OCFS2_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) 740 741 int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 742 u64 map_start, u64 map_len) 743 { 744 int ret, is_last; 745 u32 mapping_end, cpos; 746 unsigned int hole_size; 747 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 748 u64 len_bytes, phys_bytes, virt_bytes; 749 struct buffer_head *di_bh = NULL; 750 struct ocfs2_extent_rec rec; 751 752 ret = fiemap_check_flags(fieinfo, OCFS2_FIEMAP_FLAGS); 753 if (ret) 754 return ret; 755 756 ret = ocfs2_inode_lock(inode, &di_bh, 0); 757 if (ret) { 758 mlog_errno(ret); 759 goto out; 760 } 761 762 down_read(&OCFS2_I(inode)->ip_alloc_sem); 763 764 /* 765 * Handle inline-data separately. 766 */ 767 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { 768 ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start); 769 goto out_unlock; 770 } 771 772 cpos = map_start >> osb->s_clustersize_bits; 773 mapping_end = ocfs2_clusters_for_bytes(inode->i_sb, 774 map_start + map_len); 775 mapping_end -= cpos; 776 is_last = 0; 777 while (cpos < mapping_end && !is_last) { 778 u32 fe_flags; 779 780 ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, 781 &hole_size, &rec, &is_last); 782 if (ret) { 783 mlog_errno(ret); 784 goto out; 785 } 786 787 if (rec.e_blkno == 0ULL) { 788 cpos += hole_size; 789 continue; 790 } 791 792 fe_flags = 0; 793 if (rec.e_flags & OCFS2_EXT_UNWRITTEN) 794 fe_flags |= FIEMAP_EXTENT_UNWRITTEN; 795 if (is_last) 796 fe_flags |= FIEMAP_EXTENT_LAST; 797 len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits; 798 phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits; 799 virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits; 800 801 ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes, 802 len_bytes, fe_flags); 803 if (ret) 804 break; 805 806 cpos = le32_to_cpu(rec.e_cpos)+ le16_to_cpu(rec.e_leaf_clusters); 807 } 808 809 if (ret > 0) 810 ret = 0; 811 812 out_unlock: 813 brelse(di_bh); 814 815 up_read(&OCFS2_I(inode)->ip_alloc_sem); 816 817 ocfs2_inode_unlock(inode, 0); 818 out: 819 820 return ret; 821 } 822