/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * extent_map.c
 *
 * Block/Cluster mapping functions
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License, version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fiemap.h>

#define MLOG_MASK_PREFIX ML_EXTENT_MAP
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "super.h"

#include "buffer_head_io.h"

/*
 * The extent caching implementation is intentionally trivial.
 *
 * We only cache a small number of extents stored directly on the
 * inode, so linear order operations are acceptable. If we ever want
 * to increase the size of the extent map, then these algorithms must
 * get smarter.
 */
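
/*
 * For orientation: the cache is a short, MRU-ordered list of items
 * hanging off of struct ocfs2_inode_info. Reconstructed from the usage
 * in this file (see extent_map.h for the real definitions), the
 * structures look roughly like:
 *
 *	struct ocfs2_extent_map_item {
 *		unsigned int		ei_cpos;	(logical start)
 *		unsigned int		ei_phys;	(physical start)
 *		unsigned int		ei_clusters;	(extent length)
 *		unsigned int		ei_flags;
 *		struct list_head	ei_list;
 *	};
 *
 *	struct ocfs2_extent_map {
 *		unsigned int		em_num_items;
 *		struct list_head	em_list;
 *	};
 *
 * All offsets and lengths are in clusters, and em_num_items is bounded
 * by OCFS2_MAX_EXTENT_MAP_ITEMS.
 */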

void ocfs2_extent_map_init(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	oi->ip_extent_map.em_num_items = 0;
	INIT_LIST_HEAD(&oi->ip_extent_map.em_list);
}

static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
				      unsigned int cpos,
				      struct ocfs2_extent_map_item **ret_emi)
{
	unsigned int range;
	struct ocfs2_extent_map_item *emi;

	*ret_emi = NULL;

	list_for_each_entry(emi, &em->em_list, ei_list) {
		range = emi->ei_cpos + emi->ei_clusters;

		if (cpos >= emi->ei_cpos && cpos < range) {
			list_move(&emi->ei_list, &em->em_list);

			*ret_emi = emi;
			break;
		}
	}
}
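
/*
 * The list_move() above is what gives em_list its MRU ordering - a hit
 * is bumped to the head of the list, so the tail is always the least
 * recently used item. ocfs2_extent_map_insert_rec() relies on this when
 * it recycles the tail on a full cache.
 */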

static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos,
				   unsigned int *phys, unsigned int *len,
				   unsigned int *flags)
{
	unsigned int coff;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map_item *emi;

	spin_lock(&oi->ip_lock);

	__ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi);
	if (emi) {
		coff = cpos - emi->ei_cpos;
		*phys = emi->ei_phys + coff;
		if (len)
			*len = emi->ei_clusters - coff;
		if (flags)
			*flags = emi->ei_flags;
	}

	spin_unlock(&oi->ip_lock);

	if (emi == NULL)
		return -ENOENT;

	return 0;
}
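
/*
 * A miss returns -ENOENT rather than a hard error; the only caller,
 * ocfs2_get_clusters(), treats it as "not cached" and falls back to
 * walking the on-disk extent tree.
 */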

/*
 * Forget about all clusters equal to or greater than cpos.
 */
void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos)
{
	struct ocfs2_extent_map_item *emi, *n;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map *em = &oi->ip_extent_map;
	LIST_HEAD(tmp_list);
	unsigned int range;

	spin_lock(&oi->ip_lock);
	list_for_each_entry_safe(emi, n, &em->em_list, ei_list) {
		if (emi->ei_cpos >= cpos) {
			/* Full truncate of this record. */
			list_move(&emi->ei_list, &tmp_list);
			BUG_ON(em->em_num_items == 0);
			em->em_num_items--;
			continue;
		}

		range = emi->ei_cpos + emi->ei_clusters;
		if (range > cpos) {
			/* Partial truncate */
			emi->ei_clusters = cpos - emi->ei_cpos;
		}
	}
	spin_unlock(&oi->ip_lock);

	list_for_each_entry_safe(emi, n, &tmp_list, ei_list) {
		list_del(&emi->ei_list);
		kfree(emi);
	}
}
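
/*
 * Note the two-pass structure above: items are only moved to a private
 * list under ip_lock, and the kfree() calls happen after the lock is
 * dropped. kfree() would be legal under a spinlock, but batching the
 * frees outside of it keeps the critical section short.
 */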

/*
 * Is any part of emi2 contained within emi1?
 */
static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1,
				 struct ocfs2_extent_map_item *emi2)
{
	unsigned int range1, range2;

	/*
	 * Check if logical start of emi2 is inside emi1
	 */
	range1 = emi1->ei_cpos + emi1->ei_clusters;
	if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1)
		return 1;

	/*
	 * Check if logical end of emi2 is inside emi1
	 */
	range2 = emi2->ei_cpos + emi2->ei_clusters;
	if (range2 > emi1->ei_cpos && range2 <= range1)
		return 1;

	return 0;
}
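
/*
 * Example (values illustrative): if emi1 covers logical clusters
 * [10, 15) (ei_cpos 10, ei_clusters 5), then an emi2 of [12, 14) or
 * [13, 20) overlaps it, while [15, 18) does not - the range checks
 * above are half-open on the right.
 */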

static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest,
				  struct ocfs2_extent_map_item *src)
{
	dest->ei_cpos = src->ei_cpos;
	dest->ei_phys = src->ei_phys;
	dest->ei_clusters = src->ei_clusters;
	dest->ei_flags = src->ei_flags;
}

/*
 * Try to merge emi with ins. Returns 1 if merge succeeds, zero
 * otherwise.
 */
static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
					 struct ocfs2_extent_map_item *ins)
{
	/*
	 * Handle contiguousness
	 */
	if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) &&
	    ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) &&
	    ins->ei_flags == emi->ei_flags) {
		emi->ei_clusters += ins->ei_clusters;
		return 1;
	} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
		   (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
		   ins->ei_flags == emi->ei_flags) {
		emi->ei_phys = ins->ei_phys;
		emi->ei_cpos = ins->ei_cpos;
		emi->ei_clusters += ins->ei_clusters;
		return 1;
	}

	/*
	 * Overlapping extents - this shouldn't happen unless we've
	 * split an extent to change its flags. That is exceedingly
	 * rare, so there's no sense in trying to optimize it yet.
	 */
	if (ocfs2_ei_is_contained(emi, ins) ||
	    ocfs2_ei_is_contained(ins, emi)) {
		ocfs2_copy_emi_fields(emi, ins);
		return 1;
	}

	/* No merge was possible. */
	return 0;
}
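
/*
 * Example (values illustrative): with emi = (ei_cpos 0, ei_phys 100,
 * ei_clusters 4) and ins = (ei_cpos 4, ei_phys 104, ei_clusters 2),
 * the first branch fires and emi grows to 6 clusters. If ins instead
 * ends where emi begins - ins = (0, 100, 4) merged into an emi at
 * (4, 104, 2) - the second branch extends emi downward. Both branches
 * require identical flags, since e.g. an unwritten extent cannot be
 * coalesced with a written one.
 */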

/*
 * In order to reduce complexity on the caller, this insert function
 * is intentionally liberal in what it will accept.
 *
 * The only rule is that the truncate call *must* be used whenever
 * records have been deleted. This avoids inserting overlapping
 * records with different physical mappings.
 */
void ocfs2_extent_map_insert_rec(struct inode *inode,
				 struct ocfs2_extent_rec *rec)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map *em = &oi->ip_extent_map;
	struct ocfs2_extent_map_item *emi, *new_emi = NULL;
	struct ocfs2_extent_map_item ins;

	ins.ei_cpos = le32_to_cpu(rec->e_cpos);
	ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb,
					       le64_to_cpu(rec->e_blkno));
	ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters);
	ins.ei_flags = rec->e_flags;

search:
	spin_lock(&oi->ip_lock);

	list_for_each_entry(emi, &em->em_list, ei_list) {
		if (ocfs2_try_to_merge_extent_map(emi, &ins)) {
			list_move(&emi->ei_list, &em->em_list);
			spin_unlock(&oi->ip_lock);
			goto out;
		}
	}

	/*
	 * No item could be merged.
	 *
	 * Either allocate and add a new item, or overwrite the least
	 * recently inserted.
	 */

	if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) {
		if (new_emi == NULL) {
			spin_unlock(&oi->ip_lock);

			new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS);
			if (new_emi == NULL)
				goto out;

			goto search;
		}

		ocfs2_copy_emi_fields(new_emi, &ins);
		list_add(&new_emi->ei_list, &em->em_list);
		em->em_num_items++;
		new_emi = NULL;
	} else {
		BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0);
		emi = list_entry(em->em_list.prev,
				 struct ocfs2_extent_map_item, ei_list);
		list_move(&emi->ei_list, &em->em_list);
		ocfs2_copy_emi_fields(emi, &ins);
	}

	spin_unlock(&oi->ip_lock);

out:
	kfree(new_emi);
}
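
/*
 * The unlock/kmalloc/"goto search" dance above exists because a
 * GFP_NOFS allocation may sleep, which is not allowed under ip_lock
 * (a spinlock). The lock is dropped for the allocation and the list
 * walk is restarted from scratch, since another thread may have
 * changed the map in the meantime; if the retry ends up merging
 * instead, the now-unused item is freed at "out" (kfree(NULL) is a
 * no-op, so no check is needed).
 */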

static int ocfs2_last_eb_is_empty(struct inode *inode,
				  struct ocfs2_dinode *di)
{
	int ret, next_free;
	u64 last_eb_blk = le64_to_cpu(di->i_last_eb_blk);
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;

	ret = ocfs2_read_extent_block(inode, last_eb_blk, &eb_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	eb = (struct ocfs2_extent_block *) eb_bh->b_data;
	el = &eb->h_list;

	if (el->l_tree_depth) {
		ocfs2_error(inode->i_sb,
			    "Inode %lu has non zero tree depth in "
			    "leaf block %llu\n", inode->i_ino,
			    (unsigned long long)eb_bh->b_blocknr);
		ret = -EROFS;
		goto out;
	}

	next_free = le16_to_cpu(el->l_next_free_rec);

	if (next_free == 0 ||
	    (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0])))
		ret = 1;

out:
	brelse(eb_bh);
	return ret;
}
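
/*
 * Return convention: 1 if the right-most extent block holds no real
 * allocation, 0 if it does, negative errno on failure. The 0 case
 * falls out naturally - ocfs2_read_extent_block() succeeded (ret is 0)
 * and neither emptiness test fired.
 */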

/*
 * Return the 1st index within el which contains an extent start
 * larger than v_cluster.
 */
static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el,
				       u32 v_cluster)
{
	int i;
	struct ocfs2_extent_rec *rec;

	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		if (v_cluster < le32_to_cpu(rec->e_cpos))
			break;
	}

	return i;
}
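
/*
 * Example (values illustrative): for records starting at clusters 0,
 * 16 and 64, a v_cluster of 20 returns index 2 - the record at 64 is
 * the first whose e_cpos is strictly greater. If no record qualifies,
 * the returned index equals l_next_free_rec, which the caller below
 * uses as its "no more extents here" signal.
 */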

/*
 * Figure out the size of a hole which starts at v_cluster within the given
 * extent list.
 *
 * If there is no more allocation past v_cluster, we return the maximum
 * cluster size minus v_cluster.
 *
 * If we have in-inode extents, then el points to the dinode list and
 * eb_bh is NULL. Otherwise, eb_bh should point to the extent block
 * containing el.
 */
static int ocfs2_figure_hole_clusters(struct inode *inode,
				      struct ocfs2_extent_list *el,
				      struct buffer_head *eb_bh,
				      u32 v_cluster,
				      u32 *num_clusters)
{
	int ret, i;
	struct buffer_head *next_eb_bh = NULL;
	struct ocfs2_extent_block *eb, *next_eb;

	i = ocfs2_search_for_hole_index(el, v_cluster);

	if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) {
		eb = (struct ocfs2_extent_block *)eb_bh->b_data;

		/*
		 * Check the next leaf for any extents.
		 */

		if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL)
			goto no_more_extents;

		ret = ocfs2_read_extent_block(inode,
					      le64_to_cpu(eb->h_next_leaf_blk),
					      &next_eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data;
		el = &next_eb->h_list;
		i = ocfs2_search_for_hole_index(el, v_cluster);
	}

no_more_extents:
	if (i == le16_to_cpu(el->l_next_free_rec)) {
		/*
		 * We're at the end of our existing allocation. Just
		 * return the maximum number of clusters we could
		 * possibly allocate.
		 */
		*num_clusters = UINT_MAX - v_cluster;
	} else {
		*num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster;
	}

	ret = 0;
out:
	brelse(next_eb_bh);
	return ret;
}
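
/*
 * The "UINT_MAX - v_cluster" answer above is what lets ocfs2_fiemap()
 * simply add hole_size to cpos: when nothing is allocated past the
 * hole, cpos lands on UINT_MAX, the loop condition fails, and the scan
 * terminates without overflow.
 */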

static int ocfs2_get_clusters_nocache(struct inode *inode,
				      struct buffer_head *di_bh,
				      u32 v_cluster, unsigned int *hole_len,
				      struct ocfs2_extent_rec *ret_rec,
				      unsigned int *is_last)
{
	int i, ret, tree_height, len;
	struct ocfs2_dinode *di;
	struct ocfs2_extent_block *uninitialized_var(eb);
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;
	struct buffer_head *eb_bh = NULL;

	memset(ret_rec, 0, sizeof(*ret_rec));
	if (is_last)
		*is_last = 0;

	di = (struct ocfs2_dinode *) di_bh->b_data;
	el = &di->id2.i_list;
	tree_height = le16_to_cpu(el->l_tree_depth);

	if (tree_height > 0) {
		ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in "
				    "leaf block %llu\n", inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	i = ocfs2_search_extent_list(el, v_cluster);
	if (i == -1) {
		/*
		 * Holes can be larger than the maximum size of an
		 * extent, so we return their lengths in a separate
		 * field.
		 */
		if (hole_len) {
			ret = ocfs2_figure_hole_clusters(inode, el, eb_bh,
							 v_cluster, &len);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			*hole_len = len;
		}
		goto out_hole;
	}

	rec = &el->l_recs[i];

	BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));

	if (!rec->e_blkno) {
		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
			    "record (%u, %u, 0)", inode->i_ino,
			    le32_to_cpu(rec->e_cpos),
			    ocfs2_rec_clusters(el, rec));
		ret = -EROFS;
		goto out;
	}

	*ret_rec = *rec;

	/*
	 * Checking for last extent is potentially expensive - we
	 * might have to look at the next leaf over to see if it's
	 * empty.
	 *
	 * The first two checks are to see whether the caller even
	 * cares for this information, and if the extent is at least
	 * the last in its list.
	 *
	 * If those hold true, then the extent is last if any of the
	 * additional conditions hold true:
	 *  - Extent list is in-inode
	 *  - Extent list is right-most
	 *  - Extent list is 2nd to rightmost, with empty right-most
	 */
	if (is_last) {
		if (i == (le16_to_cpu(el->l_next_free_rec) - 1)) {
			if (tree_height == 0)
				*is_last = 1;
			else if (eb->h_blkno == di->i_last_eb_blk)
				*is_last = 1;
			else if (eb->h_next_leaf_blk == di->i_last_eb_blk) {
				ret = ocfs2_last_eb_is_empty(inode, di);
				if (ret < 0) {
					mlog_errno(ret);
					goto out;
				}
				if (ret == 1)
					*is_last = 1;
			}
		}
	}

out_hole:
	ret = 0;
out:
	brelse(eb_bh);
	return ret;
}
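
/*
 * On success, ret_rec either describes the extent covering v_cluster
 * or is zero-filled (ret_rec->e_blkno == 0) to signal a hole, with the
 * hole's length reported via *hole_len. Results are deliberately not
 * cached here, which suits ocfs2_fiemap()'s raw record walk;
 * ocfs2_get_clusters() layers the extent-map caching on top.
 */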

static void ocfs2_relative_extent_offsets(struct super_block *sb,
					  u32 v_cluster,
					  struct ocfs2_extent_rec *rec,
					  u32 *p_cluster, u32 *num_clusters)
{
	u32 coff = v_cluster - le32_to_cpu(rec->e_cpos);

	*p_cluster = ocfs2_blocks_to_clusters(sb, le64_to_cpu(rec->e_blkno));
	*p_cluster = *p_cluster + coff;

	if (num_clusters)
		*num_clusters = le16_to_cpu(rec->e_leaf_clusters) - coff;
}
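
/*
 * Example (values illustrative): a record with e_cpos 8, a physical
 * start of cluster 200 and e_leaf_clusters 16 covers logical clusters
 * [8, 24). Asking about v_cluster 10 gives coff 2, so *p_cluster is
 * 202 and *num_clusters is 14 - the run remaining from that point.
 */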

int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
			     u32 *p_cluster, u32 *num_clusters,
			     struct ocfs2_extent_list *el)
{
	int ret = 0, i;
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_rec *rec;
	u32 coff;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in "
				    "xattr leaf block %llu\n", inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	i = ocfs2_search_extent_list(el, v_cluster);
	if (i == -1) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	} else {
		rec = &el->l_recs[i];
		BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));

		if (!rec->e_blkno) {
			ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
				    "record (%u, %u, 0) in xattr", inode->i_ino,
				    le32_to_cpu(rec->e_cpos),
				    ocfs2_rec_clusters(el, rec));
			ret = -EROFS;
			goto out;
		}
		coff = v_cluster - le32_to_cpu(rec->e_cpos);
		*p_cluster = ocfs2_blocks_to_clusters(inode->i_sb,
						    le64_to_cpu(rec->e_blkno));
		*p_cluster = *p_cluster + coff;
		if (num_clusters)
			*num_clusters = ocfs2_rec_clusters(el, rec) - coff;
	}
out:
	brelse(eb_bh);
	return ret;
}
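
/*
 * Unlike ocfs2_get_clusters(), a missing record here is treated as
 * corruption rather than a hole - hence the unconditional -EROFS when
 * the search comes up empty. Note also that nothing is cached: the
 * inode's extent map only describes data extents, not xattr trees.
 */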

int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
		       u32 *p_cluster, u32 *num_clusters,
		       unsigned int *extent_flags)
{
	int ret;
	unsigned int uninitialized_var(hole_len), flags = 0;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_extent_rec rec;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = -ERANGE;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
				      num_clusters, extent_flags);
	if (ret == 0)
		goto out;

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_get_clusters_nocache(inode, di_bh, v_cluster, &hole_len,
					 &rec, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (rec.e_blkno == 0ULL) {
		/*
		 * A hole was found. Return some canned values that
		 * callers can key on. If asked for, num_clusters will
		 * be populated with the size of the hole.
		 */
		*p_cluster = 0;
		if (num_clusters)
			*num_clusters = hole_len;
	} else {
		ocfs2_relative_extent_offsets(inode->i_sb, v_cluster, &rec,
					      p_cluster, num_clusters);
		flags = rec.e_flags;

		ocfs2_extent_map_insert_rec(inode, &rec);
	}

	if (extent_flags)
		*extent_flags = flags;

out:
	brelse(di_bh);
	return ret;
}
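
/*
 * This is the cache-aware entry point: a hit in the extent map returns
 * immediately, a miss walks the on-disk tree and then, for a mapped
 * extent (but never for a hole), primes the cache for the next lookup
 * via ocfs2_extent_map_insert_rec().
 */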

/*
 * This expects alloc_sem to be held. The allocation cannot change at
 * all while the map is in the process of being updated.
 */
int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
				u64 *ret_count, unsigned int *extent_flags)
{
	int ret;
	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 cpos, num_clusters, p_cluster;
	u64 boff = 0;

	cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno);

	ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters,
				 extent_flags);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * p_cluster == 0 indicates a hole.
	 */
	if (p_cluster) {
		boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
		boff += (v_blkno & (u64)(bpc - 1));
	}

	*p_blkno = boff;

	if (ret_count) {
		*ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
		*ret_count -= v_blkno & (u64)(bpc - 1);
	}

out:
	return ret;
}
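
/*
 * Example of the block/cluster math (values illustrative): with 4k
 * blocks and 32k clusters, bpc is 8, so "v_blkno & (bpc - 1)" is the
 * block offset within a cluster. A v_blkno of 21 maps to cpos 2; if
 * that cluster maps to p_cluster 100, *p_blkno is 100 * 8 + 5 = 805,
 * and a 3-cluster run yields *ret_count = 24 - 5 = 19 blocks.
 */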

static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
			       struct fiemap_extent_info *fieinfo,
			       u64 map_start)
{
	int ret;
	unsigned int id_count;
	struct ocfs2_dinode *di;
	u64 phys;
	u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	di = (struct ocfs2_dinode *)di_bh->b_data;
	id_count = le16_to_cpu(di->id2.i_data.id_count);

	if (map_start < id_count) {
		phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits;
		phys += offsetof(struct ocfs2_dinode, id2.i_data.id_data);

		ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
					      flags);
		if (ret < 0)
			return ret;
	}

	return 0;
}
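
/*
 * Inline data lives inside the inode block itself, so the physical
 * address reported above is the dinode's byte offset plus the offset
 * of the id_data array within it, and the single extent carries both
 * the DATA_INLINE and LAST flags.
 */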

#define OCFS2_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)

int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 map_start, u64 map_len)
{
	int ret, is_last;
	u32 mapping_end, cpos;
	unsigned int hole_size;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u64 len_bytes, phys_bytes, virt_bytes;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_extent_rec rec;

	ret = fiemap_check_flags(fieinfo, OCFS2_FIEMAP_FLAGS);
	if (ret)
		return ret;

	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	/*
	 * Handle inline-data separately.
	 */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start);
		goto out_unlock;
	}

	cpos = map_start >> osb->s_clustersize_bits;
	mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
					       map_start + map_len);
	mapping_end -= cpos;
	is_last = 0;
	while (cpos < mapping_end && !is_last) {
		u32 fe_flags;

		ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
						 &hole_size, &rec, &is_last);
		if (ret) {
			mlog_errno(ret);
			goto out_unlock;
		}

		if (rec.e_blkno == 0ULL) {
			cpos += hole_size;
			continue;
		}

		fe_flags = 0;
		if (rec.e_flags & OCFS2_EXT_UNWRITTEN)
			fe_flags |= FIEMAP_EXTENT_UNWRITTEN;
		if (is_last)
			fe_flags |= FIEMAP_EXTENT_LAST;
		len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
		phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
		virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;

		ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
					      len_bytes, fe_flags);
		if (ret)
			break;

		cpos = le32_to_cpu(rec.e_cpos) + le16_to_cpu(rec.e_leaf_clusters);
	}

	if (ret > 0)
		ret = 0;

out_unlock:
	brelse(di_bh);

	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_inode_unlock(inode, 0);
out:
	return ret;
}
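
/*
 * The loop advances cpos by whole records, skipping holes by their
 * reported size, and stops either at mapping_end or once the record
 * flagged is_last has been emitted. A positive return from
 * fiemap_fill_next_extent() means no further extents will fit in the
 * user's array, which is why a positive ret is folded back to 0
 * before unlocking.
 */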

int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
			   struct buffer_head *bhs[], int flags,
			   int (*validate)(struct super_block *sb,
					   struct buffer_head *bh))
{
	int rc = 0;
	u64 p_block, p_count;
	int i, count, done = 0;

	mlog_entry("(inode = %p, v_block = %llu, nr = %d, bhs = %p, "
		   "flags = %x, validate = %p)\n",
		   inode, (unsigned long long)v_block, nr, bhs, flags,
		   validate);

	if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >=
	    i_size_read(inode)) {
		BUG_ON(!(flags & OCFS2_BH_READAHEAD));
		goto out;
	}

	while (done < nr) {
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
		rc = ocfs2_extent_map_get_blocks(inode, v_block + done,
						 &p_block, &p_count, NULL);
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		if (rc) {
			mlog_errno(rc);
			break;
		}

		if (!p_block) {
			rc = -EIO;
			mlog(ML_ERROR,
			     "Inode #%llu contains a hole at offset %llu\n",
			     (unsigned long long)OCFS2_I(inode)->ip_blkno,
			     (unsigned long long)(v_block + done) <<
			     inode->i_sb->s_blocksize_bits);
			break;
		}

		count = nr - done;
		if (p_count < count)
			count = p_count;

		/*
		 * If the caller passed us bhs, they should have come
		 * from a previous readahead call to this function.  Thus,
		 * they should have the right b_blocknr.
		 */
		for (i = 0; i < count; i++) {
			if (!bhs[done + i])
				continue;
			BUG_ON(bhs[done + i]->b_blocknr != (p_block + i));
		}

		rc = ocfs2_read_blocks(inode, p_block, count, bhs + done,
				       flags, validate);
		if (rc) {
			mlog_errno(rc);
			break;
		}
		done += count;
	}

out:
	mlog_exit(rc);
	return rc;
}
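
/*
 * Note that ip_alloc_sem is re-taken around each mapping lookup rather
 * than held across the whole loop, and that a hole is a hard error
 * here: virtual-block readers only ever read inside allocated regions.
 * A read extending past i_size is tolerated at the top only for
 * OCFS2_BH_READAHEAD requests, which are best-effort and simply
 * skipped.
 */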