xref: /openbmc/linux/fs/ocfs2/extent_map.c (revision 7dd65feb)
1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * extent_map.c
5  *
6  * Block/Cluster mapping functions
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License, version 2,  as published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public
20  * License along with this program; if not, write to the
21  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
22  * Boston, MA 02111-1307, USA.
23  */
24 
25 #include <linux/fs.h>
26 #include <linux/init.h>
27 #include <linux/types.h>
28 #include <linux/fiemap.h>
29 
30 #define MLOG_MASK_PREFIX ML_EXTENT_MAP
31 #include <cluster/masklog.h>
32 
33 #include "ocfs2.h"
34 
35 #include "alloc.h"
36 #include "dlmglue.h"
37 #include "extent_map.h"
38 #include "inode.h"
39 #include "super.h"
40 #include "symlink.h"
41 
42 #include "buffer_head_io.h"
43 
44 /*
45  * The extent caching implementation is intentionally trivial.
46  *
47  * We only cache a small number of extents stored directly on the
48  * inode, so linear order operations are acceptable. If we ever want
49  * to increase the size of the extent map, then these algorithms must
50  * get smarter.
51  */
52 
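/*
 * Illustrative sketch (not part of the original file): the typical
 * lifecycle of this cache as seen by a hypothetical caller, assuming
 * the usual ocfs2 locking rules are already satisfied.
 *
 *	ocfs2_extent_map_init(inode);		// at inode initialization
 *
 *	// on a lookup miss, the caller reads the extent tree and then
 *	// caches the record it found:
 *	ocfs2_extent_map_insert_rec(inode, &rec);
 *
 *	// once clusters at or past 'cpos' have been freed, stale
 *	// entries must be dropped before new records are inserted:
 *	ocfs2_extent_map_trunc(inode, cpos);
 */
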
53 void ocfs2_extent_map_init(struct inode *inode)
54 {
55 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
56 
57 	oi->ip_extent_map.em_num_items = 0;
58 	INIT_LIST_HEAD(&oi->ip_extent_map.em_list);
59 }
60 
61 static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
62 				      unsigned int cpos,
63 				      struct ocfs2_extent_map_item **ret_emi)
64 {
65 	unsigned int range;
66 	struct ocfs2_extent_map_item *emi;
67 
68 	*ret_emi = NULL;
69 
70 	list_for_each_entry(emi, &em->em_list, ei_list) {
71 		range = emi->ei_cpos + emi->ei_clusters;
72 
73 		if (cpos >= emi->ei_cpos && cpos < range) {
74 			list_move(&emi->ei_list, &em->em_list);
75 
76 			*ret_emi = emi;
77 			break;
78 		}
79 	}
80 }
81 
82 static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos,
83 				   unsigned int *phys, unsigned int *len,
84 				   unsigned int *flags)
85 {
86 	unsigned int coff;
87 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
88 	struct ocfs2_extent_map_item *emi;
89 
90 	spin_lock(&oi->ip_lock);
91 
92 	__ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi);
93 	if (emi) {
94 		coff = cpos - emi->ei_cpos;
95 		*phys = emi->ei_phys + coff;
96 		if (len)
97 			*len = emi->ei_clusters - coff;
98 		if (flags)
99 			*flags = emi->ei_flags;
100 	}
101 
102 	spin_unlock(&oi->ip_lock);
103 
104 	if (emi == NULL)
105 		return -ENOENT;
106 
107 	return 0;
108 }
109 
110 /*
111  * Forget about all clusters equal to or greater than cpos.
112  */
113 void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos)
114 {
115 	struct ocfs2_extent_map_item *emi, *n;
116 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
117 	struct ocfs2_extent_map *em = &oi->ip_extent_map;
118 	LIST_HEAD(tmp_list);
119 	unsigned int range;
120 
121 	spin_lock(&oi->ip_lock);
122 	list_for_each_entry_safe(emi, n, &em->em_list, ei_list) {
123 		if (emi->ei_cpos >= cpos) {
124 			/* Full truncate of this record. */
125 			list_move(&emi->ei_list, &tmp_list);
126 			BUG_ON(em->em_num_items == 0);
127 			em->em_num_items--;
128 			continue;
129 		}
130 
131 		range = emi->ei_cpos + emi->ei_clusters;
132 		if (range > cpos) {
133 			/* Partial truncate */
134 			emi->ei_clusters = cpos - emi->ei_cpos;
135 		}
136 	}
137 	spin_unlock(&oi->ip_lock);
138 
139 	list_for_each_entry_safe(emi, n, &tmp_list, ei_list) {
140 		list_del(&emi->ei_list);
141 		kfree(emi);
142 	}
143 }
144 
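/*
 * Worked example (illustrative only): with a cached item covering
 * logical clusters [10, 18) (ei_cpos = 10, ei_clusters = 8),
 * ocfs2_extent_map_trunc(inode, 14) shrinks it to ei_clusters = 4 so
 * that it covers [10, 14), while ocfs2_extent_map_trunc(inode, 10)
 * frees it entirely, since every covered cluster is >= cpos.
 */
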
145 /*
145  * Is any part of emi2 contained within emi1?
147  */
148 static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1,
149 				 struct ocfs2_extent_map_item *emi2)
150 {
151 	unsigned int range1, range2;
152 
153 	/*
154 	 * Check if logical start of emi2 is inside emi1
155 	 */
156 	range1 = emi1->ei_cpos + emi1->ei_clusters;
157 	if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1)
158 		return 1;
159 
160 	/*
161 	 * Check if logical end of emi2 is inside emi1
162 	 */
163 	range2 = emi2->ei_cpos + emi2->ei_clusters;
164 	if (range2 > emi1->ei_cpos && range2 <= range1)
165 		return 1;
166 
167 	return 0;
168 }
169 
170 static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest,
171 				  struct ocfs2_extent_map_item *src)
172 {
173 	dest->ei_cpos = src->ei_cpos;
174 	dest->ei_phys = src->ei_phys;
175 	dest->ei_clusters = src->ei_clusters;
176 	dest->ei_flags = src->ei_flags;
177 }
178 
179 /*
180  * Try to merge emi with ins. Returns 1 if merge succeeds, zero
181  * otherwise.
182  */
183 static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
184 					 struct ocfs2_extent_map_item *ins)
185 {
186 	/*
187 	 * Handle contiguousness
188 	 */
189 	if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) &&
190 	    ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) &&
191 	    ins->ei_flags == emi->ei_flags) {
192 		emi->ei_clusters += ins->ei_clusters;
193 		return 1;
194 	} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
195 		   (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys &&
196 		   ins->ei_flags == emi->ei_flags) {
197 		emi->ei_phys = ins->ei_phys;
198 		emi->ei_cpos = ins->ei_cpos;
199 		emi->ei_clusters += ins->ei_clusters;
200 		return 1;
201 	}
202 
203 	/*
204 	 * Overlapping extents - this shouldn't happen unless we've
205 	 * split an extent to change its flags. That is exceedingly
206 	 * rare, so there's no sense in trying to optimize it yet.
207 	 */
208 	if (ocfs2_ei_is_contained(emi, ins) ||
209 	    ocfs2_ei_is_contained(ins, emi)) {
210 		ocfs2_copy_emi_fields(emi, ins);
211 		return 1;
212 	}
213 
214 	/* No merge was possible. */
215 	return 0;
216 }
217 
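/*
 * Worked example (illustrative only) for the merge above: with emi
 * covering logical clusters [10, 14) at physical cluster 200
 * (ei_cpos = 10, ei_phys = 200, ei_clusters = 4), an ins of [14, 16)
 * at physical 204 is contiguous on the right and the first branch
 * simply grows emi to ei_clusters = 6. An ins of [8, 10) at physical
 * 198 is contiguous on the left; the second branch rewinds ei_cpos
 * and ei_phys to 8 and 198 and likewise ends up with 6 clusters.
 */
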
218 /*
219  * In order to reduce complexity on the caller, this insert function
220  * is intentionally liberal in what it will accept.
221  *
222  * The only rule is that the truncate call *must* be used whenever
223  * records have been deleted. This avoids inserting overlapping
224  * records with different physical mappings.
225  */
226 void ocfs2_extent_map_insert_rec(struct inode *inode,
227 				 struct ocfs2_extent_rec *rec)
228 {
229 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
230 	struct ocfs2_extent_map *em = &oi->ip_extent_map;
231 	struct ocfs2_extent_map_item *emi, *new_emi = NULL;
232 	struct ocfs2_extent_map_item ins;
233 
234 	ins.ei_cpos = le32_to_cpu(rec->e_cpos);
235 	ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb,
236 					       le64_to_cpu(rec->e_blkno));
237 	ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters);
238 	ins.ei_flags = rec->e_flags;
239 
240 search:
241 	spin_lock(&oi->ip_lock);
242 
243 	list_for_each_entry(emi, &em->em_list, ei_list) {
244 		if (ocfs2_try_to_merge_extent_map(emi, &ins)) {
245 			list_move(&emi->ei_list, &em->em_list);
246 			spin_unlock(&oi->ip_lock);
247 			goto out;
248 		}
249 	}
250 
251 	/*
252 	 * No item could be merged.
253 	 *
254 	 * Either allocate and add a new item, or overwrite the least recently
255 	 * inserted.
256 	 */
257 
258 	if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) {
259 		if (new_emi == NULL) {
260 			spin_unlock(&oi->ip_lock);
261 
262 			new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS);
263 			if (new_emi == NULL)
264 				goto out;
265 
266 			goto search;
267 		}
268 
269 		ocfs2_copy_emi_fields(new_emi, &ins);
270 		list_add(&new_emi->ei_list, &em->em_list);
271 		em->em_num_items++;
272 		new_emi = NULL;
273 	} else {
274 		BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0);
275 		emi = list_entry(em->em_list.prev,
276 				 struct ocfs2_extent_map_item, ei_list);
277 		list_move(&emi->ei_list, &em->em_list);
278 		ocfs2_copy_emi_fields(emi, &ins);
279 	}
280 
281 	spin_unlock(&oi->ip_lock);
282 
283 out:
284 	if (new_emi)
285 		kfree(new_emi);
286 }
287 
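/*
 * Usage sketch (illustrative only, hypothetical caller): the rule
 * stated above in practice - when extent records have been removed
 * from the tree, the affected range must be dropped from the cache
 * before the new record is inserted:
 *
 *	ocfs2_extent_map_trunc(inode, le32_to_cpu(rec->e_cpos));
 *	...	modify the on-disk extent tree ...
 *	ocfs2_extent_map_insert_rec(inode, rec);
 */
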
288 static int ocfs2_last_eb_is_empty(struct inode *inode,
289 				  struct ocfs2_dinode *di)
290 {
291 	int ret, next_free;
292 	u64 last_eb_blk = le64_to_cpu(di->i_last_eb_blk);
293 	struct buffer_head *eb_bh = NULL;
294 	struct ocfs2_extent_block *eb;
295 	struct ocfs2_extent_list *el;
296 
297 	ret = ocfs2_read_extent_block(INODE_CACHE(inode), last_eb_blk, &eb_bh);
298 	if (ret) {
299 		mlog_errno(ret);
300 		goto out;
301 	}
302 
303 	eb = (struct ocfs2_extent_block *) eb_bh->b_data;
304 	el = &eb->h_list;
305 
306 	if (el->l_tree_depth) {
307 		ocfs2_error(inode->i_sb,
308 			    "Inode %lu has non zero tree depth in "
309 			    "leaf block %llu\n", inode->i_ino,
310 			    (unsigned long long)eb_bh->b_blocknr);
311 		ret = -EROFS;
312 		goto out;
313 	}
314 
315 	next_free = le16_to_cpu(el->l_next_free_rec);
316 
317 	if (next_free == 0 ||
318 	    (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0])))
319 		ret = 1;
320 
321 out:
322 	brelse(eb_bh);
323 	return ret;
324 }
325 
326 /*
327  * Return the 1st index within el which contains an extent start
328  * larger than v_cluster.
329  */
330 static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el,
331 				       u32 v_cluster)
332 {
333 	int i;
334 	struct ocfs2_extent_rec *rec;
335 
336 	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
337 		rec = &el->l_recs[i];
338 
339 		if (v_cluster < le32_to_cpu(rec->e_cpos))
340 			break;
341 	}
342 
343 	return i;
344 }
345 
346 /*
347  * Figure out the size of a hole which starts at v_cluster within the given
348  * extent list.
349  *
350  * If there is no more allocation past v_cluster, we return the maximum
351  * possible cluster offset (UINT_MAX) minus v_cluster.
352  *
353  * If we have in-inode extents, then el points to the dinode list and
354  * eb_bh is NULL. Otherwise, eb_bh should point to the extent block
355  * containing el.
356  */
357 int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci,
358 			       struct ocfs2_extent_list *el,
359 			       struct buffer_head *eb_bh,
360 			       u32 v_cluster,
361 			       u32 *num_clusters)
362 {
363 	int ret, i;
364 	struct buffer_head *next_eb_bh = NULL;
365 	struct ocfs2_extent_block *eb, *next_eb;
366 
367 	i = ocfs2_search_for_hole_index(el, v_cluster);
368 
369 	if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) {
370 		eb = (struct ocfs2_extent_block *)eb_bh->b_data;
371 
372 		/*
373 		 * Check the next leaf for any extents.
374 		 */
375 
376 		if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL)
377 			goto no_more_extents;
378 
379 		ret = ocfs2_read_extent_block(ci,
380 					      le64_to_cpu(eb->h_next_leaf_blk),
381 					      &next_eb_bh);
382 		if (ret) {
383 			mlog_errno(ret);
384 			goto out;
385 		}
386 
387 		next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data;
388 		el = &next_eb->h_list;
389 		i = ocfs2_search_for_hole_index(el, v_cluster);
390 	}
391 
392 no_more_extents:
393 	if (i == le16_to_cpu(el->l_next_free_rec)) {
394 		/*
395 		 * We're at the end of our existing allocation. Just
396 		 * return the maximum number of clusters we could
397 		 * possibly allocate.
398 		 */
399 		*num_clusters = UINT_MAX - v_cluster;
400 	} else {
401 		*num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster;
402 	}
403 
404 	ret = 0;
405 out:
406 	brelse(next_eb_bh);
407 	return ret;
408 }
409 
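/*
 * Worked example (illustrative only): if el holds records covering
 * clusters [0, 4) and [16, 20), then for v_cluster = 8 the search
 * above stops at the [16, 20) record and *num_clusters becomes
 * 16 - 8 = 8. If nothing is allocated past v_cluster and there is no
 * next leaf, the hole is reported as UINT_MAX - v_cluster.
 */
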
410 static int ocfs2_get_clusters_nocache(struct inode *inode,
411 				      struct buffer_head *di_bh,
412 				      u32 v_cluster, unsigned int *hole_len,
413 				      struct ocfs2_extent_rec *ret_rec,
414 				      unsigned int *is_last)
415 {
416 	int i, ret, tree_height, len;
417 	struct ocfs2_dinode *di;
418 	struct ocfs2_extent_block *uninitialized_var(eb);
419 	struct ocfs2_extent_list *el;
420 	struct ocfs2_extent_rec *rec;
421 	struct buffer_head *eb_bh = NULL;
422 
423 	memset(ret_rec, 0, sizeof(*ret_rec));
424 	if (is_last)
425 		*is_last = 0;
426 
427 	di = (struct ocfs2_dinode *) di_bh->b_data;
428 	el = &di->id2.i_list;
429 	tree_height = le16_to_cpu(el->l_tree_depth);
430 
431 	if (tree_height > 0) {
432 		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
433 				      &eb_bh);
434 		if (ret) {
435 			mlog_errno(ret);
436 			goto out;
437 		}
438 
439 		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
440 		el = &eb->h_list;
441 
442 		if (el->l_tree_depth) {
443 			ocfs2_error(inode->i_sb,
444 				    "Inode %lu has non zero tree depth in "
445 				    "leaf block %llu\n", inode->i_ino,
446 				    (unsigned long long)eb_bh->b_blocknr);
447 			ret = -EROFS;
448 			goto out;
449 		}
450 	}
451 
452 	i = ocfs2_search_extent_list(el, v_cluster);
453 	if (i == -1) {
454 		/*
455 		 * Holes can be larger than the maximum size of an
456 		 * extent, so we return their lengths in a separate
457 		 * field.
458 		 */
459 		if (hole_len) {
460 			ret = ocfs2_figure_hole_clusters(INODE_CACHE(inode),
461 							 el, eb_bh,
462 							 v_cluster, &len);
463 			if (ret) {
464 				mlog_errno(ret);
465 				goto out;
466 			}
467 
468 			*hole_len = len;
469 		}
470 		goto out_hole;
471 	}
472 
473 	rec = &el->l_recs[i];
474 
475 	BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
476 
477 	if (!rec->e_blkno) {
478 		ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
479 			    "record (%u, %u, 0)", inode->i_ino,
480 			    le32_to_cpu(rec->e_cpos),
481 			    ocfs2_rec_clusters(el, rec));
482 		ret = -EROFS;
483 		goto out;
484 	}
485 
486 	*ret_rec = *rec;
487 
488 	/*
489 	 * Checking for last extent is potentially expensive - we
490 	 * might have to look at the next leaf over to see if it's
491 	 * empty.
492 	 *
493 	 * The first two checks are to see whether the caller even
494 	 * cares for this information, and if the extent is at least
495 		 * the last in its list.
496 	 *
497 	 * If those hold true, then the extent is last if any of the
498 	 * additional conditions hold true:
499 	 *  - Extent list is in-inode
500 	 *  - Extent list is right-most
501 	 *  - Extent list is 2nd to rightmost, with empty right-most
502 	 */
503 	if (is_last) {
504 		if (i == (le16_to_cpu(el->l_next_free_rec) - 1)) {
505 			if (tree_height == 0)
506 				*is_last = 1;
507 			else if (eb->h_blkno == di->i_last_eb_blk)
508 				*is_last = 1;
509 			else if (eb->h_next_leaf_blk == di->i_last_eb_blk) {
510 				ret = ocfs2_last_eb_is_empty(inode, di);
511 				if (ret < 0) {
512 					mlog_errno(ret);
513 					goto out;
514 				}
515 				if (ret == 1)
516 					*is_last = 1;
517 			}
518 		}
519 	}
520 
521 out_hole:
522 	ret = 0;
523 out:
524 	brelse(eb_bh);
525 	return ret;
526 }
527 
528 static void ocfs2_relative_extent_offsets(struct super_block *sb,
529 					  u32 v_cluster,
530 					  struct ocfs2_extent_rec *rec,
531 					  u32 *p_cluster, u32 *num_clusters)
532 
533 {
534 	u32 coff = v_cluster - le32_to_cpu(rec->e_cpos);
535 
536 	*p_cluster = ocfs2_blocks_to_clusters(sb, le64_to_cpu(rec->e_blkno));
537 	*p_cluster = *p_cluster + coff;
538 
539 	if (num_clusters)
540 		*num_clusters = le16_to_cpu(rec->e_leaf_clusters) - coff;
541 }
542 
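/*
 * Worked example (illustrative only): for a record with e_cpos = 16,
 * e_leaf_clusters = 8 and an e_blkno that converts to physical
 * cluster 100, a query at v_cluster = 19 gives coff = 3, so
 * *p_cluster = 103 and *num_clusters = 8 - 3 = 5, the clusters that
 * remain in this extent starting at the queried offset.
 */
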
543 int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
544 			     u32 *p_cluster, u32 *num_clusters,
545 			     struct ocfs2_extent_list *el,
546 			     unsigned int *extent_flags)
547 {
548 	int ret = 0, i;
549 	struct buffer_head *eb_bh = NULL;
550 	struct ocfs2_extent_block *eb;
551 	struct ocfs2_extent_rec *rec;
552 	u32 coff;
553 
554 	if (el->l_tree_depth) {
555 		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
556 				      &eb_bh);
557 		if (ret) {
558 			mlog_errno(ret);
559 			goto out;
560 		}
561 
562 		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
563 		el = &eb->h_list;
564 
565 		if (el->l_tree_depth) {
566 			ocfs2_error(inode->i_sb,
567 				    "Inode %lu has non zero tree depth in "
568 				    "xattr leaf block %llu\n", inode->i_ino,
569 				    (unsigned long long)eb_bh->b_blocknr);
570 			ret = -EROFS;
571 			goto out;
572 		}
573 	}
574 
575 	i = ocfs2_search_extent_list(el, v_cluster);
576 	if (i == -1) {
577 		ret = -EROFS;
578 		mlog_errno(ret);
579 		goto out;
580 	} else {
581 		rec = &el->l_recs[i];
582 		BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
583 
584 		if (!rec->e_blkno) {
585 			ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
586 				    "record (%u, %u, 0) in xattr", inode->i_ino,
587 				    le32_to_cpu(rec->e_cpos),
588 				    ocfs2_rec_clusters(el, rec));
589 			ret = -EROFS;
590 			goto out;
591 		}
592 		coff = v_cluster - le32_to_cpu(rec->e_cpos);
593 		*p_cluster = ocfs2_blocks_to_clusters(inode->i_sb,
594 						    le64_to_cpu(rec->e_blkno));
595 		*p_cluster = *p_cluster + coff;
596 		if (num_clusters)
597 			*num_clusters = ocfs2_rec_clusters(el, rec) - coff;
598 
599 		if (extent_flags)
600 			*extent_flags = rec->e_flags;
601 	}
602 out:
603 	if (eb_bh)
604 		brelse(eb_bh);
605 	return ret;
606 }
607 
608 int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
609 		       u32 *p_cluster, u32 *num_clusters,
610 		       unsigned int *extent_flags)
611 {
612 	int ret;
613 	unsigned int uninitialized_var(hole_len), flags = 0;
614 	struct buffer_head *di_bh = NULL;
615 	struct ocfs2_extent_rec rec;
616 
617 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
618 		ret = -ERANGE;
619 		mlog_errno(ret);
620 		goto out;
621 	}
622 
623 	ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
624 				      num_clusters, extent_flags);
625 	if (ret == 0)
626 		goto out;
627 
628 	ret = ocfs2_read_inode_block(inode, &di_bh);
629 	if (ret) {
630 		mlog_errno(ret);
631 		goto out;
632 	}
633 
634 	ret = ocfs2_get_clusters_nocache(inode, di_bh, v_cluster, &hole_len,
635 					 &rec, NULL);
636 	if (ret) {
637 		mlog_errno(ret);
638 		goto out;
639 	}
640 
641 	if (rec.e_blkno == 0ULL) {
642 		/*
643 		 * A hole was found. Return some canned values that
644 		 * callers can key on. If asked for, num_clusters will
645 		 * be populated with the size of the hole.
646 		 */
647 		*p_cluster = 0;
648 		if (num_clusters) {
649 			*num_clusters = hole_len;
650 		}
651 	} else {
652 		ocfs2_relative_extent_offsets(inode->i_sb, v_cluster, &rec,
653 					      p_cluster, num_clusters);
654 		flags = rec.e_flags;
655 
656 		ocfs2_extent_map_insert_rec(inode, &rec);
657 	}
658 
659 	if (extent_flags)
660 		*extent_flags = flags;
661 
662 out:
663 	brelse(di_bh);
664 	return ret;
665 }
666 
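/*
 * Usage sketch (illustrative only, hypothetical caller): mapping a
 * logical cluster and telling holes and unwritten extents apart.
 *
 *	u32 p_cluster, num_clusters;
 *	unsigned int ext_flags;
 *	int ret;
 *
 *	ret = ocfs2_get_clusters(inode, v_cluster, &p_cluster,
 *				 &num_clusters, &ext_flags);
 *	if (ret == 0 && p_cluster == 0) {
 *		// hole of num_clusters clusters starting at v_cluster
 *	} else if (ret == 0 && (ext_flags & OCFS2_EXT_UNWRITTEN)) {
 *		// allocated but unwritten extent
 *	}
 */
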
667 /*
668  * This expects alloc_sem to be held. The allocation cannot change at
669  * all while the map is in the process of being updated.
670  */
671 int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
672 				u64 *ret_count, unsigned int *extent_flags)
673 {
674 	int ret;
675 	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
676 	u32 cpos, num_clusters, p_cluster;
677 	u64 boff = 0;
678 
679 	cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno);
680 
681 	ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters,
682 				 extent_flags);
683 	if (ret) {
684 		mlog_errno(ret);
685 		goto out;
686 	}
687 
688 	/*
689 	 * p_cluster == 0 indicates a hole.
690 	 */
691 	if (p_cluster) {
692 		boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
693 		boff += (v_blkno & (u64)(bpc - 1));
694 	}
695 
696 	*p_blkno = boff;
697 
698 	if (ret_count) {
699 		*ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
700 		*ret_count -= v_blkno & (u64)(bpc - 1);
701 	}
702 
703 out:
704 	return ret;
705 }
706 
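/*
 * Worked example (illustrative only): with 4K blocks and 32K clusters,
 * bpc = 8. For v_blkno = 21, cpos = 21 / 8 = 2 and the offset inside
 * the cluster is 21 & 7 = 5. If that logical cluster maps to physical
 * cluster 100, then *p_blkno = 100 * 8 + 5 = 805, and with
 * num_clusters = 1 the returned *ret_count is 8 - 5 = 3 blocks.
 */
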
707 /*
708  * The name ocfs2_fiemap_inline() may be a little misleading: it not
709  * only handles fiemap for inline-data files, but also for fast
710  * symlinks, because the two are identical as far as extent mapping
711  * is concerned.
712  */
713 static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
714 			       struct fiemap_extent_info *fieinfo,
715 			       u64 map_start)
716 {
717 	int ret;
718 	unsigned int id_count;
719 	struct ocfs2_dinode *di;
720 	u64 phys;
721 	u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
722 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
723 
724 	di = (struct ocfs2_dinode *)di_bh->b_data;
725 	if (ocfs2_inode_is_fast_symlink(inode))
726 		id_count = ocfs2_fast_symlink_chars(inode->i_sb);
727 	else
728 		id_count = le16_to_cpu(di->id2.i_data.id_count);
729 
730 	if (map_start < id_count) {
731 		phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits;
732 		if (ocfs2_inode_is_fast_symlink(inode))
733 			phys += offsetof(struct ocfs2_dinode, id2.i_symlink);
734 		else
735 			phys += offsetof(struct ocfs2_dinode,
736 					 id2.i_data.id_data);
737 
738 		ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
739 					      flags);
740 		if (ret < 0)
741 			return ret;
742 	}
743 
744 	return 0;
745 }
746 
747 #define OCFS2_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)
748 
749 int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
750 		 u64 map_start, u64 map_len)
751 {
752 	int ret, is_last;
753 	u32 mapping_end, cpos;
754 	unsigned int hole_size;
755 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
756 	u64 len_bytes, phys_bytes, virt_bytes;
757 	struct buffer_head *di_bh = NULL;
758 	struct ocfs2_extent_rec rec;
759 
760 	ret = fiemap_check_flags(fieinfo, OCFS2_FIEMAP_FLAGS);
761 	if (ret)
762 		return ret;
763 
764 	ret = ocfs2_inode_lock(inode, &di_bh, 0);
765 	if (ret) {
766 		mlog_errno(ret);
767 		goto out;
768 	}
769 
770 	down_read(&OCFS2_I(inode)->ip_alloc_sem);
771 
772 	/*
773 	 * Handle inline-data and fast symlink separately.
774 	 */
775 	if ((OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
776 	    ocfs2_inode_is_fast_symlink(inode)) {
777 		ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start);
778 		goto out_unlock;
779 	}
780 
781 	cpos = map_start >> osb->s_clustersize_bits;
782 	mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
783 					       map_start + map_len);
785 	is_last = 0;
786 	while (cpos < mapping_end && !is_last) {
787 		u32 fe_flags;
788 
789 		ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
790 						 &hole_size, &rec, &is_last);
791 		if (ret) {
792 			mlog_errno(ret);
793 			goto out_unlock;
794 		}
795 
796 		if (rec.e_blkno == 0ULL) {
797 			cpos += hole_size;
798 			continue;
799 		}
800 
801 		fe_flags = 0;
802 		if (rec.e_flags & OCFS2_EXT_UNWRITTEN)
803 			fe_flags |= FIEMAP_EXTENT_UNWRITTEN;
804 		if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
805 			fe_flags |= FIEMAP_EXTENT_SHARED;
806 		if (is_last)
807 			fe_flags |= FIEMAP_EXTENT_LAST;
808 		len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
809 		phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
810 		virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;
811 
812 		ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
813 					      len_bytes, fe_flags);
814 		if (ret)
815 			break;
816 
817 		cpos = le32_to_cpu(rec.e_cpos) + le16_to_cpu(rec.e_leaf_clusters);
818 	}
819 
820 	if (ret > 0)
821 		ret = 0;
822 
823 out_unlock:
824 	brelse(di_bh);
825 
826 	up_read(&OCFS2_I(inode)->ip_alloc_sem);
827 
828 	ocfs2_inode_unlock(inode, 0);
829 out:
830 
831 	return ret;
832 }
833 
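/*
 * Worked example (illustrative only): with 4K blocks and 32K clusters,
 * a record with e_cpos = 2, e_leaf_clusters = 3 and e_blkno = 800 is
 * reported to fiemap as virt_bytes = 2 * 32K, len_bytes = 3 * 32K and
 * phys_bytes = 800 * 4K; OCFS2_EXT_UNWRITTEN and OCFS2_EXT_REFCOUNTED
 * are translated to FIEMAP_EXTENT_UNWRITTEN and FIEMAP_EXTENT_SHARED
 * respectively.
 */
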
834 int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
835 			   struct buffer_head *bhs[], int flags,
836 			   int (*validate)(struct super_block *sb,
837 					   struct buffer_head *bh))
838 {
839 	int rc = 0;
840 	u64 p_block, p_count;
841 	int i, count, done = 0;
842 
843 	mlog_entry("(inode = %p, v_block = %llu, nr = %d, bhs = %p, "
844 		   "flags = %x, validate = %p)\n",
845 		   inode, (unsigned long long)v_block, nr, bhs, flags,
846 		   validate);
847 
848 	if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >=
849 	    i_size_read(inode)) {
850 		BUG_ON(!(flags & OCFS2_BH_READAHEAD));
851 		goto out;
852 	}
853 
854 	while (done < nr) {
855 		down_read(&OCFS2_I(inode)->ip_alloc_sem);
856 		rc = ocfs2_extent_map_get_blocks(inode, v_block + done,
857 						 &p_block, &p_count, NULL);
858 		up_read(&OCFS2_I(inode)->ip_alloc_sem);
859 		if (rc) {
860 			mlog_errno(rc);
861 			break;
862 		}
863 
864 		if (!p_block) {
865 			rc = -EIO;
866 			mlog(ML_ERROR,
867 			     "Inode #%llu contains a hole at offset %llu\n",
868 			     (unsigned long long)OCFS2_I(inode)->ip_blkno,
869 			     (unsigned long long)(v_block + done) <<
870 			     inode->i_sb->s_blocksize_bits);
871 			break;
872 		}
873 
874 		count = nr - done;
875 		if (p_count < count)
876 			count = p_count;
877 
878 		/*
879 		 * If the caller passed us bhs, they should have come
880 		 * from a previous readahead call to this function.  Thus,
881 		 * they should have the right b_blocknr.
882 		 */
883 		for (i = 0; i < count; i++) {
884 			if (!bhs[done + i])
885 				continue;
886 			BUG_ON(bhs[done + i]->b_blocknr != (p_block + i));
887 		}
888 
889 		rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, count,
890 				       bhs + done, flags, validate);
891 		if (rc) {
892 			mlog_errno(rc);
893 			break;
894 		}
895 		done += count;
896 	}
897 
898 out:
899 	mlog_exit(rc);
900 	return rc;
901 }
902 
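/*
 * Usage sketch (illustrative only): a hypothetical caller reading a
 * single virtual block with a validator of its choosing; bhs[0] may be
 * NULL on entry, in which case the buffer head is allocated for it.
 *
 *	struct buffer_head *bh = NULL;
 *	int rc;
 *
 *	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &bh, 0,
 *				    my_validate);	// my_validate is hypothetical
 *	if (!rc) {
 *		...	use bh->b_data ...
 *		brelse(bh);
 *	}
 */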
903 
904