/* xref: /openbmc/linux/fs/ocfs2/extent_map.c (revision 64c70b1c) */
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * extent_map.c
 *
 * Block/Cluster mapping functions
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License, version 2,  as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>

#define MLOG_MASK_PREFIX ML_EXTENT_MAP
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "extent_map.h"
#include "inode.h"
#include "super.h"

#include "buffer_head_io.h"

/*
 * The extent caching implementation is intentionally trivial.
 *
 * We only cache a small number of extents stored directly on the
 * inode, so linear order operations are acceptable. If we ever want
 * to increase the size of the extent map, then these algorithms must
 * get smarter.
 */
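
/*
 * Each cached extent lives in an ocfs2_extent_map_item on the
 * inode's ip_extent_map.em_list. The list is kept in
 * most-recently-used order: lookups and merges move the item they
 * touched to the front, and once the map holds
 * OCFS2_MAX_EXTENT_MAP_ITEMS entries the item at the tail is
 * recycled for new insertions.
 */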

void ocfs2_extent_map_init(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	oi->ip_extent_map.em_num_items = 0;
	INIT_LIST_HEAD(&oi->ip_extent_map.em_list);
}

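/*
 * Find the cached item covering cpos, if any. On a hit the item is
 * moved to the front of em_list (the most-recently-used slot) and
 * returned via *ret_emi; otherwise *ret_emi is left NULL. The caller
 * is expected to hold ip_lock.
 */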
static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
				      unsigned int cpos,
				      struct ocfs2_extent_map_item **ret_emi)
{
	unsigned int range;
	struct ocfs2_extent_map_item *emi;

	*ret_emi = NULL;

	list_for_each_entry(emi, &em->em_list, ei_list) {
		range = emi->ei_cpos + emi->ei_clusters;

		if (cpos >= emi->ei_cpos && cpos < range) {
			list_move(&emi->ei_list, &em->em_list);

			*ret_emi = emi;
			break;
		}
	}
}

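/*
 * Locked wrapper around __ocfs2_extent_map_lookup(). On a hit, *phys
 * is set to the physical cluster backing cpos and, when the pointers
 * are non-NULL, *len gets the number of clusters remaining in the
 * cached run and *flags gets the record's flags. Returns -ENOENT
 * when cpos is not in the cache.
 */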
static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos,
				   unsigned int *phys, unsigned int *len,
				   unsigned int *flags)
{
	unsigned int coff;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map_item *emi;

	spin_lock(&oi->ip_lock);

	__ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi);
	if (emi) {
		coff = cpos - emi->ei_cpos;
		*phys = emi->ei_phys + coff;
		if (len)
			*len = emi->ei_clusters - coff;
		if (flags)
			*flags = emi->ei_flags;
	}

	spin_unlock(&oi->ip_lock);

	if (emi == NULL)
		return -ENOENT;

	return 0;
}

/*
 * Forget about all clusters equal to or greater than cpos. A record
 * that straddles cpos is trimmed so that it ends at cpos.
 */
void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos)
{
	struct list_head *p, *n;
	struct ocfs2_extent_map_item *emi;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map *em = &oi->ip_extent_map;
	LIST_HEAD(tmp_list);
	unsigned int range;

	spin_lock(&oi->ip_lock);
	list_for_each_safe(p, n, &em->em_list) {
		emi = list_entry(p, struct ocfs2_extent_map_item, ei_list);

		if (emi->ei_cpos >= cpos) {
			/* Full truncate of this record. */
			list_move(&emi->ei_list, &tmp_list);
			BUG_ON(em->em_num_items == 0);
			em->em_num_items--;
			continue;
		}

		range = emi->ei_cpos + emi->ei_clusters;
		if (range > cpos) {
			/* Partial truncate */
			emi->ei_clusters = cpos - emi->ei_cpos;
		}
	}
	spin_unlock(&oi->ip_lock);

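	/*
	 * Free the records that were unlinked above, now that ip_lock
	 * has been dropped.
	 */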
	list_for_each_safe(p, n, &tmp_list) {
		emi = list_entry(p, struct ocfs2_extent_map_item, ei_list);
		list_del(&emi->ei_list);
		kfree(emi);
	}
}

/*
 * Does any part of emi2 overlap emi1? Records that are merely
 * adjacent do not count.
 */
static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1,
				 struct ocfs2_extent_map_item *emi2)
{
	unsigned int range1, range2;

	/*
	 * Check if logical start of emi2 is inside emi1
	 */
	range1 = emi1->ei_cpos + emi1->ei_clusters;
	if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1)
		return 1;

	/*
	 * Check if logical end of emi2 is inside emi1
	 */
	range2 = emi2->ei_cpos + emi2->ei_clusters;
	if (range2 > emi1->ei_cpos && range2 <= range1)
		return 1;

	return 0;
}

static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest,
				  struct ocfs2_extent_map_item *src)
{
	dest->ei_cpos = src->ei_cpos;
	dest->ei_phys = src->ei_phys;
	dest->ei_clusters = src->ei_clusters;
	dest->ei_flags = src->ei_flags;
}

/*
 * Try to merge emi with ins. Returns 1 if merge succeeds, zero
 * otherwise.
 */
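/*
 * For example (made-up numbers): with emi covering cpos 10..13 at
 * phys 100..103, an ins of (cpos 14, phys 104, 2 clusters) extends
 * emi to 6 clusters, while an ins of (cpos 8, phys 98, 2 clusters)
 * pulls emi's start back to cpos 8 / phys 98 and likewise yields 6
 * clusters.
 */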
static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
					 struct ocfs2_extent_map_item *ins)
{
	/*
	 * Handle contiguousness: either ins starts right where emi
	 * ends (append to emi), or ins ends right where emi starts
	 * (prepend to emi).
	 */
	if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) &&
	    ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) &&
	    ins->ei_flags == emi->ei_flags) {
		emi->ei_clusters += ins->ei_clusters;
		return 1;
	} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
		   (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
		   ins->ei_flags == emi->ei_flags) {
		emi->ei_phys = ins->ei_phys;
		emi->ei_cpos = ins->ei_cpos;
		emi->ei_clusters += ins->ei_clusters;
		return 1;
	}

	/*
	 * Overlapping extents - this shouldn't happen unless we've
	 * split an extent to change its flags. That is exceedingly
	 * rare, so there's no sense in trying to optimize it yet.
	 */
	if (ocfs2_ei_is_contained(emi, ins) ||
	    ocfs2_ei_is_contained(ins, emi)) {
		ocfs2_copy_emi_fields(emi, ins);
		return 1;
	}

	/* No merge was possible. */
	return 0;
}

/*
 * In order to reduce complexity on the caller, this insert function
 * is intentionally liberal in what it will accept.
 *
 * The only rule is that the truncate call *must* be used whenever
 * records have been deleted. This avoids inserting overlapping
 * records with different physical mappings.
 */
void ocfs2_extent_map_insert_rec(struct inode *inode,
				 struct ocfs2_extent_rec *rec)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map *em = &oi->ip_extent_map;
	struct ocfs2_extent_map_item *emi, *new_emi = NULL;
	struct ocfs2_extent_map_item ins;

	ins.ei_cpos = le32_to_cpu(rec->e_cpos);
	ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb,
					       le64_to_cpu(rec->e_blkno));
	ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters);
	ins.ei_flags = rec->e_flags;

search:
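	/*
	 * We can pass through here twice: if nothing merges and a new
	 * item has to be allocated, ip_lock is dropped around the
	 * kmalloc() and the merge scan is restarted, since the list
	 * may have changed while it was unlocked.
	 */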
	spin_lock(&oi->ip_lock);

	list_for_each_entry(emi, &em->em_list, ei_list) {
		if (ocfs2_try_to_merge_extent_map(emi, &ins)) {
			list_move(&emi->ei_list, &em->em_list);
			spin_unlock(&oi->ip_lock);
			goto out;
		}
	}

	/*
	 * No item could be merged.
	 *
	 * Either allocate and add a new item, or overwrite the least
	 * recently used one.
	 */

	if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) {
		if (new_emi == NULL) {
			spin_unlock(&oi->ip_lock);

			new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS);
			if (new_emi == NULL)
				goto out;

			goto search;
		}

		ocfs2_copy_emi_fields(new_emi, &ins);
		list_add(&new_emi->ei_list, &em->em_list);
		em->em_num_items++;
		new_emi = NULL;
	} else {
		BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0);
		emi = list_entry(em->em_list.prev,
				 struct ocfs2_extent_map_item, ei_list);
		list_move(&emi->ei_list, &em->em_list);
		ocfs2_copy_emi_fields(emi, &ins);
	}

	spin_unlock(&oi->ip_lock);

out:
	if (new_emi)
		kfree(new_emi);
}

/*
 * Return the index of the first record in el whose extent start
 * (e_cpos) is greater than v_cluster. If no record starts past
 * v_cluster, l_next_free_rec is returned.
 */
static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el,
				       u32 v_cluster)
{
	int i;
	struct ocfs2_extent_rec *rec;

	for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		if (v_cluster < le32_to_cpu(rec->e_cpos))
			break;
	}

	return i;
}

/*
 * Figure out the size of a hole which starts at v_cluster within the given
 * extent list.
 *
 * If there is no more allocation past v_cluster, we return the maximum
 * possible cluster offset (UINT_MAX) minus v_cluster.
 *
 * If we have in-inode extents, then el points to the dinode list and
 * eb_bh is NULL. Otherwise, eb_bh should point to the extent block
 * containing el.
 */
static int ocfs2_figure_hole_clusters(struct inode *inode,
				      struct ocfs2_extent_list *el,
				      struct buffer_head *eb_bh,
				      u32 v_cluster,
				      u32 *num_clusters)
{
	int ret, i;
	struct buffer_head *next_eb_bh = NULL;
	struct ocfs2_extent_block *eb, *next_eb;

	i = ocfs2_search_for_hole_index(el, v_cluster);

	if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) {
		eb = (struct ocfs2_extent_block *)eb_bh->b_data;

		/*
		 * Check the next leaf for any extents.
		 */

		if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL)
			goto no_more_extents;

		ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
				       le64_to_cpu(eb->h_next_leaf_blk),
				       &next_eb_bh, OCFS2_BH_CACHED, inode);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data;

		if (!OCFS2_IS_VALID_EXTENT_BLOCK(next_eb)) {
			ret = -EROFS;
			OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, next_eb);
			goto out;
		}

		el = &next_eb->h_list;

		i = ocfs2_search_for_hole_index(el, v_cluster);
	}

no_more_extents:
	if (i == le16_to_cpu(el->l_next_free_rec)) {
		/*
		 * We're at the end of our existing allocation. Just
		 * return the maximum number of clusters we could
		 * possibly allocate.
		 */
		*num_clusters = UINT_MAX - v_cluster;
	} else {
		*num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster;
	}

	ret = 0;
out:
	brelse(next_eb_bh);
	return ret;
}

/*
 * Return the index of the extent record which contains cluster #v_cluster.
 * -1 is returned if it was not found.
 *
 * Should work fine on interior and exterior nodes.
 */
static int ocfs2_search_extent_list(struct ocfs2_extent_list *el,
				    u32 v_cluster)
{
	int ret = -1;
	int i;
	struct ocfs2_extent_rec *rec;
	u32 rec_end, rec_start, clusters;

	for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		rec_start = le32_to_cpu(rec->e_cpos);
		clusters = ocfs2_rec_clusters(el, rec);

		rec_end = rec_start + clusters;

		if (v_cluster >= rec_start && v_cluster < rec_end) {
			ret = i;
			break;
		}
	}

	return ret;
}

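/*
 * Map the virtual cluster v_cluster of an inode to a physical
 * cluster. On success *p_cluster holds the physical cluster and, if
 * the pointers are non-NULL, *num_clusters holds the number of
 * contiguous clusters starting at v_cluster and *extent_flags holds
 * the record's e_flags. A hole is reported as *p_cluster == 0, with
 * *num_clusters sized to the hole.
 *
 * The in-memory extent map is consulted first; on a miss the on-disk
 * extent list is searched and the result is cached for future
 * lookups.
 */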
int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
		       u32 *p_cluster, u32 *num_clusters,
		       unsigned int *extent_flags)
{
	int ret, i;
	unsigned int flags = 0;
	struct buffer_head *di_bh = NULL;
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_dinode *di;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;
	u32 coff;

	ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
				      num_clusters, extent_flags);
	if (ret == 0)
		goto out;

	ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), OCFS2_I(inode)->ip_blkno,
			       &di_bh, OCFS2_BH_CACHED, inode);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	di = (struct ocfs2_dinode *) di_bh->b_data;
	el = &di->id2.i_list;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in "
				    "leaf block %llu\n", inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	i = ocfs2_search_extent_list(el, v_cluster);
	if (i == -1) {
		/*
		 * A hole was found. Return some canned values that
		 * callers can key on. If asked for, num_clusters will
		 * be populated with the size of the hole.
		 */
		*p_cluster = 0;
		if (num_clusters) {
			ret = ocfs2_figure_hole_clusters(inode, el, eb_bh,
							 v_cluster,
							 num_clusters);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}
	} else {
		rec = &el->l_recs[i];

		BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));

		if (!rec->e_blkno) {
			ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
				    "record (%u, %u, 0)", inode->i_ino,
				    le32_to_cpu(rec->e_cpos),
				    ocfs2_rec_clusters(el, rec));
			ret = -EROFS;
			goto out;
		}

		coff = v_cluster - le32_to_cpu(rec->e_cpos);

		*p_cluster = ocfs2_blocks_to_clusters(inode->i_sb,
						    le64_to_cpu(rec->e_blkno));
		*p_cluster = *p_cluster + coff;

		if (num_clusters)
			*num_clusters = ocfs2_rec_clusters(el, rec) - coff;

		flags = rec->e_flags;

		ocfs2_extent_map_insert_rec(inode, rec);
	}

	if (extent_flags)
		*extent_flags = flags;

out:
	brelse(di_bh);
	brelse(eb_bh);
	return ret;
}

/*
 * This expects alloc_sem to be held. The allocation cannot change at
 * all while the map is in the process of being updated.
 */
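/*
 * Block-granular wrapper around ocfs2_get_clusters(): map v_blkno to
 * the matching physical block in *p_blkno (0 for a hole) and, when
 * ret_count is non-NULL, return the number of contiguous blocks left
 * in that extent (or hole) starting at v_blkno.
 */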
int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
				u64 *ret_count, unsigned int *extent_flags)
{
	int ret;
	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 cpos, num_clusters, p_cluster;
	u64 boff = 0;

	cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno);

	ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters,
				 extent_flags);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * p_cluster == 0 indicates a hole.
	 */
	if (p_cluster) {
		boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
		boff += (v_blkno & (u64)(bpc - 1));
	}

	*p_blkno = boff;

	if (ret_count) {
		*ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
		*ret_count -= v_blkno & (u64)(bpc - 1);
	}

out:
	return ret;
}