xref: /openbmc/linux/fs/ocfs2/move_extents.c (revision 3a83e4e6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* -*- mode: c; c-basic-offset: 8; -*-
3  * vim: noexpandtab sw=8 ts=8 sts=0:
4  *
5  * move_extents.c
6  *
7  * Copyright (C) 2011 Oracle.  All rights reserved.
8  */
9 #include <linux/fs.h>
10 #include <linux/types.h>
11 #include <linux/mount.h>
12 #include <linux/swap.h>
13 
14 #include <cluster/masklog.h>
15 
16 #include "ocfs2.h"
17 #include "ocfs2_ioctl.h"
18 
19 #include "alloc.h"
20 #include "localalloc.h"
21 #include "aops.h"
22 #include "dlmglue.h"
23 #include "extent_map.h"
24 #include "inode.h"
25 #include "journal.h"
26 #include "suballoc.h"
27 #include "uptodate.h"
28 #include "super.h"
29 #include "dir.h"
30 #include "buffer_head_io.h"
31 #include "sysfile.h"
32 #include "refcounttree.h"
33 #include "move_extents.h"
34 
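/*
 * Per-request state for one move-extents call: the target inode and file,
 * the user-supplied request ('range'), the extent tree being edited, the
 * metadata/data allocation contexts, and running totals (credits,
 * clusters_moved, new_phys_cpos) that are reported back to userspace.
 */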
35 struct ocfs2_move_extents_context {
36 	struct inode *inode;
37 	struct file *file;
38 	int auto_defrag;
39 	int partial;
40 	int credits;
41 	u32 new_phys_cpos;
42 	u32 clusters_moved;
43 	u64 refcount_loc;
44 	struct ocfs2_move_extents *range;
45 	struct ocfs2_extent_tree et;
46 	struct ocfs2_alloc_context *meta_ac;
47 	struct ocfs2_alloc_context *data_ac;
48 	struct ocfs2_cached_dealloc_ctxt dealloc;
49 };
50 
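/*
 * Move the data of the extent [cpos, cpos + len) from physical cluster
 * p_cpos to new_p_cpos: copy the pages, split/replace the extent record so
 * it points at the new location, then either send the old clusters to the
 * truncate log or drop their refcount if the extent was refcounted.
 */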
51 static int __ocfs2_move_extent(handle_t *handle,
52 			       struct ocfs2_move_extents_context *context,
53 			       u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
54 			       int ext_flags)
55 {
56 	int ret = 0, index;
57 	struct inode *inode = context->inode;
58 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
59 	struct ocfs2_extent_rec *rec, replace_rec;
60 	struct ocfs2_path *path = NULL;
61 	struct ocfs2_extent_list *el;
62 	u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
63 	u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
64 
65 	ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
66 					       p_cpos, new_p_cpos, len);
67 	if (ret) {
68 		mlog_errno(ret);
69 		goto out;
70 	}
71 
72 	memset(&replace_rec, 0, sizeof(replace_rec));
73 	replace_rec.e_cpos = cpu_to_le32(cpos);
74 	replace_rec.e_leaf_clusters = cpu_to_le16(len);
75 	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
76 								   new_p_cpos));
77 
78 	path = ocfs2_new_path_from_et(&context->et);
79 	if (!path) {
80 		ret = -ENOMEM;
81 		mlog_errno(ret);
82 		goto out;
83 	}
84 
85 	ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
86 	if (ret) {
87 		mlog_errno(ret);
88 		goto out;
89 	}
90 
91 	el = path_leaf_el(path);
92 
93 	index = ocfs2_search_extent_list(el, cpos);
94 	if (index == -1) {
95 		ret = ocfs2_error(inode->i_sb,
96 				  "Inode %llu has an extent at cpos %u which can no longer be found\n",
97 				  (unsigned long long)ino, cpos);
98 		goto out;
99 	}
100 
101 	rec = &el->l_recs[index];
102 
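	/*
	 * The extent flags the caller looked up must still match what is in
	 * the leaf record; we hold ip_alloc_sem for write, so nothing should
	 * have changed the tree underneath us.
	 */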
103 	BUG_ON(ext_flags != rec->e_flags);
104 	/*
105 	 * After moving/defragging to the new location, the extent is no
106 	 * longer going to be refcounted.
107 	 */
108 	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
109 
110 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
111 				      context->et.et_root_bh,
112 				      OCFS2_JOURNAL_ACCESS_WRITE);
113 	if (ret) {
114 		mlog_errno(ret);
115 		goto out;
116 	}
117 
118 	ret = ocfs2_split_extent(handle, &context->et, path, index,
119 				 &replace_rec, context->meta_ac,
120 				 &context->dealloc);
121 	if (ret) {
122 		mlog_errno(ret);
123 		goto out;
124 	}
125 
126 	ocfs2_journal_dirty(handle, context->et.et_root_bh);
127 
128 	context->new_phys_cpos = new_p_cpos;
129 
130 	/*
131 	 * Do we need to append the old clusters to the truncate log?
132 	 */
133 	if (old_blkno) {
134 		if (ext_flags & OCFS2_EXT_REFCOUNTED)
135 			ret = ocfs2_decrease_refcount(inode, handle,
136 					ocfs2_blocks_to_clusters(osb->sb,
137 								 old_blkno),
138 					len, context->meta_ac,
139 					&context->dealloc, 1);
140 		else
141 			ret = ocfs2_truncate_log_append(osb, handle,
142 							old_blkno, len);
143 	}
144 
145 	ocfs2_update_inode_fsync_trans(handle, inode, 0);
146 out:
147 	ocfs2_free_path(path);
148 	return ret;
149 }
150 
151 /*
152  * Lock the allocator and reserve an appropriate number of bits for
153  * metadata blocks.
154  */
155 static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
156 					struct ocfs2_extent_tree *et,
157 					u32 clusters_to_move,
158 					u32 extents_to_split,
159 					struct ocfs2_alloc_context **meta_ac,
160 					int extra_blocks,
161 					int *credits)
162 {
163 	int ret, num_free_extents;
164 	unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
165 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
166 
167 	num_free_extents = ocfs2_num_free_extents(et);
168 	if (num_free_extents < 0) {
169 		ret = num_free_extents;
170 		mlog_errno(ret);
171 		goto out;
172 	}
173 
174 	if (!num_free_extents ||
175 	    (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
176 		extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);
177 
178 	ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
179 	if (ret) {
180 		mlog_errno(ret);
181 		goto out;
182 	}
183 
184 
185 	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
186 
187 	mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
188 	     extra_blocks, clusters_to_move, *credits);
189 out:
190 	if (ret) {
191 		if (*meta_ac) {
192 			ocfs2_free_alloc_context(*meta_ac);
193 			*meta_ac = NULL;
194 		}
195 	}
196 
197 	return ret;
198 }
199 
200 /*
201  * Use one journal handle to guarantee data consistency in case a
202  * crash happens anywhere.
203  *
204  * XXX: defrag can end up finishing only part of the requested extent,
205  * when not enough contiguous clusters can be found in the allocator.
206  */
207 static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
208 			       u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
209 {
210 	int ret, credits = 0, extra_blocks = 0, partial = context->partial;
211 	handle_t *handle;
212 	struct inode *inode = context->inode;
213 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
214 	struct inode *tl_inode = osb->osb_tl_inode;
215 	struct ocfs2_refcount_tree *ref_tree = NULL;
216 	u32 new_phys_cpos, new_len;
217 	u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
218 	int need_free = 0;
219 
220 	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
221 		BUG_ON(!ocfs2_is_refcount_inode(inode));
222 		BUG_ON(!context->refcount_loc);
223 
224 		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
225 					       &ref_tree, NULL);
226 		if (ret) {
227 			mlog_errno(ret);
228 			return ret;
229 		}
230 
231 		ret = ocfs2_prepare_refcount_change_for_del(inode,
232 							context->refcount_loc,
233 							phys_blkno,
234 							*len,
235 							&credits,
236 							&extra_blocks);
237 		if (ret) {
238 			mlog_errno(ret);
239 			goto out;
240 		}
241 	}
242 
243 	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
244 						*len, 1,
245 						&context->meta_ac,
246 						extra_blocks, &credits);
247 	if (ret) {
248 		mlog_errno(ret);
249 		goto out;
250 	}
251 
252 	/*
253 	 * Should we be using the allocation reservation strategy here?
254 	 *
255 	 * if (context->data_ac)
256 	 *	context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
257 	 */
258 
259 	inode_lock(tl_inode);
260 
261 	if (ocfs2_truncate_log_needs_flush(osb)) {
262 		ret = __ocfs2_flush_truncate_log(osb);
263 		if (ret < 0) {
264 			mlog_errno(ret);
265 			goto out_unlock_mutex;
266 		}
267 	}
268 
269 	/*
270 	 * Make sure ocfs2_reserve_clusters() is called after
271 	 * __ocfs2_flush_truncate_log(); calling it first may deadlock
272 	 * on the global bitmap.
273 	 */
278 	ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
279 	if (ret) {
280 		mlog_errno(ret);
281 		goto out_unlock_mutex;
282 	}
283 
284 	handle = ocfs2_start_trans(osb, credits);
285 	if (IS_ERR(handle)) {
286 		ret = PTR_ERR(handle);
287 		mlog_errno(ret);
288 		goto out_unlock_mutex;
289 	}
290 
291 	ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
292 				     &new_phys_cpos, &new_len);
293 	if (ret) {
294 		mlog_errno(ret);
295 		goto out_commit;
296 	}
297 
298 	/*
299 	 * Allowing partial extent moves is a trade-off: it makes the whole
300 	 * defragmentation less likely to fail, but it may also leave the fs
301 	 * even more fragmented after moving.  Let userspace make the
302 	 * decision here.
303 	 */
304 	if (new_len != *len) {
305 		mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
306 		if (!partial) {
307 			context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
308 			ret = -ENOSPC;
309 			need_free = 1;
310 			goto out_commit;
311 		}
312 	}
313 
314 	mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
315 	     phys_cpos, new_phys_cpos);
316 
317 	ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
318 				  new_phys_cpos, ext_flags);
319 	if (ret)
320 		mlog_errno(ret);
321 
322 	if (partial && (new_len != *len))
323 		*len = new_len;
324 
325 	/*
326 	 * Here we should write the new page out first if we are
327 	 * in write-back mode.
328 	 */
329 	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
330 	if (ret)
331 		mlog_errno(ret);
332 
333 out_commit:
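	/*
	 * On the error path, give the clusters we just claimed back to the
	 * allocator they came from (local alloc or the global bitmap).
	 */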
334 	if (need_free && context->data_ac) {
335 		struct ocfs2_alloc_context *data_ac = context->data_ac;
336 
337 		if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
338 			ocfs2_free_local_alloc_bits(osb, handle, data_ac,
339 					new_phys_cpos, new_len);
340 		else
341 			ocfs2_free_clusters(handle,
342 					data_ac->ac_inode,
343 					data_ac->ac_bh,
344 					ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
345 					new_len);
346 	}
347 
348 	ocfs2_commit_trans(osb, handle);
349 
350 out_unlock_mutex:
351 	inode_unlock(tl_inode);
352 
353 	if (context->data_ac) {
354 		ocfs2_free_alloc_context(context->data_ac);
355 		context->data_ac = NULL;
356 	}
357 
358 	if (context->meta_ac) {
359 		ocfs2_free_alloc_context(context->meta_ac);
360 		context->meta_ac = NULL;
361 	}
362 
363 out:
364 	if (ref_tree)
365 		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
366 
367 	return ret;
368 }
369 
370 /*
371  * Find the victim alloc group where #vict_blkno fits.
372  */
373 static int ocfs2_find_victim_alloc_group(struct inode *inode,
374 					 u64 vict_blkno,
375 					 int type, int slot,
376 					 int *vict_bit,
377 					 struct buffer_head **ret_bh)
378 {
379 	int ret, i, bits_per_unit = 0;
380 	u64 blkno;
381 	char namebuf[40];
382 
383 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
384 	struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
385 	struct ocfs2_chain_list *cl;
386 	struct ocfs2_chain_rec *rec;
387 	struct ocfs2_dinode *ac_dinode;
388 	struct ocfs2_group_desc *bg;
389 
390 	ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
391 	ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
392 					 strlen(namebuf), &blkno);
393 	if (ret) {
394 		ret = -ENOENT;
395 		goto out;
396 	}
397 
398 	ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
399 	if (ret) {
400 		mlog_errno(ret);
401 		goto out;
402 	}
403 
404 	ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
405 	cl = &(ac_dinode->id2.i_chain);
406 	rec = &(cl->cl_recs[0]);
407 
408 	if (type == GLOBAL_BITMAP_SYSTEM_INODE)
409 		bits_per_unit = osb->s_clustersize_bits -
410 					inode->i_sb->s_blocksize_bits;
411 	/*
412 	 * Bail out if 'vict_blkno' is out of the valid range.
413 	 */
414 	if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
415 	    (vict_blkno >= ((u64)le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
416 				bits_per_unit))) {
417 		ret = -EINVAL;
418 		goto out;
419 	}
420 
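	/*
	 * Walk every chain in the allocator; within each chain, follow
	 * bg_next_group until we find the group whose block range covers
	 * 'vict_blkno'.
	 */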
421 	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
422 
423 		rec = &(cl->cl_recs[i]);
424 		if (!rec)
425 			continue;
426 
427 		bg = NULL;
428 
429 		do {
430 			if (!bg)
431 				blkno = le64_to_cpu(rec->c_blkno);
432 			else
433 				blkno = le64_to_cpu(bg->bg_next_group);
434 
435 			if (gd_bh) {
436 				brelse(gd_bh);
437 				gd_bh = NULL;
438 			}
439 
440 			ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
441 			if (ret) {
442 				mlog_errno(ret);
443 				goto out;
444 			}
445 
446 			bg = (struct ocfs2_group_desc *)gd_bh->b_data;
447 
448 			if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
449 						le16_to_cpu(bg->bg_bits))) {
450 
451 				*ret_bh = gd_bh;
452 				*vict_bit = (vict_blkno - blkno) >>
453 							bits_per_unit;
454 				mlog(0, "find the victim group: #%llu, "
455 				     "total_bits: %u, vict_bit: %u\n",
456 				     blkno, le16_to_cpu(bg->bg_bits),
457 				     *vict_bit);
458 				goto out;
459 			}
460 
461 		} while (le64_to_cpu(bg->bg_next_group));
462 	}
463 
464 	ret = -EINVAL;
465 out:
466 	brelse(ac_bh);
467 
468 	/*
469 	 * caller has to release the gd_bh properly.
470 	 */
471 	return ret;
472 }
473 
474 /*
475  * XXX: helper to validate and adjust moving goal.
476  */
477 static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
478 					       struct ocfs2_move_extents *range)
479 {
480 	int ret, goal_bit = 0;
481 
482 	struct buffer_head *gd_bh = NULL;
483 	struct ocfs2_group_desc *bg;
484 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
485 	int c_to_b = 1 << (osb->s_clustersize_bits -
486 					inode->i_sb->s_blocksize_bits);
487 
488 	/*
489 	 * Make the goal cluster aligned.
490 	 */
491 	range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
492 						      range->me_goal);
493 	/*
494 	 * Validate that the goal sits within the global_bitmap, and return
495 	 * the victim group descriptor.
496 	 */
497 	ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
498 					    GLOBAL_BITMAP_SYSTEM_INODE,
499 					    OCFS2_INVALID_SLOT,
500 					    &goal_bit, &gd_bh);
501 	if (ret)
502 		goto out;
503 
504 	bg = (struct ocfs2_group_desc *)gd_bh->b_data;
505 
506 	/*
507 	 * The moving goal is not allowed to start at a group descriptor
508 	 * block (block #0 of the group); compromise to the next cluster.
509 	 */
510 	if (range->me_goal == le64_to_cpu(bg->bg_blkno))
511 		range->me_goal += c_to_b;
512 
513 	/*
514 	 * The movement is not allowed to cross two groups.
515 	 */
516 	if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
517 								range->me_len) {
518 		ret = -EINVAL;
519 		goto out;
520 	}
521 	/*
522 	 * More exact validations/adjustments will be performed later,
523 	 * during the move operation for each extent range.
524 	 */
525 	mlog(0, "extents get ready to be moved to #%llu block\n",
526 	     range->me_goal);
527 
528 out:
529 	brelse(gd_bh);
530 
531 	return ret;
532 }
533 
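/*
 * Scan the victim group's bitmap from *goal_bit looking for a run of
 * 'move_len' contiguous free bits.  If a used bit is found more than
 * 'max_hop' bits past the original goal, *phys_cpos is zeroed and the
 * search is abandoned; on success *goal_bit and *phys_cpos are updated
 * to the start of the free run.
 */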
534 static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
535 				    int *goal_bit, u32 move_len, u32 max_hop,
536 				    u32 *phys_cpos)
537 {
538 	int i, used, last_free_bits = 0, base_bit = *goal_bit;
539 	struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
540 	u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
541 						 le64_to_cpu(gd->bg_blkno));
542 
543 	for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {
544 
545 		used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
546 		if (used) {
547 			/*
548 			 * We even tried searching for a free chunk by jumping
549 			 * up to a 'max_hop' distance, but still failed.
550 			 */
551 			if ((i - base_bit) > max_hop) {
552 				*phys_cpos = 0;
553 				break;
554 			}
555 
556 			if (last_free_bits)
557 				last_free_bits = 0;
558 
559 			continue;
560 		} else
561 			last_free_bits++;
562 
563 		if (last_free_bits == move_len) {
564 			*goal_bit = i;
565 			*phys_cpos = base_cpos + i;
566 			break;
567 		}
568 	}
569 
570 	mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos);
571 }
572 
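/*
 * Move one extent to the region around the user-supplied goal: locate the
 * global bitmap group that holds *new_phys_cpos, probe it for a free run
 * of 'len' clusters near the goal, relocate the extent there and mark the
 * new bits as allocated directly in the group bitmap.
 */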
573 static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
574 			     u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
575 			     u32 len, int ext_flags)
576 {
577 	int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
578 	handle_t *handle;
579 	struct inode *inode = context->inode;
580 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
581 	struct inode *tl_inode = osb->osb_tl_inode;
582 	struct inode *gb_inode = NULL;
583 	struct buffer_head *gb_bh = NULL;
584 	struct buffer_head *gd_bh = NULL;
585 	struct ocfs2_group_desc *gd;
586 	struct ocfs2_refcount_tree *ref_tree = NULL;
587 	u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
588 						    context->range->me_threshold);
589 	u64 phys_blkno, new_phys_blkno;
590 
591 	phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
592 
593 	if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
594 		BUG_ON(!ocfs2_is_refcount_inode(inode));
595 		BUG_ON(!context->refcount_loc);
596 
597 		ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
598 					       &ref_tree, NULL);
599 		if (ret) {
600 			mlog_errno(ret);
601 			return ret;
602 		}
603 
604 		ret = ocfs2_prepare_refcount_change_for_del(inode,
605 							context->refcount_loc,
606 							phys_blkno,
607 							len,
608 							&credits,
609 							&extra_blocks);
610 		if (ret) {
611 			mlog_errno(ret);
612 			goto out;
613 		}
614 	}
615 
616 	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
617 						len, 1,
618 						&context->meta_ac,
619 						extra_blocks, &credits);
620 	if (ret) {
621 		mlog_errno(ret);
622 		goto out;
623 	}
624 
625 	/*
626 	 * need to count 2 extra credits for global_bitmap inode and
627 	 * group descriptor.
628 	 */
629 	credits += OCFS2_INODE_UPDATE_CREDITS + 1;
630 
631 	/*
632 	 * ocfs2_move_extent() didn't reserve any clusters in the allocator
633 	 * locking above, so we still need to lock the global_bitmap here.
634 	 */
635 	gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
636 					       OCFS2_INVALID_SLOT);
637 	if (!gb_inode) {
638 		mlog(ML_ERROR, "unable to get global_bitmap inode\n");
639 		ret = -EIO;
640 		goto out;
641 	}
642 
643 	inode_lock(gb_inode);
644 
645 	ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
646 	if (ret) {
647 		mlog_errno(ret);
648 		goto out_unlock_gb_mutex;
649 	}
650 
651 	inode_lock(tl_inode);
652 
653 	handle = ocfs2_start_trans(osb, credits);
654 	if (IS_ERR(handle)) {
655 		ret = PTR_ERR(handle);
656 		mlog_errno(ret);
657 		goto out_unlock_tl_inode;
658 	}
659 
660 	new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
661 	ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
662 					    GLOBAL_BITMAP_SYSTEM_INODE,
663 					    OCFS2_INVALID_SLOT,
664 					    &goal_bit, &gd_bh);
665 	if (ret) {
666 		mlog_errno(ret);
667 		goto out_commit;
668 	}
669 
670 	/*
671 	 * Probe the victim cluster group to find a proper region
672 	 * to fit the wanted movement; it will even perform a
673 	 * best-effort attempt by compromising to a threshold
674 	 * around the goal.
675 	 */
676 	ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
677 				new_phys_cpos);
678 	if (!*new_phys_cpos) {
679 		ret = -ENOSPC;
680 		goto out_commit;
681 	}
682 
683 	ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
684 				  *new_phys_cpos, ext_flags);
685 	if (ret) {
686 		mlog_errno(ret);
687 		goto out_commit;
688 	}
689 
690 	gd = (struct ocfs2_group_desc *)gd_bh->b_data;
691 	ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
692 					       le16_to_cpu(gd->bg_chain));
693 	if (ret) {
694 		mlog_errno(ret);
695 		goto out_commit;
696 	}
697 
698 	ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
699 					 goal_bit, len);
700 	if (ret) {
701 		ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
702 					       le16_to_cpu(gd->bg_chain));
703 		mlog_errno(ret);
704 	}
705 
706 	/*
707 	 * Here we should write the new page out first if we are
708 	 * in write-back mode.
709 	 */
710 	ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
711 	if (ret)
712 		mlog_errno(ret);
713 
714 out_commit:
715 	ocfs2_commit_trans(osb, handle);
716 	brelse(gd_bh);
717 
718 out_unlock_tl_inode:
719 	inode_unlock(tl_inode);
720 
721 	ocfs2_inode_unlock(gb_inode, 1);
722 out_unlock_gb_mutex:
723 	inode_unlock(gb_inode);
724 	brelse(gb_bh);
725 	iput(gb_inode);
726 
727 out:
728 	if (context->meta_ac) {
729 		ocfs2_free_alloc_context(context->meta_ac);
730 		context->meta_ac = NULL;
731 	}
732 
733 	if (ref_tree)
734 		ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
735 
736 	return ret;
737 }
738 
739 /*
740  * Helper to calculate the defrag length in one run according to the threshold.
741  */
742 static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
743 					 u32 threshold, int *skip)
744 {
745 	if ((*alloc_size + *len_defraged) < threshold) {
746 		/*
747 		 * Proceed with defragmentation until we meet the threshold.
748 		 */
749 		*len_defraged += *alloc_size;
750 	} else if (*len_defraged == 0) {
751 		/*
752 		 * XXX: skip a large extent.
753 		 */
754 		*skip = 1;
755 	} else {
756 		/*
757 		 * Split this extent so that it coalesces with the former
758 		 * pieces to reach the threshold.
759 		 *
760 		 * We're done here with one cycle of defragmentation of
761 		 * 'threshold' size; resetting 'len_defraged' forces a
762 		 * new cycle.
763 		 */
764 		*alloc_size = threshold - *len_defraged;
765 		*len_defraged = 0;
766 	}
767 }
768 
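/*
 * Walk the requested cluster range of the inode.  For each allocated
 * extent, either defrag it (auto-defrag mode) or move it towards the
 * goal, accumulating the number of clusters actually moved so it can be
 * reported back to userspace.
 */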
769 static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
770 				struct ocfs2_move_extents_context *context)
771 {
772 	int ret = 0, flags, do_defrag, skip = 0;
773 	u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
774 	u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;
775 
776 	struct inode *inode = context->inode;
777 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
778 	struct ocfs2_move_extents *range = context->range;
779 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
780 
781 	if ((i_size_read(inode) == 0) || (range->me_len == 0))
782 		return 0;
783 
784 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
785 		return 0;
786 
787 	context->refcount_loc = le64_to_cpu(di->i_refcount_loc);
788 
789 	ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
790 	ocfs2_init_dealloc_ctxt(&context->dealloc);
791 
792 	/*
793 	 * TO-DO XXX:
794 	 *
795 	 * - xattr extents.
796 	 */
797 
798 	do_defrag = context->auto_defrag;
799 
800 	/*
801 	 * Extent moving happens in units of clusters.  For the sake of
802 	 * simplicity, we may ignore the two partial clusters in which
803 	 * 'me_start' and 'me_start + me_len' fall.
804 	 */
805 	move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
806 	len_to_move = (range->me_start + range->me_len) >>
807 						osb->s_clustersize_bits;
808 	if (len_to_move >= move_start)
809 		len_to_move -= move_start;
810 	else
811 		len_to_move = 0;
812 
813 	if (do_defrag) {
814 		defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
815 		if (defrag_thresh <= 1)
816 			goto done;
817 	} else
818 		new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
819 							 range->me_goal);
820 
821 	mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
822 	     "thresh: %u\n",
823 	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
824 	     (unsigned long long)range->me_start,
825 	     (unsigned long long)range->me_len,
826 	     move_start, len_to_move, defrag_thresh);
827 
828 	cpos = move_start;
829 	while (len_to_move) {
830 		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
831 					 &flags);
832 		if (ret) {
833 			mlog_errno(ret);
834 			goto out;
835 		}
836 
837 		if (alloc_size > len_to_move)
838 			alloc_size = len_to_move;
839 
840 		/*
841 		 * XXX: how to deal with a hole:
842 		 *
843 		 * - skip the hole of course
844 		 * - force a new defragmentation
845 		 */
846 		if (!phys_cpos) {
847 			if (do_defrag)
848 				len_defraged = 0;
849 
850 			goto next;
851 		}
852 
853 		if (do_defrag) {
854 			ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
855 						     defrag_thresh, &skip);
856 			/*
857 			 * skip large extents
858 			 */
859 			if (skip) {
860 				skip = 0;
861 				goto next;
862 			}
863 
864 			mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
865 			     "alloc_size: %u, len_defraged: %u\n",
866 			     cpos, phys_cpos, alloc_size, len_defraged);
867 
868 			ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
869 						  &alloc_size, flags);
870 		} else {
871 			ret = ocfs2_move_extent(context, cpos, phys_cpos,
872 						&new_phys_cpos, alloc_size,
873 						flags);
874 
875 			new_phys_cpos += alloc_size;
876 		}
877 
878 		if (ret < 0) {
879 			mlog_errno(ret);
880 			goto out;
881 		}
882 
883 		context->clusters_moved += alloc_size;
884 next:
885 		cpos += alloc_size;
886 		len_to_move -= alloc_size;
887 	}
888 
889 done:
890 	range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;
891 
892 out:
893 	range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
894 						      context->clusters_moved);
895 	range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
896 						       context->new_phys_cpos);
897 
898 	ocfs2_schedule_truncate_log_flush(osb, 1);
899 	ocfs2_run_deallocs(osb, &context->dealloc);
900 
901 	return ret;
902 }
903 
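/*
 * Take the cluster locks (rw lock, inode lock, ip_alloc_sem), run the
 * range move/defrag, then update the inode's ctime in its own small
 * transaction.
 */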
904 static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
905 {
906 	int status;
907 	handle_t *handle;
908 	struct inode *inode = context->inode;
909 	struct ocfs2_dinode *di;
910 	struct buffer_head *di_bh = NULL;
911 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
912 
913 	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
914 		return -EROFS;
915 
916 	inode_lock(inode);
917 
918 	/*
919 	 * This prevents concurrent writes from other nodes
920 	 */
921 	status = ocfs2_rw_lock(inode, 1);
922 	if (status) {
923 		mlog_errno(status);
924 		goto out;
925 	}
926 
927 	status = ocfs2_inode_lock(inode, &di_bh, 1);
928 	if (status) {
929 		mlog_errno(status);
930 		goto out_rw_unlock;
931 	}
932 
933 	/*
934 	 * Remember that ip_xattr_sem also needs to be held if necessary.
935 	 */
936 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
937 
938 	status = __ocfs2_move_extents_range(di_bh, context);
939 
940 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
941 	if (status) {
942 		mlog_errno(status);
943 		goto out_inode_unlock;
944 	}
945 
946 	/*
947 	 * We update ctime for these changes
948 	 */
949 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
950 	if (IS_ERR(handle)) {
951 		status = PTR_ERR(handle);
952 		mlog_errno(status);
953 		goto out_inode_unlock;
954 	}
955 
956 	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
957 					 OCFS2_JOURNAL_ACCESS_WRITE);
958 	if (status) {
959 		mlog_errno(status);
960 		goto out_commit;
961 	}
962 
963 	di = (struct ocfs2_dinode *)di_bh->b_data;
964 	inode->i_ctime = current_time(inode);
965 	di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
966 	di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
967 	ocfs2_update_inode_fsync_trans(handle, inode, 0);
968 
969 	ocfs2_journal_dirty(handle, di_bh);
970 
971 out_commit:
972 	ocfs2_commit_trans(osb, handle);
973 
974 out_inode_unlock:
975 	brelse(di_bh);
976 	ocfs2_inode_unlock(inode, 1);
977 out_rw_unlock:
978 	ocfs2_rw_unlock(inode, 1);
979 out:
980 	inode_unlock(inode);
981 
982 	return status;
983 }
984 
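/*
 * Entry point for the move-extents ioctl: copy the request from userspace,
 * clamp it to i_size, then either auto-defrag the range or move it towards
 * the supplied goal.  The (possibly partial) result is copied back to
 * userspace even on failure.
 */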
985 int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
986 {
987 	int status;
988 
989 	struct inode *inode = file_inode(filp);
990 	struct ocfs2_move_extents range;
991 	struct ocfs2_move_extents_context *context;
992 
993 	if (!argp)
994 		return -EINVAL;
995 
996 	status = mnt_want_write_file(filp);
997 	if (status)
998 		return status;
999 
1000 	if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) {
1001 		status = -EPERM;
1002 		goto out_drop;
1003 	}
1004 
1005 	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1006 		status = -EPERM;
1007 		goto out_drop;
1008 	}
1009 
1010 	context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
1011 	if (!context) {
1012 		status = -ENOMEM;
1013 		mlog_errno(status);
1014 		goto out_drop;
1015 	}
1016 
1017 	context->inode = inode;
1018 	context->file = filp;
1019 
1020 	if (copy_from_user(&range, argp, sizeof(range))) {
1021 		status = -EFAULT;
1022 		goto out_free;
1023 	}
1024 
1025 	if (range.me_start > i_size_read(inode)) {
1026 		status = -EINVAL;
1027 		goto out_free;
1028 	}
1029 
1030 	if (range.me_start + range.me_len > i_size_read(inode))
1031 		range.me_len = i_size_read(inode) - range.me_start;
1032 
1033 	context->range = &range;
1034 
1035 	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
1036 		context->auto_defrag = 1;
1037 		/*
1038 		 * OK, the default threshold for defragmentation is 1M,
1039 		 * since our maximum cluster size is also 1M.
1040 		 * Any thoughts?
1041 		 */
1042 		if (!range.me_threshold)
1043 			range.me_threshold = 1024 * 1024;
1044 
1045 		if (range.me_threshold > i_size_read(inode))
1046 			range.me_threshold = i_size_read(inode);
1047 
1048 		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
1049 			context->partial = 1;
1050 	} else {
1051 		/*
1052 		 * Make a first, best-effort attempt to validate and adjust
1053 		 * the goal (a physical address in blocks).  It can't guarantee
1054 		 * that the later operation will always succeed, since the
1055 		 * global_bitmap may change a bit over time.
1056 		 */
1057 
1058 		status = ocfs2_validate_and_adjust_move_goal(inode, &range);
1059 		if (status)
1060 			goto out_copy;
1061 	}
1062 
1063 	status = ocfs2_move_extents(context);
1064 	if (status)
1065 		mlog_errno(status);
1066 out_copy:
1067 	/*
1068 	 * Movement/defragmentation may end up only partially completed;
1069 	 * that's why we return the finished length and new_offset to
1070 	 * userspace even if a failure happens somewhere.
1071 	 */
1072 	if (copy_to_user(argp, &range, sizeof(range)))
1073 		status = -EFAULT;
1074 
1075 out_free:
1076 	kfree(context);
1077 out_drop:
1078 	mnt_drop_write_file(filp);
1079 
1080 	return status;
1081 }
1082
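/*
 * A minimal userspace sketch of driving this ioctl for auto-defrag.  It is
 * illustrative only: it assumes the request structure and flag names from
 * ocfs2_ioctl.h (struct ocfs2_move_extents, OCFS2_IOC_MOVE_EXT,
 * OCFS2_MOVE_EXT_FL_AUTO_DEFRAG), the header include path is hypothetical,
 * and error handling is elided.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <ocfs2/ocfs2_ioctl.h>	// assumed userspace copy of the header
 *
 *	int defrag_range(const char *path, __u64 start, __u64 len)
 *	{
 *		struct ocfs2_move_extents me;
 *		int fd = open(path, O_RDWR);
 *
 *		memset(&me, 0, sizeof(me));
 *		me.me_start = start;		// byte offset in the file
 *		me.me_len = len;		// byte length to defrag
 *		me.me_threshold = 1024 * 1024;	// coalesce up to 1M per run
 *		me.me_flags = OCFS2_MOVE_EXT_FL_AUTO_DEFRAG;
 *
 *		ioctl(fd, OCFS2_IOC_MOVE_EXT, &me);
 *		// me.me_moved_len now holds the bytes actually defragged.
 *		close(fd);
 *		return 0;
 *	}
 */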