xref: /openbmc/linux/fs/ext4/migrate.c (revision 39f555fb)
// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a run of contiguous blocks that can be
 * represented by a single extent
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

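/*
 * Insert the block range accumulated in @lb into @inode (the temporary,
 * extent-based inode) as a single extent and reset the range.  Returns 0
 * if there is nothing to insert or on success, otherwise a negative
 * error code.
 */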
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len   = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on the temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.  Since we
	 * are doing this in a loop we may accumulate extra credits, but
	 * we try not to accumulate too many of them by restarting the
	 * journal below.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
	if (retval < 0)
		goto err_out;
	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
	up_write((&EXT4_I(inode)->i_data_sem));
	ext4_free_ext_path(path);
	lb->first_pblock = 0;
	return retval;
}

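/*
 * Account the physical block @pblock for logical block lb->curr_block.
 * If it is contiguous with the range being accumulated, extend the
 * range; otherwise flush the range via finish_range() and start a new
 * one at this block.
 */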
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

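/*
 * Walk the indirect block at @pblock and feed each mapped entry to
 * update_extent_range(); unmapped entries only advance the logical
 * block number.
 */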
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

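/*
 * Walk the double indirect block at @pblock, descending into each
 * mapped indirect block; an unmapped entry skips an indirect block's
 * worth of logical blocks.
 */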
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

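/*
 * Walk the triple indirect block at @pblock, descending into each
 * mapped double indirect block; an unmapped entry skips
 * max_entries * max_entries logical blocks.
 */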
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

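/*
 * Free the double indirect block @i_data and every indirect block it
 * points to, extending the journal credits as needed before each free.
 */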
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
	int err;

	bh = ext4_sb_bread(sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			err = ext4_journal_ensure_credits(handle,
				EXT4_RESERVE_TRANS_BLOCKS,
				ext4_free_metadata_revoke_credits(sb, 1));
			if (err < 0) {
				put_bh(bh);
				return err;
			}
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
				ext4_free_metadata_revoke_credits(sb, 1));
	if (err < 0)
		return err;
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

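/*
 * Free the triple indirect block @i_data: free each double indirect
 * subtree below it via free_dind_blocks(), then the block itself.
 */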
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
	if (retval < 0)
		return retval;
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

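/*
 * Free the indirect, double indirect and triple indirect meta-data
 * blocks whose numbers were saved in i_data[0..2] before the inode's
 * i_data was overwritten.  Only the mapping blocks are freed; the data
 * blocks they refer to are now owned by the extent tree.
 */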
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		retval = ext4_journal_ensure_credits(handle,
			EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
		if (retval < 0)
			return retval;
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

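/*
 * Switch the original inode over to the extent tree built in @tmp_inode:
 * copy tmp_inode's i_data into @inode, set the extents flag, add the
 * newly allocated index blocks to i_blocks and free the old indirect
 * meta-data blocks.  Fails with -EAGAIN if a block allocation raced
 * with the migration.
 */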
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval, retval2 = 0;
	__le32	i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_ensure_credits(handle, 1, 0);
	if (retval < 0)
		goto err_out;

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE has been cleared, a block allocation
	 * happened after we started the migration and we have to fail it.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have built the extent map in the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got allocated while
	 * adding extents for the extent index blocks.
	 *
	 * While converting to extents we need not update the original
	 * inode's i_blocks for those blocks via the quota APIs; the quota
	 * update already happened via tmp_inode.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty afterwards because we decrement
	 * i_blocks when freeing the indirect meta-data blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	retval2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(retval2 && !retval))
		retval = retval2;

err_out:
	return retval;
}

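/*
 * Free the extent tree block referenced by @ix, recursing into any
 * lower-level index blocks first.  Only extent tree meta-data blocks
 * are freed here, never file data blocks.
 */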
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = ext4_sb_bread(inode->i_sb, block, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
	if (retval < 0)
		return retval;
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

/*
 * Free the extent meta-data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta-data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

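/*
 * Convert a block-mapped (indirect) inode to the extent format: build
 * the extent tree in a temporary unlinked inode by walking the direct,
 * indirect, double indirect and triple indirect blocks, then swap the
 * result into the original inode via ext4_ext_swap_inode_data().
 */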
int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal, tmp_csum_seed;
	uid_t owner[2];
	int alloc_ctx;

	/*
	 * If the filesystem does not support extents, or the inode
	 * is already extent-based, or the inode stores its data inline,
	 * error out.
	 */
	if (!ext4_has_feature_extents(inode->i_sb) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
	    ext4_has_inline_data(inode))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * Don't migrate fast symlinks
		 */
		return retval;

	alloc_ctx = ext4_writepages_down_write(inode->i_sb);

	/*
	 * Worst case we can touch the allocation bitmaps and a block
	 * group descriptor block.  We do need to worry about
	 * credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto out_unlock;
	}
	/*
	 * The goal is the first inode number of the block group that
	 * contains the original inode, so the temporary inode is
	 * allocated in the same group.
	 */
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
				   S_IFREG, NULL, goal, owner, 0);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		goto out_unlock;
	}
	/*
	 * Use the correct seed for checksum (i.e. the seed from 'inode').  This
	 * is so that the metadata blocks will have the correct checksum after
	 * the migration.
	 */
	ei = EXT4_I(inode);
	tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed;
	EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed;
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so the inode will be deleted later
	 * when we drop the inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * Start with one credit accounted for the
	 * superblock modification.
	 *
	 * For the tmp_inode we have already committed the
	 * transaction that created the inode.  Later, as and
	 * when we add extents we extend the journal.
	 */
	/*
	 * Even though we take i_rwsem we can still cause block
	 * allocation via mmap writes to holes.  If we have allocated
	 * new blocks we fail the migration.  New block allocation will
	 * clear the EXT4_STATE_EXT_MIGRATE flag.  The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto out_tmp_inode;
	}

	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32-bit block addresses, 4 bytes each */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information built in
		 * the tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap the inode data, free the
			 * extent details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	retval = ext4_journal_ensure_credits(handle, 1, 0);
	if (retval < 0)
		goto out_stop;
	/*
	 * Mark the tmp_inode as having size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * Set the i_blocks count to zero
	 * so that ext4_evict_inode() does the
	 * right thing.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;
	EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
out_stop:
	ext4_journal_stop(handle);
out_tmp_inode:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);
out_unlock:
	ext4_writepages_up_write(inode->i_sb, alloc_ctx);
	return retval;
}

/*
 * Migrate a simple extent-based inode to use the direct/indirect block
 * map stored in i_data[]
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header	*eh;
	struct ext4_sb_info		*sbi = EXT4_SB(inode->i_sb);
	struct ext4_super_block		*es = sbi->s_es;
	struct ext4_inode_info		*ei = EXT4_I(inode);
	struct ext4_extent		*ex;
	unsigned int			i, len;
	ext4_lblk_t			start, end;
	ext4_fsblk_t			blk;
	handle_t			*handle;
	int				ret, ret2 = 0;
	int				alloc_ctx;

	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (ext4_has_feature_bigalloc(inode->i_sb))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated; otherwise delayed-allocation blocks may not
	 * be reflected in the extent tree and could bypass the checks on the
	 * extent header below.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	alloc_ctx = ext4_writepages_down_write(inode->i_sb);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_unlock;
	}

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ret2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(ret2 && !ret))
		ret = ret2;
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
out_unlock:
	ext4_writepages_up_write(inode->i_sb, alloc_ctx);
	return ret;
}