xref: /openbmc/linux/fs/ext4/extents.c (revision a9b8241594adda0a7a4fb3b87bf29d2dff0d997d)
1a86c6181SAlex Tomas /*
2a86c6181SAlex Tomas  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3a86c6181SAlex Tomas  * Written by Alex Tomas <alex@clusterfs.com>
4a86c6181SAlex Tomas  *
5a86c6181SAlex Tomas  * Architecture independence:
6a86c6181SAlex Tomas  *   Copyright (c) 2005, Bull S.A.
7a86c6181SAlex Tomas  *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
8a86c6181SAlex Tomas  *
9a86c6181SAlex Tomas  * This program is free software; you can redistribute it and/or modify
10a86c6181SAlex Tomas  * it under the terms of the GNU General Public License version 2 as
11a86c6181SAlex Tomas  * published by the Free Software Foundation.
12a86c6181SAlex Tomas  *
13a86c6181SAlex Tomas  * This program is distributed in the hope that it will be useful,
14a86c6181SAlex Tomas  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15a86c6181SAlex Tomas  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16a86c6181SAlex Tomas  * GNU General Public License for more details.
17a86c6181SAlex Tomas  *
18a86c6181SAlex Tomas  * You should have received a copy of the GNU General Public Licens
19a86c6181SAlex Tomas  * along with this program; if not, write to the Free Software
20a86c6181SAlex Tomas  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
21a86c6181SAlex Tomas  */
22a86c6181SAlex Tomas 
23a86c6181SAlex Tomas /*
24a86c6181SAlex Tomas  * Extents support for EXT4
25a86c6181SAlex Tomas  *
26a86c6181SAlex Tomas  * TODO:
27a86c6181SAlex Tomas  *   - ext4*_error() should be used in some situations
28a86c6181SAlex Tomas  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
29a86c6181SAlex Tomas  *   - smart tree reduction
30a86c6181SAlex Tomas  */
31a86c6181SAlex Tomas 
32a86c6181SAlex Tomas #include <linux/fs.h>
33a86c6181SAlex Tomas #include <linux/time.h>
34cd02ff0bSMingming Cao #include <linux/jbd2.h>
35a86c6181SAlex Tomas #include <linux/highuid.h>
36a86c6181SAlex Tomas #include <linux/pagemap.h>
37a86c6181SAlex Tomas #include <linux/quotaops.h>
38a86c6181SAlex Tomas #include <linux/string.h>
39a86c6181SAlex Tomas #include <linux/slab.h>
40a2df2a63SAmit Arora #include <linux/falloc.h>
41a86c6181SAlex Tomas #include <asm/uaccess.h>
426873fa0dSEric Sandeen #include <linux/fiemap.h>
433dcf5451SChristoph Hellwig #include "ext4_jbd2.h"
444a092d73STheodore Ts'o #include "ext4_extents.h"
45f19d5870STao Ma #include "xattr.h"
46a86c6181SAlex Tomas 
470562e0baSJiaying Zhang #include <trace/events/ext4.h>
480562e0baSJiaying Zhang 
495f95d21fSLukas Czerner /*
505f95d21fSLukas Czerner  * used by extent splitting.
515f95d21fSLukas Czerner  */
525f95d21fSLukas Czerner #define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
535f95d21fSLukas Czerner 					due to ENOSPC */
545f95d21fSLukas Czerner #define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
555f95d21fSLukas Czerner #define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
565f95d21fSLukas Czerner 
57dee1f973SDmitry Monakhov #define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
58dee1f973SDmitry Monakhov #define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
59dee1f973SDmitry Monakhov 
607ac5990dSDarrick J. Wong static __le32 ext4_extent_block_csum(struct inode *inode,
617ac5990dSDarrick J. Wong 				     struct ext4_extent_header *eh)
627ac5990dSDarrick J. Wong {
637ac5990dSDarrick J. Wong 	struct ext4_inode_info *ei = EXT4_I(inode);
647ac5990dSDarrick J. Wong 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
657ac5990dSDarrick J. Wong 	__u32 csum;
667ac5990dSDarrick J. Wong 
677ac5990dSDarrick J. Wong 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
687ac5990dSDarrick J. Wong 			   EXT4_EXTENT_TAIL_OFFSET(eh));
697ac5990dSDarrick J. Wong 	return cpu_to_le32(csum);
707ac5990dSDarrick J. Wong }
717ac5990dSDarrick J. Wong 
727ac5990dSDarrick J. Wong static int ext4_extent_block_csum_verify(struct inode *inode,
737ac5990dSDarrick J. Wong 					 struct ext4_extent_header *eh)
747ac5990dSDarrick J. Wong {
757ac5990dSDarrick J. Wong 	struct ext4_extent_tail *et;
767ac5990dSDarrick J. Wong 
777ac5990dSDarrick J. Wong 	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
787ac5990dSDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
797ac5990dSDarrick J. Wong 		return 1;
807ac5990dSDarrick J. Wong 
817ac5990dSDarrick J. Wong 	et = find_ext4_extent_tail(eh);
827ac5990dSDarrick J. Wong 	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
837ac5990dSDarrick J. Wong 		return 0;
847ac5990dSDarrick J. Wong 	return 1;
857ac5990dSDarrick J. Wong }
867ac5990dSDarrick J. Wong 
877ac5990dSDarrick J. Wong static void ext4_extent_block_csum_set(struct inode *inode,
887ac5990dSDarrick J. Wong 				       struct ext4_extent_header *eh)
897ac5990dSDarrick J. Wong {
907ac5990dSDarrick J. Wong 	struct ext4_extent_tail *et;
917ac5990dSDarrick J. Wong 
927ac5990dSDarrick J. Wong 	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
937ac5990dSDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
947ac5990dSDarrick J. Wong 		return;
957ac5990dSDarrick J. Wong 
967ac5990dSDarrick J. Wong 	et = find_ext4_extent_tail(eh);
977ac5990dSDarrick J. Wong 	et->et_checksum = ext4_extent_block_csum(inode, eh);
987ac5990dSDarrick J. Wong }
997ac5990dSDarrick J. Wong 
100d583fb87SAllison Henderson static int ext4_split_extent(handle_t *handle,
101d583fb87SAllison Henderson 				struct inode *inode,
102d583fb87SAllison Henderson 				struct ext4_ext_path *path,
103d583fb87SAllison Henderson 				struct ext4_map_blocks *map,
104d583fb87SAllison Henderson 				int split_flag,
105d583fb87SAllison Henderson 				int flags);
106d583fb87SAllison Henderson 
1075f95d21fSLukas Czerner static int ext4_split_extent_at(handle_t *handle,
1085f95d21fSLukas Czerner 			     struct inode *inode,
1095f95d21fSLukas Czerner 			     struct ext4_ext_path *path,
1105f95d21fSLukas Czerner 			     ext4_lblk_t split,
1115f95d21fSLukas Czerner 			     int split_flag,
1125f95d21fSLukas Czerner 			     int flags);
1135f95d21fSLukas Czerner 
11491dd8c11SLukas Czerner static int ext4_find_delayed_extent(struct inode *inode,
11569eb33dcSZheng Liu 				    struct extent_status *newes);
11691dd8c11SLukas Czerner 
117487caeefSJan Kara static int ext4_ext_truncate_extend_restart(handle_t *handle,
118487caeefSJan Kara 					    struct inode *inode,
119487caeefSJan Kara 					    int needed)
120a86c6181SAlex Tomas {
121a86c6181SAlex Tomas 	int err;
122a86c6181SAlex Tomas 
1230390131bSFrank Mayhar 	if (!ext4_handle_valid(handle))
1240390131bSFrank Mayhar 		return 0;
125a86c6181SAlex Tomas 	if (handle->h_buffer_credits > needed)
1269102e4faSShen Feng 		return 0;
1279102e4faSShen Feng 	err = ext4_journal_extend(handle, needed);
1280123c939STheodore Ts'o 	if (err <= 0)
1299102e4faSShen Feng 		return err;
130487caeefSJan Kara 	err = ext4_truncate_restart_trans(handle, inode, needed);
1310617b83fSDmitry Monakhov 	if (err == 0)
1320617b83fSDmitry Monakhov 		err = -EAGAIN;
133487caeefSJan Kara 
134487caeefSJan Kara 	return err;
135a86c6181SAlex Tomas }
136a86c6181SAlex Tomas 
137a86c6181SAlex Tomas /*
138a86c6181SAlex Tomas  * could return:
139a86c6181SAlex Tomas  *  - EROFS
140a86c6181SAlex Tomas  *  - ENOMEM
141a86c6181SAlex Tomas  */
142a86c6181SAlex Tomas static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
143a86c6181SAlex Tomas 				struct ext4_ext_path *path)
144a86c6181SAlex Tomas {
145a86c6181SAlex Tomas 	if (path->p_bh) {
146a86c6181SAlex Tomas 		/* path points to block */
147a86c6181SAlex Tomas 		return ext4_journal_get_write_access(handle, path->p_bh);
148a86c6181SAlex Tomas 	}
149a86c6181SAlex Tomas 	/* path points to leaf/index in inode body */
150a86c6181SAlex Tomas 	/* we use in-core data, no need to protect them */
151a86c6181SAlex Tomas 	return 0;
152a86c6181SAlex Tomas }
153a86c6181SAlex Tomas 
154a86c6181SAlex Tomas /*
155a86c6181SAlex Tomas  * could return:
156a86c6181SAlex Tomas  *  - EROFS
157a86c6181SAlex Tomas  *  - ENOMEM
158a86c6181SAlex Tomas  *  - EIO
159a86c6181SAlex Tomas  */
1602656497bSDarrick J. Wong int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
1612656497bSDarrick J. Wong 		     struct inode *inode, struct ext4_ext_path *path)
162a86c6181SAlex Tomas {
163a86c6181SAlex Tomas 	int err;
164a86c6181SAlex Tomas 	if (path->p_bh) {
1657ac5990dSDarrick J. Wong 		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
166a86c6181SAlex Tomas 		/* path points to block */
1679ea7a0dfSTheodore Ts'o 		err = __ext4_handle_dirty_metadata(where, line, handle,
1689ea7a0dfSTheodore Ts'o 						   inode, path->p_bh);
169a86c6181SAlex Tomas 	} else {
170a86c6181SAlex Tomas 		/* path points to leaf/index in inode body */
171a86c6181SAlex Tomas 		err = ext4_mark_inode_dirty(handle, inode);
172a86c6181SAlex Tomas 	}
173a86c6181SAlex Tomas 	return err;
174a86c6181SAlex Tomas }
175a86c6181SAlex Tomas 
176f65e6fbaSAlex Tomas static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
177a86c6181SAlex Tomas 			      struct ext4_ext_path *path,
178725d26d3SAneesh Kumar K.V 			      ext4_lblk_t block)
179a86c6181SAlex Tomas {
180a86c6181SAlex Tomas 	if (path) {
18181fdbb4aSYongqiang Yang 		int depth = path->p_depth;
182a86c6181SAlex Tomas 		struct ext4_extent *ex;
183a86c6181SAlex Tomas 
184ad4fb9caSKazuya Mio 		/*
185ad4fb9caSKazuya Mio 		 * Try to predict block placement assuming that we are
186ad4fb9caSKazuya Mio 		 * filling in a file which will eventually be
187ad4fb9caSKazuya Mio 		 * non-sparse --- i.e., in the case of libbfd writing
188ad4fb9caSKazuya Mio 		 * an ELF object sections out-of-order but in a way
189ad4fb9caSKazuya Mio 		 * the eventually results in a contiguous object or
190ad4fb9caSKazuya Mio 		 * executable file, or some database extending a table
191ad4fb9caSKazuya Mio 		 * space file.  However, this is actually somewhat
192ad4fb9caSKazuya Mio 		 * non-ideal if we are writing a sparse file such as
193ad4fb9caSKazuya Mio 		 * qemu or KVM writing a raw image file that is going
194ad4fb9caSKazuya Mio 		 * to stay fairly sparse, since it will end up
195ad4fb9caSKazuya Mio 		 * fragmenting the file system's free space.  Maybe we
196ad4fb9caSKazuya Mio 		 * should have some hueristics or some way to allow
197ad4fb9caSKazuya Mio 		 * userspace to pass a hint to file system,
198b8d6568aSTao Ma 		 * especially if the latter case turns out to be
199ad4fb9caSKazuya Mio 		 * common.
200ad4fb9caSKazuya Mio 		 */
2017e028976SAvantika Mathur 		ex = path[depth].p_ext;
202ad4fb9caSKazuya Mio 		if (ex) {
203ad4fb9caSKazuya Mio 			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
204ad4fb9caSKazuya Mio 			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
205ad4fb9caSKazuya Mio 
206ad4fb9caSKazuya Mio 			if (block > ext_block)
207ad4fb9caSKazuya Mio 				return ext_pblk + (block - ext_block);
208ad4fb9caSKazuya Mio 			else
209ad4fb9caSKazuya Mio 				return ext_pblk - (ext_block - block);
210ad4fb9caSKazuya Mio 		}
211a86c6181SAlex Tomas 
212d0d856e8SRandy Dunlap 		/* it looks like index is empty;
213d0d856e8SRandy Dunlap 		 * try to find starting block from index itself */
214a86c6181SAlex Tomas 		if (path[depth].p_bh)
215a86c6181SAlex Tomas 			return path[depth].p_bh->b_blocknr;
216a86c6181SAlex Tomas 	}
217a86c6181SAlex Tomas 
218a86c6181SAlex Tomas 	/* OK. use inode's group */
219f86186b4SEric Sandeen 	return ext4_inode_to_goal_block(inode);
220a86c6181SAlex Tomas }
221a86c6181SAlex Tomas 
222654b4908SAneesh Kumar K.V /*
223654b4908SAneesh Kumar K.V  * Allocation for a meta data block
224654b4908SAneesh Kumar K.V  */
225f65e6fbaSAlex Tomas static ext4_fsblk_t
226654b4908SAneesh Kumar K.V ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
227a86c6181SAlex Tomas 			struct ext4_ext_path *path,
22855f020dbSAllison Henderson 			struct ext4_extent *ex, int *err, unsigned int flags)
229a86c6181SAlex Tomas {
230f65e6fbaSAlex Tomas 	ext4_fsblk_t goal, newblock;
231a86c6181SAlex Tomas 
232a86c6181SAlex Tomas 	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
23355f020dbSAllison Henderson 	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
23455f020dbSAllison Henderson 					NULL, err);
235a86c6181SAlex Tomas 	return newblock;
236a86c6181SAlex Tomas }
237a86c6181SAlex Tomas 
23855ad63bfSTheodore Ts'o static inline int ext4_ext_space_block(struct inode *inode, int check)
239a86c6181SAlex Tomas {
240a86c6181SAlex Tomas 	int size;
241a86c6181SAlex Tomas 
242a86c6181SAlex Tomas 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
243a86c6181SAlex Tomas 			/ sizeof(struct ext4_extent);
244bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
24502dc62fbSYongqiang Yang 	if (!check && size > 6)
246a86c6181SAlex Tomas 		size = 6;
247a86c6181SAlex Tomas #endif
248a86c6181SAlex Tomas 	return size;
249a86c6181SAlex Tomas }
250a86c6181SAlex Tomas 
25155ad63bfSTheodore Ts'o static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
252a86c6181SAlex Tomas {
253a86c6181SAlex Tomas 	int size;
254a86c6181SAlex Tomas 
255a86c6181SAlex Tomas 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
256a86c6181SAlex Tomas 			/ sizeof(struct ext4_extent_idx);
257bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
25802dc62fbSYongqiang Yang 	if (!check && size > 5)
259a86c6181SAlex Tomas 		size = 5;
260a86c6181SAlex Tomas #endif
261a86c6181SAlex Tomas 	return size;
262a86c6181SAlex Tomas }
263a86c6181SAlex Tomas 
26455ad63bfSTheodore Ts'o static inline int ext4_ext_space_root(struct inode *inode, int check)
265a86c6181SAlex Tomas {
266a86c6181SAlex Tomas 	int size;
267a86c6181SAlex Tomas 
268a86c6181SAlex Tomas 	size = sizeof(EXT4_I(inode)->i_data);
269a86c6181SAlex Tomas 	size -= sizeof(struct ext4_extent_header);
270a86c6181SAlex Tomas 	size /= sizeof(struct ext4_extent);
271bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
27202dc62fbSYongqiang Yang 	if (!check && size > 3)
273a86c6181SAlex Tomas 		size = 3;
274a86c6181SAlex Tomas #endif
275a86c6181SAlex Tomas 	return size;
276a86c6181SAlex Tomas }
277a86c6181SAlex Tomas 
27855ad63bfSTheodore Ts'o static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
279a86c6181SAlex Tomas {
280a86c6181SAlex Tomas 	int size;
281a86c6181SAlex Tomas 
282a86c6181SAlex Tomas 	size = sizeof(EXT4_I(inode)->i_data);
283a86c6181SAlex Tomas 	size -= sizeof(struct ext4_extent_header);
284a86c6181SAlex Tomas 	size /= sizeof(struct ext4_extent_idx);
285bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
28602dc62fbSYongqiang Yang 	if (!check && size > 4)
287a86c6181SAlex Tomas 		size = 4;
288a86c6181SAlex Tomas #endif
289a86c6181SAlex Tomas 	return size;
290a86c6181SAlex Tomas }
291a86c6181SAlex Tomas 
292d2a17637SMingming Cao /*
293d2a17637SMingming Cao  * Calculate the number of metadata blocks needed
294d2a17637SMingming Cao  * to allocate @blocks
295d2a17637SMingming Cao  * Worse case is one block per extent
296d2a17637SMingming Cao  */
29701f49d0bSTheodore Ts'o int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
298d2a17637SMingming Cao {
2999d0be502STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
30081fdbb4aSYongqiang Yang 	int idxs;
301d2a17637SMingming Cao 
3029d0be502STheodore Ts'o 	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
3039d0be502STheodore Ts'o 		/ sizeof(struct ext4_extent_idx));
304d2a17637SMingming Cao 
305d2a17637SMingming Cao 	/*
3069d0be502STheodore Ts'o 	 * If the new delayed allocation block is contiguous with the
3079d0be502STheodore Ts'o 	 * previous da block, it can share index blocks with the
3089d0be502STheodore Ts'o 	 * previous block, so we only need to allocate a new index
3099d0be502STheodore Ts'o 	 * block every idxs leaf blocks.  At ldxs**2 blocks, we need
3109d0be502STheodore Ts'o 	 * an additional index block, and at ldxs**3 blocks, yet
3119d0be502STheodore Ts'o 	 * another index blocks.
312d2a17637SMingming Cao 	 */
3139d0be502STheodore Ts'o 	if (ei->i_da_metadata_calc_len &&
3149d0be502STheodore Ts'o 	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
31581fdbb4aSYongqiang Yang 		int num = 0;
31681fdbb4aSYongqiang Yang 
3179d0be502STheodore Ts'o 		if ((ei->i_da_metadata_calc_len % idxs) == 0)
3189d0be502STheodore Ts'o 			num++;
3199d0be502STheodore Ts'o 		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
3209d0be502STheodore Ts'o 			num++;
3219d0be502STheodore Ts'o 		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
3229d0be502STheodore Ts'o 			num++;
3239d0be502STheodore Ts'o 			ei->i_da_metadata_calc_len = 0;
3249d0be502STheodore Ts'o 		} else
3259d0be502STheodore Ts'o 			ei->i_da_metadata_calc_len++;
3269d0be502STheodore Ts'o 		ei->i_da_metadata_calc_last_lblock++;
327d2a17637SMingming Cao 		return num;
328d2a17637SMingming Cao 	}
329d2a17637SMingming Cao 
3309d0be502STheodore Ts'o 	/*
3319d0be502STheodore Ts'o 	 * In the worst case we need a new set of index blocks at
3329d0be502STheodore Ts'o 	 * every level of the inode's extent tree.
3339d0be502STheodore Ts'o 	 */
3349d0be502STheodore Ts'o 	ei->i_da_metadata_calc_len = 1;
3359d0be502STheodore Ts'o 	ei->i_da_metadata_calc_last_lblock = lblock;
3369d0be502STheodore Ts'o 	return ext_depth(inode) + 1;
3379d0be502STheodore Ts'o }
3389d0be502STheodore Ts'o 
339c29c0ae7SAlex Tomas static int
340c29c0ae7SAlex Tomas ext4_ext_max_entries(struct inode *inode, int depth)
341c29c0ae7SAlex Tomas {
342c29c0ae7SAlex Tomas 	int max;
343c29c0ae7SAlex Tomas 
344c29c0ae7SAlex Tomas 	if (depth == ext_depth(inode)) {
345c29c0ae7SAlex Tomas 		if (depth == 0)
34655ad63bfSTheodore Ts'o 			max = ext4_ext_space_root(inode, 1);
347c29c0ae7SAlex Tomas 		else
34855ad63bfSTheodore Ts'o 			max = ext4_ext_space_root_idx(inode, 1);
349c29c0ae7SAlex Tomas 	} else {
350c29c0ae7SAlex Tomas 		if (depth == 0)
35155ad63bfSTheodore Ts'o 			max = ext4_ext_space_block(inode, 1);
352c29c0ae7SAlex Tomas 		else
35355ad63bfSTheodore Ts'o 			max = ext4_ext_space_block_idx(inode, 1);
354c29c0ae7SAlex Tomas 	}
355c29c0ae7SAlex Tomas 
356c29c0ae7SAlex Tomas 	return max;
357c29c0ae7SAlex Tomas }
358c29c0ae7SAlex Tomas 
35956b19868SAneesh Kumar K.V static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
36056b19868SAneesh Kumar K.V {
361bf89d16fSTheodore Ts'o 	ext4_fsblk_t block = ext4_ext_pblock(ext);
36256b19868SAneesh Kumar K.V 	int len = ext4_ext_get_actual_len(ext);
3635946d089SEryu Guan 	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
3645946d089SEryu Guan 	ext4_lblk_t last = lblock + len - 1;
365e84a26ceSTheodore Ts'o 
3665946d089SEryu Guan 	if (lblock > last)
36731d4f3a2STheodore Ts'o 		return 0;
3686fd058f7STheodore Ts'o 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
36956b19868SAneesh Kumar K.V }
37056b19868SAneesh Kumar K.V 
37156b19868SAneesh Kumar K.V static int ext4_valid_extent_idx(struct inode *inode,
37256b19868SAneesh Kumar K.V 				struct ext4_extent_idx *ext_idx)
37356b19868SAneesh Kumar K.V {
374bf89d16fSTheodore Ts'o 	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
375e84a26ceSTheodore Ts'o 
3766fd058f7STheodore Ts'o 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
37756b19868SAneesh Kumar K.V }
37856b19868SAneesh Kumar K.V 
37956b19868SAneesh Kumar K.V static int ext4_valid_extent_entries(struct inode *inode,
38056b19868SAneesh Kumar K.V 				struct ext4_extent_header *eh,
38156b19868SAneesh Kumar K.V 				int depth)
38256b19868SAneesh Kumar K.V {
38356b19868SAneesh Kumar K.V 	unsigned short entries;
38456b19868SAneesh Kumar K.V 	if (eh->eh_entries == 0)
38556b19868SAneesh Kumar K.V 		return 1;
38656b19868SAneesh Kumar K.V 
38756b19868SAneesh Kumar K.V 	entries = le16_to_cpu(eh->eh_entries);
38856b19868SAneesh Kumar K.V 
38956b19868SAneesh Kumar K.V 	if (depth == 0) {
39056b19868SAneesh Kumar K.V 		/* leaf entries */
39181fdbb4aSYongqiang Yang 		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
3925946d089SEryu Guan 		struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
3935946d089SEryu Guan 		ext4_fsblk_t pblock = 0;
3945946d089SEryu Guan 		ext4_lblk_t lblock = 0;
3955946d089SEryu Guan 		ext4_lblk_t prev = 0;
3965946d089SEryu Guan 		int len = 0;
39756b19868SAneesh Kumar K.V 		while (entries) {
39856b19868SAneesh Kumar K.V 			if (!ext4_valid_extent(inode, ext))
39956b19868SAneesh Kumar K.V 				return 0;
4005946d089SEryu Guan 
4015946d089SEryu Guan 			/* Check for overlapping extents */
4025946d089SEryu Guan 			lblock = le32_to_cpu(ext->ee_block);
4035946d089SEryu Guan 			len = ext4_ext_get_actual_len(ext);
4045946d089SEryu Guan 			if ((lblock <= prev) && prev) {
4055946d089SEryu Guan 				pblock = ext4_ext_pblock(ext);
4065946d089SEryu Guan 				es->s_last_error_block = cpu_to_le64(pblock);
4075946d089SEryu Guan 				return 0;
4085946d089SEryu Guan 			}
40956b19868SAneesh Kumar K.V 			ext++;
41056b19868SAneesh Kumar K.V 			entries--;
4115946d089SEryu Guan 			prev = lblock + len - 1;
41256b19868SAneesh Kumar K.V 		}
41356b19868SAneesh Kumar K.V 	} else {
41481fdbb4aSYongqiang Yang 		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
41556b19868SAneesh Kumar K.V 		while (entries) {
41656b19868SAneesh Kumar K.V 			if (!ext4_valid_extent_idx(inode, ext_idx))
41756b19868SAneesh Kumar K.V 				return 0;
41856b19868SAneesh Kumar K.V 			ext_idx++;
41956b19868SAneesh Kumar K.V 			entries--;
42056b19868SAneesh Kumar K.V 		}
42156b19868SAneesh Kumar K.V 	}
42256b19868SAneesh Kumar K.V 	return 1;
42356b19868SAneesh Kumar K.V }
42456b19868SAneesh Kumar K.V 
425c398eda0STheodore Ts'o static int __ext4_ext_check(const char *function, unsigned int line,
426c398eda0STheodore Ts'o 			    struct inode *inode, struct ext4_extent_header *eh,
427c349179bSTheodore Ts'o 			    int depth, ext4_fsblk_t pblk)
428c29c0ae7SAlex Tomas {
429c29c0ae7SAlex Tomas 	const char *error_msg;
430c29c0ae7SAlex Tomas 	int max = 0;
431c29c0ae7SAlex Tomas 
432c29c0ae7SAlex Tomas 	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
433c29c0ae7SAlex Tomas 		error_msg = "invalid magic";
434c29c0ae7SAlex Tomas 		goto corrupted;
435c29c0ae7SAlex Tomas 	}
436c29c0ae7SAlex Tomas 	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
437c29c0ae7SAlex Tomas 		error_msg = "unexpected eh_depth";
438c29c0ae7SAlex Tomas 		goto corrupted;
439c29c0ae7SAlex Tomas 	}
440c29c0ae7SAlex Tomas 	if (unlikely(eh->eh_max == 0)) {
441c29c0ae7SAlex Tomas 		error_msg = "invalid eh_max";
442c29c0ae7SAlex Tomas 		goto corrupted;
443c29c0ae7SAlex Tomas 	}
444c29c0ae7SAlex Tomas 	max = ext4_ext_max_entries(inode, depth);
445c29c0ae7SAlex Tomas 	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
446c29c0ae7SAlex Tomas 		error_msg = "too large eh_max";
447c29c0ae7SAlex Tomas 		goto corrupted;
448c29c0ae7SAlex Tomas 	}
449c29c0ae7SAlex Tomas 	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
450c29c0ae7SAlex Tomas 		error_msg = "invalid eh_entries";
451c29c0ae7SAlex Tomas 		goto corrupted;
452c29c0ae7SAlex Tomas 	}
45356b19868SAneesh Kumar K.V 	if (!ext4_valid_extent_entries(inode, eh, depth)) {
45456b19868SAneesh Kumar K.V 		error_msg = "invalid extent entries";
45556b19868SAneesh Kumar K.V 		goto corrupted;
45656b19868SAneesh Kumar K.V 	}
4577ac5990dSDarrick J. Wong 	/* Verify checksum on non-root extent tree nodes */
4587ac5990dSDarrick J. Wong 	if (ext_depth(inode) != depth &&
4597ac5990dSDarrick J. Wong 	    !ext4_extent_block_csum_verify(inode, eh)) {
4607ac5990dSDarrick J. Wong 		error_msg = "extent tree corrupted";
4617ac5990dSDarrick J. Wong 		goto corrupted;
4627ac5990dSDarrick J. Wong 	}
463c29c0ae7SAlex Tomas 	return 0;
464c29c0ae7SAlex Tomas 
465c29c0ae7SAlex Tomas corrupted:
466c398eda0STheodore Ts'o 	ext4_error_inode(inode, function, line, 0,
467c349179bSTheodore Ts'o 			 "pblk %llu bad header/extent: %s - magic %x, "
468c29c0ae7SAlex Tomas 			 "entries %u, max %u(%u), depth %u(%u)",
469c349179bSTheodore Ts'o 			 (unsigned long long) pblk, error_msg,
470c349179bSTheodore Ts'o 			 le16_to_cpu(eh->eh_magic),
471c29c0ae7SAlex Tomas 			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
472c29c0ae7SAlex Tomas 			 max, le16_to_cpu(eh->eh_depth), depth);
473c29c0ae7SAlex Tomas 	return -EIO;
474c29c0ae7SAlex Tomas }
475c29c0ae7SAlex Tomas 
476c349179bSTheodore Ts'o #define ext4_ext_check(inode, eh, depth, pblk)			\
477c349179bSTheodore Ts'o 	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
478c29c0ae7SAlex Tomas 
4797a262f7cSAneesh Kumar K.V int ext4_ext_check_inode(struct inode *inode)
4807a262f7cSAneesh Kumar K.V {
481c349179bSTheodore Ts'o 	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
4827a262f7cSAneesh Kumar K.V }
4837a262f7cSAneesh Kumar K.V 
4847d7ea89eSTheodore Ts'o static struct buffer_head *
4857d7ea89eSTheodore Ts'o __read_extent_tree_block(const char *function, unsigned int line,
486107a7bd3STheodore Ts'o 			 struct inode *inode, ext4_fsblk_t pblk, int depth,
487107a7bd3STheodore Ts'o 			 int flags)
488f8489128SDarrick J. Wong {
4897d7ea89eSTheodore Ts'o 	struct buffer_head		*bh;
4907d7ea89eSTheodore Ts'o 	int				err;
491f8489128SDarrick J. Wong 
4927d7ea89eSTheodore Ts'o 	bh = sb_getblk(inode->i_sb, pblk);
4937d7ea89eSTheodore Ts'o 	if (unlikely(!bh))
4947d7ea89eSTheodore Ts'o 		return ERR_PTR(-ENOMEM);
4957d7ea89eSTheodore Ts'o 
4967d7ea89eSTheodore Ts'o 	if (!bh_uptodate_or_lock(bh)) {
4977d7ea89eSTheodore Ts'o 		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
4987d7ea89eSTheodore Ts'o 		err = bh_submit_read(bh);
4997d7ea89eSTheodore Ts'o 		if (err < 0)
5007d7ea89eSTheodore Ts'o 			goto errout;
5017d7ea89eSTheodore Ts'o 	}
5027869a4a6STheodore Ts'o 	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
5037d7ea89eSTheodore Ts'o 		return bh;
5047d7ea89eSTheodore Ts'o 	err = __ext4_ext_check(function, line, inode,
505c349179bSTheodore Ts'o 			       ext_block_hdr(bh), depth, pblk);
5067d7ea89eSTheodore Ts'o 	if (err)
5077d7ea89eSTheodore Ts'o 		goto errout;
508f8489128SDarrick J. Wong 	set_buffer_verified(bh);
509107a7bd3STheodore Ts'o 	/*
510107a7bd3STheodore Ts'o 	 * If this is a leaf block, cache all of its entries
511107a7bd3STheodore Ts'o 	 */
512107a7bd3STheodore Ts'o 	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
513107a7bd3STheodore Ts'o 		struct ext4_extent_header *eh = ext_block_hdr(bh);
514107a7bd3STheodore Ts'o 		struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
515107a7bd3STheodore Ts'o 		ext4_lblk_t prev = 0;
516107a7bd3STheodore Ts'o 		int i;
517107a7bd3STheodore Ts'o 
518107a7bd3STheodore Ts'o 		for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
519107a7bd3STheodore Ts'o 			unsigned int status = EXTENT_STATUS_WRITTEN;
520107a7bd3STheodore Ts'o 			ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
521107a7bd3STheodore Ts'o 			int len = ext4_ext_get_actual_len(ex);
522107a7bd3STheodore Ts'o 
523107a7bd3STheodore Ts'o 			if (prev && (prev != lblk))
524107a7bd3STheodore Ts'o 				ext4_es_cache_extent(inode, prev,
525107a7bd3STheodore Ts'o 						     lblk - prev, ~0,
526107a7bd3STheodore Ts'o 						     EXTENT_STATUS_HOLE);
527107a7bd3STheodore Ts'o 
528107a7bd3STheodore Ts'o 			if (ext4_ext_is_uninitialized(ex))
529107a7bd3STheodore Ts'o 				status = EXTENT_STATUS_UNWRITTEN;
530107a7bd3STheodore Ts'o 			ext4_es_cache_extent(inode, lblk, len,
531107a7bd3STheodore Ts'o 					     ext4_ext_pblock(ex), status);
532107a7bd3STheodore Ts'o 			prev = lblk + len;
533107a7bd3STheodore Ts'o 		}
534107a7bd3STheodore Ts'o 	}
5357d7ea89eSTheodore Ts'o 	return bh;
5367d7ea89eSTheodore Ts'o errout:
5377d7ea89eSTheodore Ts'o 	put_bh(bh);
5387d7ea89eSTheodore Ts'o 	return ERR_PTR(err);
5397d7ea89eSTheodore Ts'o 
540f8489128SDarrick J. Wong }
541f8489128SDarrick J. Wong 
542107a7bd3STheodore Ts'o #define read_extent_tree_block(inode, pblk, depth, flags)		\
543107a7bd3STheodore Ts'o 	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
544107a7bd3STheodore Ts'o 				 (depth), (flags))
545f8489128SDarrick J. Wong 
5467869a4a6STheodore Ts'o /*
5477869a4a6STheodore Ts'o  * This function is called to cache a file's extent information in the
5487869a4a6STheodore Ts'o  * extent status tree
5497869a4a6STheodore Ts'o  */
5507869a4a6STheodore Ts'o int ext4_ext_precache(struct inode *inode)
5517869a4a6STheodore Ts'o {
5527869a4a6STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
5537869a4a6STheodore Ts'o 	struct ext4_ext_path *path = NULL;
5547869a4a6STheodore Ts'o 	struct buffer_head *bh;
5557869a4a6STheodore Ts'o 	int i = 0, depth, ret = 0;
5567869a4a6STheodore Ts'o 
5577869a4a6STheodore Ts'o 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5587869a4a6STheodore Ts'o 		return 0;	/* not an extent-mapped inode */
5597869a4a6STheodore Ts'o 
5607869a4a6STheodore Ts'o 	down_read(&ei->i_data_sem);
5617869a4a6STheodore Ts'o 	depth = ext_depth(inode);
5627869a4a6STheodore Ts'o 
5637869a4a6STheodore Ts'o 	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
5647869a4a6STheodore Ts'o 		       GFP_NOFS);
5657869a4a6STheodore Ts'o 	if (path == NULL) {
5667869a4a6STheodore Ts'o 		up_read(&ei->i_data_sem);
5677869a4a6STheodore Ts'o 		return -ENOMEM;
5687869a4a6STheodore Ts'o 	}
5697869a4a6STheodore Ts'o 
5707869a4a6STheodore Ts'o 	/* Don't cache anything if there are no external extent blocks */
5717869a4a6STheodore Ts'o 	if (depth == 0)
5727869a4a6STheodore Ts'o 		goto out;
5737869a4a6STheodore Ts'o 	path[0].p_hdr = ext_inode_hdr(inode);
5747869a4a6STheodore Ts'o 	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
5757869a4a6STheodore Ts'o 	if (ret)
5767869a4a6STheodore Ts'o 		goto out;
5777869a4a6STheodore Ts'o 	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
5787869a4a6STheodore Ts'o 	while (i >= 0) {
5797869a4a6STheodore Ts'o 		/*
5807869a4a6STheodore Ts'o 		 * If this is a leaf block or we've reached the end of
5817869a4a6STheodore Ts'o 		 * the index block, go up
5827869a4a6STheodore Ts'o 		 */
5837869a4a6STheodore Ts'o 		if ((i == depth) ||
5847869a4a6STheodore Ts'o 		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
5857869a4a6STheodore Ts'o 			brelse(path[i].p_bh);
5867869a4a6STheodore Ts'o 			path[i].p_bh = NULL;
5877869a4a6STheodore Ts'o 			i--;
5887869a4a6STheodore Ts'o 			continue;
5897869a4a6STheodore Ts'o 		}
5907869a4a6STheodore Ts'o 		bh = read_extent_tree_block(inode,
5917869a4a6STheodore Ts'o 					    ext4_idx_pblock(path[i].p_idx++),
5927869a4a6STheodore Ts'o 					    depth - i - 1,
5937869a4a6STheodore Ts'o 					    EXT4_EX_FORCE_CACHE);
5947869a4a6STheodore Ts'o 		if (IS_ERR(bh)) {
5957869a4a6STheodore Ts'o 			ret = PTR_ERR(bh);
5967869a4a6STheodore Ts'o 			break;
5977869a4a6STheodore Ts'o 		}
5987869a4a6STheodore Ts'o 		i++;
5997869a4a6STheodore Ts'o 		path[i].p_bh = bh;
6007869a4a6STheodore Ts'o 		path[i].p_hdr = ext_block_hdr(bh);
6017869a4a6STheodore Ts'o 		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
6027869a4a6STheodore Ts'o 	}
6037869a4a6STheodore Ts'o 	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
6047869a4a6STheodore Ts'o out:
6057869a4a6STheodore Ts'o 	up_read(&ei->i_data_sem);
6067869a4a6STheodore Ts'o 	ext4_ext_drop_refs(path);
6077869a4a6STheodore Ts'o 	kfree(path);
6087869a4a6STheodore Ts'o 	return ret;
6097869a4a6STheodore Ts'o }
6107869a4a6STheodore Ts'o 
611a86c6181SAlex Tomas #ifdef EXT_DEBUG
612a86c6181SAlex Tomas static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
613a86c6181SAlex Tomas {
614a86c6181SAlex Tomas 	int k, l = path->p_depth;
615a86c6181SAlex Tomas 
616a86c6181SAlex Tomas 	ext_debug("path:");
617a86c6181SAlex Tomas 	for (k = 0; k <= l; k++, path++) {
618a86c6181SAlex Tomas 		if (path->p_idx) {
6192ae02107SMingming Cao 		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
620bf89d16fSTheodore Ts'o 			    ext4_idx_pblock(path->p_idx));
621a86c6181SAlex Tomas 		} else if (path->p_ext) {
622553f9008SMingming 			ext_debug("  %d:[%d]%d:%llu ",
623a86c6181SAlex Tomas 				  le32_to_cpu(path->p_ext->ee_block),
624553f9008SMingming 				  ext4_ext_is_uninitialized(path->p_ext),
625a2df2a63SAmit Arora 				  ext4_ext_get_actual_len(path->p_ext),
626bf89d16fSTheodore Ts'o 				  ext4_ext_pblock(path->p_ext));
627a86c6181SAlex Tomas 		} else
628a86c6181SAlex Tomas 			ext_debug("  []");
629a86c6181SAlex Tomas 	}
630a86c6181SAlex Tomas 	ext_debug("\n");
631a86c6181SAlex Tomas }
632a86c6181SAlex Tomas 
633a86c6181SAlex Tomas static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
634a86c6181SAlex Tomas {
635a86c6181SAlex Tomas 	int depth = ext_depth(inode);
636a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
637a86c6181SAlex Tomas 	struct ext4_extent *ex;
638a86c6181SAlex Tomas 	int i;
639a86c6181SAlex Tomas 
640a86c6181SAlex Tomas 	if (!path)
641a86c6181SAlex Tomas 		return;
642a86c6181SAlex Tomas 
643a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
644a86c6181SAlex Tomas 	ex = EXT_FIRST_EXTENT(eh);
645a86c6181SAlex Tomas 
646553f9008SMingming 	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
647553f9008SMingming 
648a86c6181SAlex Tomas 	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
649553f9008SMingming 		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
650553f9008SMingming 			  ext4_ext_is_uninitialized(ex),
651bf89d16fSTheodore Ts'o 			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
652a86c6181SAlex Tomas 	}
653a86c6181SAlex Tomas 	ext_debug("\n");
654a86c6181SAlex Tomas }
6551b16da77SYongqiang Yang 
6561b16da77SYongqiang Yang static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
6571b16da77SYongqiang Yang 			ext4_fsblk_t newblock, int level)
6581b16da77SYongqiang Yang {
6591b16da77SYongqiang Yang 	int depth = ext_depth(inode);
6601b16da77SYongqiang Yang 	struct ext4_extent *ex;
6611b16da77SYongqiang Yang 
6621b16da77SYongqiang Yang 	if (depth != level) {
6631b16da77SYongqiang Yang 		struct ext4_extent_idx *idx;
6641b16da77SYongqiang Yang 		idx = path[level].p_idx;
6651b16da77SYongqiang Yang 		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
6661b16da77SYongqiang Yang 			ext_debug("%d: move %d:%llu in new index %llu\n", level,
6671b16da77SYongqiang Yang 					le32_to_cpu(idx->ei_block),
6681b16da77SYongqiang Yang 					ext4_idx_pblock(idx),
6691b16da77SYongqiang Yang 					newblock);
6701b16da77SYongqiang Yang 			idx++;
6711b16da77SYongqiang Yang 		}
6721b16da77SYongqiang Yang 
6731b16da77SYongqiang Yang 		return;
6741b16da77SYongqiang Yang 	}
6751b16da77SYongqiang Yang 
6761b16da77SYongqiang Yang 	ex = path[depth].p_ext;
6771b16da77SYongqiang Yang 	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
6781b16da77SYongqiang Yang 		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
6791b16da77SYongqiang Yang 				le32_to_cpu(ex->ee_block),
6801b16da77SYongqiang Yang 				ext4_ext_pblock(ex),
6811b16da77SYongqiang Yang 				ext4_ext_is_uninitialized(ex),
6821b16da77SYongqiang Yang 				ext4_ext_get_actual_len(ex),
6831b16da77SYongqiang Yang 				newblock);
6841b16da77SYongqiang Yang 		ex++;
6851b16da77SYongqiang Yang 	}
6861b16da77SYongqiang Yang }
6871b16da77SYongqiang Yang 
688a86c6181SAlex Tomas #else
689a86c6181SAlex Tomas #define ext4_ext_show_path(inode, path)
690a86c6181SAlex Tomas #define ext4_ext_show_leaf(inode, path)
6911b16da77SYongqiang Yang #define ext4_ext_show_move(inode, path, newblock, level)
692a86c6181SAlex Tomas #endif
693a86c6181SAlex Tomas 
694b35905c1SAneesh Kumar K.V void ext4_ext_drop_refs(struct ext4_ext_path *path)
695a86c6181SAlex Tomas {
696a86c6181SAlex Tomas 	int depth = path->p_depth;
697a86c6181SAlex Tomas 	int i;
698a86c6181SAlex Tomas 
699a86c6181SAlex Tomas 	for (i = 0; i <= depth; i++, path++)
700a86c6181SAlex Tomas 		if (path->p_bh) {
701a86c6181SAlex Tomas 			brelse(path->p_bh);
702a86c6181SAlex Tomas 			path->p_bh = NULL;
703a86c6181SAlex Tomas 		}
704a86c6181SAlex Tomas }
705a86c6181SAlex Tomas 
706a86c6181SAlex Tomas /*
707d0d856e8SRandy Dunlap  * ext4_ext_binsearch_idx:
708d0d856e8SRandy Dunlap  * binary search for the closest index of the given block
709c29c0ae7SAlex Tomas  * the header must be checked before calling this
710a86c6181SAlex Tomas  */
711a86c6181SAlex Tomas static void
712725d26d3SAneesh Kumar K.V ext4_ext_binsearch_idx(struct inode *inode,
713725d26d3SAneesh Kumar K.V 			struct ext4_ext_path *path, ext4_lblk_t block)
714a86c6181SAlex Tomas {
715a86c6181SAlex Tomas 	struct ext4_extent_header *eh = path->p_hdr;
716a86c6181SAlex Tomas 	struct ext4_extent_idx *r, *l, *m;
717a86c6181SAlex Tomas 
718a86c6181SAlex Tomas 
719bba90743SEric Sandeen 	ext_debug("binsearch for %u(idx):  ", block);
720a86c6181SAlex Tomas 
721a86c6181SAlex Tomas 	l = EXT_FIRST_INDEX(eh) + 1;
722e9f410b1SDmitry Monakhov 	r = EXT_LAST_INDEX(eh);
723a86c6181SAlex Tomas 	while (l <= r) {
724a86c6181SAlex Tomas 		m = l + (r - l) / 2;
725a86c6181SAlex Tomas 		if (block < le32_to_cpu(m->ei_block))
726a86c6181SAlex Tomas 			r = m - 1;
727a86c6181SAlex Tomas 		else
728a86c6181SAlex Tomas 			l = m + 1;
72926d535edSDmitry Monakhov 		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
73026d535edSDmitry Monakhov 				m, le32_to_cpu(m->ei_block),
73126d535edSDmitry Monakhov 				r, le32_to_cpu(r->ei_block));
732a86c6181SAlex Tomas 	}
733a86c6181SAlex Tomas 
734a86c6181SAlex Tomas 	path->p_idx = l - 1;
7354a3c3a51SZheng Liu 	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
736bf89d16fSTheodore Ts'o 		  ext4_idx_pblock(path->p_idx));
737a86c6181SAlex Tomas 
738a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH
739a86c6181SAlex Tomas 	{
740a86c6181SAlex Tomas 		struct ext4_extent_idx *chix, *ix;
741a86c6181SAlex Tomas 		int k;
742a86c6181SAlex Tomas 
743a86c6181SAlex Tomas 		chix = ix = EXT_FIRST_INDEX(eh);
744a86c6181SAlex Tomas 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
745a86c6181SAlex Tomas 		  if (k != 0 &&
746a86c6181SAlex Tomas 		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
7474776004fSTheodore Ts'o 				printk(KERN_DEBUG "k=%d, ix=0x%p, "
7484776004fSTheodore Ts'o 				       "first=0x%p\n", k,
749a86c6181SAlex Tomas 				       ix, EXT_FIRST_INDEX(eh));
7504776004fSTheodore Ts'o 				printk(KERN_DEBUG "%u <= %u\n",
751a86c6181SAlex Tomas 				       le32_to_cpu(ix->ei_block),
752a86c6181SAlex Tomas 				       le32_to_cpu(ix[-1].ei_block));
753a86c6181SAlex Tomas 			}
754a86c6181SAlex Tomas 			BUG_ON(k && le32_to_cpu(ix->ei_block)
755a86c6181SAlex Tomas 					   <= le32_to_cpu(ix[-1].ei_block));
756a86c6181SAlex Tomas 			if (block < le32_to_cpu(ix->ei_block))
757a86c6181SAlex Tomas 				break;
758a86c6181SAlex Tomas 			chix = ix;
759a86c6181SAlex Tomas 		}
760a86c6181SAlex Tomas 		BUG_ON(chix != path->p_idx);
761a86c6181SAlex Tomas 	}
762a86c6181SAlex Tomas #endif
763a86c6181SAlex Tomas 
764a86c6181SAlex Tomas }
765a86c6181SAlex Tomas 
766a86c6181SAlex Tomas /*
767d0d856e8SRandy Dunlap  * ext4_ext_binsearch:
768d0d856e8SRandy Dunlap  * binary search for closest extent of the given block
769c29c0ae7SAlex Tomas  * the header must be checked before calling this
770a86c6181SAlex Tomas  */
771a86c6181SAlex Tomas static void
772725d26d3SAneesh Kumar K.V ext4_ext_binsearch(struct inode *inode,
773725d26d3SAneesh Kumar K.V 		struct ext4_ext_path *path, ext4_lblk_t block)
774a86c6181SAlex Tomas {
775a86c6181SAlex Tomas 	struct ext4_extent_header *eh = path->p_hdr;
776a86c6181SAlex Tomas 	struct ext4_extent *r, *l, *m;
777a86c6181SAlex Tomas 
778a86c6181SAlex Tomas 	if (eh->eh_entries == 0) {
779a86c6181SAlex Tomas 		/*
780d0d856e8SRandy Dunlap 		 * this leaf is empty:
781a86c6181SAlex Tomas 		 * we get such a leaf in split/add case
782a86c6181SAlex Tomas 		 */
783a86c6181SAlex Tomas 		return;
784a86c6181SAlex Tomas 	}
785a86c6181SAlex Tomas 
786bba90743SEric Sandeen 	ext_debug("binsearch for %u:  ", block);
787a86c6181SAlex Tomas 
788a86c6181SAlex Tomas 	l = EXT_FIRST_EXTENT(eh) + 1;
789e9f410b1SDmitry Monakhov 	r = EXT_LAST_EXTENT(eh);
790a86c6181SAlex Tomas 
791a86c6181SAlex Tomas 	while (l <= r) {
792a86c6181SAlex Tomas 		m = l + (r - l) / 2;
793a86c6181SAlex Tomas 		if (block < le32_to_cpu(m->ee_block))
794a86c6181SAlex Tomas 			r = m - 1;
795a86c6181SAlex Tomas 		else
796a86c6181SAlex Tomas 			l = m + 1;
79726d535edSDmitry Monakhov 		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
79826d535edSDmitry Monakhov 				m, le32_to_cpu(m->ee_block),
79926d535edSDmitry Monakhov 				r, le32_to_cpu(r->ee_block));
800a86c6181SAlex Tomas 	}
801a86c6181SAlex Tomas 
802a86c6181SAlex Tomas 	path->p_ext = l - 1;
803553f9008SMingming 	ext_debug("  -> %d:%llu:[%d]%d ",
804a86c6181SAlex Tomas 			le32_to_cpu(path->p_ext->ee_block),
805bf89d16fSTheodore Ts'o 			ext4_ext_pblock(path->p_ext),
806553f9008SMingming 			ext4_ext_is_uninitialized(path->p_ext),
807a2df2a63SAmit Arora 			ext4_ext_get_actual_len(path->p_ext));
808a86c6181SAlex Tomas 
809a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH
810a86c6181SAlex Tomas 	{
811a86c6181SAlex Tomas 		struct ext4_extent *chex, *ex;
812a86c6181SAlex Tomas 		int k;
813a86c6181SAlex Tomas 
814a86c6181SAlex Tomas 		chex = ex = EXT_FIRST_EXTENT(eh);
815a86c6181SAlex Tomas 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
816a86c6181SAlex Tomas 			BUG_ON(k && le32_to_cpu(ex->ee_block)
817a86c6181SAlex Tomas 					  <= le32_to_cpu(ex[-1].ee_block));
818a86c6181SAlex Tomas 			if (block < le32_to_cpu(ex->ee_block))
819a86c6181SAlex Tomas 				break;
820a86c6181SAlex Tomas 			chex = ex;
821a86c6181SAlex Tomas 		}
822a86c6181SAlex Tomas 		BUG_ON(chex != path->p_ext);
823a86c6181SAlex Tomas 	}
824a86c6181SAlex Tomas #endif
825a86c6181SAlex Tomas 
826a86c6181SAlex Tomas }
827a86c6181SAlex Tomas 
828a86c6181SAlex Tomas int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
829a86c6181SAlex Tomas {
830a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
831a86c6181SAlex Tomas 
832a86c6181SAlex Tomas 	eh = ext_inode_hdr(inode);
833a86c6181SAlex Tomas 	eh->eh_depth = 0;
834a86c6181SAlex Tomas 	eh->eh_entries = 0;
835a86c6181SAlex Tomas 	eh->eh_magic = EXT4_EXT_MAGIC;
83655ad63bfSTheodore Ts'o 	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
837a86c6181SAlex Tomas 	ext4_mark_inode_dirty(handle, inode);
838a86c6181SAlex Tomas 	return 0;
839a86c6181SAlex Tomas }
840a86c6181SAlex Tomas 
841a86c6181SAlex Tomas struct ext4_ext_path *
842725d26d3SAneesh Kumar K.V ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
843107a7bd3STheodore Ts'o 		     struct ext4_ext_path *path, int flags)
844a86c6181SAlex Tomas {
845a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
846a86c6181SAlex Tomas 	struct buffer_head *bh;
847a86c6181SAlex Tomas 	short int depth, i, ppos = 0, alloc = 0;
848860d21e2STheodore Ts'o 	int ret;
849a86c6181SAlex Tomas 
850a86c6181SAlex Tomas 	eh = ext_inode_hdr(inode);
851c29c0ae7SAlex Tomas 	depth = ext_depth(inode);
852a86c6181SAlex Tomas 
853a86c6181SAlex Tomas 	/* account possible depth increase */
854a86c6181SAlex Tomas 	if (!path) {
8555d4958f9SAvantika Mathur 		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
856a86c6181SAlex Tomas 				GFP_NOFS);
857a86c6181SAlex Tomas 		if (!path)
858a86c6181SAlex Tomas 			return ERR_PTR(-ENOMEM);
859a86c6181SAlex Tomas 		alloc = 1;
860a86c6181SAlex Tomas 	}
861a86c6181SAlex Tomas 	path[0].p_hdr = eh;
8621973adcbSShen Feng 	path[0].p_bh = NULL;
863a86c6181SAlex Tomas 
864c29c0ae7SAlex Tomas 	i = depth;
865a86c6181SAlex Tomas 	/* walk through the tree */
866a86c6181SAlex Tomas 	while (i) {
867a86c6181SAlex Tomas 		ext_debug("depth %d: num %d, max %d\n",
868a86c6181SAlex Tomas 			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
869c29c0ae7SAlex Tomas 
870a86c6181SAlex Tomas 		ext4_ext_binsearch_idx(inode, path + ppos, block);
871bf89d16fSTheodore Ts'o 		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
872a86c6181SAlex Tomas 		path[ppos].p_depth = i;
873a86c6181SAlex Tomas 		path[ppos].p_ext = NULL;
874a86c6181SAlex Tomas 
875107a7bd3STheodore Ts'o 		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
876107a7bd3STheodore Ts'o 					    flags);
8777d7ea89eSTheodore Ts'o 		if (IS_ERR(bh)) {
8787d7ea89eSTheodore Ts'o 			ret = PTR_ERR(bh);
879a86c6181SAlex Tomas 			goto err;
880860d21e2STheodore Ts'o 		}
8817d7ea89eSTheodore Ts'o 
882a86c6181SAlex Tomas 		eh = ext_block_hdr(bh);
883a86c6181SAlex Tomas 		ppos++;
884273df556SFrank Mayhar 		if (unlikely(ppos > depth)) {
885273df556SFrank Mayhar 			put_bh(bh);
886273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
887273df556SFrank Mayhar 					 "ppos %d > depth %d", ppos, depth);
888860d21e2STheodore Ts'o 			ret = -EIO;
889273df556SFrank Mayhar 			goto err;
890273df556SFrank Mayhar 		}
891a86c6181SAlex Tomas 		path[ppos].p_bh = bh;
892a86c6181SAlex Tomas 		path[ppos].p_hdr = eh;
893a86c6181SAlex Tomas 	}
894a86c6181SAlex Tomas 
895a86c6181SAlex Tomas 	path[ppos].p_depth = i;
896a86c6181SAlex Tomas 	path[ppos].p_ext = NULL;
897a86c6181SAlex Tomas 	path[ppos].p_idx = NULL;
898a86c6181SAlex Tomas 
899a86c6181SAlex Tomas 	/* find extent */
900a86c6181SAlex Tomas 	ext4_ext_binsearch(inode, path + ppos, block);
9011973adcbSShen Feng 	/* if not an empty leaf */
9021973adcbSShen Feng 	if (path[ppos].p_ext)
903bf89d16fSTheodore Ts'o 		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
904a86c6181SAlex Tomas 
905a86c6181SAlex Tomas 	ext4_ext_show_path(inode, path);
906a86c6181SAlex Tomas 
907a86c6181SAlex Tomas 	return path;
908a86c6181SAlex Tomas 
909a86c6181SAlex Tomas err:
910a86c6181SAlex Tomas 	ext4_ext_drop_refs(path);
911a86c6181SAlex Tomas 	if (alloc)
912a86c6181SAlex Tomas 		kfree(path);
913860d21e2STheodore Ts'o 	return ERR_PTR(ret);
914a86c6181SAlex Tomas }
915a86c6181SAlex Tomas 
916a86c6181SAlex Tomas /*
917d0d856e8SRandy Dunlap  * ext4_ext_insert_index:
918d0d856e8SRandy Dunlap  * insert new index [@logical;@ptr] into the block at @curp;
919d0d856e8SRandy Dunlap  * check where to insert: before @curp or after @curp
920a86c6181SAlex Tomas  */
9211f109d5aSTheodore Ts'o static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
922a86c6181SAlex Tomas 				 struct ext4_ext_path *curp,
923f65e6fbaSAlex Tomas 				 int logical, ext4_fsblk_t ptr)
924a86c6181SAlex Tomas {
925a86c6181SAlex Tomas 	struct ext4_extent_idx *ix;
926a86c6181SAlex Tomas 	int len, err;
927a86c6181SAlex Tomas 
9287e028976SAvantika Mathur 	err = ext4_ext_get_access(handle, inode, curp);
9297e028976SAvantika Mathur 	if (err)
930a86c6181SAlex Tomas 		return err;
931a86c6181SAlex Tomas 
932273df556SFrank Mayhar 	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
933273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode,
934273df556SFrank Mayhar 				 "logical %d == ei_block %d!",
935273df556SFrank Mayhar 				 logical, le32_to_cpu(curp->p_idx->ei_block));
936273df556SFrank Mayhar 		return -EIO;
937273df556SFrank Mayhar 	}
938d4620315SRobin Dong 
939d4620315SRobin Dong 	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
940d4620315SRobin Dong 			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
941d4620315SRobin Dong 		EXT4_ERROR_INODE(inode,
942d4620315SRobin Dong 				 "eh_entries %d >= eh_max %d!",
943d4620315SRobin Dong 				 le16_to_cpu(curp->p_hdr->eh_entries),
944d4620315SRobin Dong 				 le16_to_cpu(curp->p_hdr->eh_max));
945d4620315SRobin Dong 		return -EIO;
946d4620315SRobin Dong 	}
947d4620315SRobin Dong 
948a86c6181SAlex Tomas 	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
949a86c6181SAlex Tomas 		/* insert after */
95080e675f9SEric Gouriou 		ext_debug("insert new index %d after: %llu\n", logical, ptr);
951a86c6181SAlex Tomas 		ix = curp->p_idx + 1;
952a86c6181SAlex Tomas 	} else {
953a86c6181SAlex Tomas 		/* insert before */
95480e675f9SEric Gouriou 		ext_debug("insert new index %d before: %llu\n", logical, ptr);
955a86c6181SAlex Tomas 		ix = curp->p_idx;
956a86c6181SAlex Tomas 	}
957a86c6181SAlex Tomas 
95880e675f9SEric Gouriou 	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
95980e675f9SEric Gouriou 	BUG_ON(len < 0);
96080e675f9SEric Gouriou 	if (len > 0) {
96180e675f9SEric Gouriou 		ext_debug("insert new index %d: "
96280e675f9SEric Gouriou 				"move %d indices from 0x%p to 0x%p\n",
96380e675f9SEric Gouriou 				logical, len, ix, ix + 1);
96480e675f9SEric Gouriou 		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
96580e675f9SEric Gouriou 	}
96680e675f9SEric Gouriou 
967f472e026STao Ma 	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
968f472e026STao Ma 		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
969f472e026STao Ma 		return -EIO;
970f472e026STao Ma 	}
971f472e026STao Ma 
972a86c6181SAlex Tomas 	ix->ei_block = cpu_to_le32(logical);
973f65e6fbaSAlex Tomas 	ext4_idx_store_pblock(ix, ptr);
974e8546d06SMarcin Slusarz 	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
975a86c6181SAlex Tomas 
976273df556SFrank Mayhar 	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
977273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
978273df556SFrank Mayhar 		return -EIO;
979273df556SFrank Mayhar 	}
980a86c6181SAlex Tomas 
981a86c6181SAlex Tomas 	err = ext4_ext_dirty(handle, inode, curp);
982a86c6181SAlex Tomas 	ext4_std_error(inode->i_sb, err);
983a86c6181SAlex Tomas 
984a86c6181SAlex Tomas 	return err;
985a86c6181SAlex Tomas }
986a86c6181SAlex Tomas 
987a86c6181SAlex Tomas /*
988d0d856e8SRandy Dunlap  * ext4_ext_split:
989d0d856e8SRandy Dunlap  * inserts new subtree into the path, using free index entry
990d0d856e8SRandy Dunlap  * at depth @at:
991a86c6181SAlex Tomas  * - allocates all needed blocks (new leaf and all intermediate index blocks)
992a86c6181SAlex Tomas  * - makes decision where to split
993d0d856e8SRandy Dunlap  * - moves remaining extents and index entries (right to the split point)
994a86c6181SAlex Tomas  *   into the newly allocated blocks
995d0d856e8SRandy Dunlap  * - initializes subtree
996a86c6181SAlex Tomas  */
997a86c6181SAlex Tomas static int ext4_ext_split(handle_t *handle, struct inode *inode,
99855f020dbSAllison Henderson 			  unsigned int flags,
999a86c6181SAlex Tomas 			  struct ext4_ext_path *path,
1000a86c6181SAlex Tomas 			  struct ext4_extent *newext, int at)
1001a86c6181SAlex Tomas {
1002a86c6181SAlex Tomas 	struct buffer_head *bh = NULL;
1003a86c6181SAlex Tomas 	int depth = ext_depth(inode);
1004a86c6181SAlex Tomas 	struct ext4_extent_header *neh;
1005a86c6181SAlex Tomas 	struct ext4_extent_idx *fidx;
1006a86c6181SAlex Tomas 	int i = at, k, m, a;
1007f65e6fbaSAlex Tomas 	ext4_fsblk_t newblock, oldblock;
1008a86c6181SAlex Tomas 	__le32 border;
1009f65e6fbaSAlex Tomas 	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
1010a86c6181SAlex Tomas 	int err = 0;
1011a86c6181SAlex Tomas 
1012a86c6181SAlex Tomas 	/* make decision: where to split? */
1013d0d856e8SRandy Dunlap 	/* FIXME: now decision is simplest: at current extent */
1014a86c6181SAlex Tomas 
1015d0d856e8SRandy Dunlap 	/* if current leaf will be split, then we should use
1016a86c6181SAlex Tomas 	 * border from split point */
1017273df556SFrank Mayhar 	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1018273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
1019273df556SFrank Mayhar 		return -EIO;
1020273df556SFrank Mayhar 	}
1021a86c6181SAlex Tomas 	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1022a86c6181SAlex Tomas 		border = path[depth].p_ext[1].ee_block;
1023d0d856e8SRandy Dunlap 		ext_debug("leaf will be split."
1024a86c6181SAlex Tomas 				" next leaf starts at %d\n",
1025a86c6181SAlex Tomas 				  le32_to_cpu(border));
1026a86c6181SAlex Tomas 	} else {
1027a86c6181SAlex Tomas 		border = newext->ee_block;
1028a86c6181SAlex Tomas 		ext_debug("leaf will be added."
1029a86c6181SAlex Tomas 				" next leaf starts at %d\n",
1030a86c6181SAlex Tomas 				le32_to_cpu(border));
1031a86c6181SAlex Tomas 	}
1032a86c6181SAlex Tomas 
1033a86c6181SAlex Tomas 	/*
1034d0d856e8SRandy Dunlap 	 * If error occurs, then we break processing
1035d0d856e8SRandy Dunlap 	 * and mark filesystem read-only. index won't
1036a86c6181SAlex Tomas 	 * be inserted and tree will be in consistent
1037d0d856e8SRandy Dunlap 	 * state. Next mount will repair buffers too.
1038a86c6181SAlex Tomas 	 */
1039a86c6181SAlex Tomas 
1040a86c6181SAlex Tomas 	/*
1041d0d856e8SRandy Dunlap 	 * Get array to track all allocated blocks.
1042d0d856e8SRandy Dunlap 	 * We need this to handle errors and free blocks
1043d0d856e8SRandy Dunlap 	 * upon them.
1044a86c6181SAlex Tomas 	 */
10455d4958f9SAvantika Mathur 	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
1046a86c6181SAlex Tomas 	if (!ablocks)
1047a86c6181SAlex Tomas 		return -ENOMEM;
1048a86c6181SAlex Tomas 
1049a86c6181SAlex Tomas 	/* allocate all needed blocks */
1050a86c6181SAlex Tomas 	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
1051a86c6181SAlex Tomas 	for (a = 0; a < depth - at; a++) {
1052654b4908SAneesh Kumar K.V 		newblock = ext4_ext_new_meta_block(handle, inode, path,
105355f020dbSAllison Henderson 						   newext, &err, flags);
1054a86c6181SAlex Tomas 		if (newblock == 0)
1055a86c6181SAlex Tomas 			goto cleanup;
1056a86c6181SAlex Tomas 		ablocks[a] = newblock;
1057a86c6181SAlex Tomas 	}
1058a86c6181SAlex Tomas 
1059a86c6181SAlex Tomas 	/* initialize new leaf */
1060a86c6181SAlex Tomas 	newblock = ablocks[--a];
1061273df556SFrank Mayhar 	if (unlikely(newblock == 0)) {
1062273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "newblock == 0!");
1063273df556SFrank Mayhar 		err = -EIO;
1064273df556SFrank Mayhar 		goto cleanup;
1065273df556SFrank Mayhar 	}
1066a86c6181SAlex Tomas 	bh = sb_getblk(inode->i_sb, newblock);
1067aebf0243SWang Shilong 	if (unlikely(!bh)) {
1068860d21e2STheodore Ts'o 		err = -ENOMEM;
1069a86c6181SAlex Tomas 		goto cleanup;
1070a86c6181SAlex Tomas 	}
1071a86c6181SAlex Tomas 	lock_buffer(bh);
1072a86c6181SAlex Tomas 
10737e028976SAvantika Mathur 	err = ext4_journal_get_create_access(handle, bh);
10747e028976SAvantika Mathur 	if (err)
1075a86c6181SAlex Tomas 		goto cleanup;
1076a86c6181SAlex Tomas 
1077a86c6181SAlex Tomas 	neh = ext_block_hdr(bh);
1078a86c6181SAlex Tomas 	neh->eh_entries = 0;
107955ad63bfSTheodore Ts'o 	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1080a86c6181SAlex Tomas 	neh->eh_magic = EXT4_EXT_MAGIC;
1081a86c6181SAlex Tomas 	neh->eh_depth = 0;
1082a86c6181SAlex Tomas 
1083d0d856e8SRandy Dunlap 	/* move remainder of path[depth] to the new leaf */
1084273df556SFrank Mayhar 	if (unlikely(path[depth].p_hdr->eh_entries !=
1085273df556SFrank Mayhar 		     path[depth].p_hdr->eh_max)) {
1086273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
1087273df556SFrank Mayhar 				 path[depth].p_hdr->eh_entries,
1088273df556SFrank Mayhar 				 path[depth].p_hdr->eh_max);
1089273df556SFrank Mayhar 		err = -EIO;
1090273df556SFrank Mayhar 		goto cleanup;
1091273df556SFrank Mayhar 	}
1092a86c6181SAlex Tomas 	/* start copy from next extent */
10931b16da77SYongqiang Yang 	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
10941b16da77SYongqiang Yang 	ext4_ext_show_move(inode, path, newblock, depth);
1095a86c6181SAlex Tomas 	if (m) {
10961b16da77SYongqiang Yang 		struct ext4_extent *ex;
10971b16da77SYongqiang Yang 		ex = EXT_FIRST_EXTENT(neh);
10981b16da77SYongqiang Yang 		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1099e8546d06SMarcin Slusarz 		le16_add_cpu(&neh->eh_entries, m);
1100a86c6181SAlex Tomas 	}
1101a86c6181SAlex Tomas 
11027ac5990dSDarrick J. Wong 	ext4_extent_block_csum_set(inode, neh);
1103a86c6181SAlex Tomas 	set_buffer_uptodate(bh);
1104a86c6181SAlex Tomas 	unlock_buffer(bh);
1105a86c6181SAlex Tomas 
11060390131bSFrank Mayhar 	err = ext4_handle_dirty_metadata(handle, inode, bh);
11077e028976SAvantika Mathur 	if (err)
1108a86c6181SAlex Tomas 		goto cleanup;
1109a86c6181SAlex Tomas 	brelse(bh);
1110a86c6181SAlex Tomas 	bh = NULL;
1111a86c6181SAlex Tomas 
1112a86c6181SAlex Tomas 	/* correct old leaf */
1113a86c6181SAlex Tomas 	if (m) {
11147e028976SAvantika Mathur 		err = ext4_ext_get_access(handle, inode, path + depth);
11157e028976SAvantika Mathur 		if (err)
1116a86c6181SAlex Tomas 			goto cleanup;
1117e8546d06SMarcin Slusarz 		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
11187e028976SAvantika Mathur 		err = ext4_ext_dirty(handle, inode, path + depth);
11197e028976SAvantika Mathur 		if (err)
1120a86c6181SAlex Tomas 			goto cleanup;
1121a86c6181SAlex Tomas 
1122a86c6181SAlex Tomas 	}
1123a86c6181SAlex Tomas 
1124a86c6181SAlex Tomas 	/* create intermediate indexes */
1125a86c6181SAlex Tomas 	k = depth - at - 1;
1126273df556SFrank Mayhar 	if (unlikely(k < 0)) {
1127273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
1128273df556SFrank Mayhar 		err = -EIO;
1129273df556SFrank Mayhar 		goto cleanup;
1130273df556SFrank Mayhar 	}
1131a86c6181SAlex Tomas 	if (k)
1132a86c6181SAlex Tomas 		ext_debug("create %d intermediate indices\n", k);
1133a86c6181SAlex Tomas 	/* insert new index into current index block */
1134a86c6181SAlex Tomas 	/* current depth stored in i var */
1135a86c6181SAlex Tomas 	i = depth - 1;
1136a86c6181SAlex Tomas 	while (k--) {
1137a86c6181SAlex Tomas 		oldblock = newblock;
1138a86c6181SAlex Tomas 		newblock = ablocks[--a];
1139bba90743SEric Sandeen 		bh = sb_getblk(inode->i_sb, newblock);
1140aebf0243SWang Shilong 		if (unlikely(!bh)) {
1141860d21e2STheodore Ts'o 			err = -ENOMEM;
1142a86c6181SAlex Tomas 			goto cleanup;
1143a86c6181SAlex Tomas 		}
1144a86c6181SAlex Tomas 		lock_buffer(bh);
1145a86c6181SAlex Tomas 
11467e028976SAvantika Mathur 		err = ext4_journal_get_create_access(handle, bh);
11477e028976SAvantika Mathur 		if (err)
1148a86c6181SAlex Tomas 			goto cleanup;
1149a86c6181SAlex Tomas 
1150a86c6181SAlex Tomas 		neh = ext_block_hdr(bh);
1151a86c6181SAlex Tomas 		neh->eh_entries = cpu_to_le16(1);
1152a86c6181SAlex Tomas 		neh->eh_magic = EXT4_EXT_MAGIC;
115355ad63bfSTheodore Ts'o 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1154a86c6181SAlex Tomas 		neh->eh_depth = cpu_to_le16(depth - i);
1155a86c6181SAlex Tomas 		fidx = EXT_FIRST_INDEX(neh);
1156a86c6181SAlex Tomas 		fidx->ei_block = border;
1157f65e6fbaSAlex Tomas 		ext4_idx_store_pblock(fidx, oldblock);
1158a86c6181SAlex Tomas 
1159bba90743SEric Sandeen 		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
1160bba90743SEric Sandeen 				i, newblock, le32_to_cpu(border), oldblock);
1161a86c6181SAlex Tomas 
11621b16da77SYongqiang Yang 		/* move remainder of path[i] to the new index block */
1163273df556SFrank Mayhar 		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1164273df556SFrank Mayhar 					EXT_LAST_INDEX(path[i].p_hdr))) {
1165273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
1166273df556SFrank Mayhar 					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1167273df556SFrank Mayhar 					 le32_to_cpu(path[i].p_ext->ee_block));
1168273df556SFrank Mayhar 			err = -EIO;
1169273df556SFrank Mayhar 			goto cleanup;
1170273df556SFrank Mayhar 		}
11711b16da77SYongqiang Yang 		/* start copy indexes */
11721b16da77SYongqiang Yang 		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
11731b16da77SYongqiang Yang 		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
11741b16da77SYongqiang Yang 				EXT_MAX_INDEX(path[i].p_hdr));
11751b16da77SYongqiang Yang 		ext4_ext_show_move(inode, path, newblock, i);
1176a86c6181SAlex Tomas 		if (m) {
11771b16da77SYongqiang Yang 			memmove(++fidx, path[i].p_idx,
1178a86c6181SAlex Tomas 				sizeof(struct ext4_extent_idx) * m);
1179e8546d06SMarcin Slusarz 			le16_add_cpu(&neh->eh_entries, m);
1180a86c6181SAlex Tomas 		}
11817ac5990dSDarrick J. Wong 		ext4_extent_block_csum_set(inode, neh);
1182a86c6181SAlex Tomas 		set_buffer_uptodate(bh);
1183a86c6181SAlex Tomas 		unlock_buffer(bh);
1184a86c6181SAlex Tomas 
11850390131bSFrank Mayhar 		err = ext4_handle_dirty_metadata(handle, inode, bh);
11867e028976SAvantika Mathur 		if (err)
1187a86c6181SAlex Tomas 			goto cleanup;
1188a86c6181SAlex Tomas 		brelse(bh);
1189a86c6181SAlex Tomas 		bh = NULL;
1190a86c6181SAlex Tomas 
1191a86c6181SAlex Tomas 		/* correct old index */
1192a86c6181SAlex Tomas 		if (m) {
1193a86c6181SAlex Tomas 			err = ext4_ext_get_access(handle, inode, path + i);
1194a86c6181SAlex Tomas 			if (err)
1195a86c6181SAlex Tomas 				goto cleanup;
1196e8546d06SMarcin Slusarz 			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1197a86c6181SAlex Tomas 			err = ext4_ext_dirty(handle, inode, path + i);
1198a86c6181SAlex Tomas 			if (err)
1199a86c6181SAlex Tomas 				goto cleanup;
1200a86c6181SAlex Tomas 		}
1201a86c6181SAlex Tomas 
1202a86c6181SAlex Tomas 		i--;
1203a86c6181SAlex Tomas 	}
1204a86c6181SAlex Tomas 
1205a86c6181SAlex Tomas 	/* insert new index */
1206a86c6181SAlex Tomas 	err = ext4_ext_insert_index(handle, inode, path + at,
1207a86c6181SAlex Tomas 				    le32_to_cpu(border), newblock);
1208a86c6181SAlex Tomas 
1209a86c6181SAlex Tomas cleanup:
1210a86c6181SAlex Tomas 	if (bh) {
1211a86c6181SAlex Tomas 		if (buffer_locked(bh))
1212a86c6181SAlex Tomas 			unlock_buffer(bh);
1213a86c6181SAlex Tomas 		brelse(bh);
1214a86c6181SAlex Tomas 	}
1215a86c6181SAlex Tomas 
1216a86c6181SAlex Tomas 	if (err) {
1217a86c6181SAlex Tomas 		/* free all allocated blocks in error case */
1218a86c6181SAlex Tomas 		for (i = 0; i < depth; i++) {
1219a86c6181SAlex Tomas 			if (!ablocks[i])
1220a86c6181SAlex Tomas 				continue;
12217dc57615SPeter Huewe 			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1222e6362609STheodore Ts'o 					 EXT4_FREE_BLOCKS_METADATA);
1223a86c6181SAlex Tomas 		}
1224a86c6181SAlex Tomas 	}
1225a86c6181SAlex Tomas 	kfree(ablocks);
1226a86c6181SAlex Tomas 
1227a86c6181SAlex Tomas 	return err;
1228a86c6181SAlex Tomas }
1229a86c6181SAlex Tomas 
1230a86c6181SAlex Tomas /*
1231d0d856e8SRandy Dunlap  * ext4_ext_grow_indepth:
1232d0d856e8SRandy Dunlap  * implements tree growing procedure:
1233a86c6181SAlex Tomas  * - allocates new block
1234a86c6181SAlex Tomas  * - moves top-level data (index block or leaf) into the new block
1235d0d856e8SRandy Dunlap  * - initializes new top-level, creating index that points to the
1236a86c6181SAlex Tomas  *   just created block
1237a86c6181SAlex Tomas  */
1238a86c6181SAlex Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
123955f020dbSAllison Henderson 				 unsigned int flags,
1240a86c6181SAlex Tomas 				 struct ext4_extent *newext)
1241a86c6181SAlex Tomas {
1242a86c6181SAlex Tomas 	struct ext4_extent_header *neh;
1243a86c6181SAlex Tomas 	struct buffer_head *bh;
1244f65e6fbaSAlex Tomas 	ext4_fsblk_t newblock;
1245a86c6181SAlex Tomas 	int err = 0;
1246a86c6181SAlex Tomas 
12471939dd84SDmitry Monakhov 	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
124855f020dbSAllison Henderson 		newext, &err, flags);
1249a86c6181SAlex Tomas 	if (newblock == 0)
1250a86c6181SAlex Tomas 		return err;
1251a86c6181SAlex Tomas 
1252a86c6181SAlex Tomas 	bh = sb_getblk(inode->i_sb, newblock);
1253aebf0243SWang Shilong 	if (unlikely(!bh))
1254860d21e2STheodore Ts'o 		return -ENOMEM;
1255a86c6181SAlex Tomas 	lock_buffer(bh);
1256a86c6181SAlex Tomas 
12577e028976SAvantika Mathur 	err = ext4_journal_get_create_access(handle, bh);
12587e028976SAvantika Mathur 	if (err) {
1259a86c6181SAlex Tomas 		unlock_buffer(bh);
1260a86c6181SAlex Tomas 		goto out;
1261a86c6181SAlex Tomas 	}
1262a86c6181SAlex Tomas 
1263a86c6181SAlex Tomas 	/* move top-level index/leaf into new block */
12641939dd84SDmitry Monakhov 	memmove(bh->b_data, EXT4_I(inode)->i_data,
12651939dd84SDmitry Monakhov 		sizeof(EXT4_I(inode)->i_data));
1266a86c6181SAlex Tomas 
1267a86c6181SAlex Tomas 	/* set size of new block */
1268a86c6181SAlex Tomas 	neh = ext_block_hdr(bh);
1269a86c6181SAlex Tomas 	/* old root could have indexes or leaves
1270a86c6181SAlex Tomas 	 * so calculate e_max right way */
1271a86c6181SAlex Tomas 	if (ext_depth(inode))
127255ad63bfSTheodore Ts'o 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1273a86c6181SAlex Tomas 	else
127455ad63bfSTheodore Ts'o 		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1275a86c6181SAlex Tomas 	neh->eh_magic = EXT4_EXT_MAGIC;
12767ac5990dSDarrick J. Wong 	ext4_extent_block_csum_set(inode, neh);
1277a86c6181SAlex Tomas 	set_buffer_uptodate(bh);
1278a86c6181SAlex Tomas 	unlock_buffer(bh);
1279a86c6181SAlex Tomas 
12800390131bSFrank Mayhar 	err = ext4_handle_dirty_metadata(handle, inode, bh);
12817e028976SAvantika Mathur 	if (err)
1282a86c6181SAlex Tomas 		goto out;
1283a86c6181SAlex Tomas 
12841939dd84SDmitry Monakhov 	/* Update top-level index: num,max,pointer */
1285a86c6181SAlex Tomas 	neh = ext_inode_hdr(inode);
12861939dd84SDmitry Monakhov 	neh->eh_entries = cpu_to_le16(1);
12871939dd84SDmitry Monakhov 	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
12881939dd84SDmitry Monakhov 	if (neh->eh_depth == 0) {
12891939dd84SDmitry Monakhov 		/* Root extent block becomes index block */
12901939dd84SDmitry Monakhov 		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
12911939dd84SDmitry Monakhov 		EXT_FIRST_INDEX(neh)->ei_block =
12921939dd84SDmitry Monakhov 			EXT_FIRST_EXTENT(neh)->ee_block;
12931939dd84SDmitry Monakhov 	}
12942ae02107SMingming Cao 	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
1295a86c6181SAlex Tomas 		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
12965a0790c2SAndi Kleen 		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1297bf89d16fSTheodore Ts'o 		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1298a86c6181SAlex Tomas 
1299ba39ebb6SWei Yongjun 	le16_add_cpu(&neh->eh_depth, 1);
13001939dd84SDmitry Monakhov 	ext4_mark_inode_dirty(handle, inode);
1301a86c6181SAlex Tomas out:
1302a86c6181SAlex Tomas 	brelse(bh);
1303a86c6181SAlex Tomas 
1304a86c6181SAlex Tomas 	return err;
1305a86c6181SAlex Tomas }
1306a86c6181SAlex Tomas 
1307a86c6181SAlex Tomas /*
1308d0d856e8SRandy Dunlap  * ext4_ext_create_new_leaf:
1309d0d856e8SRandy Dunlap  * finds empty index and adds new leaf.
1310d0d856e8SRandy Dunlap  * if no free index is found, then it requests in-depth growing.
1311a86c6181SAlex Tomas  */
1312a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1313107a7bd3STheodore Ts'o 				    unsigned int mb_flags,
1314107a7bd3STheodore Ts'o 				    unsigned int gb_flags,
1315a86c6181SAlex Tomas 				    struct ext4_ext_path *path,
1316a86c6181SAlex Tomas 				    struct ext4_extent *newext)
1317a86c6181SAlex Tomas {
1318a86c6181SAlex Tomas 	struct ext4_ext_path *curp;
1319a86c6181SAlex Tomas 	int depth, i, err = 0;
1320a86c6181SAlex Tomas 
1321a86c6181SAlex Tomas repeat:
1322a86c6181SAlex Tomas 	i = depth = ext_depth(inode);
1323a86c6181SAlex Tomas 
1324a86c6181SAlex Tomas 	/* walk up to the tree and look for free index entry */
1325a86c6181SAlex Tomas 	curp = path + depth;
1326a86c6181SAlex Tomas 	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1327a86c6181SAlex Tomas 		i--;
1328a86c6181SAlex Tomas 		curp--;
1329a86c6181SAlex Tomas 	}
1330a86c6181SAlex Tomas 
1331d0d856e8SRandy Dunlap 	/* we use already allocated block for index block,
1332d0d856e8SRandy Dunlap 	 * so subsequent data blocks should be contiguous */
1333a86c6181SAlex Tomas 	if (EXT_HAS_FREE_INDEX(curp)) {
1334a86c6181SAlex Tomas 		/* if we found index with free entry, then use that
1335a86c6181SAlex Tomas 		 * entry: create all needed subtree and add new leaf */
1336107a7bd3STheodore Ts'o 		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1337787e0981SShen Feng 		if (err)
1338787e0981SShen Feng 			goto out;
1339a86c6181SAlex Tomas 
1340a86c6181SAlex Tomas 		/* refill path */
1341a86c6181SAlex Tomas 		ext4_ext_drop_refs(path);
1342a86c6181SAlex Tomas 		path = ext4_ext_find_extent(inode,
1343725d26d3SAneesh Kumar K.V 				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1344107a7bd3STheodore Ts'o 				    path, gb_flags);
1345a86c6181SAlex Tomas 		if (IS_ERR(path))
1346a86c6181SAlex Tomas 			err = PTR_ERR(path);
1347a86c6181SAlex Tomas 	} else {
1348a86c6181SAlex Tomas 		/* tree is full, time to grow in depth */
1349107a7bd3STheodore Ts'o 		err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext);
1350a86c6181SAlex Tomas 		if (err)
1351a86c6181SAlex Tomas 			goto out;
1352a86c6181SAlex Tomas 
1353a86c6181SAlex Tomas 		/* refill path */
1354a86c6181SAlex Tomas 		ext4_ext_drop_refs(path);
1355a86c6181SAlex Tomas 		path = ext4_ext_find_extent(inode,
1356725d26d3SAneesh Kumar K.V 				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1357107a7bd3STheodore Ts'o 				    path, gb_flags);
1358a86c6181SAlex Tomas 		if (IS_ERR(path)) {
1359a86c6181SAlex Tomas 			err = PTR_ERR(path);
1360a86c6181SAlex Tomas 			goto out;
1361a86c6181SAlex Tomas 		}
1362a86c6181SAlex Tomas 
1363a86c6181SAlex Tomas 		/*
1364d0d856e8SRandy Dunlap 		 * only first (depth 0 -> 1) produces free space;
1365d0d856e8SRandy Dunlap 		 * in all other cases we have to split the grown tree
1366a86c6181SAlex Tomas 		 */
1367a86c6181SAlex Tomas 		depth = ext_depth(inode);
1368a86c6181SAlex Tomas 		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1369d0d856e8SRandy Dunlap 			/* now we need to split */
1370a86c6181SAlex Tomas 			goto repeat;
1371a86c6181SAlex Tomas 		}
1372a86c6181SAlex Tomas 	}
1373a86c6181SAlex Tomas 
1374a86c6181SAlex Tomas out:
1375a86c6181SAlex Tomas 	return err;
1376a86c6181SAlex Tomas }
1377a86c6181SAlex Tomas 
1378a86c6181SAlex Tomas /*
13791988b51eSAlex Tomas  * search the closest allocated block to the left for *logical
13801988b51eSAlex Tomas  * and returns it at @logical + it's physical address at @phys
13811988b51eSAlex Tomas  * if *logical is the smallest allocated block, the function
13821988b51eSAlex Tomas  * returns 0 at @phys
13831988b51eSAlex Tomas  * return value contains 0 (success) or error code
13841988b51eSAlex Tomas  */
13851f109d5aSTheodore Ts'o static int ext4_ext_search_left(struct inode *inode,
13861f109d5aSTheodore Ts'o 				struct ext4_ext_path *path,
13871988b51eSAlex Tomas 				ext4_lblk_t *logical, ext4_fsblk_t *phys)
13881988b51eSAlex Tomas {
13891988b51eSAlex Tomas 	struct ext4_extent_idx *ix;
13901988b51eSAlex Tomas 	struct ext4_extent *ex;
1391b939e376SAneesh Kumar K.V 	int depth, ee_len;
13921988b51eSAlex Tomas 
1393273df556SFrank Mayhar 	if (unlikely(path == NULL)) {
1394273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1395273df556SFrank Mayhar 		return -EIO;
1396273df556SFrank Mayhar 	}
13971988b51eSAlex Tomas 	depth = path->p_depth;
13981988b51eSAlex Tomas 	*phys = 0;
13991988b51eSAlex Tomas 
14001988b51eSAlex Tomas 	if (depth == 0 && path->p_ext == NULL)
14011988b51eSAlex Tomas 		return 0;
14021988b51eSAlex Tomas 
14031988b51eSAlex Tomas 	/* usually extent in the path covers blocks smaller
14041988b51eSAlex Tomas 	 * then *logical, but it can be that extent is the
14051988b51eSAlex Tomas 	 * first one in the file */
14061988b51eSAlex Tomas 
14071988b51eSAlex Tomas 	ex = path[depth].p_ext;
1408b939e376SAneesh Kumar K.V 	ee_len = ext4_ext_get_actual_len(ex);
14091988b51eSAlex Tomas 	if (*logical < le32_to_cpu(ex->ee_block)) {
1410273df556SFrank Mayhar 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1411273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
1412273df556SFrank Mayhar 					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1413273df556SFrank Mayhar 					 *logical, le32_to_cpu(ex->ee_block));
1414273df556SFrank Mayhar 			return -EIO;
1415273df556SFrank Mayhar 		}
14161988b51eSAlex Tomas 		while (--depth >= 0) {
14171988b51eSAlex Tomas 			ix = path[depth].p_idx;
1418273df556SFrank Mayhar 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1419273df556SFrank Mayhar 				EXT4_ERROR_INODE(inode,
1420273df556SFrank Mayhar 				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
14216ee3b212STao Ma 				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1422273df556SFrank Mayhar 				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
14236ee3b212STao Ma 		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1424273df556SFrank Mayhar 				  depth);
1425273df556SFrank Mayhar 				return -EIO;
1426273df556SFrank Mayhar 			}
14271988b51eSAlex Tomas 		}
14281988b51eSAlex Tomas 		return 0;
14291988b51eSAlex Tomas 	}
14301988b51eSAlex Tomas 
1431273df556SFrank Mayhar 	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1432273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode,
1433273df556SFrank Mayhar 				 "logical %d < ee_block %d + ee_len %d!",
1434273df556SFrank Mayhar 				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1435273df556SFrank Mayhar 		return -EIO;
1436273df556SFrank Mayhar 	}
14371988b51eSAlex Tomas 
1438b939e376SAneesh Kumar K.V 	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1439bf89d16fSTheodore Ts'o 	*phys = ext4_ext_pblock(ex) + ee_len - 1;
14401988b51eSAlex Tomas 	return 0;
14411988b51eSAlex Tomas }
14421988b51eSAlex Tomas 
14431988b51eSAlex Tomas /*
14441988b51eSAlex Tomas  * search the closest allocated block to the right for *logical
14451988b51eSAlex Tomas  * and returns it at @logical + it's physical address at @phys
1446df3ab170STao Ma  * if *logical is the largest allocated block, the function
14471988b51eSAlex Tomas  * returns 0 at @phys
14481988b51eSAlex Tomas  * return value contains 0 (success) or error code
14491988b51eSAlex Tomas  */
14501f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode,
14511f109d5aSTheodore Ts'o 				 struct ext4_ext_path *path,
14524d33b1efSTheodore Ts'o 				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
14534d33b1efSTheodore Ts'o 				 struct ext4_extent **ret_ex)
14541988b51eSAlex Tomas {
14551988b51eSAlex Tomas 	struct buffer_head *bh = NULL;
14561988b51eSAlex Tomas 	struct ext4_extent_header *eh;
14571988b51eSAlex Tomas 	struct ext4_extent_idx *ix;
14581988b51eSAlex Tomas 	struct ext4_extent *ex;
14591988b51eSAlex Tomas 	ext4_fsblk_t block;
1460395a87bfSEric Sandeen 	int depth;	/* Note, NOT eh_depth; depth from top of tree */
1461395a87bfSEric Sandeen 	int ee_len;
14621988b51eSAlex Tomas 
1463273df556SFrank Mayhar 	if (unlikely(path == NULL)) {
1464273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1465273df556SFrank Mayhar 		return -EIO;
1466273df556SFrank Mayhar 	}
14671988b51eSAlex Tomas 	depth = path->p_depth;
14681988b51eSAlex Tomas 	*phys = 0;
14691988b51eSAlex Tomas 
14701988b51eSAlex Tomas 	if (depth == 0 && path->p_ext == NULL)
14711988b51eSAlex Tomas 		return 0;
14721988b51eSAlex Tomas 
14731988b51eSAlex Tomas 	/* usually extent in the path covers blocks smaller
14741988b51eSAlex Tomas 	 * then *logical, but it can be that extent is the
14751988b51eSAlex Tomas 	 * first one in the file */
14761988b51eSAlex Tomas 
14771988b51eSAlex Tomas 	ex = path[depth].p_ext;
1478b939e376SAneesh Kumar K.V 	ee_len = ext4_ext_get_actual_len(ex);
14791988b51eSAlex Tomas 	if (*logical < le32_to_cpu(ex->ee_block)) {
1480273df556SFrank Mayhar 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1481273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
1482273df556SFrank Mayhar 					 "first_extent(path[%d].p_hdr) != ex",
1483273df556SFrank Mayhar 					 depth);
1484273df556SFrank Mayhar 			return -EIO;
1485273df556SFrank Mayhar 		}
14861988b51eSAlex Tomas 		while (--depth >= 0) {
14871988b51eSAlex Tomas 			ix = path[depth].p_idx;
1488273df556SFrank Mayhar 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1489273df556SFrank Mayhar 				EXT4_ERROR_INODE(inode,
1490273df556SFrank Mayhar 						 "ix != EXT_FIRST_INDEX *logical %d!",
1491273df556SFrank Mayhar 						 *logical);
1492273df556SFrank Mayhar 				return -EIO;
1493273df556SFrank Mayhar 			}
14941988b51eSAlex Tomas 		}
14954d33b1efSTheodore Ts'o 		goto found_extent;
14961988b51eSAlex Tomas 	}
14971988b51eSAlex Tomas 
1498273df556SFrank Mayhar 	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1499273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode,
1500273df556SFrank Mayhar 				 "logical %d < ee_block %d + ee_len %d!",
1501273df556SFrank Mayhar 				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1502273df556SFrank Mayhar 		return -EIO;
1503273df556SFrank Mayhar 	}
15041988b51eSAlex Tomas 
15051988b51eSAlex Tomas 	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
15061988b51eSAlex Tomas 		/* next allocated block in this leaf */
15071988b51eSAlex Tomas 		ex++;
15084d33b1efSTheodore Ts'o 		goto found_extent;
15091988b51eSAlex Tomas 	}
15101988b51eSAlex Tomas 
15111988b51eSAlex Tomas 	/* go up and search for index to the right */
15121988b51eSAlex Tomas 	while (--depth >= 0) {
15131988b51eSAlex Tomas 		ix = path[depth].p_idx;
15141988b51eSAlex Tomas 		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
151525f1ee3aSWu Fengguang 			goto got_index;
15161988b51eSAlex Tomas 	}
15171988b51eSAlex Tomas 
151825f1ee3aSWu Fengguang 	/* we've gone up to the root and found no index to the right */
15191988b51eSAlex Tomas 	return 0;
15201988b51eSAlex Tomas 
152125f1ee3aSWu Fengguang got_index:
15221988b51eSAlex Tomas 	/* we've found index to the right, let's
15231988b51eSAlex Tomas 	 * follow it and find the closest allocated
15241988b51eSAlex Tomas 	 * block to the right */
15251988b51eSAlex Tomas 	ix++;
1526bf89d16fSTheodore Ts'o 	block = ext4_idx_pblock(ix);
15271988b51eSAlex Tomas 	while (++depth < path->p_depth) {
1528395a87bfSEric Sandeen 		/* subtract from p_depth to get proper eh_depth */
15297d7ea89eSTheodore Ts'o 		bh = read_extent_tree_block(inode, block,
1530107a7bd3STheodore Ts'o 					    path->p_depth - depth, 0);
15317d7ea89eSTheodore Ts'o 		if (IS_ERR(bh))
15327d7ea89eSTheodore Ts'o 			return PTR_ERR(bh);
15337d7ea89eSTheodore Ts'o 		eh = ext_block_hdr(bh);
15341988b51eSAlex Tomas 		ix = EXT_FIRST_INDEX(eh);
1535bf89d16fSTheodore Ts'o 		block = ext4_idx_pblock(ix);
15361988b51eSAlex Tomas 		put_bh(bh);
15371988b51eSAlex Tomas 	}
15381988b51eSAlex Tomas 
1539107a7bd3STheodore Ts'o 	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
15407d7ea89eSTheodore Ts'o 	if (IS_ERR(bh))
15417d7ea89eSTheodore Ts'o 		return PTR_ERR(bh);
15421988b51eSAlex Tomas 	eh = ext_block_hdr(bh);
15431988b51eSAlex Tomas 	ex = EXT_FIRST_EXTENT(eh);
15444d33b1efSTheodore Ts'o found_extent:
15451988b51eSAlex Tomas 	*logical = le32_to_cpu(ex->ee_block);
1546bf89d16fSTheodore Ts'o 	*phys = ext4_ext_pblock(ex);
15474d33b1efSTheodore Ts'o 	*ret_ex = ex;
15484d33b1efSTheodore Ts'o 	if (bh)
15491988b51eSAlex Tomas 		put_bh(bh);
15501988b51eSAlex Tomas 	return 0;
15511988b51eSAlex Tomas }
15521988b51eSAlex Tomas 
15531988b51eSAlex Tomas /*
1554d0d856e8SRandy Dunlap  * ext4_ext_next_allocated_block:
1555f17722f9SLukas Czerner  * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1556d0d856e8SRandy Dunlap  * NOTE: it considers block number from index entry as
1557d0d856e8SRandy Dunlap  * allocated block. Thus, index entries have to be consistent
1558d0d856e8SRandy Dunlap  * with leaves.
1559a86c6181SAlex Tomas  */
1560725d26d3SAneesh Kumar K.V static ext4_lblk_t
1561a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1562a86c6181SAlex Tomas {
1563a86c6181SAlex Tomas 	int depth;
1564a86c6181SAlex Tomas 
1565a86c6181SAlex Tomas 	BUG_ON(path == NULL);
1566a86c6181SAlex Tomas 	depth = path->p_depth;
1567a86c6181SAlex Tomas 
1568a86c6181SAlex Tomas 	if (depth == 0 && path->p_ext == NULL)
1569f17722f9SLukas Czerner 		return EXT_MAX_BLOCKS;
1570a86c6181SAlex Tomas 
1571a86c6181SAlex Tomas 	while (depth >= 0) {
1572a86c6181SAlex Tomas 		if (depth == path->p_depth) {
1573a86c6181SAlex Tomas 			/* leaf */
15746f8ff537SCurt Wohlgemuth 			if (path[depth].p_ext &&
15756f8ff537SCurt Wohlgemuth 				path[depth].p_ext !=
1576a86c6181SAlex Tomas 					EXT_LAST_EXTENT(path[depth].p_hdr))
1577a86c6181SAlex Tomas 			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
1578a86c6181SAlex Tomas 		} else {
1579a86c6181SAlex Tomas 			/* index */
1580a86c6181SAlex Tomas 			if (path[depth].p_idx !=
1581a86c6181SAlex Tomas 					EXT_LAST_INDEX(path[depth].p_hdr))
1582a86c6181SAlex Tomas 			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
1583a86c6181SAlex Tomas 		}
1584a86c6181SAlex Tomas 		depth--;
1585a86c6181SAlex Tomas 	}
1586a86c6181SAlex Tomas 
1587f17722f9SLukas Czerner 	return EXT_MAX_BLOCKS;
1588a86c6181SAlex Tomas }
1589a86c6181SAlex Tomas 
1590a86c6181SAlex Tomas /*
1591d0d856e8SRandy Dunlap  * ext4_ext_next_leaf_block:
1592f17722f9SLukas Czerner  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1593a86c6181SAlex Tomas  */
15945718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1595a86c6181SAlex Tomas {
1596a86c6181SAlex Tomas 	int depth;
1597a86c6181SAlex Tomas 
1598a86c6181SAlex Tomas 	BUG_ON(path == NULL);
1599a86c6181SAlex Tomas 	depth = path->p_depth;
1600a86c6181SAlex Tomas 
1601a86c6181SAlex Tomas 	/* zero-tree has no leaf blocks at all */
1602a86c6181SAlex Tomas 	if (depth == 0)
1603f17722f9SLukas Czerner 		return EXT_MAX_BLOCKS;
1604a86c6181SAlex Tomas 
1605a86c6181SAlex Tomas 	/* go to index block */
1606a86c6181SAlex Tomas 	depth--;
1607a86c6181SAlex Tomas 
1608a86c6181SAlex Tomas 	while (depth >= 0) {
1609a86c6181SAlex Tomas 		if (path[depth].p_idx !=
1610a86c6181SAlex Tomas 				EXT_LAST_INDEX(path[depth].p_hdr))
1611725d26d3SAneesh Kumar K.V 			return (ext4_lblk_t)
1612725d26d3SAneesh Kumar K.V 				le32_to_cpu(path[depth].p_idx[1].ei_block);
1613a86c6181SAlex Tomas 		depth--;
1614a86c6181SAlex Tomas 	}
1615a86c6181SAlex Tomas 
1616f17722f9SLukas Czerner 	return EXT_MAX_BLOCKS;
1617a86c6181SAlex Tomas }
1618a86c6181SAlex Tomas 
1619a86c6181SAlex Tomas /*
1620d0d856e8SRandy Dunlap  * ext4_ext_correct_indexes:
1621d0d856e8SRandy Dunlap  * if leaf gets modified and modified extent is first in the leaf,
1622d0d856e8SRandy Dunlap  * then we have to correct all indexes above.
1623a86c6181SAlex Tomas  * TODO: do we need to correct tree in all cases?
1624a86c6181SAlex Tomas  */
16251d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1626a86c6181SAlex Tomas 				struct ext4_ext_path *path)
1627a86c6181SAlex Tomas {
1628a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
1629a86c6181SAlex Tomas 	int depth = ext_depth(inode);
1630a86c6181SAlex Tomas 	struct ext4_extent *ex;
1631a86c6181SAlex Tomas 	__le32 border;
1632a86c6181SAlex Tomas 	int k, err = 0;
1633a86c6181SAlex Tomas 
1634a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
1635a86c6181SAlex Tomas 	ex = path[depth].p_ext;
1636273df556SFrank Mayhar 
1637273df556SFrank Mayhar 	if (unlikely(ex == NULL || eh == NULL)) {
1638273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode,
1639273df556SFrank Mayhar 				 "ex %p == NULL or eh %p == NULL", ex, eh);
1640273df556SFrank Mayhar 		return -EIO;
1641273df556SFrank Mayhar 	}
1642a86c6181SAlex Tomas 
1643a86c6181SAlex Tomas 	if (depth == 0) {
1644a86c6181SAlex Tomas 		/* there is no tree at all */
1645a86c6181SAlex Tomas 		return 0;
1646a86c6181SAlex Tomas 	}
1647a86c6181SAlex Tomas 
1648a86c6181SAlex Tomas 	if (ex != EXT_FIRST_EXTENT(eh)) {
1649a86c6181SAlex Tomas 		/* we correct tree if first leaf got modified only */
1650a86c6181SAlex Tomas 		return 0;
1651a86c6181SAlex Tomas 	}
1652a86c6181SAlex Tomas 
1653a86c6181SAlex Tomas 	/*
1654d0d856e8SRandy Dunlap 	 * TODO: we need correction if border is smaller than current one
1655a86c6181SAlex Tomas 	 */
1656a86c6181SAlex Tomas 	k = depth - 1;
1657a86c6181SAlex Tomas 	border = path[depth].p_ext->ee_block;
16587e028976SAvantika Mathur 	err = ext4_ext_get_access(handle, inode, path + k);
16597e028976SAvantika Mathur 	if (err)
1660a86c6181SAlex Tomas 		return err;
1661a86c6181SAlex Tomas 	path[k].p_idx->ei_block = border;
16627e028976SAvantika Mathur 	err = ext4_ext_dirty(handle, inode, path + k);
16637e028976SAvantika Mathur 	if (err)
1664a86c6181SAlex Tomas 		return err;
1665a86c6181SAlex Tomas 
1666a86c6181SAlex Tomas 	while (k--) {
1667a86c6181SAlex Tomas 		/* change all left-side indexes */
1668a86c6181SAlex Tomas 		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1669a86c6181SAlex Tomas 			break;
16707e028976SAvantika Mathur 		err = ext4_ext_get_access(handle, inode, path + k);
16717e028976SAvantika Mathur 		if (err)
1672a86c6181SAlex Tomas 			break;
1673a86c6181SAlex Tomas 		path[k].p_idx->ei_block = border;
16747e028976SAvantika Mathur 		err = ext4_ext_dirty(handle, inode, path + k);
16757e028976SAvantika Mathur 		if (err)
1676a86c6181SAlex Tomas 			break;
1677a86c6181SAlex Tomas 	}
1678a86c6181SAlex Tomas 
1679a86c6181SAlex Tomas 	return err;
1680a86c6181SAlex Tomas }
1681a86c6181SAlex Tomas 
1682748de673SAkira Fujita int
1683a86c6181SAlex Tomas ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1684a86c6181SAlex Tomas 				struct ext4_extent *ex2)
1685a86c6181SAlex Tomas {
1686da0169b3SEric Sandeen 	unsigned short ext1_ee_len, ext2_ee_len;
1687a2df2a63SAmit Arora 
1688a2df2a63SAmit Arora 	/*
1689ec22ba8eSDmitry Monakhov 	 * Make sure that both extents are initialized. We don't merge
1690ec22ba8eSDmitry Monakhov 	 * uninitialized extents so that we can be sure that end_io code has
1691ec22ba8eSDmitry Monakhov 	 * the extent that was written properly split out and conversion to
1692ec22ba8eSDmitry Monakhov 	 * initialized is trivial.
1693a2df2a63SAmit Arora 	 */
1694*a9b82415SDarrick J. Wong 	if (ext4_ext_is_uninitialized(ex1) != ext4_ext_is_uninitialized(ex2))
1695a2df2a63SAmit Arora 		return 0;
1696a2df2a63SAmit Arora 
1697a2df2a63SAmit Arora 	ext1_ee_len = ext4_ext_get_actual_len(ex1);
1698a2df2a63SAmit Arora 	ext2_ee_len = ext4_ext_get_actual_len(ex2);
1699a2df2a63SAmit Arora 
1700a2df2a63SAmit Arora 	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
170163f57933SAndrew Morton 			le32_to_cpu(ex2->ee_block))
1702a86c6181SAlex Tomas 		return 0;
1703a86c6181SAlex Tomas 
1704471d4011SSuparna Bhattacharya 	/*
1705471d4011SSuparna Bhattacharya 	 * To allow future support for preallocated extents to be added
1706471d4011SSuparna Bhattacharya 	 * as an RO_COMPAT feature, refuse to merge to extents if
1707d0d856e8SRandy Dunlap 	 * this can result in the top bit of ee_len being set.
1708471d4011SSuparna Bhattacharya 	 */
1709da0169b3SEric Sandeen 	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1710471d4011SSuparna Bhattacharya 		return 0;
1711*a9b82415SDarrick J. Wong 	if (ext4_ext_is_uninitialized(ex1) &&
1712*a9b82415SDarrick J. Wong 	    (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
1713*a9b82415SDarrick J. Wong 	     atomic_read(&EXT4_I(inode)->i_unwritten) ||
1714*a9b82415SDarrick J. Wong 	     (ext1_ee_len + ext2_ee_len > EXT_UNINIT_MAX_LEN)))
1715*a9b82415SDarrick J. Wong 		return 0;
1716bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
1717b939e376SAneesh Kumar K.V 	if (ext1_ee_len >= 4)
1718a86c6181SAlex Tomas 		return 0;
1719a86c6181SAlex Tomas #endif
1720a86c6181SAlex Tomas 
1721bf89d16fSTheodore Ts'o 	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1722a86c6181SAlex Tomas 		return 1;
1723a86c6181SAlex Tomas 	return 0;
1724a86c6181SAlex Tomas }
1725a86c6181SAlex Tomas 
1726a86c6181SAlex Tomas /*
172756055d3aSAmit Arora  * This function tries to merge the "ex" extent to the next extent in the tree.
172856055d3aSAmit Arora  * It always tries to merge towards right. If you want to merge towards
172956055d3aSAmit Arora  * left, pass "ex - 1" as argument instead of "ex".
173056055d3aSAmit Arora  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
173156055d3aSAmit Arora  * 1 if they got merged.
173256055d3aSAmit Arora  */
1733197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode,
173456055d3aSAmit Arora 				 struct ext4_ext_path *path,
173556055d3aSAmit Arora 				 struct ext4_extent *ex)
173656055d3aSAmit Arora {
173756055d3aSAmit Arora 	struct ext4_extent_header *eh;
173856055d3aSAmit Arora 	unsigned int depth, len;
1739*a9b82415SDarrick J. Wong 	int merge_done = 0, uninit;
174056055d3aSAmit Arora 
174156055d3aSAmit Arora 	depth = ext_depth(inode);
174256055d3aSAmit Arora 	BUG_ON(path[depth].p_hdr == NULL);
174356055d3aSAmit Arora 	eh = path[depth].p_hdr;
174456055d3aSAmit Arora 
174556055d3aSAmit Arora 	while (ex < EXT_LAST_EXTENT(eh)) {
174656055d3aSAmit Arora 		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
174756055d3aSAmit Arora 			break;
174856055d3aSAmit Arora 		/* merge with next extent! */
1749*a9b82415SDarrick J. Wong 		uninit = ext4_ext_is_uninitialized(ex);
175056055d3aSAmit Arora 		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
175156055d3aSAmit Arora 				+ ext4_ext_get_actual_len(ex + 1));
1752*a9b82415SDarrick J. Wong 		if (uninit)
1753*a9b82415SDarrick J. Wong 			ext4_ext_mark_uninitialized(ex);
175456055d3aSAmit Arora 
175556055d3aSAmit Arora 		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
175656055d3aSAmit Arora 			len = (EXT_LAST_EXTENT(eh) - ex - 1)
175756055d3aSAmit Arora 				* sizeof(struct ext4_extent);
175856055d3aSAmit Arora 			memmove(ex + 1, ex + 2, len);
175956055d3aSAmit Arora 		}
1760e8546d06SMarcin Slusarz 		le16_add_cpu(&eh->eh_entries, -1);
176156055d3aSAmit Arora 		merge_done = 1;
176256055d3aSAmit Arora 		WARN_ON(eh->eh_entries == 0);
176356055d3aSAmit Arora 		if (!eh->eh_entries)
176424676da4STheodore Ts'o 			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
176556055d3aSAmit Arora 	}
176656055d3aSAmit Arora 
176756055d3aSAmit Arora 	return merge_done;
176856055d3aSAmit Arora }
176956055d3aSAmit Arora 
177056055d3aSAmit Arora /*
1771ecb94f5fSTheodore Ts'o  * This function does a very simple check to see if we can collapse
1772ecb94f5fSTheodore Ts'o  * an extent tree with a single extent tree leaf block into the inode.
1773ecb94f5fSTheodore Ts'o  */
1774ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle,
1775ecb94f5fSTheodore Ts'o 				     struct inode *inode,
1776ecb94f5fSTheodore Ts'o 				     struct ext4_ext_path *path)
1777ecb94f5fSTheodore Ts'o {
1778ecb94f5fSTheodore Ts'o 	size_t s;
1779ecb94f5fSTheodore Ts'o 	unsigned max_root = ext4_ext_space_root(inode, 0);
1780ecb94f5fSTheodore Ts'o 	ext4_fsblk_t blk;
1781ecb94f5fSTheodore Ts'o 
1782ecb94f5fSTheodore Ts'o 	if ((path[0].p_depth != 1) ||
1783ecb94f5fSTheodore Ts'o 	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1784ecb94f5fSTheodore Ts'o 	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1785ecb94f5fSTheodore Ts'o 		return;
1786ecb94f5fSTheodore Ts'o 
1787ecb94f5fSTheodore Ts'o 	/*
1788ecb94f5fSTheodore Ts'o 	 * We need to modify the block allocation bitmap and the block
1789ecb94f5fSTheodore Ts'o 	 * group descriptor to release the extent tree block.  If we
1790ecb94f5fSTheodore Ts'o 	 * can't get the journal credits, give up.
1791ecb94f5fSTheodore Ts'o 	 */
1792ecb94f5fSTheodore Ts'o 	if (ext4_journal_extend(handle, 2))
1793ecb94f5fSTheodore Ts'o 		return;
1794ecb94f5fSTheodore Ts'o 
1795ecb94f5fSTheodore Ts'o 	/*
1796ecb94f5fSTheodore Ts'o 	 * Copy the extent data up to the inode
1797ecb94f5fSTheodore Ts'o 	 */
1798ecb94f5fSTheodore Ts'o 	blk = ext4_idx_pblock(path[0].p_idx);
1799ecb94f5fSTheodore Ts'o 	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1800ecb94f5fSTheodore Ts'o 		sizeof(struct ext4_extent_idx);
1801ecb94f5fSTheodore Ts'o 	s += sizeof(struct ext4_extent_header);
1802ecb94f5fSTheodore Ts'o 
1803ecb94f5fSTheodore Ts'o 	memcpy(path[0].p_hdr, path[1].p_hdr, s);
1804ecb94f5fSTheodore Ts'o 	path[0].p_depth = 0;
1805ecb94f5fSTheodore Ts'o 	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1806ecb94f5fSTheodore Ts'o 		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1807ecb94f5fSTheodore Ts'o 	path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1808ecb94f5fSTheodore Ts'o 
1809ecb94f5fSTheodore Ts'o 	brelse(path[1].p_bh);
1810ecb94f5fSTheodore Ts'o 	ext4_free_blocks(handle, inode, NULL, blk, 1,
18117d734532SJan Kara 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
18127d734532SJan Kara 			 EXT4_FREE_BLOCKS_RESERVE);
1813ecb94f5fSTheodore Ts'o }
1814ecb94f5fSTheodore Ts'o 
1815ecb94f5fSTheodore Ts'o /*
1816197217a5SYongqiang Yang  * This function tries to merge the @ex extent to neighbours in the tree.
1817197217a5SYongqiang Yang  * return 1 if merge left else 0.
1818197217a5SYongqiang Yang  */
1819ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle,
1820ecb94f5fSTheodore Ts'o 				  struct inode *inode,
1821197217a5SYongqiang Yang 				  struct ext4_ext_path *path,
1822197217a5SYongqiang Yang 				  struct ext4_extent *ex) {
1823197217a5SYongqiang Yang 	struct ext4_extent_header *eh;
1824197217a5SYongqiang Yang 	unsigned int depth;
1825197217a5SYongqiang Yang 	int merge_done = 0;
1826197217a5SYongqiang Yang 
1827197217a5SYongqiang Yang 	depth = ext_depth(inode);
1828197217a5SYongqiang Yang 	BUG_ON(path[depth].p_hdr == NULL);
1829197217a5SYongqiang Yang 	eh = path[depth].p_hdr;
1830197217a5SYongqiang Yang 
1831197217a5SYongqiang Yang 	if (ex > EXT_FIRST_EXTENT(eh))
1832197217a5SYongqiang Yang 		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1833197217a5SYongqiang Yang 
1834197217a5SYongqiang Yang 	if (!merge_done)
1835ecb94f5fSTheodore Ts'o 		(void) ext4_ext_try_to_merge_right(inode, path, ex);
1836197217a5SYongqiang Yang 
1837ecb94f5fSTheodore Ts'o 	ext4_ext_try_to_merge_up(handle, inode, path);
1838197217a5SYongqiang Yang }
1839197217a5SYongqiang Yang 
1840197217a5SYongqiang Yang /*
184125d14f98SAmit Arora  * check if a portion of the "newext" extent overlaps with an
184225d14f98SAmit Arora  * existing extent.
184325d14f98SAmit Arora  *
184425d14f98SAmit Arora  * If there is an overlap discovered, it updates the length of the newext
184525d14f98SAmit Arora  * such that there will be no overlap, and then returns 1.
184625d14f98SAmit Arora  * If there is no overlap found, it returns 0.
184725d14f98SAmit Arora  */
18484d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
18494d33b1efSTheodore Ts'o 					   struct inode *inode,
185025d14f98SAmit Arora 					   struct ext4_extent *newext,
185125d14f98SAmit Arora 					   struct ext4_ext_path *path)
185225d14f98SAmit Arora {
1853725d26d3SAneesh Kumar K.V 	ext4_lblk_t b1, b2;
185425d14f98SAmit Arora 	unsigned int depth, len1;
185525d14f98SAmit Arora 	unsigned int ret = 0;
185625d14f98SAmit Arora 
185725d14f98SAmit Arora 	b1 = le32_to_cpu(newext->ee_block);
1858a2df2a63SAmit Arora 	len1 = ext4_ext_get_actual_len(newext);
185925d14f98SAmit Arora 	depth = ext_depth(inode);
186025d14f98SAmit Arora 	if (!path[depth].p_ext)
186125d14f98SAmit Arora 		goto out;
1862f5a44db5STheodore Ts'o 	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
186325d14f98SAmit Arora 
186425d14f98SAmit Arora 	/*
186525d14f98SAmit Arora 	 * get the next allocated block if the extent in the path
186625d14f98SAmit Arora 	 * is before the requested block(s)
186725d14f98SAmit Arora 	 */
186825d14f98SAmit Arora 	if (b2 < b1) {
186925d14f98SAmit Arora 		b2 = ext4_ext_next_allocated_block(path);
1870f17722f9SLukas Czerner 		if (b2 == EXT_MAX_BLOCKS)
187125d14f98SAmit Arora 			goto out;
1872f5a44db5STheodore Ts'o 		b2 = EXT4_LBLK_CMASK(sbi, b2);
187325d14f98SAmit Arora 	}
187425d14f98SAmit Arora 
1875725d26d3SAneesh Kumar K.V 	/* check for wrap through zero on extent logical start block*/
187625d14f98SAmit Arora 	if (b1 + len1 < b1) {
1877f17722f9SLukas Czerner 		len1 = EXT_MAX_BLOCKS - b1;
187825d14f98SAmit Arora 		newext->ee_len = cpu_to_le16(len1);
187925d14f98SAmit Arora 		ret = 1;
188025d14f98SAmit Arora 	}
188125d14f98SAmit Arora 
188225d14f98SAmit Arora 	/* check for overlap */
188325d14f98SAmit Arora 	if (b1 + len1 > b2) {
188425d14f98SAmit Arora 		newext->ee_len = cpu_to_le16(b2 - b1);
188525d14f98SAmit Arora 		ret = 1;
188625d14f98SAmit Arora 	}
188725d14f98SAmit Arora out:
188825d14f98SAmit Arora 	return ret;
188925d14f98SAmit Arora }
189025d14f98SAmit Arora 
189125d14f98SAmit Arora /*
1892d0d856e8SRandy Dunlap  * ext4_ext_insert_extent:
1893d0d856e8SRandy Dunlap  * tries to merge requsted extent into the existing extent or
1894d0d856e8SRandy Dunlap  * inserts requested extent as new one into the tree,
1895d0d856e8SRandy Dunlap  * creating new leaf in the no-space case.
1896a86c6181SAlex Tomas  */
1897a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1898a86c6181SAlex Tomas 				struct ext4_ext_path *path,
1899107a7bd3STheodore Ts'o 				struct ext4_extent *newext, int gb_flags)
1900a86c6181SAlex Tomas {
1901a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
1902a86c6181SAlex Tomas 	struct ext4_extent *ex, *fex;
1903a86c6181SAlex Tomas 	struct ext4_extent *nearex; /* nearest extent */
1904a86c6181SAlex Tomas 	struct ext4_ext_path *npath = NULL;
1905725d26d3SAneesh Kumar K.V 	int depth, len, err;
1906725d26d3SAneesh Kumar K.V 	ext4_lblk_t next;
1907*a9b82415SDarrick J. Wong 	int mb_flags = 0, uninit;
1908a86c6181SAlex Tomas 
1909273df556SFrank Mayhar 	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1910273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1911273df556SFrank Mayhar 		return -EIO;
1912273df556SFrank Mayhar 	}
1913a86c6181SAlex Tomas 	depth = ext_depth(inode);
1914a86c6181SAlex Tomas 	ex = path[depth].p_ext;
1915be8981beSLukas Czerner 	eh = path[depth].p_hdr;
1916273df556SFrank Mayhar 	if (unlikely(path[depth].p_hdr == NULL)) {
1917273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1918273df556SFrank Mayhar 		return -EIO;
1919273df556SFrank Mayhar 	}
1920a86c6181SAlex Tomas 
1921a86c6181SAlex Tomas 	/* try to insert block into found extent and return */
1922107a7bd3STheodore Ts'o 	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1923be8981beSLukas Czerner 
1924be8981beSLukas Czerner 		/*
1925be8981beSLukas Czerner 		 * Try to see whether we should rather test the extent on
1926be8981beSLukas Czerner 		 * right from ex, or from the left of ex. This is because
1927be8981beSLukas Czerner 		 * ext4_ext_find_extent() can return either extent on the
1928be8981beSLukas Czerner 		 * left, or on the right from the searched position. This
1929be8981beSLukas Czerner 		 * will make merging more effective.
1930be8981beSLukas Czerner 		 */
1931be8981beSLukas Czerner 		if (ex < EXT_LAST_EXTENT(eh) &&
1932be8981beSLukas Czerner 		    (le32_to_cpu(ex->ee_block) +
1933be8981beSLukas Czerner 		    ext4_ext_get_actual_len(ex) <
1934be8981beSLukas Czerner 		    le32_to_cpu(newext->ee_block))) {
1935be8981beSLukas Czerner 			ex += 1;
1936be8981beSLukas Czerner 			goto prepend;
1937be8981beSLukas Czerner 		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
1938be8981beSLukas Czerner 			   (le32_to_cpu(newext->ee_block) +
1939be8981beSLukas Czerner 			   ext4_ext_get_actual_len(newext) <
1940be8981beSLukas Czerner 			   le32_to_cpu(ex->ee_block)))
1941be8981beSLukas Czerner 			ex -= 1;
1942be8981beSLukas Czerner 
1943be8981beSLukas Czerner 		/* Try to append newex to the ex */
1944be8981beSLukas Czerner 		if (ext4_can_extents_be_merged(inode, ex, newext)) {
1945be8981beSLukas Czerner 			ext_debug("append [%d]%d block to %u:[%d]%d"
1946be8981beSLukas Czerner 				  "(from %llu)\n",
1947553f9008SMingming 				  ext4_ext_is_uninitialized(newext),
1948a2df2a63SAmit Arora 				  ext4_ext_get_actual_len(newext),
1949a86c6181SAlex Tomas 				  le32_to_cpu(ex->ee_block),
1950553f9008SMingming 				  ext4_ext_is_uninitialized(ex),
1951bf89d16fSTheodore Ts'o 				  ext4_ext_get_actual_len(ex),
1952bf89d16fSTheodore Ts'o 				  ext4_ext_pblock(ex));
1953be8981beSLukas Czerner 			err = ext4_ext_get_access(handle, inode,
1954be8981beSLukas Czerner 						  path + depth);
19557e028976SAvantika Mathur 			if (err)
1956a86c6181SAlex Tomas 				return err;
1957*a9b82415SDarrick J. Wong 			uninit = ext4_ext_is_uninitialized(ex);
1958a2df2a63SAmit Arora 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1959a2df2a63SAmit Arora 					+ ext4_ext_get_actual_len(newext));
1960*a9b82415SDarrick J. Wong 			if (uninit)
1961*a9b82415SDarrick J. Wong 				ext4_ext_mark_uninitialized(ex);
1962a86c6181SAlex Tomas 			eh = path[depth].p_hdr;
1963a86c6181SAlex Tomas 			nearex = ex;
1964a86c6181SAlex Tomas 			goto merge;
1965a86c6181SAlex Tomas 		}
1966a86c6181SAlex Tomas 
1967be8981beSLukas Czerner prepend:
1968be8981beSLukas Czerner 		/* Try to prepend newex to the ex */
1969be8981beSLukas Czerner 		if (ext4_can_extents_be_merged(inode, newext, ex)) {
1970be8981beSLukas Czerner 			ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
1971be8981beSLukas Czerner 				  "(from %llu)\n",
1972be8981beSLukas Czerner 				  le32_to_cpu(newext->ee_block),
1973be8981beSLukas Czerner 				  ext4_ext_is_uninitialized(newext),
1974be8981beSLukas Czerner 				  ext4_ext_get_actual_len(newext),
1975be8981beSLukas Czerner 				  le32_to_cpu(ex->ee_block),
1976be8981beSLukas Czerner 				  ext4_ext_is_uninitialized(ex),
1977be8981beSLukas Czerner 				  ext4_ext_get_actual_len(ex),
1978be8981beSLukas Czerner 				  ext4_ext_pblock(ex));
1979be8981beSLukas Czerner 			err = ext4_ext_get_access(handle, inode,
1980be8981beSLukas Czerner 						  path + depth);
1981be8981beSLukas Czerner 			if (err)
1982be8981beSLukas Czerner 				return err;
1983be8981beSLukas Czerner 
1984*a9b82415SDarrick J. Wong 			uninit = ext4_ext_is_uninitialized(ex);
1985be8981beSLukas Czerner 			ex->ee_block = newext->ee_block;
1986be8981beSLukas Czerner 			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
1987be8981beSLukas Czerner 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1988be8981beSLukas Czerner 					+ ext4_ext_get_actual_len(newext));
1989*a9b82415SDarrick J. Wong 			if (uninit)
1990*a9b82415SDarrick J. Wong 				ext4_ext_mark_uninitialized(ex);
1991be8981beSLukas Czerner 			eh = path[depth].p_hdr;
1992be8981beSLukas Czerner 			nearex = ex;
1993be8981beSLukas Czerner 			goto merge;
1994be8981beSLukas Czerner 		}
1995be8981beSLukas Czerner 	}
1996be8981beSLukas Czerner 
1997a86c6181SAlex Tomas 	depth = ext_depth(inode);
1998a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
1999a86c6181SAlex Tomas 	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2000a86c6181SAlex Tomas 		goto has_space;
2001a86c6181SAlex Tomas 
2002a86c6181SAlex Tomas 	/* probably next leaf has space for us? */
2003a86c6181SAlex Tomas 	fex = EXT_LAST_EXTENT(eh);
2004598dbdf2SRobin Dong 	next = EXT_MAX_BLOCKS;
2005598dbdf2SRobin Dong 	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
20065718789dSRobin Dong 		next = ext4_ext_next_leaf_block(path);
2007598dbdf2SRobin Dong 	if (next != EXT_MAX_BLOCKS) {
200832de6756SYongqiang Yang 		ext_debug("next leaf block - %u\n", next);
2009a86c6181SAlex Tomas 		BUG_ON(npath != NULL);
2010107a7bd3STheodore Ts'o 		npath = ext4_ext_find_extent(inode, next, NULL, 0);
2011a86c6181SAlex Tomas 		if (IS_ERR(npath))
2012a86c6181SAlex Tomas 			return PTR_ERR(npath);
2013a86c6181SAlex Tomas 		BUG_ON(npath->p_depth != path->p_depth);
2014a86c6181SAlex Tomas 		eh = npath[depth].p_hdr;
2015a86c6181SAlex Tomas 		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
201625985edcSLucas De Marchi 			ext_debug("next leaf isn't full(%d)\n",
2017a86c6181SAlex Tomas 				  le16_to_cpu(eh->eh_entries));
2018a86c6181SAlex Tomas 			path = npath;
2019ffb505ffSRobin Dong 			goto has_space;
2020a86c6181SAlex Tomas 		}
2021a86c6181SAlex Tomas 		ext_debug("next leaf has no free space(%d,%d)\n",
2022a86c6181SAlex Tomas 			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2023a86c6181SAlex Tomas 	}
2024a86c6181SAlex Tomas 
2025a86c6181SAlex Tomas 	/*
2026d0d856e8SRandy Dunlap 	 * There is no free space in the found leaf.
2027d0d856e8SRandy Dunlap 	 * We're gonna add a new leaf in the tree.
2028a86c6181SAlex Tomas 	 */
2029107a7bd3STheodore Ts'o 	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2030107a7bd3STheodore Ts'o 		mb_flags = EXT4_MB_USE_RESERVED;
2031107a7bd3STheodore Ts'o 	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2032107a7bd3STheodore Ts'o 				       path, newext);
2033a86c6181SAlex Tomas 	if (err)
2034a86c6181SAlex Tomas 		goto cleanup;
2035a86c6181SAlex Tomas 	depth = ext_depth(inode);
2036a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
2037a86c6181SAlex Tomas 
2038a86c6181SAlex Tomas has_space:
2039a86c6181SAlex Tomas 	nearex = path[depth].p_ext;
2040a86c6181SAlex Tomas 
20417e028976SAvantika Mathur 	err = ext4_ext_get_access(handle, inode, path + depth);
20427e028976SAvantika Mathur 	if (err)
2043a86c6181SAlex Tomas 		goto cleanup;
2044a86c6181SAlex Tomas 
2045a86c6181SAlex Tomas 	if (!nearex) {
2046a86c6181SAlex Tomas 		/* there is no extent in this leaf, create first one */
204732de6756SYongqiang Yang 		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
2048a86c6181SAlex Tomas 				le32_to_cpu(newext->ee_block),
2049bf89d16fSTheodore Ts'o 				ext4_ext_pblock(newext),
2050553f9008SMingming 				ext4_ext_is_uninitialized(newext),
2051a2df2a63SAmit Arora 				ext4_ext_get_actual_len(newext));
205280e675f9SEric Gouriou 		nearex = EXT_FIRST_EXTENT(eh);
2053a86c6181SAlex Tomas 	} else {
205480e675f9SEric Gouriou 		if (le32_to_cpu(newext->ee_block)
205580e675f9SEric Gouriou 			   > le32_to_cpu(nearex->ee_block)) {
205680e675f9SEric Gouriou 			/* Insert after */
205732de6756SYongqiang Yang 			ext_debug("insert %u:%llu:[%d]%d before: "
205832de6756SYongqiang Yang 					"nearest %p\n",
2059a86c6181SAlex Tomas 					le32_to_cpu(newext->ee_block),
2060bf89d16fSTheodore Ts'o 					ext4_ext_pblock(newext),
2061553f9008SMingming 					ext4_ext_is_uninitialized(newext),
2062a2df2a63SAmit Arora 					ext4_ext_get_actual_len(newext),
206380e675f9SEric Gouriou 					nearex);
206480e675f9SEric Gouriou 			nearex++;
206580e675f9SEric Gouriou 		} else {
206680e675f9SEric Gouriou 			/* Insert before */
206780e675f9SEric Gouriou 			BUG_ON(newext->ee_block == nearex->ee_block);
206832de6756SYongqiang Yang 			ext_debug("insert %u:%llu:[%d]%d after: "
206932de6756SYongqiang Yang 					"nearest %p\n",
207080e675f9SEric Gouriou 					le32_to_cpu(newext->ee_block),
207180e675f9SEric Gouriou 					ext4_ext_pblock(newext),
207280e675f9SEric Gouriou 					ext4_ext_is_uninitialized(newext),
207380e675f9SEric Gouriou 					ext4_ext_get_actual_len(newext),
207480e675f9SEric Gouriou 					nearex);
207580e675f9SEric Gouriou 		}
207680e675f9SEric Gouriou 		len = EXT_LAST_EXTENT(eh) - nearex + 1;
207780e675f9SEric Gouriou 		if (len > 0) {
207832de6756SYongqiang Yang 			ext_debug("insert %u:%llu:[%d]%d: "
207980e675f9SEric Gouriou 					"move %d extents from 0x%p to 0x%p\n",
208080e675f9SEric Gouriou 					le32_to_cpu(newext->ee_block),
208180e675f9SEric Gouriou 					ext4_ext_pblock(newext),
208280e675f9SEric Gouriou 					ext4_ext_is_uninitialized(newext),
208380e675f9SEric Gouriou 					ext4_ext_get_actual_len(newext),
208480e675f9SEric Gouriou 					len, nearex, nearex + 1);
208580e675f9SEric Gouriou 			memmove(nearex + 1, nearex,
208680e675f9SEric Gouriou 				len * sizeof(struct ext4_extent));
208780e675f9SEric Gouriou 		}
2088a86c6181SAlex Tomas 	}
2089a86c6181SAlex Tomas 
2090e8546d06SMarcin Slusarz 	le16_add_cpu(&eh->eh_entries, 1);
209180e675f9SEric Gouriou 	path[depth].p_ext = nearex;
2092a86c6181SAlex Tomas 	nearex->ee_block = newext->ee_block;
2093bf89d16fSTheodore Ts'o 	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2094a86c6181SAlex Tomas 	nearex->ee_len = newext->ee_len;
2095a86c6181SAlex Tomas 
2096a86c6181SAlex Tomas merge:
2097e7bcf823SHaiboLiu 	/* try to merge extents */
2098107a7bd3STheodore Ts'o 	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2099ecb94f5fSTheodore Ts'o 		ext4_ext_try_to_merge(handle, inode, path, nearex);
2100a86c6181SAlex Tomas 
2101a86c6181SAlex Tomas 
2102a86c6181SAlex Tomas 	/* time to correct all indexes above */
2103a86c6181SAlex Tomas 	err = ext4_ext_correct_indexes(handle, inode, path);
2104a86c6181SAlex Tomas 	if (err)
2105a86c6181SAlex Tomas 		goto cleanup;
2106a86c6181SAlex Tomas 
2107ecb94f5fSTheodore Ts'o 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2108a86c6181SAlex Tomas 
2109a86c6181SAlex Tomas cleanup:
2110a86c6181SAlex Tomas 	if (npath) {
2111a86c6181SAlex Tomas 		ext4_ext_drop_refs(npath);
2112a86c6181SAlex Tomas 		kfree(npath);
2113a86c6181SAlex Tomas 	}
2114a86c6181SAlex Tomas 	return err;
2115a86c6181SAlex Tomas }
2116a86c6181SAlex Tomas 
211791dd8c11SLukas Czerner static int ext4_fill_fiemap_extents(struct inode *inode,
211891dd8c11SLukas Czerner 				    ext4_lblk_t block, ext4_lblk_t num,
211991dd8c11SLukas Czerner 				    struct fiemap_extent_info *fieinfo)
21206873fa0dSEric Sandeen {
21216873fa0dSEric Sandeen 	struct ext4_ext_path *path = NULL;
21226873fa0dSEric Sandeen 	struct ext4_extent *ex;
212369eb33dcSZheng Liu 	struct extent_status es;
212491dd8c11SLukas Czerner 	ext4_lblk_t next, next_del, start = 0, end = 0;
21256873fa0dSEric Sandeen 	ext4_lblk_t last = block + num;
212691dd8c11SLukas Czerner 	int exists, depth = 0, err = 0;
212791dd8c11SLukas Czerner 	unsigned int flags = 0;
212891dd8c11SLukas Czerner 	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
21296873fa0dSEric Sandeen 
2130f17722f9SLukas Czerner 	while (block < last && block != EXT_MAX_BLOCKS) {
21316873fa0dSEric Sandeen 		num = last - block;
21326873fa0dSEric Sandeen 		/* find extent for this block */
2133fab3a549STheodore Ts'o 		down_read(&EXT4_I(inode)->i_data_sem);
213491dd8c11SLukas Czerner 
213591dd8c11SLukas Czerner 		if (path && ext_depth(inode) != depth) {
213691dd8c11SLukas Czerner 			/* depth was changed. we have to realloc path */
213791dd8c11SLukas Czerner 			kfree(path);
213891dd8c11SLukas Czerner 			path = NULL;
213991dd8c11SLukas Czerner 		}
214091dd8c11SLukas Czerner 
2141107a7bd3STheodore Ts'o 		path = ext4_ext_find_extent(inode, block, path, 0);
21426873fa0dSEric Sandeen 		if (IS_ERR(path)) {
214391dd8c11SLukas Czerner 			up_read(&EXT4_I(inode)->i_data_sem);
21446873fa0dSEric Sandeen 			err = PTR_ERR(path);
21456873fa0dSEric Sandeen 			path = NULL;
21466873fa0dSEric Sandeen 			break;
21476873fa0dSEric Sandeen 		}
21486873fa0dSEric Sandeen 
21496873fa0dSEric Sandeen 		depth = ext_depth(inode);
2150273df556SFrank Mayhar 		if (unlikely(path[depth].p_hdr == NULL)) {
215191dd8c11SLukas Czerner 			up_read(&EXT4_I(inode)->i_data_sem);
2152273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2153273df556SFrank Mayhar 			err = -EIO;
2154273df556SFrank Mayhar 			break;
2155273df556SFrank Mayhar 		}
21566873fa0dSEric Sandeen 		ex = path[depth].p_ext;
21576873fa0dSEric Sandeen 		next = ext4_ext_next_allocated_block(path);
215891dd8c11SLukas Czerner 		ext4_ext_drop_refs(path);
21596873fa0dSEric Sandeen 
216091dd8c11SLukas Czerner 		flags = 0;
21616873fa0dSEric Sandeen 		exists = 0;
21626873fa0dSEric Sandeen 		if (!ex) {
21636873fa0dSEric Sandeen 			/* there is no extent yet, so try to allocate
21646873fa0dSEric Sandeen 			 * all requested space */
21656873fa0dSEric Sandeen 			start = block;
21666873fa0dSEric Sandeen 			end = block + num;
21676873fa0dSEric Sandeen 		} else if (le32_to_cpu(ex->ee_block) > block) {
21686873fa0dSEric Sandeen 			/* need to allocate space before found extent */
21696873fa0dSEric Sandeen 			start = block;
21706873fa0dSEric Sandeen 			end = le32_to_cpu(ex->ee_block);
21716873fa0dSEric Sandeen 			if (block + num < end)
21726873fa0dSEric Sandeen 				end = block + num;
21736873fa0dSEric Sandeen 		} else if (block >= le32_to_cpu(ex->ee_block)
21746873fa0dSEric Sandeen 					+ ext4_ext_get_actual_len(ex)) {
21756873fa0dSEric Sandeen 			/* need to allocate space after found extent */
21766873fa0dSEric Sandeen 			start = block;
21776873fa0dSEric Sandeen 			end = block + num;
21786873fa0dSEric Sandeen 			if (end >= next)
21796873fa0dSEric Sandeen 				end = next;
21806873fa0dSEric Sandeen 		} else if (block >= le32_to_cpu(ex->ee_block)) {
21816873fa0dSEric Sandeen 			/*
21826873fa0dSEric Sandeen 			 * some part of requested space is covered
21836873fa0dSEric Sandeen 			 * by found extent
21846873fa0dSEric Sandeen 			 */
21856873fa0dSEric Sandeen 			start = block;
21866873fa0dSEric Sandeen 			end = le32_to_cpu(ex->ee_block)
21876873fa0dSEric Sandeen 				+ ext4_ext_get_actual_len(ex);
21886873fa0dSEric Sandeen 			if (block + num < end)
21896873fa0dSEric Sandeen 				end = block + num;
21906873fa0dSEric Sandeen 			exists = 1;
21916873fa0dSEric Sandeen 		} else {
21926873fa0dSEric Sandeen 			BUG();
21936873fa0dSEric Sandeen 		}
21946873fa0dSEric Sandeen 		BUG_ON(end <= start);
21956873fa0dSEric Sandeen 
21966873fa0dSEric Sandeen 		if (!exists) {
219769eb33dcSZheng Liu 			es.es_lblk = start;
219869eb33dcSZheng Liu 			es.es_len = end - start;
219969eb33dcSZheng Liu 			es.es_pblk = 0;
22006873fa0dSEric Sandeen 		} else {
220169eb33dcSZheng Liu 			es.es_lblk = le32_to_cpu(ex->ee_block);
220269eb33dcSZheng Liu 			es.es_len = ext4_ext_get_actual_len(ex);
220369eb33dcSZheng Liu 			es.es_pblk = ext4_ext_pblock(ex);
220491dd8c11SLukas Czerner 			if (ext4_ext_is_uninitialized(ex))
220591dd8c11SLukas Czerner 				flags |= FIEMAP_EXTENT_UNWRITTEN;
22066873fa0dSEric Sandeen 		}
22076873fa0dSEric Sandeen 
220891dd8c11SLukas Czerner 		/*
220969eb33dcSZheng Liu 		 * Find delayed extent and update es accordingly. We call
221069eb33dcSZheng Liu 		 * it even in !exists case to find out whether es is the
221191dd8c11SLukas Czerner 		 * last existing extent or not.
221291dd8c11SLukas Czerner 		 */
221369eb33dcSZheng Liu 		next_del = ext4_find_delayed_extent(inode, &es);
221491dd8c11SLukas Czerner 		if (!exists && next_del) {
221591dd8c11SLukas Czerner 			exists = 1;
221672dac95dSJie Liu 			flags |= (FIEMAP_EXTENT_DELALLOC |
221772dac95dSJie Liu 				  FIEMAP_EXTENT_UNKNOWN);
221891dd8c11SLukas Czerner 		}
221991dd8c11SLukas Czerner 		up_read(&EXT4_I(inode)->i_data_sem);
222091dd8c11SLukas Czerner 
222169eb33dcSZheng Liu 		if (unlikely(es.es_len == 0)) {
222269eb33dcSZheng Liu 			EXT4_ERROR_INODE(inode, "es.es_len == 0");
2223273df556SFrank Mayhar 			err = -EIO;
2224273df556SFrank Mayhar 			break;
2225273df556SFrank Mayhar 		}
22266873fa0dSEric Sandeen 
2227f7fec032SZheng Liu 		/*
2228f7fec032SZheng Liu 		 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
2229f7fec032SZheng Liu 		 * we need to check next == EXT_MAX_BLOCKS because it is
2230f7fec032SZheng Liu 		 * possible that an extent is with unwritten and delayed
2231f7fec032SZheng Liu 		 * status due to when an extent is delayed allocated and
2232f7fec032SZheng Liu 		 * is allocated by fallocate status tree will track both of
2233f7fec032SZheng Liu 		 * them in a extent.
2234f7fec032SZheng Liu 		 *
2235f7fec032SZheng Liu 		 * So we could return a unwritten and delayed extent, and
2236f7fec032SZheng Liu 		 * its block is equal to 'next'.
2237f7fec032SZheng Liu 		 */
2238f7fec032SZheng Liu 		if (next == next_del && next == EXT_MAX_BLOCKS) {
223991dd8c11SLukas Czerner 			flags |= FIEMAP_EXTENT_LAST;
224091dd8c11SLukas Czerner 			if (unlikely(next_del != EXT_MAX_BLOCKS ||
224191dd8c11SLukas Czerner 				     next != EXT_MAX_BLOCKS)) {
224291dd8c11SLukas Czerner 				EXT4_ERROR_INODE(inode,
224391dd8c11SLukas Czerner 						 "next extent == %u, next "
224491dd8c11SLukas Czerner 						 "delalloc extent = %u",
224591dd8c11SLukas Czerner 						 next, next_del);
224691dd8c11SLukas Czerner 				err = -EIO;
224791dd8c11SLukas Czerner 				break;
224891dd8c11SLukas Czerner 			}
224991dd8c11SLukas Czerner 		}
225091dd8c11SLukas Czerner 
225191dd8c11SLukas Czerner 		if (exists) {
225291dd8c11SLukas Czerner 			err = fiemap_fill_next_extent(fieinfo,
225369eb33dcSZheng Liu 				(__u64)es.es_lblk << blksize_bits,
225469eb33dcSZheng Liu 				(__u64)es.es_pblk << blksize_bits,
225569eb33dcSZheng Liu 				(__u64)es.es_len << blksize_bits,
225691dd8c11SLukas Czerner 				flags);
22576873fa0dSEric Sandeen 			if (err < 0)
22586873fa0dSEric Sandeen 				break;
225991dd8c11SLukas Czerner 			if (err == 1) {
22606873fa0dSEric Sandeen 				err = 0;
22616873fa0dSEric Sandeen 				break;
22626873fa0dSEric Sandeen 			}
22636873fa0dSEric Sandeen 		}
22646873fa0dSEric Sandeen 
226569eb33dcSZheng Liu 		block = es.es_lblk + es.es_len;
22666873fa0dSEric Sandeen 	}
22676873fa0dSEric Sandeen 
22686873fa0dSEric Sandeen 	if (path) {
22696873fa0dSEric Sandeen 		ext4_ext_drop_refs(path);
22706873fa0dSEric Sandeen 		kfree(path);
22716873fa0dSEric Sandeen 	}
22726873fa0dSEric Sandeen 
22736873fa0dSEric Sandeen 	return err;
22746873fa0dSEric Sandeen }
22756873fa0dSEric Sandeen 
2276a86c6181SAlex Tomas /*
2277d0d856e8SRandy Dunlap  * ext4_ext_put_gap_in_cache:
2278d0d856e8SRandy Dunlap  * calculate boundaries of the gap that the requested block fits into
2279a86c6181SAlex Tomas  * and cache this gap
2280a86c6181SAlex Tomas  */
228109b88252SAvantika Mathur static void
2282a86c6181SAlex Tomas ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2283725d26d3SAneesh Kumar K.V 				ext4_lblk_t block)
2284a86c6181SAlex Tomas {
2285a86c6181SAlex Tomas 	int depth = ext_depth(inode);
228627b1b228SAndi Shyti 	unsigned long len = 0;
228727b1b228SAndi Shyti 	ext4_lblk_t lblock = 0;
2288a86c6181SAlex Tomas 	struct ext4_extent *ex;
2289a86c6181SAlex Tomas 
2290a86c6181SAlex Tomas 	ex = path[depth].p_ext;
2291a86c6181SAlex Tomas 	if (ex == NULL) {
229269eb33dcSZheng Liu 		/*
229369eb33dcSZheng Liu 		 * there is no extent yet, so gap is [0;-] and we
229469eb33dcSZheng Liu 		 * don't cache it
229569eb33dcSZheng Liu 		 */
2296a86c6181SAlex Tomas 		ext_debug("cache gap(whole file):");
2297a86c6181SAlex Tomas 	} else if (block < le32_to_cpu(ex->ee_block)) {
2298a86c6181SAlex Tomas 		lblock = block;
2299a86c6181SAlex Tomas 		len = le32_to_cpu(ex->ee_block) - block;
2300bba90743SEric Sandeen 		ext_debug("cache gap(before): %u [%u:%u]",
2301bba90743SEric Sandeen 				block,
2302bba90743SEric Sandeen 				le32_to_cpu(ex->ee_block),
2303bba90743SEric Sandeen 				 ext4_ext_get_actual_len(ex));
2304d100eef2SZheng Liu 		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2305d100eef2SZheng Liu 			ext4_es_insert_extent(inode, lblock, len, ~0,
2306d100eef2SZheng Liu 					      EXTENT_STATUS_HOLE);
2307a86c6181SAlex Tomas 	} else if (block >= le32_to_cpu(ex->ee_block)
2308a2df2a63SAmit Arora 			+ ext4_ext_get_actual_len(ex)) {
2309725d26d3SAneesh Kumar K.V 		ext4_lblk_t next;
2310a86c6181SAlex Tomas 		lblock = le32_to_cpu(ex->ee_block)
2311a2df2a63SAmit Arora 			+ ext4_ext_get_actual_len(ex);
2312725d26d3SAneesh Kumar K.V 
2313725d26d3SAneesh Kumar K.V 		next = ext4_ext_next_allocated_block(path);
2314bba90743SEric Sandeen 		ext_debug("cache gap(after): [%u:%u] %u",
2315bba90743SEric Sandeen 				le32_to_cpu(ex->ee_block),
2316bba90743SEric Sandeen 				ext4_ext_get_actual_len(ex),
2317bba90743SEric Sandeen 				block);
2318725d26d3SAneesh Kumar K.V 		BUG_ON(next == lblock);
2319725d26d3SAneesh Kumar K.V 		len = next - lblock;
2320d100eef2SZheng Liu 		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2321d100eef2SZheng Liu 			ext4_es_insert_extent(inode, lblock, len, ~0,
2322d100eef2SZheng Liu 					      EXTENT_STATUS_HOLE);
2323a86c6181SAlex Tomas 	} else {
2324a86c6181SAlex Tomas 		BUG();
2325a86c6181SAlex Tomas 	}
2326a86c6181SAlex Tomas 
2327bba90743SEric Sandeen 	ext_debug(" -> %u:%lu\n", lblock, len);
2328a86c6181SAlex Tomas }
2329a86c6181SAlex Tomas 
2330a86c6181SAlex Tomas /*
2331d0d856e8SRandy Dunlap  * ext4_ext_rm_idx:
2332d0d856e8SRandy Dunlap  * removes index from the index block.
2333a86c6181SAlex Tomas  */
23341d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2335c36575e6SForrest Liu 			struct ext4_ext_path *path, int depth)
2336a86c6181SAlex Tomas {
2337a86c6181SAlex Tomas 	int err;
2338f65e6fbaSAlex Tomas 	ext4_fsblk_t leaf;
2339a86c6181SAlex Tomas 
2340a86c6181SAlex Tomas 	/* free index block */
2341c36575e6SForrest Liu 	depth--;
2342c36575e6SForrest Liu 	path = path + depth;
2343bf89d16fSTheodore Ts'o 	leaf = ext4_idx_pblock(path->p_idx);
2344273df556SFrank Mayhar 	if (unlikely(path->p_hdr->eh_entries == 0)) {
2345273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2346273df556SFrank Mayhar 		return -EIO;
2347273df556SFrank Mayhar 	}
23487e028976SAvantika Mathur 	err = ext4_ext_get_access(handle, inode, path);
23497e028976SAvantika Mathur 	if (err)
2350a86c6181SAlex Tomas 		return err;
23510e1147b0SRobin Dong 
23520e1147b0SRobin Dong 	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
23530e1147b0SRobin Dong 		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
23540e1147b0SRobin Dong 		len *= sizeof(struct ext4_extent_idx);
23550e1147b0SRobin Dong 		memmove(path->p_idx, path->p_idx + 1, len);
23560e1147b0SRobin Dong 	}
23570e1147b0SRobin Dong 
2358e8546d06SMarcin Slusarz 	le16_add_cpu(&path->p_hdr->eh_entries, -1);
23597e028976SAvantika Mathur 	err = ext4_ext_dirty(handle, inode, path);
23607e028976SAvantika Mathur 	if (err)
2361a86c6181SAlex Tomas 		return err;
23622ae02107SMingming Cao 	ext_debug("index is empty, remove it, free block %llu\n", leaf);
2363d8990240SAditya Kali 	trace_ext4_ext_rm_idx(inode, leaf);
2364d8990240SAditya Kali 
23657dc57615SPeter Huewe 	ext4_free_blocks(handle, inode, NULL, leaf, 1,
2366e6362609STheodore Ts'o 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2367c36575e6SForrest Liu 
2368c36575e6SForrest Liu 	while (--depth >= 0) {
2369c36575e6SForrest Liu 		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2370c36575e6SForrest Liu 			break;
2371c36575e6SForrest Liu 		path--;
2372c36575e6SForrest Liu 		err = ext4_ext_get_access(handle, inode, path);
2373c36575e6SForrest Liu 		if (err)
2374c36575e6SForrest Liu 			break;
2375c36575e6SForrest Liu 		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2376c36575e6SForrest Liu 		err = ext4_ext_dirty(handle, inode, path);
2377c36575e6SForrest Liu 		if (err)
2378c36575e6SForrest Liu 			break;
2379c36575e6SForrest Liu 	}
2380a86c6181SAlex Tomas 	return err;
2381a86c6181SAlex Tomas }
2382a86c6181SAlex Tomas 
2383a86c6181SAlex Tomas /*
2384ee12b630SMingming Cao  * ext4_ext_calc_credits_for_single_extent:
2385ee12b630SMingming Cao  * This routine returns max. credits that needed to insert an extent
2386ee12b630SMingming Cao  * to the extent tree.
2387ee12b630SMingming Cao  * When pass the actual path, the caller should calculate credits
2388ee12b630SMingming Cao  * under i_data_sem.
2389a86c6181SAlex Tomas  */
2390525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2391a86c6181SAlex Tomas 						struct ext4_ext_path *path)
2392a86c6181SAlex Tomas {
2393a86c6181SAlex Tomas 	if (path) {
2394ee12b630SMingming Cao 		int depth = ext_depth(inode);
2395f3bd1f3fSMingming Cao 		int ret = 0;
2396ee12b630SMingming Cao 
2397a86c6181SAlex Tomas 		/* probably there is space in leaf? */
2398a86c6181SAlex Tomas 		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2399ee12b630SMingming Cao 				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
2400ee12b630SMingming Cao 
2401ee12b630SMingming Cao 			/*
2402ee12b630SMingming Cao 			 *  There are some space in the leaf tree, no
2403ee12b630SMingming Cao 			 *  need to account for leaf block credit
2404ee12b630SMingming Cao 			 *
2405ee12b630SMingming Cao 			 *  bitmaps and block group descriptor blocks
2406df3ab170STao Ma 			 *  and other metadata blocks still need to be
2407ee12b630SMingming Cao 			 *  accounted.
2408ee12b630SMingming Cao 			 */
2409525f4ed8SMingming Cao 			/* 1 bitmap, 1 block group descriptor */
2410ee12b630SMingming Cao 			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
24115887e98bSAneesh Kumar K.V 			return ret;
2412ee12b630SMingming Cao 		}
2413ee12b630SMingming Cao 	}
2414ee12b630SMingming Cao 
2415525f4ed8SMingming Cao 	return ext4_chunk_trans_blocks(inode, nrblocks);
2416a86c6181SAlex Tomas }
2417a86c6181SAlex Tomas 
2418a86c6181SAlex Tomas /*
2419fffb2739SJan Kara  * How many index/leaf blocks need to change/allocate to add @extents extents?
2420ee12b630SMingming Cao  *
2421fffb2739SJan Kara  * If we add a single extent, then in the worse case, each tree level
2422fffb2739SJan Kara  * index/leaf need to be changed in case of the tree split.
2423ee12b630SMingming Cao  *
2424fffb2739SJan Kara  * If more extents are inserted, they could cause the whole tree split more
2425fffb2739SJan Kara  * than once, but this is really rare.
2426a86c6181SAlex Tomas  */
2427fffb2739SJan Kara int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2428ee12b630SMingming Cao {
2429ee12b630SMingming Cao 	int index;
2430f19d5870STao Ma 	int depth;
2431f19d5870STao Ma 
2432f19d5870STao Ma 	/* If we are converting the inline data, only one is needed here. */
2433f19d5870STao Ma 	if (ext4_has_inline_data(inode))
2434f19d5870STao Ma 		return 1;
2435f19d5870STao Ma 
2436f19d5870STao Ma 	depth = ext_depth(inode);
2437a86c6181SAlex Tomas 
2438fffb2739SJan Kara 	if (extents <= 1)
2439ee12b630SMingming Cao 		index = depth * 2;
2440ee12b630SMingming Cao 	else
2441ee12b630SMingming Cao 		index = depth * 3;
2442a86c6181SAlex Tomas 
2443ee12b630SMingming Cao 	return index;
2444a86c6181SAlex Tomas }
2445a86c6181SAlex Tomas 
2446981250caSTheodore Ts'o static inline int get_default_free_blocks_flags(struct inode *inode)
2447981250caSTheodore Ts'o {
2448981250caSTheodore Ts'o 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2449981250caSTheodore Ts'o 		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2450981250caSTheodore Ts'o 	else if (ext4_should_journal_data(inode))
2451981250caSTheodore Ts'o 		return EXT4_FREE_BLOCKS_FORGET;
2452981250caSTheodore Ts'o 	return 0;
2453981250caSTheodore Ts'o }
2454981250caSTheodore Ts'o 
2455a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2456a86c6181SAlex Tomas 			      struct ext4_extent *ex,
2457d23142c6SLukas Czerner 			      long long *partial_cluster,
2458725d26d3SAneesh Kumar K.V 			      ext4_lblk_t from, ext4_lblk_t to)
2459a86c6181SAlex Tomas {
24600aa06000STheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2461a2df2a63SAmit Arora 	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
24620aa06000STheodore Ts'o 	ext4_fsblk_t pblk;
2463981250caSTheodore Ts'o 	int flags = get_default_free_blocks_flags(inode);
246418888cf0SAndrey Sidorov 
24650aa06000STheodore Ts'o 	/*
24660aa06000STheodore Ts'o 	 * For bigalloc file systems, we never free a partial cluster
24670aa06000STheodore Ts'o 	 * at the beginning of the extent.  Instead, we make a note
24680aa06000STheodore Ts'o 	 * that we tried freeing the cluster, and check to see if we
24690aa06000STheodore Ts'o 	 * need to free it on a subsequent call to ext4_remove_blocks,
24700aa06000STheodore Ts'o 	 * or at the end of the ext4_truncate() operation.
24710aa06000STheodore Ts'o 	 */
24720aa06000STheodore Ts'o 	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
24730aa06000STheodore Ts'o 
2474d8990240SAditya Kali 	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
24750aa06000STheodore Ts'o 	/*
24760aa06000STheodore Ts'o 	 * If we have a partial cluster, and it's different from the
24770aa06000STheodore Ts'o 	 * cluster of the last block, we need to explicitly free the
24780aa06000STheodore Ts'o 	 * partial cluster here.
24790aa06000STheodore Ts'o 	 */
24800aa06000STheodore Ts'o 	pblk = ext4_ext_pblock(ex) + ee_len - 1;
2481d23142c6SLukas Czerner 	if ((*partial_cluster > 0) &&
2482d23142c6SLukas Czerner 	    (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
24830aa06000STheodore Ts'o 		ext4_free_blocks(handle, inode, NULL,
24840aa06000STheodore Ts'o 				 EXT4_C2B(sbi, *partial_cluster),
24850aa06000STheodore Ts'o 				 sbi->s_cluster_ratio, flags);
24860aa06000STheodore Ts'o 		*partial_cluster = 0;
24870aa06000STheodore Ts'o 	}
24880aa06000STheodore Ts'o 
2489a86c6181SAlex Tomas #ifdef EXTENTS_STATS
2490a86c6181SAlex Tomas 	{
2491a86c6181SAlex Tomas 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2492a86c6181SAlex Tomas 		spin_lock(&sbi->s_ext_stats_lock);
2493a86c6181SAlex Tomas 		sbi->s_ext_blocks += ee_len;
2494a86c6181SAlex Tomas 		sbi->s_ext_extents++;
2495a86c6181SAlex Tomas 		if (ee_len < sbi->s_ext_min)
2496a86c6181SAlex Tomas 			sbi->s_ext_min = ee_len;
2497a86c6181SAlex Tomas 		if (ee_len > sbi->s_ext_max)
2498a86c6181SAlex Tomas 			sbi->s_ext_max = ee_len;
2499a86c6181SAlex Tomas 		if (ext_depth(inode) > sbi->s_depth_max)
2500a86c6181SAlex Tomas 			sbi->s_depth_max = ext_depth(inode);
2501a86c6181SAlex Tomas 		spin_unlock(&sbi->s_ext_stats_lock);
2502a86c6181SAlex Tomas 	}
2503a86c6181SAlex Tomas #endif
2504a86c6181SAlex Tomas 	if (from >= le32_to_cpu(ex->ee_block)
2505a2df2a63SAmit Arora 	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2506a86c6181SAlex Tomas 		/* tail removal */
2507725d26d3SAneesh Kumar K.V 		ext4_lblk_t num;
2508d23142c6SLukas Czerner 		unsigned int unaligned;
2509725d26d3SAneesh Kumar K.V 
2510a2df2a63SAmit Arora 		num = le32_to_cpu(ex->ee_block) + ee_len - from;
25110aa06000STheodore Ts'o 		pblk = ext4_ext_pblock(ex) + ee_len - num;
2512d23142c6SLukas Czerner 		/*
2513d23142c6SLukas Czerner 		 * Usually we want to free partial cluster at the end of the
2514d23142c6SLukas Czerner 		 * extent, except for the situation when the cluster is still
2515d23142c6SLukas Czerner 		 * used by any other extent (partial_cluster is negative).
2516d23142c6SLukas Czerner 		 */
2517d23142c6SLukas Czerner 		if (*partial_cluster < 0 &&
2518d23142c6SLukas Czerner 		    -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
2519d23142c6SLukas Czerner 			flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2520d23142c6SLukas Czerner 
2521d23142c6SLukas Czerner 		ext_debug("free last %u blocks starting %llu partial %lld\n",
2522d23142c6SLukas Czerner 			  num, pblk, *partial_cluster);
25230aa06000STheodore Ts'o 		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
25240aa06000STheodore Ts'o 		/*
25250aa06000STheodore Ts'o 		 * If the block range to be freed didn't start at the
25260aa06000STheodore Ts'o 		 * beginning of a cluster, and we removed the entire
2527d23142c6SLukas Czerner 		 * extent and the cluster is not used by any other extent,
2528d23142c6SLukas Czerner 		 * save the partial cluster here, since we might need to
2529d23142c6SLukas Czerner 		 * delete if we determine that the truncate operation has
2530d23142c6SLukas Czerner 		 * removed all of the blocks in the cluster.
2531d23142c6SLukas Czerner 		 *
2532d23142c6SLukas Czerner 		 * On the other hand, if we did not manage to free the whole
2533d23142c6SLukas Czerner 		 * extent, we have to mark the cluster as used (store negative
2534d23142c6SLukas Czerner 		 * cluster number in partial_cluster).
25350aa06000STheodore Ts'o 		 */
2536f5a44db5STheodore Ts'o 		unaligned = EXT4_PBLK_COFF(sbi, pblk);
2537d23142c6SLukas Czerner 		if (unaligned && (ee_len == num) &&
2538d23142c6SLukas Czerner 		    (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
25390aa06000STheodore Ts'o 			*partial_cluster = EXT4_B2C(sbi, pblk);
2540d23142c6SLukas Czerner 		else if (unaligned)
2541d23142c6SLukas Czerner 			*partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
2542d23142c6SLukas Czerner 		else if (*partial_cluster > 0)
25430aa06000STheodore Ts'o 			*partial_cluster = 0;
254478fb9cdfSLukas Czerner 	} else
254578fb9cdfSLukas Czerner 		ext4_error(sbi->s_sb, "strange request: removal(2) "
2546725d26d3SAneesh Kumar K.V 			   "%u-%u from %u:%u\n",
2547a2df2a63SAmit Arora 			   from, to, le32_to_cpu(ex->ee_block), ee_len);
2548a86c6181SAlex Tomas 	return 0;
2549a86c6181SAlex Tomas }
2550a86c6181SAlex Tomas 
2551d583fb87SAllison Henderson 
2552d583fb87SAllison Henderson /*
2553d583fb87SAllison Henderson  * ext4_ext_rm_leaf() Removes the extents associated with the
2554d583fb87SAllison Henderson  * blocks appearing between "start" and "end", and splits the extents
2555d583fb87SAllison Henderson  * if "start" and "end" appear in the same extent
2556d583fb87SAllison Henderson  *
2557d583fb87SAllison Henderson  * @handle: The journal handle
2558d583fb87SAllison Henderson  * @inode:  The files inode
2559d583fb87SAllison Henderson  * @path:   The path to the leaf
2560d23142c6SLukas Czerner  * @partial_cluster: The cluster which we'll have to free if all extents
2561d23142c6SLukas Czerner  *                   has been released from it. It gets negative in case
2562d23142c6SLukas Czerner  *                   that the cluster is still used.
2563d583fb87SAllison Henderson  * @start:  The first block to remove
2564d583fb87SAllison Henderson  * @end:   The last block to remove
2565d583fb87SAllison Henderson  */
2566a86c6181SAlex Tomas static int
2567a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2568d23142c6SLukas Czerner 		 struct ext4_ext_path *path,
2569d23142c6SLukas Czerner 		 long long *partial_cluster,
25700aa06000STheodore Ts'o 		 ext4_lblk_t start, ext4_lblk_t end)
2571a86c6181SAlex Tomas {
25720aa06000STheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2573a86c6181SAlex Tomas 	int err = 0, correct_index = 0;
2574a86c6181SAlex Tomas 	int depth = ext_depth(inode), credits;
2575a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
2576750c9c47SDmitry Monakhov 	ext4_lblk_t a, b;
2577725d26d3SAneesh Kumar K.V 	unsigned num;
2578725d26d3SAneesh Kumar K.V 	ext4_lblk_t ex_ee_block;
2579a86c6181SAlex Tomas 	unsigned short ex_ee_len;
2580a2df2a63SAmit Arora 	unsigned uninitialized = 0;
2581a86c6181SAlex Tomas 	struct ext4_extent *ex;
2582d23142c6SLukas Czerner 	ext4_fsblk_t pblk;
2583a86c6181SAlex Tomas 
2584c29c0ae7SAlex Tomas 	/* the header must be checked already in ext4_ext_remove_space() */
25855f95d21fSLukas Czerner 	ext_debug("truncate since %u in leaf to %u\n", start, end);
2586a86c6181SAlex Tomas 	if (!path[depth].p_hdr)
2587a86c6181SAlex Tomas 		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2588a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
2589273df556SFrank Mayhar 	if (unlikely(path[depth].p_hdr == NULL)) {
2590273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2591273df556SFrank Mayhar 		return -EIO;
2592273df556SFrank Mayhar 	}
2593a86c6181SAlex Tomas 	/* find where to start removing */
25946ae06ff5SAshish Sangwan 	ex = path[depth].p_ext;
25956ae06ff5SAshish Sangwan 	if (!ex)
2596a86c6181SAlex Tomas 		ex = EXT_LAST_EXTENT(eh);
2597a86c6181SAlex Tomas 
2598a86c6181SAlex Tomas 	ex_ee_block = le32_to_cpu(ex->ee_block);
2599a2df2a63SAmit Arora 	ex_ee_len = ext4_ext_get_actual_len(ex);
2600a86c6181SAlex Tomas 
2601d8990240SAditya Kali 	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2602d8990240SAditya Kali 
2603a86c6181SAlex Tomas 	while (ex >= EXT_FIRST_EXTENT(eh) &&
2604a86c6181SAlex Tomas 			ex_ee_block + ex_ee_len > start) {
2605a41f2071SAneesh Kumar K.V 
2606a41f2071SAneesh Kumar K.V 		if (ext4_ext_is_uninitialized(ex))
2607a41f2071SAneesh Kumar K.V 			uninitialized = 1;
2608a41f2071SAneesh Kumar K.V 		else
2609a41f2071SAneesh Kumar K.V 			uninitialized = 0;
2610a41f2071SAneesh Kumar K.V 
2611553f9008SMingming 		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2612553f9008SMingming 			 uninitialized, ex_ee_len);
2613a86c6181SAlex Tomas 		path[depth].p_ext = ex;
2614a86c6181SAlex Tomas 
2615a86c6181SAlex Tomas 		a = ex_ee_block > start ? ex_ee_block : start;
2616d583fb87SAllison Henderson 		b = ex_ee_block+ex_ee_len - 1 < end ?
2617d583fb87SAllison Henderson 			ex_ee_block+ex_ee_len - 1 : end;
2618a86c6181SAlex Tomas 
2619a86c6181SAlex Tomas 		ext_debug("  border %u:%u\n", a, b);
2620a86c6181SAlex Tomas 
2621d583fb87SAllison Henderson 		/* If this extent is beyond the end of the hole, skip it */
26225f95d21fSLukas Czerner 		if (end < ex_ee_block) {
2623d23142c6SLukas Czerner 			/*
2624d23142c6SLukas Czerner 			 * We're going to skip this extent and move to another,
2625d23142c6SLukas Czerner 			 * so if this extent is not cluster aligned we have
2626d23142c6SLukas Czerner 			 * to mark the current cluster as used to avoid
2627d23142c6SLukas Czerner 			 * accidentally freeing it later on
2628d23142c6SLukas Czerner 			 */
2629d23142c6SLukas Czerner 			pblk = ext4_ext_pblock(ex);
2630f5a44db5STheodore Ts'o 			if (EXT4_PBLK_COFF(sbi, pblk))
2631d23142c6SLukas Czerner 				*partial_cluster =
2632d23142c6SLukas Czerner 					-((long long)EXT4_B2C(sbi, pblk));
2633d583fb87SAllison Henderson 			ex--;
2634d583fb87SAllison Henderson 			ex_ee_block = le32_to_cpu(ex->ee_block);
2635d583fb87SAllison Henderson 			ex_ee_len = ext4_ext_get_actual_len(ex);
2636d583fb87SAllison Henderson 			continue;
2637750c9c47SDmitry Monakhov 		} else if (b != ex_ee_block + ex_ee_len - 1) {
2638dc1841d6SLukas Czerner 			EXT4_ERROR_INODE(inode,
2639dc1841d6SLukas Czerner 					 "can not handle truncate %u:%u "
2640dc1841d6SLukas Czerner 					 "on extent %u:%u",
2641dc1841d6SLukas Czerner 					 start, end, ex_ee_block,
2642dc1841d6SLukas Czerner 					 ex_ee_block + ex_ee_len - 1);
2643d583fb87SAllison Henderson 			err = -EIO;
2644d583fb87SAllison Henderson 			goto out;
2645a86c6181SAlex Tomas 		} else if (a != ex_ee_block) {
2646a86c6181SAlex Tomas 			/* remove tail of the extent */
2647750c9c47SDmitry Monakhov 			num = a - ex_ee_block;
2648a86c6181SAlex Tomas 		} else {
2649a86c6181SAlex Tomas 			/* remove whole extent: excellent! */
2650a86c6181SAlex Tomas 			num = 0;
2651d583fb87SAllison Henderson 		}
265234071da7STheodore Ts'o 		/*
265334071da7STheodore Ts'o 		 * 3 for leaf, sb, and inode plus 2 (bmap and group
265434071da7STheodore Ts'o 		 * descriptor) for each block group; assume two block
265534071da7STheodore Ts'o 		 * groups plus ex_ee_len/blocks_per_block_group for
265634071da7STheodore Ts'o 		 * the worst case
265734071da7STheodore Ts'o 		 */
265834071da7STheodore Ts'o 		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2659a86c6181SAlex Tomas 		if (ex == EXT_FIRST_EXTENT(eh)) {
2660a86c6181SAlex Tomas 			correct_index = 1;
2661a86c6181SAlex Tomas 			credits += (ext_depth(inode)) + 1;
2662a86c6181SAlex Tomas 		}
26635aca07ebSDmitry Monakhov 		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2664a86c6181SAlex Tomas 
2665487caeefSJan Kara 		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
26669102e4faSShen Feng 		if (err)
2667a86c6181SAlex Tomas 			goto out;
2668a86c6181SAlex Tomas 
2669a86c6181SAlex Tomas 		err = ext4_ext_get_access(handle, inode, path + depth);
2670a86c6181SAlex Tomas 		if (err)
2671a86c6181SAlex Tomas 			goto out;
2672a86c6181SAlex Tomas 
26730aa06000STheodore Ts'o 		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
26740aa06000STheodore Ts'o 					 a, b);
2675a86c6181SAlex Tomas 		if (err)
2676a86c6181SAlex Tomas 			goto out;
2677a86c6181SAlex Tomas 
2678750c9c47SDmitry Monakhov 		if (num == 0)
2679d0d856e8SRandy Dunlap 			/* this extent is removed; mark slot entirely unused */
2680f65e6fbaSAlex Tomas 			ext4_ext_store_pblock(ex, 0);
2681a86c6181SAlex Tomas 
2682a86c6181SAlex Tomas 		ex->ee_len = cpu_to_le16(num);
2683749269faSAmit Arora 		/*
2684749269faSAmit Arora 		 * Do not mark uninitialized if all the blocks in the
2685749269faSAmit Arora 		 * extent have been removed.
2686749269faSAmit Arora 		 */
2687749269faSAmit Arora 		if (uninitialized && num)
2688a2df2a63SAmit Arora 			ext4_ext_mark_uninitialized(ex);
2689d583fb87SAllison Henderson 		/*
2690d583fb87SAllison Henderson 		 * If the extent was completely released,
2691d583fb87SAllison Henderson 		 * we need to remove it from the leaf
2692d583fb87SAllison Henderson 		 */
2693d583fb87SAllison Henderson 		if (num == 0) {
2694f17722f9SLukas Czerner 			if (end != EXT_MAX_BLOCKS - 1) {
2695d583fb87SAllison Henderson 				/*
2696d583fb87SAllison Henderson 				 * For hole punching, we need to scoot all the
2697d583fb87SAllison Henderson 				 * extents up when an extent is removed so that
2698d583fb87SAllison Henderson 				 * we dont have blank extents in the middle
2699d583fb87SAllison Henderson 				 */
2700d583fb87SAllison Henderson 				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2701d583fb87SAllison Henderson 					sizeof(struct ext4_extent));
2702d583fb87SAllison Henderson 
2703d583fb87SAllison Henderson 				/* Now get rid of the one at the end */
2704d583fb87SAllison Henderson 				memset(EXT_LAST_EXTENT(eh), 0,
2705d583fb87SAllison Henderson 					sizeof(struct ext4_extent));
2706d583fb87SAllison Henderson 			}
2707d583fb87SAllison Henderson 			le16_add_cpu(&eh->eh_entries, -1);
2708d23142c6SLukas Czerner 		} else if (*partial_cluster > 0)
27090aa06000STheodore Ts'o 			*partial_cluster = 0;
2710d583fb87SAllison Henderson 
2711750c9c47SDmitry Monakhov 		err = ext4_ext_dirty(handle, inode, path + depth);
2712750c9c47SDmitry Monakhov 		if (err)
2713750c9c47SDmitry Monakhov 			goto out;
2714750c9c47SDmitry Monakhov 
2715bf52c6f7SYongqiang Yang 		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2716bf89d16fSTheodore Ts'o 				ext4_ext_pblock(ex));
2717a86c6181SAlex Tomas 		ex--;
2718a86c6181SAlex Tomas 		ex_ee_block = le32_to_cpu(ex->ee_block);
2719a2df2a63SAmit Arora 		ex_ee_len = ext4_ext_get_actual_len(ex);
2720a86c6181SAlex Tomas 	}
2721a86c6181SAlex Tomas 
2722a86c6181SAlex Tomas 	if (correct_index && eh->eh_entries)
2723a86c6181SAlex Tomas 		err = ext4_ext_correct_indexes(handle, inode, path);
2724a86c6181SAlex Tomas 
27250aa06000STheodore Ts'o 	/*
2726d23142c6SLukas Czerner 	 * Free the partial cluster only if the current extent does not
2727d23142c6SLukas Czerner 	 * reference it. Otherwise we might free used cluster.
27280aa06000STheodore Ts'o 	 */
2729d23142c6SLukas Czerner 	if (*partial_cluster > 0 &&
27300aa06000STheodore Ts'o 	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
27310aa06000STheodore Ts'o 	     *partial_cluster)) {
2732981250caSTheodore Ts'o 		int flags = get_default_free_blocks_flags(inode);
27330aa06000STheodore Ts'o 
27340aa06000STheodore Ts'o 		ext4_free_blocks(handle, inode, NULL,
27350aa06000STheodore Ts'o 				 EXT4_C2B(sbi, *partial_cluster),
27360aa06000STheodore Ts'o 				 sbi->s_cluster_ratio, flags);
27370aa06000STheodore Ts'o 		*partial_cluster = 0;
27380aa06000STheodore Ts'o 	}
27390aa06000STheodore Ts'o 
2740a86c6181SAlex Tomas 	/* if this leaf is free, then we should
2741a86c6181SAlex Tomas 	 * remove it from index block above */
2742a86c6181SAlex Tomas 	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2743c36575e6SForrest Liu 		err = ext4_ext_rm_idx(handle, inode, path, depth);
2744a86c6181SAlex Tomas 
2745a86c6181SAlex Tomas out:
2746a86c6181SAlex Tomas 	return err;
2747a86c6181SAlex Tomas }
2748a86c6181SAlex Tomas 
2749a86c6181SAlex Tomas /*
2750d0d856e8SRandy Dunlap  * ext4_ext_more_to_rm:
2751d0d856e8SRandy Dunlap  * returns 1 if current index has to be freed (even partial)
2752a86c6181SAlex Tomas  */
275309b88252SAvantika Mathur static int
2754a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path)
2755a86c6181SAlex Tomas {
2756a86c6181SAlex Tomas 	BUG_ON(path->p_idx == NULL);
2757a86c6181SAlex Tomas 
2758a86c6181SAlex Tomas 	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2759a86c6181SAlex Tomas 		return 0;
2760a86c6181SAlex Tomas 
2761a86c6181SAlex Tomas 	/*
2762d0d856e8SRandy Dunlap 	 * if truncate on deeper level happened, it wasn't partial,
2763a86c6181SAlex Tomas 	 * so we have to consider current index for truncation
2764a86c6181SAlex Tomas 	 */
2765a86c6181SAlex Tomas 	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2766a86c6181SAlex Tomas 		return 0;
2767a86c6181SAlex Tomas 	return 1;
2768a86c6181SAlex Tomas }
2769a86c6181SAlex Tomas 
277026a4c0c6STheodore Ts'o int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
27715f95d21fSLukas Czerner 			  ext4_lblk_t end)
2772a86c6181SAlex Tomas {
2773a86c6181SAlex Tomas 	struct super_block *sb = inode->i_sb;
2774a86c6181SAlex Tomas 	int depth = ext_depth(inode);
2775968dee77SAshish Sangwan 	struct ext4_ext_path *path = NULL;
2776d23142c6SLukas Czerner 	long long partial_cluster = 0;
2777a86c6181SAlex Tomas 	handle_t *handle;
27786f2080e6SDmitry Monakhov 	int i = 0, err = 0;
2779a86c6181SAlex Tomas 
27805f95d21fSLukas Czerner 	ext_debug("truncate since %u to %u\n", start, end);
2781a86c6181SAlex Tomas 
2782a86c6181SAlex Tomas 	/* probably first extent we're gonna free will be last in block */
27839924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2784a86c6181SAlex Tomas 	if (IS_ERR(handle))
2785a86c6181SAlex Tomas 		return PTR_ERR(handle);
2786a86c6181SAlex Tomas 
27870617b83fSDmitry Monakhov again:
278861801325SLukas Czerner 	trace_ext4_ext_remove_space(inode, start, end, depth);
2789d8990240SAditya Kali 
2790a86c6181SAlex Tomas 	/*
27915f95d21fSLukas Czerner 	 * Check if we are removing extents inside the extent tree. If that
27925f95d21fSLukas Czerner 	 * is the case, we are going to punch a hole inside the extent tree
27935f95d21fSLukas Czerner 	 * so we have to check whether we need to split the extent covering
27945f95d21fSLukas Czerner 	 * the last block to remove so we can easily remove the part of it
27955f95d21fSLukas Czerner 	 * in ext4_ext_rm_leaf().
27965f95d21fSLukas Czerner 	 */
27975f95d21fSLukas Czerner 	if (end < EXT_MAX_BLOCKS - 1) {
27985f95d21fSLukas Czerner 		struct ext4_extent *ex;
27995f95d21fSLukas Czerner 		ext4_lblk_t ee_block;
28005f95d21fSLukas Czerner 
28015f95d21fSLukas Czerner 		/* find extent for this block */
2802107a7bd3STheodore Ts'o 		path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
28035f95d21fSLukas Czerner 		if (IS_ERR(path)) {
28045f95d21fSLukas Czerner 			ext4_journal_stop(handle);
28055f95d21fSLukas Czerner 			return PTR_ERR(path);
28065f95d21fSLukas Czerner 		}
28075f95d21fSLukas Czerner 		depth = ext_depth(inode);
28086f2080e6SDmitry Monakhov 		/* Leaf not may not exist only if inode has no blocks at all */
28095f95d21fSLukas Czerner 		ex = path[depth].p_ext;
2810968dee77SAshish Sangwan 		if (!ex) {
28116f2080e6SDmitry Monakhov 			if (depth) {
28126f2080e6SDmitry Monakhov 				EXT4_ERROR_INODE(inode,
28136f2080e6SDmitry Monakhov 						 "path[%d].p_hdr == NULL",
28146f2080e6SDmitry Monakhov 						 depth);
28156f2080e6SDmitry Monakhov 				err = -EIO;
28166f2080e6SDmitry Monakhov 			}
28176f2080e6SDmitry Monakhov 			goto out;
2818968dee77SAshish Sangwan 		}
28195f95d21fSLukas Czerner 
28205f95d21fSLukas Czerner 		ee_block = le32_to_cpu(ex->ee_block);
28215f95d21fSLukas Czerner 
28225f95d21fSLukas Czerner 		/*
28235f95d21fSLukas Czerner 		 * See if the last block is inside the extent, if so split
28245f95d21fSLukas Czerner 		 * the extent at 'end' block so we can easily remove the
28255f95d21fSLukas Czerner 		 * tail of the first part of the split extent in
28265f95d21fSLukas Czerner 		 * ext4_ext_rm_leaf().
28275f95d21fSLukas Czerner 		 */
28285f95d21fSLukas Czerner 		if (end >= ee_block &&
28295f95d21fSLukas Czerner 		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
28305f95d21fSLukas Czerner 			int split_flag = 0;
28315f95d21fSLukas Czerner 
28325f95d21fSLukas Czerner 			if (ext4_ext_is_uninitialized(ex))
28335f95d21fSLukas Czerner 				split_flag = EXT4_EXT_MARK_UNINIT1 |
28345f95d21fSLukas Czerner 					     EXT4_EXT_MARK_UNINIT2;
28355f95d21fSLukas Czerner 
28365f95d21fSLukas Czerner 			/*
28375f95d21fSLukas Czerner 			 * Split the extent in two so that 'end' is the last
283827dd4385SLukas Czerner 			 * block in the first new extent. Also we should not
283927dd4385SLukas Czerner 			 * fail removing space due to ENOSPC so try to use
284027dd4385SLukas Czerner 			 * reserved block if that happens.
28415f95d21fSLukas Czerner 			 */
28425f95d21fSLukas Czerner 			err = ext4_split_extent_at(handle, inode, path,
28435f95d21fSLukas Czerner 					end + 1, split_flag,
2844107a7bd3STheodore Ts'o 					EXT4_EX_NOCACHE |
28455f95d21fSLukas Czerner 					EXT4_GET_BLOCKS_PRE_IO |
284627dd4385SLukas Czerner 					EXT4_GET_BLOCKS_METADATA_NOFAIL);
28475f95d21fSLukas Czerner 
28485f95d21fSLukas Czerner 			if (err < 0)
28495f95d21fSLukas Czerner 				goto out;
28505f95d21fSLukas Czerner 		}
28515f95d21fSLukas Czerner 	}
28525f95d21fSLukas Czerner 	/*
2853d0d856e8SRandy Dunlap 	 * We start scanning from right side, freeing all the blocks
2854d0d856e8SRandy Dunlap 	 * after i_size and walking into the tree depth-wise.
2855a86c6181SAlex Tomas 	 */
28560617b83fSDmitry Monakhov 	depth = ext_depth(inode);
2857968dee77SAshish Sangwan 	if (path) {
2858968dee77SAshish Sangwan 		int k = i = depth;
2859968dee77SAshish Sangwan 		while (--k > 0)
2860968dee77SAshish Sangwan 			path[k].p_block =
2861968dee77SAshish Sangwan 				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2862968dee77SAshish Sangwan 	} else {
2863968dee77SAshish Sangwan 		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2864968dee77SAshish Sangwan 			       GFP_NOFS);
2865a86c6181SAlex Tomas 		if (path == NULL) {
2866a86c6181SAlex Tomas 			ext4_journal_stop(handle);
2867a86c6181SAlex Tomas 			return -ENOMEM;
2868a86c6181SAlex Tomas 		}
28690617b83fSDmitry Monakhov 		path[0].p_depth = depth;
2870a86c6181SAlex Tomas 		path[0].p_hdr = ext_inode_hdr(inode);
287189a4e48fSTheodore Ts'o 		i = 0;
28725f95d21fSLukas Czerner 
2873c349179bSTheodore Ts'o 		if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2874a86c6181SAlex Tomas 			err = -EIO;
2875a86c6181SAlex Tomas 			goto out;
2876a86c6181SAlex Tomas 		}
2877968dee77SAshish Sangwan 	}
2878968dee77SAshish Sangwan 	err = 0;
2879a86c6181SAlex Tomas 
2880a86c6181SAlex Tomas 	while (i >= 0 && err == 0) {
2881a86c6181SAlex Tomas 		if (i == depth) {
2882a86c6181SAlex Tomas 			/* this is leaf block */
2883d583fb87SAllison Henderson 			err = ext4_ext_rm_leaf(handle, inode, path,
28840aa06000STheodore Ts'o 					       &partial_cluster, start,
28855f95d21fSLukas Czerner 					       end);
2886d0d856e8SRandy Dunlap 			/* root level has p_bh == NULL, brelse() eats this */
2887a86c6181SAlex Tomas 			brelse(path[i].p_bh);
2888a86c6181SAlex Tomas 			path[i].p_bh = NULL;
2889a86c6181SAlex Tomas 			i--;
2890a86c6181SAlex Tomas 			continue;
2891a86c6181SAlex Tomas 		}
2892a86c6181SAlex Tomas 
2893a86c6181SAlex Tomas 		/* this is index block */
2894a86c6181SAlex Tomas 		if (!path[i].p_hdr) {
2895a86c6181SAlex Tomas 			ext_debug("initialize header\n");
2896a86c6181SAlex Tomas 			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2897a86c6181SAlex Tomas 		}
2898a86c6181SAlex Tomas 
2899a86c6181SAlex Tomas 		if (!path[i].p_idx) {
2900d0d856e8SRandy Dunlap 			/* this level hasn't been touched yet */
2901a86c6181SAlex Tomas 			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2902a86c6181SAlex Tomas 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2903a86c6181SAlex Tomas 			ext_debug("init index ptr: hdr 0x%p, num %d\n",
2904a86c6181SAlex Tomas 				  path[i].p_hdr,
2905a86c6181SAlex Tomas 				  le16_to_cpu(path[i].p_hdr->eh_entries));
2906a86c6181SAlex Tomas 		} else {
2907d0d856e8SRandy Dunlap 			/* we were already here, see at next index */
2908a86c6181SAlex Tomas 			path[i].p_idx--;
2909a86c6181SAlex Tomas 		}
2910a86c6181SAlex Tomas 
2911a86c6181SAlex Tomas 		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2912a86c6181SAlex Tomas 				i, EXT_FIRST_INDEX(path[i].p_hdr),
2913a86c6181SAlex Tomas 				path[i].p_idx);
2914a86c6181SAlex Tomas 		if (ext4_ext_more_to_rm(path + i)) {
2915c29c0ae7SAlex Tomas 			struct buffer_head *bh;
2916a86c6181SAlex Tomas 			/* go to the next level */
29172ae02107SMingming Cao 			ext_debug("move to level %d (block %llu)\n",
2918bf89d16fSTheodore Ts'o 				  i + 1, ext4_idx_pblock(path[i].p_idx));
2919a86c6181SAlex Tomas 			memset(path + i + 1, 0, sizeof(*path));
29207d7ea89eSTheodore Ts'o 			bh = read_extent_tree_block(inode,
2921107a7bd3STheodore Ts'o 				ext4_idx_pblock(path[i].p_idx), depth - i - 1,
2922107a7bd3STheodore Ts'o 				EXT4_EX_NOCACHE);
29237d7ea89eSTheodore Ts'o 			if (IS_ERR(bh)) {
2924a86c6181SAlex Tomas 				/* should we reset i_size? */
29257d7ea89eSTheodore Ts'o 				err = PTR_ERR(bh);
2926a86c6181SAlex Tomas 				break;
2927a86c6181SAlex Tomas 			}
292876828c88STheodore Ts'o 			/* Yield here to deal with large extent trees.
292976828c88STheodore Ts'o 			 * Should be a no-op if we did IO above. */
293076828c88STheodore Ts'o 			cond_resched();
2931c29c0ae7SAlex Tomas 			if (WARN_ON(i + 1 > depth)) {
2932c29c0ae7SAlex Tomas 				err = -EIO;
2933c29c0ae7SAlex Tomas 				break;
2934c29c0ae7SAlex Tomas 			}
2935c29c0ae7SAlex Tomas 			path[i + 1].p_bh = bh;
2936a86c6181SAlex Tomas 
2937d0d856e8SRandy Dunlap 			/* save actual number of indexes since this
2938d0d856e8SRandy Dunlap 			 * number is changed at the next iteration */
2939a86c6181SAlex Tomas 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2940a86c6181SAlex Tomas 			i++;
2941a86c6181SAlex Tomas 		} else {
2942d0d856e8SRandy Dunlap 			/* we finished processing this index, go up */
2943a86c6181SAlex Tomas 			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2944d0d856e8SRandy Dunlap 				/* index is empty, remove it;
2945a86c6181SAlex Tomas 				 * handle must be already prepared by the
2946a86c6181SAlex Tomas 				 * truncatei_leaf() */
2947c36575e6SForrest Liu 				err = ext4_ext_rm_idx(handle, inode, path, i);
2948a86c6181SAlex Tomas 			}
2949d0d856e8SRandy Dunlap 			/* root level has p_bh == NULL, brelse() eats this */
2950a86c6181SAlex Tomas 			brelse(path[i].p_bh);
2951a86c6181SAlex Tomas 			path[i].p_bh = NULL;
2952a86c6181SAlex Tomas 			i--;
2953a86c6181SAlex Tomas 			ext_debug("return to level %d\n", i);
2954a86c6181SAlex Tomas 		}
2955a86c6181SAlex Tomas 	}
2956a86c6181SAlex Tomas 
295761801325SLukas Czerner 	trace_ext4_ext_remove_space_done(inode, start, end, depth,
295861801325SLukas Czerner 			partial_cluster, path->p_hdr->eh_entries);
2959d8990240SAditya Kali 
29607b415bf6SAditya Kali 	/* If we still have something in the partial cluster and we have removed
29617b415bf6SAditya Kali 	 * even the first extent, then we should free the blocks in the partial
29627b415bf6SAditya Kali 	 * cluster as well. */
2963d23142c6SLukas Czerner 	if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
2964981250caSTheodore Ts'o 		int flags = get_default_free_blocks_flags(inode);
29657b415bf6SAditya Kali 
29667b415bf6SAditya Kali 		ext4_free_blocks(handle, inode, NULL,
29677b415bf6SAditya Kali 				 EXT4_C2B(EXT4_SB(sb), partial_cluster),
29687b415bf6SAditya Kali 				 EXT4_SB(sb)->s_cluster_ratio, flags);
29697b415bf6SAditya Kali 		partial_cluster = 0;
29707b415bf6SAditya Kali 	}
29717b415bf6SAditya Kali 
2972a86c6181SAlex Tomas 	/* TODO: flexible tree reduction should be here */
2973a86c6181SAlex Tomas 	if (path->p_hdr->eh_entries == 0) {
2974a86c6181SAlex Tomas 		/*
2975d0d856e8SRandy Dunlap 		 * truncate to zero freed all the tree,
2976d0d856e8SRandy Dunlap 		 * so we need to correct eh_depth
2977a86c6181SAlex Tomas 		 */
2978a86c6181SAlex Tomas 		err = ext4_ext_get_access(handle, inode, path);
2979a86c6181SAlex Tomas 		if (err == 0) {
2980a86c6181SAlex Tomas 			ext_inode_hdr(inode)->eh_depth = 0;
2981a86c6181SAlex Tomas 			ext_inode_hdr(inode)->eh_max =
298255ad63bfSTheodore Ts'o 				cpu_to_le16(ext4_ext_space_root(inode, 0));
2983a86c6181SAlex Tomas 			err = ext4_ext_dirty(handle, inode, path);
2984a86c6181SAlex Tomas 		}
2985a86c6181SAlex Tomas 	}
2986a86c6181SAlex Tomas out:
2987a86c6181SAlex Tomas 	ext4_ext_drop_refs(path);
2988a86c6181SAlex Tomas 	kfree(path);
2989968dee77SAshish Sangwan 	if (err == -EAGAIN) {
2990968dee77SAshish Sangwan 		path = NULL;
29910617b83fSDmitry Monakhov 		goto again;
2992968dee77SAshish Sangwan 	}
2993a86c6181SAlex Tomas 	ext4_journal_stop(handle);
2994a86c6181SAlex Tomas 
2995a86c6181SAlex Tomas 	return err;
2996a86c6181SAlex Tomas }
2997a86c6181SAlex Tomas 
2998a86c6181SAlex Tomas /*
2999a86c6181SAlex Tomas  * called at mount time
3000a86c6181SAlex Tomas  */
3001a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb)
3002a86c6181SAlex Tomas {
3003a86c6181SAlex Tomas 	/*
3004a86c6181SAlex Tomas 	 * possible initialization would be here
3005a86c6181SAlex Tomas 	 */
3006a86c6181SAlex Tomas 
300783982b6fSTheodore Ts'o 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
300890576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
300992b97816STheodore Ts'o 		printk(KERN_INFO "EXT4-fs: file extents enabled"
3010bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
301192b97816STheodore Ts'o 		       ", aggressive tests"
3012a86c6181SAlex Tomas #endif
3013a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH
301492b97816STheodore Ts'o 		       ", check binsearch"
3015a86c6181SAlex Tomas #endif
3016a86c6181SAlex Tomas #ifdef EXTENTS_STATS
301792b97816STheodore Ts'o 		       ", stats"
3018a86c6181SAlex Tomas #endif
301992b97816STheodore Ts'o 		       "\n");
302090576c0bSTheodore Ts'o #endif
3021a86c6181SAlex Tomas #ifdef EXTENTS_STATS
3022a86c6181SAlex Tomas 		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3023a86c6181SAlex Tomas 		EXT4_SB(sb)->s_ext_min = 1 << 30;
3024a86c6181SAlex Tomas 		EXT4_SB(sb)->s_ext_max = 0;
3025a86c6181SAlex Tomas #endif
3026a86c6181SAlex Tomas 	}
3027a86c6181SAlex Tomas }
3028a86c6181SAlex Tomas 
3029a86c6181SAlex Tomas /*
3030a86c6181SAlex Tomas  * called at umount time
3031a86c6181SAlex Tomas  */
3032a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb)
3033a86c6181SAlex Tomas {
303483982b6fSTheodore Ts'o 	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
3035a86c6181SAlex Tomas 		return;
3036a86c6181SAlex Tomas 
3037a86c6181SAlex Tomas #ifdef EXTENTS_STATS
3038a86c6181SAlex Tomas 	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3039a86c6181SAlex Tomas 		struct ext4_sb_info *sbi = EXT4_SB(sb);
3040a86c6181SAlex Tomas 		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3041a86c6181SAlex Tomas 			sbi->s_ext_blocks, sbi->s_ext_extents,
3042a86c6181SAlex Tomas 			sbi->s_ext_blocks / sbi->s_ext_extents);
3043a86c6181SAlex Tomas 		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3044a86c6181SAlex Tomas 			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3045a86c6181SAlex Tomas 	}
3046a86c6181SAlex Tomas #endif
3047a86c6181SAlex Tomas }
3048a86c6181SAlex Tomas 
3049d7b2a00cSZheng Liu static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3050d7b2a00cSZheng Liu {
3051d7b2a00cSZheng Liu 	ext4_lblk_t  ee_block;
3052d7b2a00cSZheng Liu 	ext4_fsblk_t ee_pblock;
3053d7b2a00cSZheng Liu 	unsigned int ee_len;
3054d7b2a00cSZheng Liu 
3055d7b2a00cSZheng Liu 	ee_block  = le32_to_cpu(ex->ee_block);
3056d7b2a00cSZheng Liu 	ee_len    = ext4_ext_get_actual_len(ex);
3057d7b2a00cSZheng Liu 	ee_pblock = ext4_ext_pblock(ex);
3058d7b2a00cSZheng Liu 
3059d7b2a00cSZheng Liu 	if (ee_len == 0)
3060d7b2a00cSZheng Liu 		return 0;
3061d7b2a00cSZheng Liu 
3062d7b2a00cSZheng Liu 	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3063d7b2a00cSZheng Liu 				     EXTENT_STATUS_WRITTEN);
3064d7b2a00cSZheng Liu }
3065d7b2a00cSZheng Liu 
3066093a088bSAneesh Kumar K.V /* FIXME!! we need to try to merge to left or right after zero-out  */
3067093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3068093a088bSAneesh Kumar K.V {
30692407518dSLukas Czerner 	ext4_fsblk_t ee_pblock;
30702407518dSLukas Czerner 	unsigned int ee_len;
3071b720303dSJing Zhang 	int ret;
3072093a088bSAneesh Kumar K.V 
3073093a088bSAneesh Kumar K.V 	ee_len    = ext4_ext_get_actual_len(ex);
3074bf89d16fSTheodore Ts'o 	ee_pblock = ext4_ext_pblock(ex);
3075093a088bSAneesh Kumar K.V 
3076a107e5a3STheodore Ts'o 	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
30772407518dSLukas Czerner 	if (ret > 0)
30782407518dSLukas Czerner 		ret = 0;
3079093a088bSAneesh Kumar K.V 
30802407518dSLukas Czerner 	return ret;
3081093a088bSAneesh Kumar K.V }
3082093a088bSAneesh Kumar K.V 
308347ea3bb5SYongqiang Yang /*
308447ea3bb5SYongqiang Yang  * ext4_split_extent_at() splits an extent at given block.
308547ea3bb5SYongqiang Yang  *
308647ea3bb5SYongqiang Yang  * @handle: the journal handle
308747ea3bb5SYongqiang Yang  * @inode: the file inode
308847ea3bb5SYongqiang Yang  * @path: the path to the extent
308947ea3bb5SYongqiang Yang  * @split: the logical block where the extent is splitted.
309047ea3bb5SYongqiang Yang  * @split_flags: indicates if the extent could be zeroout if split fails, and
309147ea3bb5SYongqiang Yang  *		 the states(init or uninit) of new extents.
309247ea3bb5SYongqiang Yang  * @flags: flags used to insert new extent to extent tree.
309347ea3bb5SYongqiang Yang  *
309447ea3bb5SYongqiang Yang  *
309547ea3bb5SYongqiang Yang  * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
309647ea3bb5SYongqiang Yang  * of which are deterimined by split_flag.
309747ea3bb5SYongqiang Yang  *
309847ea3bb5SYongqiang Yang  * There are two cases:
309947ea3bb5SYongqiang Yang  *  a> the extent are splitted into two extent.
310047ea3bb5SYongqiang Yang  *  b> split is not needed, and just mark the extent.
310147ea3bb5SYongqiang Yang  *
310247ea3bb5SYongqiang Yang  * return 0 on success.
310347ea3bb5SYongqiang Yang  */
310447ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle,
310547ea3bb5SYongqiang Yang 			     struct inode *inode,
310647ea3bb5SYongqiang Yang 			     struct ext4_ext_path *path,
310747ea3bb5SYongqiang Yang 			     ext4_lblk_t split,
310847ea3bb5SYongqiang Yang 			     int split_flag,
310947ea3bb5SYongqiang Yang 			     int flags)
311047ea3bb5SYongqiang Yang {
311147ea3bb5SYongqiang Yang 	ext4_fsblk_t newblock;
311247ea3bb5SYongqiang Yang 	ext4_lblk_t ee_block;
3113adb23551SZheng Liu 	struct ext4_extent *ex, newex, orig_ex, zero_ex;
311447ea3bb5SYongqiang Yang 	struct ext4_extent *ex2 = NULL;
311547ea3bb5SYongqiang Yang 	unsigned int ee_len, depth;
311647ea3bb5SYongqiang Yang 	int err = 0;
311747ea3bb5SYongqiang Yang 
3118dee1f973SDmitry Monakhov 	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3119dee1f973SDmitry Monakhov 	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3120dee1f973SDmitry Monakhov 
312147ea3bb5SYongqiang Yang 	ext_debug("ext4_split_extents_at: inode %lu, logical"
312247ea3bb5SYongqiang Yang 		"block %llu\n", inode->i_ino, (unsigned long long)split);
312347ea3bb5SYongqiang Yang 
312447ea3bb5SYongqiang Yang 	ext4_ext_show_leaf(inode, path);
312547ea3bb5SYongqiang Yang 
312647ea3bb5SYongqiang Yang 	depth = ext_depth(inode);
312747ea3bb5SYongqiang Yang 	ex = path[depth].p_ext;
312847ea3bb5SYongqiang Yang 	ee_block = le32_to_cpu(ex->ee_block);
312947ea3bb5SYongqiang Yang 	ee_len = ext4_ext_get_actual_len(ex);
313047ea3bb5SYongqiang Yang 	newblock = split - ee_block + ext4_ext_pblock(ex);
313147ea3bb5SYongqiang Yang 
313247ea3bb5SYongqiang Yang 	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3133357b66fdSDmitry Monakhov 	BUG_ON(!ext4_ext_is_uninitialized(ex) &&
3134357b66fdSDmitry Monakhov 	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
3135357b66fdSDmitry Monakhov 			     EXT4_EXT_MARK_UNINIT1 |
3136357b66fdSDmitry Monakhov 			     EXT4_EXT_MARK_UNINIT2));
313747ea3bb5SYongqiang Yang 
313847ea3bb5SYongqiang Yang 	err = ext4_ext_get_access(handle, inode, path + depth);
313947ea3bb5SYongqiang Yang 	if (err)
314047ea3bb5SYongqiang Yang 		goto out;
314147ea3bb5SYongqiang Yang 
314247ea3bb5SYongqiang Yang 	if (split == ee_block) {
314347ea3bb5SYongqiang Yang 		/*
314447ea3bb5SYongqiang Yang 		 * case b: block @split is the block that the extent begins with
314547ea3bb5SYongqiang Yang 		 * then we just change the state of the extent, and splitting
314647ea3bb5SYongqiang Yang 		 * is not needed.
314747ea3bb5SYongqiang Yang 		 */
314847ea3bb5SYongqiang Yang 		if (split_flag & EXT4_EXT_MARK_UNINIT2)
314947ea3bb5SYongqiang Yang 			ext4_ext_mark_uninitialized(ex);
315047ea3bb5SYongqiang Yang 		else
315147ea3bb5SYongqiang Yang 			ext4_ext_mark_initialized(ex);
315247ea3bb5SYongqiang Yang 
315347ea3bb5SYongqiang Yang 		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3154ecb94f5fSTheodore Ts'o 			ext4_ext_try_to_merge(handle, inode, path, ex);
315547ea3bb5SYongqiang Yang 
3156ecb94f5fSTheodore Ts'o 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
315747ea3bb5SYongqiang Yang 		goto out;
315847ea3bb5SYongqiang Yang 	}
315947ea3bb5SYongqiang Yang 
316047ea3bb5SYongqiang Yang 	/* case a */
316147ea3bb5SYongqiang Yang 	memcpy(&orig_ex, ex, sizeof(orig_ex));
316247ea3bb5SYongqiang Yang 	ex->ee_len = cpu_to_le16(split - ee_block);
316347ea3bb5SYongqiang Yang 	if (split_flag & EXT4_EXT_MARK_UNINIT1)
316447ea3bb5SYongqiang Yang 		ext4_ext_mark_uninitialized(ex);
316547ea3bb5SYongqiang Yang 
316647ea3bb5SYongqiang Yang 	/*
316747ea3bb5SYongqiang Yang 	 * path may lead to new leaf, not to original leaf any more
316847ea3bb5SYongqiang Yang 	 * after ext4_ext_insert_extent() returns,
316947ea3bb5SYongqiang Yang 	 */
317047ea3bb5SYongqiang Yang 	err = ext4_ext_dirty(handle, inode, path + depth);
317147ea3bb5SYongqiang Yang 	if (err)
317247ea3bb5SYongqiang Yang 		goto fix_extent_len;
317347ea3bb5SYongqiang Yang 
317447ea3bb5SYongqiang Yang 	ex2 = &newex;
317547ea3bb5SYongqiang Yang 	ex2->ee_block = cpu_to_le32(split);
317647ea3bb5SYongqiang Yang 	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
317747ea3bb5SYongqiang Yang 	ext4_ext_store_pblock(ex2, newblock);
317847ea3bb5SYongqiang Yang 	if (split_flag & EXT4_EXT_MARK_UNINIT2)
317947ea3bb5SYongqiang Yang 		ext4_ext_mark_uninitialized(ex2);
318047ea3bb5SYongqiang Yang 
318147ea3bb5SYongqiang Yang 	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
318247ea3bb5SYongqiang Yang 	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3183dee1f973SDmitry Monakhov 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3184adb23551SZheng Liu 			if (split_flag & EXT4_EXT_DATA_VALID1) {
3185dee1f973SDmitry Monakhov 				err = ext4_ext_zeroout(inode, ex2);
3186adb23551SZheng Liu 				zero_ex.ee_block = ex2->ee_block;
31878cde7ad1SZheng Liu 				zero_ex.ee_len = cpu_to_le16(
31888cde7ad1SZheng Liu 						ext4_ext_get_actual_len(ex2));
3189adb23551SZheng Liu 				ext4_ext_store_pblock(&zero_ex,
3190adb23551SZheng Liu 						      ext4_ext_pblock(ex2));
3191adb23551SZheng Liu 			} else {
3192dee1f973SDmitry Monakhov 				err = ext4_ext_zeroout(inode, ex);
3193adb23551SZheng Liu 				zero_ex.ee_block = ex->ee_block;
31948cde7ad1SZheng Liu 				zero_ex.ee_len = cpu_to_le16(
31958cde7ad1SZheng Liu 						ext4_ext_get_actual_len(ex));
3196adb23551SZheng Liu 				ext4_ext_store_pblock(&zero_ex,
3197adb23551SZheng Liu 						      ext4_ext_pblock(ex));
3198adb23551SZheng Liu 			}
3199adb23551SZheng Liu 		} else {
320047ea3bb5SYongqiang Yang 			err = ext4_ext_zeroout(inode, &orig_ex);
3201adb23551SZheng Liu 			zero_ex.ee_block = orig_ex.ee_block;
32028cde7ad1SZheng Liu 			zero_ex.ee_len = cpu_to_le16(
32038cde7ad1SZheng Liu 						ext4_ext_get_actual_len(&orig_ex));
3204adb23551SZheng Liu 			ext4_ext_store_pblock(&zero_ex,
3205adb23551SZheng Liu 					      ext4_ext_pblock(&orig_ex));
3206adb23551SZheng Liu 		}
3207dee1f973SDmitry Monakhov 
320847ea3bb5SYongqiang Yang 		if (err)
320947ea3bb5SYongqiang Yang 			goto fix_extent_len;
321047ea3bb5SYongqiang Yang 		/* update the extent length and mark as initialized */
3211af1584f5SAl Viro 		ex->ee_len = cpu_to_le16(ee_len);
3212ecb94f5fSTheodore Ts'o 		ext4_ext_try_to_merge(handle, inode, path, ex);
3213ecb94f5fSTheodore Ts'o 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3214adb23551SZheng Liu 		if (err)
3215adb23551SZheng Liu 			goto fix_extent_len;
3216adb23551SZheng Liu 
3217adb23551SZheng Liu 		/* update extent status tree */
3218d7b2a00cSZheng Liu 		err = ext4_zeroout_es(inode, &zero_ex);
3219adb23551SZheng Liu 
322047ea3bb5SYongqiang Yang 		goto out;
322147ea3bb5SYongqiang Yang 	} else if (err)
322247ea3bb5SYongqiang Yang 		goto fix_extent_len;
322347ea3bb5SYongqiang Yang 
322447ea3bb5SYongqiang Yang out:
322547ea3bb5SYongqiang Yang 	ext4_ext_show_leaf(inode, path);
322647ea3bb5SYongqiang Yang 	return err;
322747ea3bb5SYongqiang Yang 
322847ea3bb5SYongqiang Yang fix_extent_len:
322947ea3bb5SYongqiang Yang 	ex->ee_len = orig_ex.ee_len;
323047ea3bb5SYongqiang Yang 	ext4_ext_dirty(handle, inode, path + depth);
323147ea3bb5SYongqiang Yang 	return err;
323247ea3bb5SYongqiang Yang }
323347ea3bb5SYongqiang Yang 
323447ea3bb5SYongqiang Yang /*
323547ea3bb5SYongqiang Yang  * ext4_split_extents() splits an extent and mark extent which is covered
323647ea3bb5SYongqiang Yang  * by @map as split_flags indicates
323747ea3bb5SYongqiang Yang  *
323847ea3bb5SYongqiang Yang  * It may result in splitting the extent into multiple extents (up to three)
323947ea3bb5SYongqiang Yang  * There are three possibilities:
324047ea3bb5SYongqiang Yang  *   a> There is no split required
324147ea3bb5SYongqiang Yang  *   b> Splits in two extents: Split is happening at either end of the extent
324247ea3bb5SYongqiang Yang  *   c> Splits in three extents: Somone is splitting in middle of the extent
324347ea3bb5SYongqiang Yang  *
324447ea3bb5SYongqiang Yang  */
324547ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle,
324647ea3bb5SYongqiang Yang 			      struct inode *inode,
324747ea3bb5SYongqiang Yang 			      struct ext4_ext_path *path,
324847ea3bb5SYongqiang Yang 			      struct ext4_map_blocks *map,
324947ea3bb5SYongqiang Yang 			      int split_flag,
325047ea3bb5SYongqiang Yang 			      int flags)
325147ea3bb5SYongqiang Yang {
325247ea3bb5SYongqiang Yang 	ext4_lblk_t ee_block;
325347ea3bb5SYongqiang Yang 	struct ext4_extent *ex;
325447ea3bb5SYongqiang Yang 	unsigned int ee_len, depth;
325547ea3bb5SYongqiang Yang 	int err = 0;
325647ea3bb5SYongqiang Yang 	int uninitialized;
325747ea3bb5SYongqiang Yang 	int split_flag1, flags1;
32583a225670SZheng Liu 	int allocated = map->m_len;
325947ea3bb5SYongqiang Yang 
326047ea3bb5SYongqiang Yang 	depth = ext_depth(inode);
326147ea3bb5SYongqiang Yang 	ex = path[depth].p_ext;
326247ea3bb5SYongqiang Yang 	ee_block = le32_to_cpu(ex->ee_block);
326347ea3bb5SYongqiang Yang 	ee_len = ext4_ext_get_actual_len(ex);
326447ea3bb5SYongqiang Yang 	uninitialized = ext4_ext_is_uninitialized(ex);
326547ea3bb5SYongqiang Yang 
326647ea3bb5SYongqiang Yang 	if (map->m_lblk + map->m_len < ee_block + ee_len) {
3267dee1f973SDmitry Monakhov 		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
326847ea3bb5SYongqiang Yang 		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
326947ea3bb5SYongqiang Yang 		if (uninitialized)
327047ea3bb5SYongqiang Yang 			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
327147ea3bb5SYongqiang Yang 				       EXT4_EXT_MARK_UNINIT2;
3272dee1f973SDmitry Monakhov 		if (split_flag & EXT4_EXT_DATA_VALID2)
3273dee1f973SDmitry Monakhov 			split_flag1 |= EXT4_EXT_DATA_VALID1;
327447ea3bb5SYongqiang Yang 		err = ext4_split_extent_at(handle, inode, path,
327547ea3bb5SYongqiang Yang 				map->m_lblk + map->m_len, split_flag1, flags1);
327693917411SYongqiang Yang 		if (err)
327793917411SYongqiang Yang 			goto out;
32783a225670SZheng Liu 	} else {
32793a225670SZheng Liu 		allocated = ee_len - (map->m_lblk - ee_block);
328047ea3bb5SYongqiang Yang 	}
3281357b66fdSDmitry Monakhov 	/*
3282357b66fdSDmitry Monakhov 	 * Update path is required because previous ext4_split_extent_at() may
3283357b66fdSDmitry Monakhov 	 * result in split of original leaf or extent zeroout.
3284357b66fdSDmitry Monakhov 	 */
328547ea3bb5SYongqiang Yang 	ext4_ext_drop_refs(path);
3286107a7bd3STheodore Ts'o 	path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
328747ea3bb5SYongqiang Yang 	if (IS_ERR(path))
328847ea3bb5SYongqiang Yang 		return PTR_ERR(path);
3289357b66fdSDmitry Monakhov 	depth = ext_depth(inode);
3290357b66fdSDmitry Monakhov 	ex = path[depth].p_ext;
3291357b66fdSDmitry Monakhov 	uninitialized = ext4_ext_is_uninitialized(ex);
3292357b66fdSDmitry Monakhov 	split_flag1 = 0;
329347ea3bb5SYongqiang Yang 
329447ea3bb5SYongqiang Yang 	if (map->m_lblk >= ee_block) {
3295357b66fdSDmitry Monakhov 		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3296357b66fdSDmitry Monakhov 		if (uninitialized) {
329747ea3bb5SYongqiang Yang 			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3298357b66fdSDmitry Monakhov 			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3299357b66fdSDmitry Monakhov 						     EXT4_EXT_MARK_UNINIT2);
3300357b66fdSDmitry Monakhov 		}
330147ea3bb5SYongqiang Yang 		err = ext4_split_extent_at(handle, inode, path,
330247ea3bb5SYongqiang Yang 				map->m_lblk, split_flag1, flags);
330347ea3bb5SYongqiang Yang 		if (err)
330447ea3bb5SYongqiang Yang 			goto out;
330547ea3bb5SYongqiang Yang 	}
330647ea3bb5SYongqiang Yang 
330747ea3bb5SYongqiang Yang 	ext4_ext_show_leaf(inode, path);
330847ea3bb5SYongqiang Yang out:
33093a225670SZheng Liu 	return err ? err : allocated;
331047ea3bb5SYongqiang Yang }
331147ea3bb5SYongqiang Yang 
331256055d3aSAmit Arora /*
3313e35fd660STheodore Ts'o  * This function is called by ext4_ext_map_blocks() if someone tries to write
331456055d3aSAmit Arora  * to an uninitialized extent. It may result in splitting the uninitialized
331556055d3aSAmit Arora  * extent into multiple extents (up to three - one initialized and two
331656055d3aSAmit Arora  * uninitialized).
331756055d3aSAmit Arora  * There are three possibilities:
331856055d3aSAmit Arora  *   a> There is no split required: Entire extent should be initialized
331956055d3aSAmit Arora  *   b> Splits in two extents: Write is happening at either end of the extent
332056055d3aSAmit Arora  *   c> Splits in three extents: Somone is writing in middle of the extent
33216f91bc5fSEric Gouriou  *
33226f91bc5fSEric Gouriou  * Pre-conditions:
33236f91bc5fSEric Gouriou  *  - The extent pointed to by 'path' is uninitialized.
33246f91bc5fSEric Gouriou  *  - The extent pointed to by 'path' contains a superset
33256f91bc5fSEric Gouriou  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
33266f91bc5fSEric Gouriou  *
33276f91bc5fSEric Gouriou  * Post-conditions on success:
33286f91bc5fSEric Gouriou  *  - the returned value is the number of blocks beyond map->l_lblk
33296f91bc5fSEric Gouriou  *    that are allocated and initialized.
33306f91bc5fSEric Gouriou  *    It is guaranteed to be >= map->m_len.
333156055d3aSAmit Arora  */
3332725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle,
3333725d26d3SAneesh Kumar K.V 					   struct inode *inode,
3334e35fd660STheodore Ts'o 					   struct ext4_map_blocks *map,
333527dd4385SLukas Czerner 					   struct ext4_ext_path *path,
333627dd4385SLukas Czerner 					   int flags)
333756055d3aSAmit Arora {
333867a5da56SZheng Liu 	struct ext4_sb_info *sbi;
33396f91bc5fSEric Gouriou 	struct ext4_extent_header *eh;
3340667eff35SYongqiang Yang 	struct ext4_map_blocks split_map;
3341667eff35SYongqiang Yang 	struct ext4_extent zero_ex;
3342bc2d9db4SLukas Czerner 	struct ext4_extent *ex, *abut_ex;
334321ca087aSDmitry Monakhov 	ext4_lblk_t ee_block, eof_block;
3344bc2d9db4SLukas Czerner 	unsigned int ee_len, depth, map_len = map->m_len;
3345bc2d9db4SLukas Czerner 	int allocated = 0, max_zeroout = 0;
334656055d3aSAmit Arora 	int err = 0;
3347667eff35SYongqiang Yang 	int split_flag = 0;
334821ca087aSDmitry Monakhov 
334921ca087aSDmitry Monakhov 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
335021ca087aSDmitry Monakhov 		"block %llu, max_blocks %u\n", inode->i_ino,
3351bc2d9db4SLukas Czerner 		(unsigned long long)map->m_lblk, map_len);
335221ca087aSDmitry Monakhov 
335367a5da56SZheng Liu 	sbi = EXT4_SB(inode->i_sb);
335421ca087aSDmitry Monakhov 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
335521ca087aSDmitry Monakhov 		inode->i_sb->s_blocksize_bits;
3356bc2d9db4SLukas Czerner 	if (eof_block < map->m_lblk + map_len)
3357bc2d9db4SLukas Czerner 		eof_block = map->m_lblk + map_len;
335856055d3aSAmit Arora 
335956055d3aSAmit Arora 	depth = ext_depth(inode);
33606f91bc5fSEric Gouriou 	eh = path[depth].p_hdr;
336156055d3aSAmit Arora 	ex = path[depth].p_ext;
336256055d3aSAmit Arora 	ee_block = le32_to_cpu(ex->ee_block);
336356055d3aSAmit Arora 	ee_len = ext4_ext_get_actual_len(ex);
3364adb23551SZheng Liu 	zero_ex.ee_len = 0;
336521ca087aSDmitry Monakhov 
33666f91bc5fSEric Gouriou 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
33676f91bc5fSEric Gouriou 
33686f91bc5fSEric Gouriou 	/* Pre-conditions */
33696f91bc5fSEric Gouriou 	BUG_ON(!ext4_ext_is_uninitialized(ex));
33706f91bc5fSEric Gouriou 	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
33716f91bc5fSEric Gouriou 
33726f91bc5fSEric Gouriou 	/*
33736f91bc5fSEric Gouriou 	 * Attempt to transfer newly initialized blocks from the currently
3374bc2d9db4SLukas Czerner 	 * uninitialized extent to its neighbor. This is much cheaper
33756f91bc5fSEric Gouriou 	 * than an insertion followed by a merge as those involve costly
3376bc2d9db4SLukas Czerner 	 * memmove() calls. Transferring to the left is the common case in
3377bc2d9db4SLukas Czerner 	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3378bc2d9db4SLukas Czerner 	 * followed by append writes.
33796f91bc5fSEric Gouriou 	 *
33806f91bc5fSEric Gouriou 	 * Limitations of the current logic:
3381bc2d9db4SLukas Czerner 	 *  - L1: we do not deal with writes covering the whole extent.
33826f91bc5fSEric Gouriou 	 *    This would require removing the extent if the transfer
33836f91bc5fSEric Gouriou 	 *    is possible.
3384bc2d9db4SLukas Czerner 	 *  - L2: we only attempt to merge with an extent stored in the
33856f91bc5fSEric Gouriou 	 *    same extent tree node.
33866f91bc5fSEric Gouriou 	 */
3387bc2d9db4SLukas Czerner 	if ((map->m_lblk == ee_block) &&
3388bc2d9db4SLukas Czerner 		/* See if we can merge left */
3389bc2d9db4SLukas Czerner 		(map_len < ee_len) &&		/*L1*/
3390bc2d9db4SLukas Czerner 		(ex > EXT_FIRST_EXTENT(eh))) {	/*L2*/
33916f91bc5fSEric Gouriou 		ext4_lblk_t prev_lblk;
33926f91bc5fSEric Gouriou 		ext4_fsblk_t prev_pblk, ee_pblk;
3393bc2d9db4SLukas Czerner 		unsigned int prev_len;
33946f91bc5fSEric Gouriou 
3395bc2d9db4SLukas Czerner 		abut_ex = ex - 1;
3396bc2d9db4SLukas Czerner 		prev_lblk = le32_to_cpu(abut_ex->ee_block);
3397bc2d9db4SLukas Czerner 		prev_len = ext4_ext_get_actual_len(abut_ex);
3398bc2d9db4SLukas Czerner 		prev_pblk = ext4_ext_pblock(abut_ex);
33996f91bc5fSEric Gouriou 		ee_pblk = ext4_ext_pblock(ex);
34006f91bc5fSEric Gouriou 
34016f91bc5fSEric Gouriou 		/*
3402bc2d9db4SLukas Czerner 		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
34036f91bc5fSEric Gouriou 		 * upon those conditions:
3404bc2d9db4SLukas Czerner 		 * - C1: abut_ex is initialized,
3405bc2d9db4SLukas Czerner 		 * - C2: abut_ex is logically abutting ex,
3406bc2d9db4SLukas Czerner 		 * - C3: abut_ex is physically abutting ex,
3407bc2d9db4SLukas Czerner 		 * - C4: abut_ex can receive the additional blocks without
34086f91bc5fSEric Gouriou 		 *   overflowing the (initialized) length limit.
34096f91bc5fSEric Gouriou 		 */
3410bc2d9db4SLukas Czerner 		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
34116f91bc5fSEric Gouriou 			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
34126f91bc5fSEric Gouriou 			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
3413bc2d9db4SLukas Czerner 			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
34146f91bc5fSEric Gouriou 			err = ext4_ext_get_access(handle, inode, path + depth);
34156f91bc5fSEric Gouriou 			if (err)
34166f91bc5fSEric Gouriou 				goto out;
34176f91bc5fSEric Gouriou 
34186f91bc5fSEric Gouriou 			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3419bc2d9db4SLukas Czerner 				map, ex, abut_ex);
34206f91bc5fSEric Gouriou 
3421bc2d9db4SLukas Czerner 			/* Shift the start of ex by 'map_len' blocks */
3422bc2d9db4SLukas Czerner 			ex->ee_block = cpu_to_le32(ee_block + map_len);
3423bc2d9db4SLukas Czerner 			ext4_ext_store_pblock(ex, ee_pblk + map_len);
3424bc2d9db4SLukas Czerner 			ex->ee_len = cpu_to_le16(ee_len - map_len);
34256f91bc5fSEric Gouriou 			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
34266f91bc5fSEric Gouriou 
3427bc2d9db4SLukas Czerner 			/* Extend abut_ex by 'map_len' blocks */
3428bc2d9db4SLukas Czerner 			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
34296f91bc5fSEric Gouriou 
3430bc2d9db4SLukas Czerner 			/* Result: number of initialized blocks past m_lblk */
3431bc2d9db4SLukas Czerner 			allocated = map_len;
3432bc2d9db4SLukas Czerner 		}
3433bc2d9db4SLukas Czerner 	} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3434bc2d9db4SLukas Czerner 		   (map_len < ee_len) &&	/*L1*/
3435bc2d9db4SLukas Czerner 		   ex < EXT_LAST_EXTENT(eh)) {	/*L2*/
3436bc2d9db4SLukas Czerner 		/* See if we can merge right */
3437bc2d9db4SLukas Czerner 		ext4_lblk_t next_lblk;
3438bc2d9db4SLukas Czerner 		ext4_fsblk_t next_pblk, ee_pblk;
3439bc2d9db4SLukas Czerner 		unsigned int next_len;
3440bc2d9db4SLukas Czerner 
3441bc2d9db4SLukas Czerner 		abut_ex = ex + 1;
3442bc2d9db4SLukas Czerner 		next_lblk = le32_to_cpu(abut_ex->ee_block);
3443bc2d9db4SLukas Czerner 		next_len = ext4_ext_get_actual_len(abut_ex);
3444bc2d9db4SLukas Czerner 		next_pblk = ext4_ext_pblock(abut_ex);
3445bc2d9db4SLukas Czerner 		ee_pblk = ext4_ext_pblock(ex);
3446bc2d9db4SLukas Czerner 
3447bc2d9db4SLukas Czerner 		/*
3448bc2d9db4SLukas Czerner 		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3449bc2d9db4SLukas Czerner 		 * upon those conditions:
3450bc2d9db4SLukas Czerner 		 * - C1: abut_ex is initialized,
3451bc2d9db4SLukas Czerner 		 * - C2: abut_ex is logically abutting ex,
3452bc2d9db4SLukas Czerner 		 * - C3: abut_ex is physically abutting ex,
3453bc2d9db4SLukas Czerner 		 * - C4: abut_ex can receive the additional blocks without
3454bc2d9db4SLukas Czerner 		 *   overflowing the (initialized) length limit.
3455bc2d9db4SLukas Czerner 		 */
3456bc2d9db4SLukas Czerner 		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
3457bc2d9db4SLukas Czerner 		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
3458bc2d9db4SLukas Czerner 		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
3459bc2d9db4SLukas Czerner 		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
3460bc2d9db4SLukas Czerner 			err = ext4_ext_get_access(handle, inode, path + depth);
3461bc2d9db4SLukas Czerner 			if (err)
3462bc2d9db4SLukas Czerner 				goto out;
3463bc2d9db4SLukas Czerner 
3464bc2d9db4SLukas Czerner 			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3465bc2d9db4SLukas Czerner 				map, ex, abut_ex);
3466bc2d9db4SLukas Czerner 
3467bc2d9db4SLukas Czerner 			/* Shift the start of abut_ex by 'map_len' blocks */
3468bc2d9db4SLukas Czerner 			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3469bc2d9db4SLukas Czerner 			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3470bc2d9db4SLukas Czerner 			ex->ee_len = cpu_to_le16(ee_len - map_len);
3471bc2d9db4SLukas Czerner 			ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3472bc2d9db4SLukas Czerner 
3473bc2d9db4SLukas Czerner 			/* Extend abut_ex by 'map_len' blocks */
3474bc2d9db4SLukas Czerner 			abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3475bc2d9db4SLukas Czerner 
3476bc2d9db4SLukas Czerner 			/* Result: number of initialized blocks past m_lblk */
3477bc2d9db4SLukas Czerner 			allocated = map_len;
3478bc2d9db4SLukas Czerner 		}
3479bc2d9db4SLukas Czerner 	}
3480bc2d9db4SLukas Czerner 	if (allocated) {
34816f91bc5fSEric Gouriou 		/* Mark the block containing both extents as dirty */
34826f91bc5fSEric Gouriou 		ext4_ext_dirty(handle, inode, path + depth);
34836f91bc5fSEric Gouriou 
34846f91bc5fSEric Gouriou 		/* Update path to point to the right extent */
3485bc2d9db4SLukas Czerner 		path[depth].p_ext = abut_ex;
34866f91bc5fSEric Gouriou 		goto out;
3487bc2d9db4SLukas Czerner 	} else
3488bc2d9db4SLukas Czerner 		allocated = ee_len - (map->m_lblk - ee_block);
34896f91bc5fSEric Gouriou 
3490667eff35SYongqiang Yang 	WARN_ON(map->m_lblk < ee_block);
349121ca087aSDmitry Monakhov 	/*
349221ca087aSDmitry Monakhov 	 * It is safe to convert extent to initialized via explicit
34939e740568SYongqiang Yang 	 * zeroout only if extent is fully inside i_size or new_size.
349421ca087aSDmitry Monakhov 	 */
3495667eff35SYongqiang Yang 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
349621ca087aSDmitry Monakhov 
349767a5da56SZheng Liu 	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
349867a5da56SZheng Liu 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
34994f42f80aSLukas Czerner 			(inode->i_sb->s_blocksize_bits - 10);
350067a5da56SZheng Liu 
350167a5da56SZheng Liu 	/* If extent is less than s_max_zeroout_kb, zeroout directly */
350267a5da56SZheng Liu 	if (max_zeroout && (ee_len <= max_zeroout)) {
3503667eff35SYongqiang Yang 		err = ext4_ext_zeroout(inode, ex);
35043977c965SAneesh Kumar K.V 		if (err)
350556055d3aSAmit Arora 			goto out;
3506adb23551SZheng Liu 		zero_ex.ee_block = ex->ee_block;
35078cde7ad1SZheng Liu 		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
3508adb23551SZheng Liu 		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
35099df5643aSAneesh Kumar K.V 
35109df5643aSAneesh Kumar K.V 		err = ext4_ext_get_access(handle, inode, path + depth);
35119df5643aSAneesh Kumar K.V 		if (err)
35129df5643aSAneesh Kumar K.V 			goto out;
3513667eff35SYongqiang Yang 		ext4_ext_mark_initialized(ex);
3514ecb94f5fSTheodore Ts'o 		ext4_ext_try_to_merge(handle, inode, path, ex);
3515ecb94f5fSTheodore Ts'o 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
351656055d3aSAmit Arora 		goto out;
3517667eff35SYongqiang Yang 	}
3518093a088bSAneesh Kumar K.V 
3519667eff35SYongqiang Yang 	/*
3520667eff35SYongqiang Yang 	 * four cases:
3521667eff35SYongqiang Yang 	 * 1. split the extent into three extents.
3522667eff35SYongqiang Yang 	 * 2. split the extent into two extents, zeroout the first half.
3523667eff35SYongqiang Yang 	 * 3. split the extent into two extents, zeroout the second half.
3524667eff35SYongqiang Yang 	 * 4. split the extent into two extents with out zeroout.
3525667eff35SYongqiang Yang 	 */
3526667eff35SYongqiang Yang 	split_map.m_lblk = map->m_lblk;
3527667eff35SYongqiang Yang 	split_map.m_len = map->m_len;
3528667eff35SYongqiang Yang 
352967a5da56SZheng Liu 	if (max_zeroout && (allocated > map->m_len)) {
353067a5da56SZheng Liu 		if (allocated <= max_zeroout) {
3531667eff35SYongqiang Yang 			/* case 3 */
3532667eff35SYongqiang Yang 			zero_ex.ee_block =
35339b940f8eSAllison Henderson 					 cpu_to_le32(map->m_lblk);
35349b940f8eSAllison Henderson 			zero_ex.ee_len = cpu_to_le16(allocated);
3535667eff35SYongqiang Yang 			ext4_ext_store_pblock(&zero_ex,
3536667eff35SYongqiang Yang 				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3537667eff35SYongqiang Yang 			err = ext4_ext_zeroout(inode, &zero_ex);
3538667eff35SYongqiang Yang 			if (err)
3539667eff35SYongqiang Yang 				goto out;
3540667eff35SYongqiang Yang 			split_map.m_lblk = map->m_lblk;
3541667eff35SYongqiang Yang 			split_map.m_len = allocated;
354267a5da56SZheng Liu 		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
3543667eff35SYongqiang Yang 			/* case 2 */
3544667eff35SYongqiang Yang 			if (map->m_lblk != ee_block) {
3545667eff35SYongqiang Yang 				zero_ex.ee_block = ex->ee_block;
3546667eff35SYongqiang Yang 				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3547667eff35SYongqiang Yang 							ee_block);
3548667eff35SYongqiang Yang 				ext4_ext_store_pblock(&zero_ex,
3549667eff35SYongqiang Yang 						      ext4_ext_pblock(ex));
3550667eff35SYongqiang Yang 				err = ext4_ext_zeroout(inode, &zero_ex);
3551667eff35SYongqiang Yang 				if (err)
3552667eff35SYongqiang Yang 					goto out;
3553667eff35SYongqiang Yang 			}
3554667eff35SYongqiang Yang 
3555667eff35SYongqiang Yang 			split_map.m_lblk = ee_block;
35569b940f8eSAllison Henderson 			split_map.m_len = map->m_lblk - ee_block + map->m_len;
35579b940f8eSAllison Henderson 			allocated = map->m_len;
3558667eff35SYongqiang Yang 		}
3559667eff35SYongqiang Yang 	}
3560667eff35SYongqiang Yang 
3561667eff35SYongqiang Yang 	allocated = ext4_split_extent(handle, inode, path,
356227dd4385SLukas Czerner 				      &split_map, split_flag, flags);
3563667eff35SYongqiang Yang 	if (allocated < 0)
3564667eff35SYongqiang Yang 		err = allocated;
3565667eff35SYongqiang Yang 
3566667eff35SYongqiang Yang out:
3567adb23551SZheng Liu 	/* If we have gotten a failure, don't zero out status tree */
3568adb23551SZheng Liu 	if (!err)
3569d7b2a00cSZheng Liu 		err = ext4_zeroout_es(inode, &zero_ex);
3570667eff35SYongqiang Yang 	return err ? err : allocated;
357156055d3aSAmit Arora }
357256055d3aSAmit Arora 
3573c278bfecSAneesh Kumar K.V /*
3574e35fd660STheodore Ts'o  * This function is called by ext4_ext_map_blocks() from
35750031462bSMingming Cao  * ext4_get_blocks_dio_write() when DIO to write
35760031462bSMingming Cao  * to an uninitialized extent.
35770031462bSMingming Cao  *
3578fd018fe8SPaul Bolle  * Writing to an uninitialized extent may result in splitting the uninitialized
357930cb27d6SWang Sheng-Hui  * extent into multiple initialized/uninitialized extents (up to three)
35800031462bSMingming Cao  * There are three possibilities:
35810031462bSMingming Cao  *   a> There is no split required: Entire extent should be uninitialized
35820031462bSMingming Cao  *   b> Splits in two extents: Write is happening at either end of the extent
35830031462bSMingming Cao  *   c> Splits in three extents: Somone is writing in middle of the extent
35840031462bSMingming Cao  *
35850031462bSMingming Cao  * One of more index blocks maybe needed if the extent tree grow after
3586b595076aSUwe Kleine-König  * the uninitialized extent split. To prevent ENOSPC occur at the IO
35870031462bSMingming Cao  * complete, we need to split the uninitialized extent before DIO submit
3588421f91d2SUwe Kleine-König  * the IO. The uninitialized extent called at this time will be split
35890031462bSMingming Cao  * into three uninitialized extent(at most). After IO complete, the part
35900031462bSMingming Cao  * being filled will be convert to initialized by the end_io callback function
35910031462bSMingming Cao  * via ext4_convert_unwritten_extents().
3592ba230c3fSMingming  *
3593ba230c3fSMingming  * Returns the size of uninitialized extent to be written on success.
35940031462bSMingming Cao  */
35950031462bSMingming Cao static int ext4_split_unwritten_extents(handle_t *handle,
35960031462bSMingming Cao 					struct inode *inode,
3597e35fd660STheodore Ts'o 					struct ext4_map_blocks *map,
35980031462bSMingming Cao 					struct ext4_ext_path *path,
35990031462bSMingming Cao 					int flags)
36000031462bSMingming Cao {
3601667eff35SYongqiang Yang 	ext4_lblk_t eof_block;
3602667eff35SYongqiang Yang 	ext4_lblk_t ee_block;
3603667eff35SYongqiang Yang 	struct ext4_extent *ex;
3604667eff35SYongqiang Yang 	unsigned int ee_len;
3605667eff35SYongqiang Yang 	int split_flag = 0, depth;
36060031462bSMingming Cao 
360721ca087aSDmitry Monakhov 	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
360821ca087aSDmitry Monakhov 		"block %llu, max_blocks %u\n", inode->i_ino,
3609e35fd660STheodore Ts'o 		(unsigned long long)map->m_lblk, map->m_len);
361021ca087aSDmitry Monakhov 
361121ca087aSDmitry Monakhov 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
361221ca087aSDmitry Monakhov 		inode->i_sb->s_blocksize_bits;
3613e35fd660STheodore Ts'o 	if (eof_block < map->m_lblk + map->m_len)
3614e35fd660STheodore Ts'o 		eof_block = map->m_lblk + map->m_len;
36150031462bSMingming Cao 	/*
361621ca087aSDmitry Monakhov 	 * It is safe to convert extent to initialized via explicit
361721ca087aSDmitry Monakhov 	 * zeroout only if extent is fully insde i_size or new_size.
361821ca087aSDmitry Monakhov 	 */
3619667eff35SYongqiang Yang 	depth = ext_depth(inode);
36200031462bSMingming Cao 	ex = path[depth].p_ext;
3621667eff35SYongqiang Yang 	ee_block = le32_to_cpu(ex->ee_block);
3622667eff35SYongqiang Yang 	ee_len = ext4_ext_get_actual_len(ex);
36230031462bSMingming Cao 
3624667eff35SYongqiang Yang 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3625667eff35SYongqiang Yang 	split_flag |= EXT4_EXT_MARK_UNINIT2;
3626dee1f973SDmitry Monakhov 	if (flags & EXT4_GET_BLOCKS_CONVERT)
3627dee1f973SDmitry Monakhov 		split_flag |= EXT4_EXT_DATA_VALID2;
3628667eff35SYongqiang Yang 	flags |= EXT4_GET_BLOCKS_PRE_IO;
3629667eff35SYongqiang Yang 	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
36300031462bSMingming Cao }
3631197217a5SYongqiang Yang 
3632c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle,
36330031462bSMingming Cao 						struct inode *inode,
3634dee1f973SDmitry Monakhov 						struct ext4_map_blocks *map,
36350031462bSMingming Cao 						struct ext4_ext_path *path)
36360031462bSMingming Cao {
36370031462bSMingming Cao 	struct ext4_extent *ex;
3638dee1f973SDmitry Monakhov 	ext4_lblk_t ee_block;
3639dee1f973SDmitry Monakhov 	unsigned int ee_len;
36400031462bSMingming Cao 	int depth;
36410031462bSMingming Cao 	int err = 0;
36420031462bSMingming Cao 
36430031462bSMingming Cao 	depth = ext_depth(inode);
36440031462bSMingming Cao 	ex = path[depth].p_ext;
3645dee1f973SDmitry Monakhov 	ee_block = le32_to_cpu(ex->ee_block);
3646dee1f973SDmitry Monakhov 	ee_len = ext4_ext_get_actual_len(ex);
36470031462bSMingming Cao 
3648197217a5SYongqiang Yang 	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
3649197217a5SYongqiang Yang 		"block %llu, max_blocks %u\n", inode->i_ino,
3650dee1f973SDmitry Monakhov 		  (unsigned long long)ee_block, ee_len);
3651dee1f973SDmitry Monakhov 
3652ff95ec22SDmitry Monakhov 	/* If extent is larger than requested it is a clear sign that we still
3653ff95ec22SDmitry Monakhov 	 * have some extent state machine issues left. So extent_split is still
3654ff95ec22SDmitry Monakhov 	 * required.
3655ff95ec22SDmitry Monakhov 	 * TODO: Once all related issues will be fixed this situation should be
3656ff95ec22SDmitry Monakhov 	 * illegal.
3657ff95ec22SDmitry Monakhov 	 */
3658dee1f973SDmitry Monakhov 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
3659ff95ec22SDmitry Monakhov #ifdef EXT4_DEBUG
3660ff95ec22SDmitry Monakhov 		ext4_warning("Inode (%ld) finished: extent logical block %llu,"
3661ff95ec22SDmitry Monakhov 			     " len %u; IO logical block %llu, len %u\n",
3662ff95ec22SDmitry Monakhov 			     inode->i_ino, (unsigned long long)ee_block, ee_len,
3663ff95ec22SDmitry Monakhov 			     (unsigned long long)map->m_lblk, map->m_len);
3664ff95ec22SDmitry Monakhov #endif
3665dee1f973SDmitry Monakhov 		err = ext4_split_unwritten_extents(handle, inode, map, path,
3666dee1f973SDmitry Monakhov 						   EXT4_GET_BLOCKS_CONVERT);
3667dee1f973SDmitry Monakhov 		if (err < 0)
3668dee1f973SDmitry Monakhov 			goto out;
3669dee1f973SDmitry Monakhov 		ext4_ext_drop_refs(path);
3670107a7bd3STheodore Ts'o 		path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3671dee1f973SDmitry Monakhov 		if (IS_ERR(path)) {
3672dee1f973SDmitry Monakhov 			err = PTR_ERR(path);
3673dee1f973SDmitry Monakhov 			goto out;
3674dee1f973SDmitry Monakhov 		}
3675dee1f973SDmitry Monakhov 		depth = ext_depth(inode);
3676dee1f973SDmitry Monakhov 		ex = path[depth].p_ext;
3677dee1f973SDmitry Monakhov 	}
3678197217a5SYongqiang Yang 
36790031462bSMingming Cao 	err = ext4_ext_get_access(handle, inode, path + depth);
36800031462bSMingming Cao 	if (err)
36810031462bSMingming Cao 		goto out;
36820031462bSMingming Cao 	/* first mark the extent as initialized */
36830031462bSMingming Cao 	ext4_ext_mark_initialized(ex);
36840031462bSMingming Cao 
3685197217a5SYongqiang Yang 	/* note: ext4_ext_correct_indexes() isn't needed here because
3686197217a5SYongqiang Yang 	 * borders are not changed
36870031462bSMingming Cao 	 */
3688ecb94f5fSTheodore Ts'o 	ext4_ext_try_to_merge(handle, inode, path, ex);
3689197217a5SYongqiang Yang 
36900031462bSMingming Cao 	/* Mark modified extent as dirty */
3691ecb94f5fSTheodore Ts'o 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
36920031462bSMingming Cao out:
36930031462bSMingming Cao 	ext4_ext_show_leaf(inode, path);
36940031462bSMingming Cao 	return err;
36950031462bSMingming Cao }
36960031462bSMingming Cao 
3697515f41c3SAneesh Kumar K.V static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3698515f41c3SAneesh Kumar K.V 			sector_t block, int count)
3699515f41c3SAneesh Kumar K.V {
3700515f41c3SAneesh Kumar K.V 	int i;
3701515f41c3SAneesh Kumar K.V 	for (i = 0; i < count; i++)
3702515f41c3SAneesh Kumar K.V                 unmap_underlying_metadata(bdev, block + i);
3703515f41c3SAneesh Kumar K.V }
3704515f41c3SAneesh Kumar K.V 
370558590b06STheodore Ts'o /*
370658590b06STheodore Ts'o  * Handle EOFBLOCKS_FL flag, clearing it if necessary
370758590b06STheodore Ts'o  */
370858590b06STheodore Ts'o static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3709d002ebf1SEric Sandeen 			      ext4_lblk_t lblk,
371058590b06STheodore Ts'o 			      struct ext4_ext_path *path,
371158590b06STheodore Ts'o 			      unsigned int len)
371258590b06STheodore Ts'o {
371358590b06STheodore Ts'o 	int i, depth;
371458590b06STheodore Ts'o 	struct ext4_extent_header *eh;
371565922cb5SSergey Senozhatsky 	struct ext4_extent *last_ex;
371658590b06STheodore Ts'o 
371758590b06STheodore Ts'o 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
371858590b06STheodore Ts'o 		return 0;
371958590b06STheodore Ts'o 
372058590b06STheodore Ts'o 	depth = ext_depth(inode);
372158590b06STheodore Ts'o 	eh = path[depth].p_hdr;
372258590b06STheodore Ts'o 
3723afcff5d8SLukas Czerner 	/*
3724afcff5d8SLukas Czerner 	 * We're going to remove EOFBLOCKS_FL entirely in future so we
3725afcff5d8SLukas Czerner 	 * do not care for this case anymore. Simply remove the flag
3726afcff5d8SLukas Czerner 	 * if there are no extents.
3727afcff5d8SLukas Czerner 	 */
3728afcff5d8SLukas Czerner 	if (unlikely(!eh->eh_entries))
3729afcff5d8SLukas Czerner 		goto out;
373058590b06STheodore Ts'o 	last_ex = EXT_LAST_EXTENT(eh);
373158590b06STheodore Ts'o 	/*
373258590b06STheodore Ts'o 	 * We should clear the EOFBLOCKS_FL flag if we are writing the
373358590b06STheodore Ts'o 	 * last block in the last extent in the file.  We test this by
373458590b06STheodore Ts'o 	 * first checking to see if the caller to
373558590b06STheodore Ts'o 	 * ext4_ext_get_blocks() was interested in the last block (or
373658590b06STheodore Ts'o 	 * a block beyond the last block) in the current extent.  If
373758590b06STheodore Ts'o 	 * this turns out to be false, we can bail out from this
373858590b06STheodore Ts'o 	 * function immediately.
373958590b06STheodore Ts'o 	 */
3740d002ebf1SEric Sandeen 	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
374158590b06STheodore Ts'o 	    ext4_ext_get_actual_len(last_ex))
374258590b06STheodore Ts'o 		return 0;
374358590b06STheodore Ts'o 	/*
374458590b06STheodore Ts'o 	 * If the caller does appear to be planning to write at or
374558590b06STheodore Ts'o 	 * beyond the end of the current extent, we then test to see
374658590b06STheodore Ts'o 	 * if the current extent is the last extent in the file, by
374758590b06STheodore Ts'o 	 * checking to make sure it was reached via the rightmost node
374858590b06STheodore Ts'o 	 * at each level of the tree.
374958590b06STheodore Ts'o 	 */
375058590b06STheodore Ts'o 	for (i = depth-1; i >= 0; i--)
375158590b06STheodore Ts'o 		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
375258590b06STheodore Ts'o 			return 0;
3753afcff5d8SLukas Czerner out:
375458590b06STheodore Ts'o 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
375558590b06STheodore Ts'o 	return ext4_mark_inode_dirty(handle, inode);
375658590b06STheodore Ts'o }
375758590b06STheodore Ts'o 
37587b415bf6SAditya Kali /**
37597b415bf6SAditya Kali  * ext4_find_delalloc_range: find delayed allocated block in the given range.
37607b415bf6SAditya Kali  *
37617d1b1fbcSZheng Liu  * Return 1 if there is a delalloc block in the range, otherwise 0.
37627b415bf6SAditya Kali  */
3763f7fec032SZheng Liu int ext4_find_delalloc_range(struct inode *inode,
37647b415bf6SAditya Kali 			     ext4_lblk_t lblk_start,
37657d1b1fbcSZheng Liu 			     ext4_lblk_t lblk_end)
37667b415bf6SAditya Kali {
37677d1b1fbcSZheng Liu 	struct extent_status es;
37687b415bf6SAditya Kali 
3769e30b5dcaSYan, Zheng 	ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
377006b0c886SZheng Liu 	if (es.es_len == 0)
37717d1b1fbcSZheng Liu 		return 0; /* there is no delay extent in this tree */
377206b0c886SZheng Liu 	else if (es.es_lblk <= lblk_start &&
377306b0c886SZheng Liu 		 lblk_start < es.es_lblk + es.es_len)
37747b415bf6SAditya Kali 		return 1;
377506b0c886SZheng Liu 	else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
37767d1b1fbcSZheng Liu 		return 1;
37777b415bf6SAditya Kali 	else
37787b415bf6SAditya Kali 		return 0;
37797b415bf6SAditya Kali }
37807b415bf6SAditya Kali 
37817d1b1fbcSZheng Liu int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
37827b415bf6SAditya Kali {
37837b415bf6SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
37847b415bf6SAditya Kali 	ext4_lblk_t lblk_start, lblk_end;
3785f5a44db5STheodore Ts'o 	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
37867b415bf6SAditya Kali 	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
37877b415bf6SAditya Kali 
37887d1b1fbcSZheng Liu 	return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
37897b415bf6SAditya Kali }
37907b415bf6SAditya Kali 
37917b415bf6SAditya Kali /**
37927b415bf6SAditya Kali  * Determines how many complete clusters (out of those specified by the 'map')
37937b415bf6SAditya Kali  * are under delalloc and were reserved quota for.
37947b415bf6SAditya Kali  * This function is called when we are writing out the blocks that were
37957b415bf6SAditya Kali  * originally written with their allocation delayed, but then the space was
37967b415bf6SAditya Kali  * allocated using fallocate() before the delayed allocation could be resolved.
37977b415bf6SAditya Kali  * The cases to look for are:
37987b415bf6SAditya Kali  * ('=' indicated delayed allocated blocks
37997b415bf6SAditya Kali  *  '-' indicates non-delayed allocated blocks)
38007b415bf6SAditya Kali  * (a) partial clusters towards beginning and/or end outside of allocated range
38017b415bf6SAditya Kali  *     are not delalloc'ed.
38027b415bf6SAditya Kali  *	Ex:
38037b415bf6SAditya Kali  *	|----c---=|====c====|====c====|===-c----|
38047b415bf6SAditya Kali  *	         |++++++ allocated ++++++|
38057b415bf6SAditya Kali  *	==> 4 complete clusters in above example
38067b415bf6SAditya Kali  *
38077b415bf6SAditya Kali  * (b) partial cluster (outside of allocated range) towards either end is
38087b415bf6SAditya Kali  *     marked for delayed allocation. In this case, we will exclude that
38097b415bf6SAditya Kali  *     cluster.
38107b415bf6SAditya Kali  *	Ex:
38117b415bf6SAditya Kali  *	|----====c========|========c========|
38127b415bf6SAditya Kali  *	     |++++++ allocated ++++++|
38137b415bf6SAditya Kali  *	==> 1 complete clusters in above example
38147b415bf6SAditya Kali  *
38157b415bf6SAditya Kali  *	Ex:
38167b415bf6SAditya Kali  *	|================c================|
38177b415bf6SAditya Kali  *            |++++++ allocated ++++++|
38187b415bf6SAditya Kali  *	==> 0 complete clusters in above example
38197b415bf6SAditya Kali  *
38207b415bf6SAditya Kali  * The ext4_da_update_reserve_space will be called only if we
38217b415bf6SAditya Kali  * determine here that there were some "entire" clusters that span
38227b415bf6SAditya Kali  * this 'allocated' range.
38237b415bf6SAditya Kali  * In the non-bigalloc case, this function will just end up returning num_blks
38247b415bf6SAditya Kali  * without ever calling ext4_find_delalloc_range.
38257b415bf6SAditya Kali  */
38267b415bf6SAditya Kali static unsigned int
38277b415bf6SAditya Kali get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
38287b415bf6SAditya Kali 			   unsigned int num_blks)
38297b415bf6SAditya Kali {
38307b415bf6SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
38317b415bf6SAditya Kali 	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
38327b415bf6SAditya Kali 	ext4_lblk_t lblk_from, lblk_to, c_offset;
38337b415bf6SAditya Kali 	unsigned int allocated_clusters = 0;
38347b415bf6SAditya Kali 
38357b415bf6SAditya Kali 	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
38367b415bf6SAditya Kali 	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
38377b415bf6SAditya Kali 
38387b415bf6SAditya Kali 	/* max possible clusters for this allocation */
38397b415bf6SAditya Kali 	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
38407b415bf6SAditya Kali 
3841d8990240SAditya Kali 	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3842d8990240SAditya Kali 
38437b415bf6SAditya Kali 	/* Check towards left side */
3844f5a44db5STheodore Ts'o 	c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
38457b415bf6SAditya Kali 	if (c_offset) {
3846f5a44db5STheodore Ts'o 		lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
38477b415bf6SAditya Kali 		lblk_to = lblk_from + c_offset - 1;
38487b415bf6SAditya Kali 
38497d1b1fbcSZheng Liu 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
38507b415bf6SAditya Kali 			allocated_clusters--;
38517b415bf6SAditya Kali 	}
38527b415bf6SAditya Kali 
38537b415bf6SAditya Kali 	/* Now check towards right. */
3854f5a44db5STheodore Ts'o 	c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
38557b415bf6SAditya Kali 	if (allocated_clusters && c_offset) {
38567b415bf6SAditya Kali 		lblk_from = lblk_start + num_blks;
38577b415bf6SAditya Kali 		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
38587b415bf6SAditya Kali 
38597d1b1fbcSZheng Liu 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
38607b415bf6SAditya Kali 			allocated_clusters--;
38617b415bf6SAditya Kali 	}
38627b415bf6SAditya Kali 
38637b415bf6SAditya Kali 	return allocated_clusters;
38647b415bf6SAditya Kali }
38657b415bf6SAditya Kali 
38660031462bSMingming Cao static int
38670031462bSMingming Cao ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3868e35fd660STheodore Ts'o 			struct ext4_map_blocks *map,
38690031462bSMingming Cao 			struct ext4_ext_path *path, int flags,
3870e35fd660STheodore Ts'o 			unsigned int allocated, ext4_fsblk_t newblock)
38710031462bSMingming Cao {
38720031462bSMingming Cao 	int ret = 0;
38730031462bSMingming Cao 	int err = 0;
3874f45ee3a1SDmitry Monakhov 	ext4_io_end_t *io = ext4_inode_aio(inode);
38750031462bSMingming Cao 
38760031462bSMingming Cao 	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
387788635ca2SZheng Liu 		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
3878e35fd660STheodore Ts'o 		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
38790031462bSMingming Cao 		  flags, allocated);
38800031462bSMingming Cao 	ext4_ext_show_leaf(inode, path);
38810031462bSMingming Cao 
388227dd4385SLukas Czerner 	/*
388327dd4385SLukas Czerner 	 * When writing into uninitialized space, we should not fail to
388427dd4385SLukas Czerner 	 * allocate metadata blocks for the new extent block if needed.
388527dd4385SLukas Czerner 	 */
388627dd4385SLukas Czerner 	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
388727dd4385SLukas Czerner 
3888b5645534SZheng Liu 	trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
3889b5645534SZheng Liu 						    allocated, newblock);
3890d8990240SAditya Kali 
3891c7064ef1SJiaying Zhang 	/* get_block() before submit the IO, split the extent */
3892744692dcSJiaying Zhang 	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3893e35fd660STheodore Ts'o 		ret = ext4_split_unwritten_extents(handle, inode, map,
3894e35fd660STheodore Ts'o 						   path, flags);
389582e54229SDmitry Monakhov 		if (ret <= 0)
389682e54229SDmitry Monakhov 			goto out;
38975f524950SMingming 		/*
38985f524950SMingming 		 * Flag the inode(non aio case) or end_io struct (aio case)
389925985edcSLucas De Marchi 		 * that this IO needs to conversion to written when IO is
39005f524950SMingming 		 * completed
39015f524950SMingming 		 */
39020edeb71dSTao Ma 		if (io)
39030edeb71dSTao Ma 			ext4_set_io_unwritten_flag(inode, io);
39040edeb71dSTao Ma 		else
390519f5fb7aSTheodore Ts'o 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3906a25a4e1aSZheng Liu 		map->m_flags |= EXT4_MAP_UNWRITTEN;
3907744692dcSJiaying Zhang 		if (ext4_should_dioread_nolock(inode))
3908e35fd660STheodore Ts'o 			map->m_flags |= EXT4_MAP_UNINIT;
39090031462bSMingming Cao 		goto out;
39100031462bSMingming Cao 	}
3911c7064ef1SJiaying Zhang 	/* IO end_io complete, convert the filled extent to written */
3912744692dcSJiaying Zhang 	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3913dee1f973SDmitry Monakhov 		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
39140031462bSMingming Cao 							path);
391558590b06STheodore Ts'o 		if (ret >= 0) {
3916b436b9beSJan Kara 			ext4_update_inode_fsync_trans(handle, inode, 1);
3917d002ebf1SEric Sandeen 			err = check_eofblocks_fl(handle, inode, map->m_lblk,
3918d002ebf1SEric Sandeen 						 path, map->m_len);
391958590b06STheodore Ts'o 		} else
392058590b06STheodore Ts'o 			err = ret;
3921cdee7843SZheng Liu 		map->m_flags |= EXT4_MAP_MAPPED;
392215cc1767SEric Whitney 		map->m_pblk = newblock;
3923cdee7843SZheng Liu 		if (allocated > map->m_len)
3924cdee7843SZheng Liu 			allocated = map->m_len;
3925cdee7843SZheng Liu 		map->m_len = allocated;
39260031462bSMingming Cao 		goto out2;
39270031462bSMingming Cao 	}
39280031462bSMingming Cao 	/* buffered IO case */
39290031462bSMingming Cao 	/*
39300031462bSMingming Cao 	 * repeat fallocate creation request
39310031462bSMingming Cao 	 * we already have an unwritten extent
39320031462bSMingming Cao 	 */
3933a25a4e1aSZheng Liu 	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
3934a25a4e1aSZheng Liu 		map->m_flags |= EXT4_MAP_UNWRITTEN;
39350031462bSMingming Cao 		goto map_out;
3936a25a4e1aSZheng Liu 	}
39370031462bSMingming Cao 
39380031462bSMingming Cao 	/* buffered READ or buffered write_begin() lookup */
39390031462bSMingming Cao 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
39400031462bSMingming Cao 		/*
39410031462bSMingming Cao 		 * We have blocks reserved already.  We
39420031462bSMingming Cao 		 * return allocated blocks so that delalloc
39430031462bSMingming Cao 		 * won't do block reservation for us.  But
39440031462bSMingming Cao 		 * the buffer head will be unmapped so that
39450031462bSMingming Cao 		 * a read from the block returns 0s.
39460031462bSMingming Cao 		 */
3947e35fd660STheodore Ts'o 		map->m_flags |= EXT4_MAP_UNWRITTEN;
39480031462bSMingming Cao 		goto out1;
39490031462bSMingming Cao 	}
39500031462bSMingming Cao 
39510031462bSMingming Cao 	/* buffered write, writepage time, convert*/
395227dd4385SLukas Czerner 	ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
3953a4e5d88bSDmitry Monakhov 	if (ret >= 0)
3954b436b9beSJan Kara 		ext4_update_inode_fsync_trans(handle, inode, 1);
39550031462bSMingming Cao out:
39560031462bSMingming Cao 	if (ret <= 0) {
39570031462bSMingming Cao 		err = ret;
39580031462bSMingming Cao 		goto out2;
39590031462bSMingming Cao 	} else
39600031462bSMingming Cao 		allocated = ret;
3961e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_NEW;
3962515f41c3SAneesh Kumar K.V 	/*
3963515f41c3SAneesh Kumar K.V 	 * if we allocated more blocks than requested
3964515f41c3SAneesh Kumar K.V 	 * we need to make sure we unmap the extra block
3965515f41c3SAneesh Kumar K.V 	 * allocated. The actual needed block will get
3966515f41c3SAneesh Kumar K.V 	 * unmapped later when we find the buffer_head marked
3967515f41c3SAneesh Kumar K.V 	 * new.
3968515f41c3SAneesh Kumar K.V 	 */
3969e35fd660STheodore Ts'o 	if (allocated > map->m_len) {
3970515f41c3SAneesh Kumar K.V 		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3971e35fd660STheodore Ts'o 					newblock + map->m_len,
3972e35fd660STheodore Ts'o 					allocated - map->m_len);
3973e35fd660STheodore Ts'o 		allocated = map->m_len;
3974515f41c3SAneesh Kumar K.V 	}
39753a225670SZheng Liu 	map->m_len = allocated;
39765f634d06SAneesh Kumar K.V 
39775f634d06SAneesh Kumar K.V 	/*
39785f634d06SAneesh Kumar K.V 	 * If we have done fallocate with the offset that is already
39795f634d06SAneesh Kumar K.V 	 * delayed allocated, we would have block reservation
39805f634d06SAneesh Kumar K.V 	 * and quota reservation done in the delayed write path.
39815f634d06SAneesh Kumar K.V 	 * But fallocate would have already updated quota and block
39825f634d06SAneesh Kumar K.V 	 * count for this offset. So cancel these reservation
39835f634d06SAneesh Kumar K.V 	 */
39847b415bf6SAditya Kali 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
39857b415bf6SAditya Kali 		unsigned int reserved_clusters;
39867b415bf6SAditya Kali 		reserved_clusters = get_reserved_cluster_alloc(inode,
39877b415bf6SAditya Kali 				map->m_lblk, map->m_len);
39887b415bf6SAditya Kali 		if (reserved_clusters)
39897b415bf6SAditya Kali 			ext4_da_update_reserve_space(inode,
39907b415bf6SAditya Kali 						     reserved_clusters,
39917b415bf6SAditya Kali 						     0);
39927b415bf6SAditya Kali 	}
39935f634d06SAneesh Kumar K.V 
39940031462bSMingming Cao map_out:
3995e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_MAPPED;
3996a4e5d88bSDmitry Monakhov 	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3997a4e5d88bSDmitry Monakhov 		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3998a4e5d88bSDmitry Monakhov 					 map->m_len);
3999a4e5d88bSDmitry Monakhov 		if (err < 0)
4000a4e5d88bSDmitry Monakhov 			goto out2;
4001a4e5d88bSDmitry Monakhov 	}
40020031462bSMingming Cao out1:
4003e35fd660STheodore Ts'o 	if (allocated > map->m_len)
4004e35fd660STheodore Ts'o 		allocated = map->m_len;
40050031462bSMingming Cao 	ext4_ext_show_leaf(inode, path);
4006e35fd660STheodore Ts'o 	map->m_pblk = newblock;
4007e35fd660STheodore Ts'o 	map->m_len = allocated;
40080031462bSMingming Cao out2:
40090031462bSMingming Cao 	if (path) {
40100031462bSMingming Cao 		ext4_ext_drop_refs(path);
40110031462bSMingming Cao 		kfree(path);
40120031462bSMingming Cao 	}
40130031462bSMingming Cao 	return err ? err : allocated;
40140031462bSMingming Cao }
401558590b06STheodore Ts'o 
40160031462bSMingming Cao /*
40174d33b1efSTheodore Ts'o  * get_implied_cluster_alloc - check to see if the requested
40184d33b1efSTheodore Ts'o  * allocation (in the map structure) overlaps with a cluster already
40194d33b1efSTheodore Ts'o  * allocated in an extent.
4020d8990240SAditya Kali  *	@sb	The filesystem superblock structure
40214d33b1efSTheodore Ts'o  *	@map	The requested lblk->pblk mapping
40224d33b1efSTheodore Ts'o  *	@ex	The extent structure which might contain an implied
40234d33b1efSTheodore Ts'o  *			cluster allocation
40244d33b1efSTheodore Ts'o  *
40254d33b1efSTheodore Ts'o  * This function is called by ext4_ext_map_blocks() after we failed to
40264d33b1efSTheodore Ts'o  * find blocks that were already in the inode's extent tree.  Hence,
40274d33b1efSTheodore Ts'o  * we know that the beginning of the requested region cannot overlap
40284d33b1efSTheodore Ts'o  * the extent from the inode's extent tree.  There are three cases we
40294d33b1efSTheodore Ts'o  * want to catch.  The first is this case:
40304d33b1efSTheodore Ts'o  *
40314d33b1efSTheodore Ts'o  *		 |--- cluster # N--|
40324d33b1efSTheodore Ts'o  *    |--- extent ---|	|---- requested region ---|
40334d33b1efSTheodore Ts'o  *			|==========|
40344d33b1efSTheodore Ts'o  *
40354d33b1efSTheodore Ts'o  * The second case that we need to test for is this one:
40364d33b1efSTheodore Ts'o  *
40374d33b1efSTheodore Ts'o  *   |--------- cluster # N ----------------|
40384d33b1efSTheodore Ts'o  *	   |--- requested region --|   |------- extent ----|
40394d33b1efSTheodore Ts'o  *	   |=======================|
40404d33b1efSTheodore Ts'o  *
40414d33b1efSTheodore Ts'o  * The third case is when the requested region lies between two extents
40424d33b1efSTheodore Ts'o  * within the same cluster:
40434d33b1efSTheodore Ts'o  *          |------------- cluster # N-------------|
40444d33b1efSTheodore Ts'o  * |----- ex -----|                  |---- ex_right ----|
40454d33b1efSTheodore Ts'o  *                  |------ requested region ------|
40464d33b1efSTheodore Ts'o  *                  |================|
40474d33b1efSTheodore Ts'o  *
40484d33b1efSTheodore Ts'o  * In each of the above cases, we need to set the map->m_pblk and
40494d33b1efSTheodore Ts'o  * map->m_len so it corresponds to the return the extent labelled as
40504d33b1efSTheodore Ts'o  * "|====|" from cluster #N, since it is already in use for data in
40514d33b1efSTheodore Ts'o  * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
40524d33b1efSTheodore Ts'o  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
40534d33b1efSTheodore Ts'o  * as a new "allocated" block region.  Otherwise, we will return 0 and
40544d33b1efSTheodore Ts'o  * ext4_ext_map_blocks() will then allocate one or more new clusters
40554d33b1efSTheodore Ts'o  * by calling ext4_mb_new_blocks().
40564d33b1efSTheodore Ts'o  */
4057d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb,
40584d33b1efSTheodore Ts'o 				     struct ext4_map_blocks *map,
40594d33b1efSTheodore Ts'o 				     struct ext4_extent *ex,
40604d33b1efSTheodore Ts'o 				     struct ext4_ext_path *path)
40614d33b1efSTheodore Ts'o {
4062d8990240SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4063f5a44db5STheodore Ts'o 	ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
40644d33b1efSTheodore Ts'o 	ext4_lblk_t ex_cluster_start, ex_cluster_end;
406514d7f3efSCurt Wohlgemuth 	ext4_lblk_t rr_cluster_start;
40664d33b1efSTheodore Ts'o 	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
40674d33b1efSTheodore Ts'o 	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
40684d33b1efSTheodore Ts'o 	unsigned short ee_len = ext4_ext_get_actual_len(ex);
40694d33b1efSTheodore Ts'o 
40704d33b1efSTheodore Ts'o 	/* The extent passed in that we are trying to match */
40714d33b1efSTheodore Ts'o 	ex_cluster_start = EXT4_B2C(sbi, ee_block);
40724d33b1efSTheodore Ts'o 	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
40734d33b1efSTheodore Ts'o 
40744d33b1efSTheodore Ts'o 	/* The requested region passed into ext4_map_blocks() */
40754d33b1efSTheodore Ts'o 	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
40764d33b1efSTheodore Ts'o 
40774d33b1efSTheodore Ts'o 	if ((rr_cluster_start == ex_cluster_end) ||
40784d33b1efSTheodore Ts'o 	    (rr_cluster_start == ex_cluster_start)) {
40794d33b1efSTheodore Ts'o 		if (rr_cluster_start == ex_cluster_end)
40804d33b1efSTheodore Ts'o 			ee_start += ee_len - 1;
4081f5a44db5STheodore Ts'o 		map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
40824d33b1efSTheodore Ts'o 		map->m_len = min(map->m_len,
40834d33b1efSTheodore Ts'o 				 (unsigned) sbi->s_cluster_ratio - c_offset);
40844d33b1efSTheodore Ts'o 		/*
40854d33b1efSTheodore Ts'o 		 * Check for and handle this case:
40864d33b1efSTheodore Ts'o 		 *
40874d33b1efSTheodore Ts'o 		 *   |--------- cluster # N-------------|
40884d33b1efSTheodore Ts'o 		 *		       |------- extent ----|
40894d33b1efSTheodore Ts'o 		 *	   |--- requested region ---|
40904d33b1efSTheodore Ts'o 		 *	   |===========|
40914d33b1efSTheodore Ts'o 		 */
40924d33b1efSTheodore Ts'o 
40934d33b1efSTheodore Ts'o 		if (map->m_lblk < ee_block)
40944d33b1efSTheodore Ts'o 			map->m_len = min(map->m_len, ee_block - map->m_lblk);
40954d33b1efSTheodore Ts'o 
40964d33b1efSTheodore Ts'o 		/*
40974d33b1efSTheodore Ts'o 		 * Check for the case where there is already another allocated
40984d33b1efSTheodore Ts'o 		 * block to the right of 'ex' but before the end of the cluster.
40994d33b1efSTheodore Ts'o 		 *
41004d33b1efSTheodore Ts'o 		 *          |------------- cluster # N-------------|
41014d33b1efSTheodore Ts'o 		 * |----- ex -----|                  |---- ex_right ----|
41024d33b1efSTheodore Ts'o 		 *                  |------ requested region ------|
41034d33b1efSTheodore Ts'o 		 *                  |================|
41044d33b1efSTheodore Ts'o 		 */
41054d33b1efSTheodore Ts'o 		if (map->m_lblk > ee_block) {
41064d33b1efSTheodore Ts'o 			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
41074d33b1efSTheodore Ts'o 			map->m_len = min(map->m_len, next - map->m_lblk);
41084d33b1efSTheodore Ts'o 		}
4109d8990240SAditya Kali 
4110d8990240SAditya Kali 		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
41114d33b1efSTheodore Ts'o 		return 1;
41124d33b1efSTheodore Ts'o 	}
4113d8990240SAditya Kali 
4114d8990240SAditya Kali 	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
41154d33b1efSTheodore Ts'o 	return 0;
41164d33b1efSTheodore Ts'o }
41174d33b1efSTheodore Ts'o 
41184d33b1efSTheodore Ts'o 
41194d33b1efSTheodore Ts'o /*
4120f5ab0d1fSMingming Cao  * Block allocation/map/preallocation routine for extents based files
4121f5ab0d1fSMingming Cao  *
4122f5ab0d1fSMingming Cao  *
4123c278bfecSAneesh Kumar K.V  * Need to be called with
41240e855ac8SAneesh Kumar K.V  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
41250e855ac8SAneesh Kumar K.V  * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4126f5ab0d1fSMingming Cao  *
4127f5ab0d1fSMingming Cao  * return > 0, number of of blocks already mapped/allocated
4128f5ab0d1fSMingming Cao  *          if create == 0 and these are pre-allocated blocks
4129f5ab0d1fSMingming Cao  *          	buffer head is unmapped
4130f5ab0d1fSMingming Cao  *          otherwise blocks are mapped
4131f5ab0d1fSMingming Cao  *
4132f5ab0d1fSMingming Cao  * return = 0, if plain look up failed (blocks have not been allocated)
4133f5ab0d1fSMingming Cao  *          buffer head is unmapped
4134f5ab0d1fSMingming Cao  *
4135f5ab0d1fSMingming Cao  * return < 0, error case.
4136c278bfecSAneesh Kumar K.V  */
4137e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4138e35fd660STheodore Ts'o 			struct ext4_map_blocks *map, int flags)
4139a86c6181SAlex Tomas {
4140a86c6181SAlex Tomas 	struct ext4_ext_path *path = NULL;
41414d33b1efSTheodore Ts'o 	struct ext4_extent newex, *ex, *ex2;
41424d33b1efSTheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
41430562e0baSJiaying Zhang 	ext4_fsblk_t newblock = 0;
4144ce37c429SEric Whitney 	int free_on_err = 0, err = 0, depth, ret;
41454d33b1efSTheodore Ts'o 	unsigned int allocated = 0, offset = 0;
414681fdbb4aSYongqiang Yang 	unsigned int allocated_clusters = 0;
4147c9de560dSAlex Tomas 	struct ext4_allocation_request ar;
4148f45ee3a1SDmitry Monakhov 	ext4_io_end_t *io = ext4_inode_aio(inode);
41494d33b1efSTheodore Ts'o 	ext4_lblk_t cluster_offset;
415082e54229SDmitry Monakhov 	int set_unwritten = 0;
4151a86c6181SAlex Tomas 
415284fe3befSMingming 	ext_debug("blocks %u/%u requested for inode %lu\n",
4153e35fd660STheodore Ts'o 		  map->m_lblk, map->m_len, inode->i_ino);
41540562e0baSJiaying Zhang 	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4155a86c6181SAlex Tomas 
4156a86c6181SAlex Tomas 	/* find extent for this block */
4157107a7bd3STheodore Ts'o 	path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0);
4158a86c6181SAlex Tomas 	if (IS_ERR(path)) {
4159a86c6181SAlex Tomas 		err = PTR_ERR(path);
4160a86c6181SAlex Tomas 		path = NULL;
4161a86c6181SAlex Tomas 		goto out2;
4162a86c6181SAlex Tomas 	}
4163a86c6181SAlex Tomas 
4164a86c6181SAlex Tomas 	depth = ext_depth(inode);
4165a86c6181SAlex Tomas 
4166a86c6181SAlex Tomas 	/*
4167d0d856e8SRandy Dunlap 	 * consistent leaf must not be empty;
4168d0d856e8SRandy Dunlap 	 * this situation is possible, though, _during_ tree modification;
4169a86c6181SAlex Tomas 	 * this is why assert can't be put in ext4_ext_find_extent()
4170a86c6181SAlex Tomas 	 */
4171273df556SFrank Mayhar 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4172273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "bad extent address "
4173f70f362bSTheodore Ts'o 				 "lblock: %lu, depth: %d pblock %lld",
4174f70f362bSTheodore Ts'o 				 (unsigned long) map->m_lblk, depth,
4175f70f362bSTheodore Ts'o 				 path[depth].p_block);
4176034fb4c9SSurbhi Palande 		err = -EIO;
4177034fb4c9SSurbhi Palande 		goto out2;
4178034fb4c9SSurbhi Palande 	}
4179a86c6181SAlex Tomas 
41807e028976SAvantika Mathur 	ex = path[depth].p_ext;
41817e028976SAvantika Mathur 	if (ex) {
4182725d26d3SAneesh Kumar K.V 		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4183bf89d16fSTheodore Ts'o 		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4184a2df2a63SAmit Arora 		unsigned short ee_len;
4185471d4011SSuparna Bhattacharya 
4186471d4011SSuparna Bhattacharya 		/*
4187471d4011SSuparna Bhattacharya 		 * Uninitialized extents are treated as holes, except that
418856055d3aSAmit Arora 		 * we split out initialized portions during a write.
4189471d4011SSuparna Bhattacharya 		 */
4190a2df2a63SAmit Arora 		ee_len = ext4_ext_get_actual_len(ex);
4191d8990240SAditya Kali 
4192d8990240SAditya Kali 		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4193d8990240SAditya Kali 
4194d0d856e8SRandy Dunlap 		/* if found extent covers block, simply return it */
4195e35fd660STheodore Ts'o 		if (in_range(map->m_lblk, ee_block, ee_len)) {
4196e35fd660STheodore Ts'o 			newblock = map->m_lblk - ee_block + ee_start;
4197d0d856e8SRandy Dunlap 			/* number of remaining blocks in the extent */
4198e35fd660STheodore Ts'o 			allocated = ee_len - (map->m_lblk - ee_block);
4199e35fd660STheodore Ts'o 			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
4200a86c6181SAlex Tomas 				  ee_block, ee_len, newblock);
420156055d3aSAmit Arora 
420269eb33dcSZheng Liu 			if (!ext4_ext_is_uninitialized(ex))
4203a86c6181SAlex Tomas 				goto out;
420469eb33dcSZheng Liu 
4205ce37c429SEric Whitney 			ret = ext4_ext_handle_uninitialized_extents(
4206e861304bSAllison Henderson 				handle, inode, map, path, flags,
4207e861304bSAllison Henderson 				allocated, newblock);
4208ce37c429SEric Whitney 			if (ret < 0)
4209ce37c429SEric Whitney 				err = ret;
4210ce37c429SEric Whitney 			else
4211ce37c429SEric Whitney 				allocated = ret;
421237794732SZheng Liu 			goto out3;
421356055d3aSAmit Arora 		}
4214a86c6181SAlex Tomas 	}
4215a86c6181SAlex Tomas 
42167b415bf6SAditya Kali 	if ((sbi->s_cluster_ratio > 1) &&
42177d1b1fbcSZheng Liu 	    ext4_find_delalloc_cluster(inode, map->m_lblk))
42187b415bf6SAditya Kali 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
42197b415bf6SAditya Kali 
4220a86c6181SAlex Tomas 	/*
4221d0d856e8SRandy Dunlap 	 * requested block isn't allocated yet;
4222a86c6181SAlex Tomas 	 * we couldn't try to create block if create flag is zero
4223a86c6181SAlex Tomas 	 */
4224c2177057STheodore Ts'o 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
422556055d3aSAmit Arora 		/*
422656055d3aSAmit Arora 		 * put just found gap into cache to speed up
422756055d3aSAmit Arora 		 * subsequent requests
422856055d3aSAmit Arora 		 */
4229d100eef2SZheng Liu 		if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
4230e35fd660STheodore Ts'o 			ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4231a86c6181SAlex Tomas 		goto out2;
4232a86c6181SAlex Tomas 	}
42334d33b1efSTheodore Ts'o 
4234a86c6181SAlex Tomas 	/*
4235c2ea3fdeSTheodore Ts'o 	 * Okay, we need to do block allocation.
4236a86c6181SAlex Tomas 	 */
42377b415bf6SAditya Kali 	map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
42384d33b1efSTheodore Ts'o 	newex.ee_block = cpu_to_le32(map->m_lblk);
4239d0abafacSEric Whitney 	cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
42404d33b1efSTheodore Ts'o 
42414d33b1efSTheodore Ts'o 	/*
42424d33b1efSTheodore Ts'o 	 * If we are doing bigalloc, check to see if the extent returned
42434d33b1efSTheodore Ts'o 	 * by ext4_ext_find_extent() implies a cluster we can use.
42444d33b1efSTheodore Ts'o 	 */
42454d33b1efSTheodore Ts'o 	if (cluster_offset && ex &&
4246d8990240SAditya Kali 	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
42474d33b1efSTheodore Ts'o 		ar.len = allocated = map->m_len;
42484d33b1efSTheodore Ts'o 		newblock = map->m_pblk;
42497b415bf6SAditya Kali 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
42504d33b1efSTheodore Ts'o 		goto got_allocated_blocks;
42514d33b1efSTheodore Ts'o 	}
4252a86c6181SAlex Tomas 
4253c9de560dSAlex Tomas 	/* find neighbour allocated blocks */
4254e35fd660STheodore Ts'o 	ar.lleft = map->m_lblk;
4255c9de560dSAlex Tomas 	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4256c9de560dSAlex Tomas 	if (err)
4257c9de560dSAlex Tomas 		goto out2;
4258e35fd660STheodore Ts'o 	ar.lright = map->m_lblk;
42594d33b1efSTheodore Ts'o 	ex2 = NULL;
42604d33b1efSTheodore Ts'o 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4261c9de560dSAlex Tomas 	if (err)
4262c9de560dSAlex Tomas 		goto out2;
426325d14f98SAmit Arora 
42644d33b1efSTheodore Ts'o 	/* Check if the extent after searching to the right implies a
42654d33b1efSTheodore Ts'o 	 * cluster we can use. */
42664d33b1efSTheodore Ts'o 	if ((sbi->s_cluster_ratio > 1) && ex2 &&
4267d8990240SAditya Kali 	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
42684d33b1efSTheodore Ts'o 		ar.len = allocated = map->m_len;
42694d33b1efSTheodore Ts'o 		newblock = map->m_pblk;
42707b415bf6SAditya Kali 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
42714d33b1efSTheodore Ts'o 		goto got_allocated_blocks;
42724d33b1efSTheodore Ts'o 	}
42734d33b1efSTheodore Ts'o 
4274749269faSAmit Arora 	/*
4275749269faSAmit Arora 	 * See if request is beyond maximum number of blocks we can have in
4276749269faSAmit Arora 	 * a single extent. For an initialized extent this limit is
4277749269faSAmit Arora 	 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4278749269faSAmit Arora 	 * EXT_UNINIT_MAX_LEN.
4279749269faSAmit Arora 	 */
4280e35fd660STheodore Ts'o 	if (map->m_len > EXT_INIT_MAX_LEN &&
4281c2177057STheodore Ts'o 	    !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4282e35fd660STheodore Ts'o 		map->m_len = EXT_INIT_MAX_LEN;
4283e35fd660STheodore Ts'o 	else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4284c2177057STheodore Ts'o 		 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4285e35fd660STheodore Ts'o 		map->m_len = EXT_UNINIT_MAX_LEN;
4286749269faSAmit Arora 
4287e35fd660STheodore Ts'o 	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4288e35fd660STheodore Ts'o 	newex.ee_len = cpu_to_le16(map->m_len);
42894d33b1efSTheodore Ts'o 	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
429025d14f98SAmit Arora 	if (err)
4291b939e376SAneesh Kumar K.V 		allocated = ext4_ext_get_actual_len(&newex);
429225d14f98SAmit Arora 	else
4293e35fd660STheodore Ts'o 		allocated = map->m_len;
4294c9de560dSAlex Tomas 
4295c9de560dSAlex Tomas 	/* allocate new block */
4296c9de560dSAlex Tomas 	ar.inode = inode;
4297e35fd660STheodore Ts'o 	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4298e35fd660STheodore Ts'o 	ar.logical = map->m_lblk;
42994d33b1efSTheodore Ts'o 	/*
43004d33b1efSTheodore Ts'o 	 * We calculate the offset from the beginning of the cluster
43014d33b1efSTheodore Ts'o 	 * for the logical block number, since when we allocate a
43024d33b1efSTheodore Ts'o 	 * physical cluster, the physical block should start at the
43034d33b1efSTheodore Ts'o 	 * same offset from the beginning of the cluster.  This is
43044d33b1efSTheodore Ts'o 	 * needed so that future calls to get_implied_cluster_alloc()
43054d33b1efSTheodore Ts'o 	 * work correctly.
43064d33b1efSTheodore Ts'o 	 */
4307f5a44db5STheodore Ts'o 	offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
43084d33b1efSTheodore Ts'o 	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
43094d33b1efSTheodore Ts'o 	ar.goal -= offset;
43104d33b1efSTheodore Ts'o 	ar.logical -= offset;
4311c9de560dSAlex Tomas 	if (S_ISREG(inode->i_mode))
4312c9de560dSAlex Tomas 		ar.flags = EXT4_MB_HINT_DATA;
4313c9de560dSAlex Tomas 	else
4314c9de560dSAlex Tomas 		/* disable in-core preallocation for non-regular files */
4315c9de560dSAlex Tomas 		ar.flags = 0;
4316556b27abSVivek Haldar 	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4317556b27abSVivek Haldar 		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4318c9de560dSAlex Tomas 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
4319a86c6181SAlex Tomas 	if (!newblock)
4320a86c6181SAlex Tomas 		goto out2;
432184fe3befSMingming 	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4322498e5f24STheodore Ts'o 		  ar.goal, newblock, allocated);
43234d33b1efSTheodore Ts'o 	free_on_err = 1;
43247b415bf6SAditya Kali 	allocated_clusters = ar.len;
43254d33b1efSTheodore Ts'o 	ar.len = EXT4_C2B(sbi, ar.len) - offset;
43264d33b1efSTheodore Ts'o 	if (ar.len > allocated)
43274d33b1efSTheodore Ts'o 		ar.len = allocated;
4328a86c6181SAlex Tomas 
43294d33b1efSTheodore Ts'o got_allocated_blocks:
4330a86c6181SAlex Tomas 	/* try to insert new extent into found leaf and return */
43314d33b1efSTheodore Ts'o 	ext4_ext_store_pblock(&newex, newblock + offset);
4332c9de560dSAlex Tomas 	newex.ee_len = cpu_to_le16(ar.len);
43338d5d02e6SMingming Cao 	/* Mark uninitialized */
43348d5d02e6SMingming Cao 	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
4335a2df2a63SAmit Arora 		ext4_ext_mark_uninitialized(&newex);
4336a25a4e1aSZheng Liu 		map->m_flags |= EXT4_MAP_UNWRITTEN;
43378d5d02e6SMingming Cao 		/*
4338744692dcSJiaying Zhang 		 * io_end structure was created for every IO write to an
433925985edcSLucas De Marchi 		 * uninitialized extent. To avoid unnecessary conversion,
4340744692dcSJiaying Zhang 		 * here we flag the IO that really needs the conversion.
43415f524950SMingming 		 * For non asycn direct IO case, flag the inode state
434225985edcSLucas De Marchi 		 * that we need to perform conversion when IO is done.
43438d5d02e6SMingming Cao 		 */
434482e54229SDmitry Monakhov 		if ((flags & EXT4_GET_BLOCKS_PRE_IO))
434582e54229SDmitry Monakhov 			set_unwritten = 1;
4346744692dcSJiaying Zhang 		if (ext4_should_dioread_nolock(inode))
4347e35fd660STheodore Ts'o 			map->m_flags |= EXT4_MAP_UNINIT;
43488d5d02e6SMingming Cao 	}
4349c8d46e41SJiaying Zhang 
4350a4e5d88bSDmitry Monakhov 	err = 0;
4351a4e5d88bSDmitry Monakhov 	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4352a4e5d88bSDmitry Monakhov 		err = check_eofblocks_fl(handle, inode, map->m_lblk,
4353a4e5d88bSDmitry Monakhov 					 path, ar.len);
4354575a1d4bSJiaying Zhang 	if (!err)
4355575a1d4bSJiaying Zhang 		err = ext4_ext_insert_extent(handle, inode, path,
4356575a1d4bSJiaying Zhang 					     &newex, flags);
435782e54229SDmitry Monakhov 
435882e54229SDmitry Monakhov 	if (!err && set_unwritten) {
435982e54229SDmitry Monakhov 		if (io)
436082e54229SDmitry Monakhov 			ext4_set_io_unwritten_flag(inode, io);
436182e54229SDmitry Monakhov 		else
436282e54229SDmitry Monakhov 			ext4_set_inode_state(inode,
436382e54229SDmitry Monakhov 					     EXT4_STATE_DIO_UNWRITTEN);
436482e54229SDmitry Monakhov 	}
436582e54229SDmitry Monakhov 
43664d33b1efSTheodore Ts'o 	if (err && free_on_err) {
43677132de74SMaxim Patlasov 		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
43687132de74SMaxim Patlasov 			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4369315054f0SAlex Tomas 		/* free data blocks we just allocated */
4370c9de560dSAlex Tomas 		/* not a good idea to call discard here directly,
4371c9de560dSAlex Tomas 		 * but otherwise we'd need to call it every free() */
4372c2ea3fdeSTheodore Ts'o 		ext4_discard_preallocations(inode);
4373c8e15130STheodore Ts'o 		ext4_free_blocks(handle, inode, NULL, newblock,
4374c8e15130STheodore Ts'o 				 EXT4_C2B(sbi, allocated_clusters), fb_flags);
4375a86c6181SAlex Tomas 		goto out2;
4376315054f0SAlex Tomas 	}
4377a86c6181SAlex Tomas 
4378a86c6181SAlex Tomas 	/* previous routine could use block we allocated */
4379bf89d16fSTheodore Ts'o 	newblock = ext4_ext_pblock(&newex);
4380b939e376SAneesh Kumar K.V 	allocated = ext4_ext_get_actual_len(&newex);
4381e35fd660STheodore Ts'o 	if (allocated > map->m_len)
4382e35fd660STheodore Ts'o 		allocated = map->m_len;
4383e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_NEW;
4384a86c6181SAlex Tomas 
4385b436b9beSJan Kara 	/*
43865f634d06SAneesh Kumar K.V 	 * Update reserved blocks/metadata blocks after successful
43875f634d06SAneesh Kumar K.V 	 * block allocation which had been deferred till now.
43885f634d06SAneesh Kumar K.V 	 */
43897b415bf6SAditya Kali 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
439081fdbb4aSYongqiang Yang 		unsigned int reserved_clusters;
43917b415bf6SAditya Kali 		/*
439281fdbb4aSYongqiang Yang 		 * Check how many clusters we had reserved this allocated range
43937b415bf6SAditya Kali 		 */
43947b415bf6SAditya Kali 		reserved_clusters = get_reserved_cluster_alloc(inode,
43957b415bf6SAditya Kali 						map->m_lblk, allocated);
43967b415bf6SAditya Kali 		if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
43977b415bf6SAditya Kali 			if (reserved_clusters) {
43987b415bf6SAditya Kali 				/*
43997b415bf6SAditya Kali 				 * We have clusters reserved for this range.
44007b415bf6SAditya Kali 				 * But since we are not doing actual allocation
44017b415bf6SAditya Kali 				 * and are simply using blocks from previously
44027b415bf6SAditya Kali 				 * allocated cluster, we should release the
44037b415bf6SAditya Kali 				 * reservation and not claim quota.
44047b415bf6SAditya Kali 				 */
44057b415bf6SAditya Kali 				ext4_da_update_reserve_space(inode,
44067b415bf6SAditya Kali 						reserved_clusters, 0);
44077b415bf6SAditya Kali 			}
44087b415bf6SAditya Kali 		} else {
44097b415bf6SAditya Kali 			BUG_ON(allocated_clusters < reserved_clusters);
44107b415bf6SAditya Kali 			if (reserved_clusters < allocated_clusters) {
44115356f261SAditya Kali 				struct ext4_inode_info *ei = EXT4_I(inode);
44127b415bf6SAditya Kali 				int reservation = allocated_clusters -
44137b415bf6SAditya Kali 						  reserved_clusters;
44147b415bf6SAditya Kali 				/*
44157b415bf6SAditya Kali 				 * It seems we claimed few clusters outside of
44167b415bf6SAditya Kali 				 * the range of this allocation. We should give
44177b415bf6SAditya Kali 				 * it back to the reservation pool. This can
44187b415bf6SAditya Kali 				 * happen in the following case:
44197b415bf6SAditya Kali 				 *
44207b415bf6SAditya Kali 				 * * Suppose s_cluster_ratio is 4 (i.e., each
44217b415bf6SAditya Kali 				 *   cluster has 4 blocks. Thus, the clusters
44227b415bf6SAditya Kali 				 *   are [0-3],[4-7],[8-11]...
44237b415bf6SAditya Kali 				 * * First comes delayed allocation write for
44247b415bf6SAditya Kali 				 *   logical blocks 10 & 11. Since there were no
44257b415bf6SAditya Kali 				 *   previous delayed allocated blocks in the
44267b415bf6SAditya Kali 				 *   range [8-11], we would reserve 1 cluster
44277b415bf6SAditya Kali 				 *   for this write.
44287b415bf6SAditya Kali 				 * * Next comes write for logical blocks 3 to 8.
44297b415bf6SAditya Kali 				 *   In this case, we will reserve 2 clusters
44307b415bf6SAditya Kali 				 *   (for [0-3] and [4-7]; and not for [8-11] as
44317b415bf6SAditya Kali 				 *   that range has a delayed allocated blocks.
44327b415bf6SAditya Kali 				 *   Thus total reserved clusters now becomes 3.
44337b415bf6SAditya Kali 				 * * Now, during the delayed allocation writeout
44347b415bf6SAditya Kali 				 *   time, we will first write blocks [3-8] and
44357b415bf6SAditya Kali 				 *   allocate 3 clusters for writing these
44367b415bf6SAditya Kali 				 *   blocks. Also, we would claim all these
44377b415bf6SAditya Kali 				 *   three clusters above.
44387b415bf6SAditya Kali 				 * * Now when we come here to writeout the
44397b415bf6SAditya Kali 				 *   blocks [10-11], we would expect to claim
44407b415bf6SAditya Kali 				 *   the reservation of 1 cluster we had made
44417b415bf6SAditya Kali 				 *   (and we would claim it since there are no
44427b415bf6SAditya Kali 				 *   more delayed allocated blocks in the range
44437b415bf6SAditya Kali 				 *   [8-11]. But our reserved cluster count had
44447b415bf6SAditya Kali 				 *   already gone to 0.
44457b415bf6SAditya Kali 				 *
44467b415bf6SAditya Kali 				 *   Thus, at the step 4 above when we determine
44477b415bf6SAditya Kali 				 *   that there are still some unwritten delayed
44487b415bf6SAditya Kali 				 *   allocated blocks outside of our current
44497b415bf6SAditya Kali 				 *   block range, we should increment the
44507b415bf6SAditya Kali 				 *   reserved clusters count so that when the
44517b415bf6SAditya Kali 				 *   remaining blocks finally gets written, we
44527b415bf6SAditya Kali 				 *   could claim them.
44537b415bf6SAditya Kali 				 */
44545356f261SAditya Kali 				dquot_reserve_block(inode,
44555356f261SAditya Kali 						EXT4_C2B(sbi, reservation));
44565356f261SAditya Kali 				spin_lock(&ei->i_block_reservation_lock);
44575356f261SAditya Kali 				ei->i_reserved_data_blocks += reservation;
44585356f261SAditya Kali 				spin_unlock(&ei->i_block_reservation_lock);
44597b415bf6SAditya Kali 			}
4460232ec872SLukas Czerner 			/*
4461232ec872SLukas Czerner 			 * We will claim quota for all newly allocated blocks.
4462232ec872SLukas Czerner 			 * We're updating the reserved space *after* the
4463232ec872SLukas Czerner 			 * correction above so we do not accidentally free
4464232ec872SLukas Czerner 			 * all the metadata reservation because we might
4465232ec872SLukas Czerner 			 * actually need it later on.
4466232ec872SLukas Czerner 			 */
4467232ec872SLukas Czerner 			ext4_da_update_reserve_space(inode, allocated_clusters,
4468232ec872SLukas Czerner 							1);
44697b415bf6SAditya Kali 		}
44707b415bf6SAditya Kali 	}
44715f634d06SAneesh Kumar K.V 
44725f634d06SAneesh Kumar K.V 	/*
4473b436b9beSJan Kara 	 * Cache the extent and update transaction to commit on fdatasync only
4474b436b9beSJan Kara 	 * when it is _not_ an uninitialized extent.
4475b436b9beSJan Kara 	 */
447669eb33dcSZheng Liu 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
4477b436b9beSJan Kara 		ext4_update_inode_fsync_trans(handle, inode, 1);
447869eb33dcSZheng Liu 	else
4479b436b9beSJan Kara 		ext4_update_inode_fsync_trans(handle, inode, 0);
4480a86c6181SAlex Tomas out:
4481e35fd660STheodore Ts'o 	if (allocated > map->m_len)
4482e35fd660STheodore Ts'o 		allocated = map->m_len;
4483a86c6181SAlex Tomas 	ext4_ext_show_leaf(inode, path);
4484e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_MAPPED;
4485e35fd660STheodore Ts'o 	map->m_pblk = newblock;
4486e35fd660STheodore Ts'o 	map->m_len = allocated;
4487a86c6181SAlex Tomas out2:
4488a86c6181SAlex Tomas 	if (path) {
4489a86c6181SAlex Tomas 		ext4_ext_drop_refs(path);
4490a86c6181SAlex Tomas 		kfree(path);
4491a86c6181SAlex Tomas 	}
4492e861304bSAllison Henderson 
449337794732SZheng Liu out3:
449463b99968STheodore Ts'o 	trace_ext4_ext_map_blocks_exit(inode, flags, map,
449563b99968STheodore Ts'o 				       err ? err : allocated);
449663b99968STheodore Ts'o 	ext4_es_lru_add(inode);
44977877191cSLukas Czerner 	return err ? err : allocated;
4498a86c6181SAlex Tomas }
4499a86c6181SAlex Tomas 
4500819c4920STheodore Ts'o void ext4_ext_truncate(handle_t *handle, struct inode *inode)
4501a86c6181SAlex Tomas {
4502a86c6181SAlex Tomas 	struct super_block *sb = inode->i_sb;
4503725d26d3SAneesh Kumar K.V 	ext4_lblk_t last_block;
4504a86c6181SAlex Tomas 	int err = 0;
4505a86c6181SAlex Tomas 
4506a86c6181SAlex Tomas 	/*
4507d0d856e8SRandy Dunlap 	 * TODO: optimization is possible here.
4508d0d856e8SRandy Dunlap 	 * Probably we need not scan at all,
4509d0d856e8SRandy Dunlap 	 * because page truncation is enough.
4510a86c6181SAlex Tomas 	 */
4511a86c6181SAlex Tomas 
4512a86c6181SAlex Tomas 	/* we have to know where to truncate from in crash case */
4513a86c6181SAlex Tomas 	EXT4_I(inode)->i_disksize = inode->i_size;
4514a86c6181SAlex Tomas 	ext4_mark_inode_dirty(handle, inode);
4515a86c6181SAlex Tomas 
4516a86c6181SAlex Tomas 	last_block = (inode->i_size + sb->s_blocksize - 1)
4517a86c6181SAlex Tomas 			>> EXT4_BLOCK_SIZE_BITS(sb);
45188acd5e9bSTheodore Ts'o retry:
451951865fdaSZheng Liu 	err = ext4_es_remove_extent(inode, last_block,
452051865fdaSZheng Liu 				    EXT_MAX_BLOCKS - last_block);
452194eec0fcSTheodore Ts'o 	if (err == -ENOMEM) {
45228acd5e9bSTheodore Ts'o 		cond_resched();
45238acd5e9bSTheodore Ts'o 		congestion_wait(BLK_RW_ASYNC, HZ/50);
45248acd5e9bSTheodore Ts'o 		goto retry;
45258acd5e9bSTheodore Ts'o 	}
45268acd5e9bSTheodore Ts'o 	if (err) {
45278acd5e9bSTheodore Ts'o 		ext4_std_error(inode->i_sb, err);
45288acd5e9bSTheodore Ts'o 		return;
45298acd5e9bSTheodore Ts'o 	}
45305f95d21fSLukas Czerner 	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
45318acd5e9bSTheodore Ts'o 	ext4_std_error(inode->i_sb, err);
4532a86c6181SAlex Tomas }
4533a86c6181SAlex Tomas 
4534fd28784aSAneesh Kumar K.V static void ext4_falloc_update_inode(struct inode *inode,
4535fd28784aSAneesh Kumar K.V 				int mode, loff_t new_size, int update_ctime)
4536fd28784aSAneesh Kumar K.V {
4537fd28784aSAneesh Kumar K.V 	struct timespec now;
4538fd28784aSAneesh Kumar K.V 
4539fd28784aSAneesh Kumar K.V 	if (update_ctime) {
4540fd28784aSAneesh Kumar K.V 		now = current_fs_time(inode->i_sb);
4541fd28784aSAneesh Kumar K.V 		if (!timespec_equal(&inode->i_ctime, &now))
4542fd28784aSAneesh Kumar K.V 			inode->i_ctime = now;
4543fd28784aSAneesh Kumar K.V 	}
4544fd28784aSAneesh Kumar K.V 	/*
4545fd28784aSAneesh Kumar K.V 	 * Update only when preallocation was requested beyond
4546fd28784aSAneesh Kumar K.V 	 * the file size.
4547fd28784aSAneesh Kumar K.V 	 */
4548cf17fea6SAneesh Kumar K.V 	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4549cf17fea6SAneesh Kumar K.V 		if (new_size > i_size_read(inode))
4550fd28784aSAneesh Kumar K.V 			i_size_write(inode, new_size);
4551cf17fea6SAneesh Kumar K.V 		if (new_size > EXT4_I(inode)->i_disksize)
4552cf17fea6SAneesh Kumar K.V 			ext4_update_i_disksize(inode, new_size);
4553c8d46e41SJiaying Zhang 	} else {
4554c8d46e41SJiaying Zhang 		/*
4555c8d46e41SJiaying Zhang 		 * Mark that we allocate beyond EOF so the subsequent truncate
4556c8d46e41SJiaying Zhang 		 * can proceed even if the new size is the same as i_size.
4557c8d46e41SJiaying Zhang 		 */
4558c8d46e41SJiaying Zhang 		if (new_size > i_size_read(inode))
455912e9b892SDmitry Monakhov 			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4560fd28784aSAneesh Kumar K.V 	}
4561fd28784aSAneesh Kumar K.V 
4562fd28784aSAneesh Kumar K.V }
4563fd28784aSAneesh Kumar K.V 
4564a2df2a63SAmit Arora /*
45652fe17c10SChristoph Hellwig  * preallocate space for a file. This implements ext4's fallocate file
4566a2df2a63SAmit Arora  * operation, which gets called from sys_fallocate system call.
4567a2df2a63SAmit Arora  * For block-mapped files, posix_fallocate should fall back to the method
4568a2df2a63SAmit Arora  * of writing zeroes to the required new blocks (the same behavior which is
4569a2df2a63SAmit Arora  * expected for file systems which do not support fallocate() system call).
4570a2df2a63SAmit Arora  */
45712fe17c10SChristoph Hellwig long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4572a2df2a63SAmit Arora {
4573496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
4574a2df2a63SAmit Arora 	handle_t *handle;
4575fd28784aSAneesh Kumar K.V 	loff_t new_size;
4576498e5f24STheodore Ts'o 	unsigned int max_blocks;
4577a2df2a63SAmit Arora 	int ret = 0;
4578a2df2a63SAmit Arora 	int ret2 = 0;
4579a2df2a63SAmit Arora 	int retries = 0;
4580a4e5d88bSDmitry Monakhov 	int flags;
45812ed88685STheodore Ts'o 	struct ext4_map_blocks map;
4582a2df2a63SAmit Arora 	unsigned int credits, blkbits = inode->i_blkbits;
4583a2df2a63SAmit Arora 
4584a4bb6b64SAllison Henderson 	/* Return error if mode is not supported */
4585a4bb6b64SAllison Henderson 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4586a4bb6b64SAllison Henderson 		return -EOPNOTSUPP;
4587a4bb6b64SAllison Henderson 
4588a4bb6b64SAllison Henderson 	if (mode & FALLOC_FL_PUNCH_HOLE)
4589aeb2817aSAshish Sangwan 		return ext4_punch_hole(inode, offset, len);
4590a4bb6b64SAllison Henderson 
45910c8d414fSTao Ma 	ret = ext4_convert_inline_data(inode);
45920c8d414fSTao Ma 	if (ret)
45930c8d414fSTao Ma 		return ret;
45940c8d414fSTao Ma 
45958bad6fc8SZheng Liu 	/*
45968bad6fc8SZheng Liu 	 * currently supporting (pre)allocate mode for extent-based
45978bad6fc8SZheng Liu 	 * files _only_
45988bad6fc8SZheng Liu 	 */
45998bad6fc8SZheng Liu 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
46008bad6fc8SZheng Liu 		return -EOPNOTSUPP;
46018bad6fc8SZheng Liu 
46020562e0baSJiaying Zhang 	trace_ext4_fallocate_enter(inode, offset, len, mode);
46032ed88685STheodore Ts'o 	map.m_lblk = offset >> blkbits;
4604fd28784aSAneesh Kumar K.V 	/*
4605fd28784aSAneesh Kumar K.V 	 * We can't just convert len to max_blocks because
4606fd28784aSAneesh Kumar K.V 	 * If blocksize = 4096 offset = 3072 and len = 2048
4607fd28784aSAneesh Kumar K.V 	 */
4608a2df2a63SAmit Arora 	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
46092ed88685STheodore Ts'o 		- map.m_lblk;
4610a2df2a63SAmit Arora 	/*
4611f3bd1f3fSMingming Cao 	 * credits to insert 1 extent into extent tree
4612a2df2a63SAmit Arora 	 */
4613f3bd1f3fSMingming Cao 	credits = ext4_chunk_trans_blocks(inode, max_blocks);
461455bd725aSAneesh Kumar K.V 	mutex_lock(&inode->i_mutex);
46156d19c42bSNikanth Karthikesan 	ret = inode_newsize_ok(inode, (len + offset));
46166d19c42bSNikanth Karthikesan 	if (ret) {
46176d19c42bSNikanth Karthikesan 		mutex_unlock(&inode->i_mutex);
46180562e0baSJiaying Zhang 		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
46196d19c42bSNikanth Karthikesan 		return ret;
46206d19c42bSNikanth Karthikesan 	}
46213c6fe770SGreg Harm 	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4622a4e5d88bSDmitry Monakhov 	if (mode & FALLOC_FL_KEEP_SIZE)
4623a4e5d88bSDmitry Monakhov 		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
46243c6fe770SGreg Harm 	/*
46253c6fe770SGreg Harm 	 * Don't normalize the request if it can fit in one extent so
46263c6fe770SGreg Harm 	 * that it doesn't get unnecessarily split into multiple
46273c6fe770SGreg Harm 	 * extents.
46283c6fe770SGreg Harm 	 */
46293c6fe770SGreg Harm 	if (len <= EXT_UNINIT_MAX_LEN << blkbits)
46303c6fe770SGreg Harm 		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
463160d4616fSDmitry Monakhov 
4632a2df2a63SAmit Arora retry:
4633a2df2a63SAmit Arora 	while (ret >= 0 && ret < max_blocks) {
46342ed88685STheodore Ts'o 		map.m_lblk = map.m_lblk + ret;
46352ed88685STheodore Ts'o 		map.m_len = max_blocks = max_blocks - ret;
46369924a92aSTheodore Ts'o 		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
46379924a92aSTheodore Ts'o 					    credits);
4638a2df2a63SAmit Arora 		if (IS_ERR(handle)) {
4639a2df2a63SAmit Arora 			ret = PTR_ERR(handle);
4640a2df2a63SAmit Arora 			break;
4641a2df2a63SAmit Arora 		}
4642a4e5d88bSDmitry Monakhov 		ret = ext4_map_blocks(handle, inode, &map, flags);
4643221879c9SAneesh Kumar K.V 		if (ret <= 0) {
46442c98615dSAneesh Kumar K.V #ifdef EXT4FS_DEBUG
4645b06acd38SLukas Czerner 			ext4_warning(inode->i_sb,
4646b06acd38SLukas Czerner 				     "inode #%lu: block %u: len %u: "
4647b06acd38SLukas Czerner 				     "ext4_ext_map_blocks returned %d",
4648b06acd38SLukas Czerner 				     inode->i_ino, map.m_lblk,
4649b06acd38SLukas Czerner 				     map.m_len, ret);
46502c98615dSAneesh Kumar K.V #endif
4651a2df2a63SAmit Arora 			ext4_mark_inode_dirty(handle, inode);
4652a2df2a63SAmit Arora 			ret2 = ext4_journal_stop(handle);
4653a2df2a63SAmit Arora 			break;
4654a2df2a63SAmit Arora 		}
46552ed88685STheodore Ts'o 		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4656fd28784aSAneesh Kumar K.V 						blkbits) >> blkbits))
4657fd28784aSAneesh Kumar K.V 			new_size = offset + len;
4658fd28784aSAneesh Kumar K.V 		else
465929ae07b7SUtako Kusaka 			new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4660a2df2a63SAmit Arora 
4661fd28784aSAneesh Kumar K.V 		ext4_falloc_update_inode(inode, mode, new_size,
46622ed88685STheodore Ts'o 					 (map.m_flags & EXT4_MAP_NEW));
4663a2df2a63SAmit Arora 		ext4_mark_inode_dirty(handle, inode);
4664f4e95b33SZheng Liu 		if ((file->f_flags & O_SYNC) && ret >= max_blocks)
4665f4e95b33SZheng Liu 			ext4_handle_sync(handle);
4666a2df2a63SAmit Arora 		ret2 = ext4_journal_stop(handle);
4667a2df2a63SAmit Arora 		if (ret2)
4668a2df2a63SAmit Arora 			break;
4669a2df2a63SAmit Arora 	}
4670fd28784aSAneesh Kumar K.V 	if (ret == -ENOSPC &&
4671fd28784aSAneesh Kumar K.V 			ext4_should_retry_alloc(inode->i_sb, &retries)) {
4672fd28784aSAneesh Kumar K.V 		ret = 0;
4673a2df2a63SAmit Arora 		goto retry;
4674a2df2a63SAmit Arora 	}
467555bd725aSAneesh Kumar K.V 	mutex_unlock(&inode->i_mutex);
46760562e0baSJiaying Zhang 	trace_ext4_fallocate_exit(inode, offset, max_blocks,
46770562e0baSJiaying Zhang 				ret > 0 ? ret2 : ret);
4678a2df2a63SAmit Arora 	return ret > 0 ? ret2 : ret;
4679a2df2a63SAmit Arora }
46806873fa0dSEric Sandeen 
46816873fa0dSEric Sandeen /*
46820031462bSMingming Cao  * This function convert a range of blocks to written extents
46830031462bSMingming Cao  * The caller of this function will pass the start offset and the size.
46840031462bSMingming Cao  * all unwritten extents within this range will be converted to
46850031462bSMingming Cao  * written extents.
46860031462bSMingming Cao  *
46870031462bSMingming Cao  * This function is called from the direct IO end io call back
46880031462bSMingming Cao  * function, to convert the fallocated extents after IO is completed.
4689109f5565SMingming  * Returns 0 on success.
46900031462bSMingming Cao  */
46916b523df4SJan Kara int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
46926b523df4SJan Kara 				   loff_t offset, ssize_t len)
46930031462bSMingming Cao {
46940031462bSMingming Cao 	unsigned int max_blocks;
46950031462bSMingming Cao 	int ret = 0;
46960031462bSMingming Cao 	int ret2 = 0;
46972ed88685STheodore Ts'o 	struct ext4_map_blocks map;
46980031462bSMingming Cao 	unsigned int credits, blkbits = inode->i_blkbits;
46990031462bSMingming Cao 
47002ed88685STheodore Ts'o 	map.m_lblk = offset >> blkbits;
47010031462bSMingming Cao 	/*
47020031462bSMingming Cao 	 * We can't just convert len to max_blocks because
47030031462bSMingming Cao 	 * If blocksize = 4096 offset = 3072 and len = 2048
47040031462bSMingming Cao 	 */
47052ed88685STheodore Ts'o 	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
47062ed88685STheodore Ts'o 		      map.m_lblk);
47070031462bSMingming Cao 	/*
47086b523df4SJan Kara 	 * This is somewhat ugly but the idea is clear: When transaction is
47096b523df4SJan Kara 	 * reserved, everything goes into it. Otherwise we rather start several
47106b523df4SJan Kara 	 * smaller transactions for conversion of each extent separately.
47116b523df4SJan Kara 	 */
47126b523df4SJan Kara 	if (handle) {
47136b523df4SJan Kara 		handle = ext4_journal_start_reserved(handle,
47146b523df4SJan Kara 						     EXT4_HT_EXT_CONVERT);
47156b523df4SJan Kara 		if (IS_ERR(handle))
47166b523df4SJan Kara 			return PTR_ERR(handle);
47176b523df4SJan Kara 		credits = 0;
47186b523df4SJan Kara 	} else {
47196b523df4SJan Kara 		/*
47200031462bSMingming Cao 		 * credits to insert 1 extent into extent tree
47210031462bSMingming Cao 		 */
47220031462bSMingming Cao 		credits = ext4_chunk_trans_blocks(inode, max_blocks);
47236b523df4SJan Kara 	}
47240031462bSMingming Cao 	while (ret >= 0 && ret < max_blocks) {
47252ed88685STheodore Ts'o 		map.m_lblk += ret;
47262ed88685STheodore Ts'o 		map.m_len = (max_blocks -= ret);
47276b523df4SJan Kara 		if (credits) {
47286b523df4SJan Kara 			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
47296b523df4SJan Kara 						    credits);
47300031462bSMingming Cao 			if (IS_ERR(handle)) {
47310031462bSMingming Cao 				ret = PTR_ERR(handle);
47320031462bSMingming Cao 				break;
47330031462bSMingming Cao 			}
47346b523df4SJan Kara 		}
47352ed88685STheodore Ts'o 		ret = ext4_map_blocks(handle, inode, &map,
4736c7064ef1SJiaying Zhang 				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4737b06acd38SLukas Czerner 		if (ret <= 0)
4738b06acd38SLukas Czerner 			ext4_warning(inode->i_sb,
4739b06acd38SLukas Czerner 				     "inode #%lu: block %u: len %u: "
474092b97816STheodore Ts'o 				     "ext4_ext_map_blocks returned %d",
4741b06acd38SLukas Czerner 				     inode->i_ino, map.m_lblk,
474292b97816STheodore Ts'o 				     map.m_len, ret);
47430031462bSMingming Cao 		ext4_mark_inode_dirty(handle, inode);
47446b523df4SJan Kara 		if (credits)
47450031462bSMingming Cao 			ret2 = ext4_journal_stop(handle);
47460031462bSMingming Cao 		if (ret <= 0 || ret2)
47470031462bSMingming Cao 			break;
47480031462bSMingming Cao 	}
47496b523df4SJan Kara 	if (!credits)
47506b523df4SJan Kara 		ret2 = ext4_journal_stop(handle);
47510031462bSMingming Cao 	return ret > 0 ? ret2 : ret;
47520031462bSMingming Cao }
47536d9c85ebSYongqiang Yang 
47540031462bSMingming Cao /*
475569eb33dcSZheng Liu  * If newes is not existing extent (newes->ec_pblk equals zero) find
475669eb33dcSZheng Liu  * delayed extent at start of newes and update newes accordingly and
475791dd8c11SLukas Czerner  * return start of the next delayed extent.
475891dd8c11SLukas Czerner  *
475969eb33dcSZheng Liu  * If newes is existing extent (newes->ec_pblk is not equal zero)
476091dd8c11SLukas Czerner  * return start of next delayed extent or EXT_MAX_BLOCKS if no delayed
476169eb33dcSZheng Liu  * extent found. Leave newes unmodified.
47626873fa0dSEric Sandeen  */
476391dd8c11SLukas Czerner static int ext4_find_delayed_extent(struct inode *inode,
476469eb33dcSZheng Liu 				    struct extent_status *newes)
47656873fa0dSEric Sandeen {
4766b3aff3e3SZheng Liu 	struct extent_status es;
4767be401363SZheng Liu 	ext4_lblk_t block, next_del;
47686873fa0dSEric Sandeen 
476969eb33dcSZheng Liu 	if (newes->es_pblk == 0) {
4770e30b5dcaSYan, Zheng 		ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
4771e30b5dcaSYan, Zheng 				newes->es_lblk + newes->es_len - 1, &es);
4772e30b5dcaSYan, Zheng 
47736d9c85ebSYongqiang Yang 		/*
477469eb33dcSZheng Liu 		 * No extent in extent-tree contains block @newes->es_pblk,
47756d9c85ebSYongqiang Yang 		 * then the block may stay in 1)a hole or 2)delayed-extent.
47766d9c85ebSYongqiang Yang 		 */
477706b0c886SZheng Liu 		if (es.es_len == 0)
4778b3aff3e3SZheng Liu 			/* A hole found. */
477991dd8c11SLukas Czerner 			return 0;
47806d9c85ebSYongqiang Yang 
478169eb33dcSZheng Liu 		if (es.es_lblk > newes->es_lblk) {
4782b3aff3e3SZheng Liu 			/* A hole found. */
478369eb33dcSZheng Liu 			newes->es_len = min(es.es_lblk - newes->es_lblk,
478469eb33dcSZheng Liu 					    newes->es_len);
478591dd8c11SLukas Czerner 			return 0;
47866873fa0dSEric Sandeen 		}
47876d9c85ebSYongqiang Yang 
478869eb33dcSZheng Liu 		newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
47896d9c85ebSYongqiang Yang 	}
47906873fa0dSEric Sandeen 
479169eb33dcSZheng Liu 	block = newes->es_lblk + newes->es_len;
4792e30b5dcaSYan, Zheng 	ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
4793be401363SZheng Liu 	if (es.es_len == 0)
4794be401363SZheng Liu 		next_del = EXT_MAX_BLOCKS;
4795be401363SZheng Liu 	else
4796be401363SZheng Liu 		next_del = es.es_lblk;
4797be401363SZheng Liu 
479891dd8c11SLukas Czerner 	return next_del;
47996873fa0dSEric Sandeen }
48006873fa0dSEric Sandeen /* fiemap flags we can handle specified here */
48016873fa0dSEric Sandeen #define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
48026873fa0dSEric Sandeen 
48033a06d778SAneesh Kumar K.V static int ext4_xattr_fiemap(struct inode *inode,
48043a06d778SAneesh Kumar K.V 				struct fiemap_extent_info *fieinfo)
48056873fa0dSEric Sandeen {
48066873fa0dSEric Sandeen 	__u64 physical = 0;
48076873fa0dSEric Sandeen 	__u64 length;
48086873fa0dSEric Sandeen 	__u32 flags = FIEMAP_EXTENT_LAST;
48096873fa0dSEric Sandeen 	int blockbits = inode->i_sb->s_blocksize_bits;
48106873fa0dSEric Sandeen 	int error = 0;
48116873fa0dSEric Sandeen 
48126873fa0dSEric Sandeen 	/* in-inode? */
481319f5fb7aSTheodore Ts'o 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
48146873fa0dSEric Sandeen 		struct ext4_iloc iloc;
48156873fa0dSEric Sandeen 		int offset;	/* offset of xattr in inode */
48166873fa0dSEric Sandeen 
48176873fa0dSEric Sandeen 		error = ext4_get_inode_loc(inode, &iloc);
48186873fa0dSEric Sandeen 		if (error)
48196873fa0dSEric Sandeen 			return error;
4820a60697f4SJan Kara 		physical = (__u64)iloc.bh->b_blocknr << blockbits;
48216873fa0dSEric Sandeen 		offset = EXT4_GOOD_OLD_INODE_SIZE +
48226873fa0dSEric Sandeen 				EXT4_I(inode)->i_extra_isize;
48236873fa0dSEric Sandeen 		physical += offset;
48246873fa0dSEric Sandeen 		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
48256873fa0dSEric Sandeen 		flags |= FIEMAP_EXTENT_DATA_INLINE;
4826fd2dd9fbSCurt Wohlgemuth 		brelse(iloc.bh);
48276873fa0dSEric Sandeen 	} else { /* external block */
4828a60697f4SJan Kara 		physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
48296873fa0dSEric Sandeen 		length = inode->i_sb->s_blocksize;
48306873fa0dSEric Sandeen 	}
48316873fa0dSEric Sandeen 
48326873fa0dSEric Sandeen 	if (physical)
48336873fa0dSEric Sandeen 		error = fiemap_fill_next_extent(fieinfo, 0, physical,
48346873fa0dSEric Sandeen 						length, flags);
48356873fa0dSEric Sandeen 	return (error < 0 ? error : 0);
48366873fa0dSEric Sandeen }
48376873fa0dSEric Sandeen 
48386873fa0dSEric Sandeen int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
48396873fa0dSEric Sandeen 		__u64 start, __u64 len)
48406873fa0dSEric Sandeen {
48416873fa0dSEric Sandeen 	ext4_lblk_t start_blk;
48426873fa0dSEric Sandeen 	int error = 0;
48436873fa0dSEric Sandeen 
484494191985STao Ma 	if (ext4_has_inline_data(inode)) {
484594191985STao Ma 		int has_inline = 1;
484694191985STao Ma 
484794191985STao Ma 		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
484894191985STao Ma 
484994191985STao Ma 		if (has_inline)
485094191985STao Ma 			return error;
485194191985STao Ma 	}
485294191985STao Ma 
48537869a4a6STheodore Ts'o 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
48547869a4a6STheodore Ts'o 		error = ext4_ext_precache(inode);
48557869a4a6STheodore Ts'o 		if (error)
48567869a4a6STheodore Ts'o 			return error;
48577869a4a6STheodore Ts'o 	}
48587869a4a6STheodore Ts'o 
48596873fa0dSEric Sandeen 	/* fallback to generic here if not in extents fmt */
486012e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
48616873fa0dSEric Sandeen 		return generic_block_fiemap(inode, fieinfo, start, len,
48626873fa0dSEric Sandeen 			ext4_get_block);
48636873fa0dSEric Sandeen 
48646873fa0dSEric Sandeen 	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
48656873fa0dSEric Sandeen 		return -EBADR;
48666873fa0dSEric Sandeen 
48676873fa0dSEric Sandeen 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
48686873fa0dSEric Sandeen 		error = ext4_xattr_fiemap(inode, fieinfo);
48696873fa0dSEric Sandeen 	} else {
4870aca92ff6SLeonard Michlmayr 		ext4_lblk_t len_blks;
4871aca92ff6SLeonard Michlmayr 		__u64 last_blk;
4872aca92ff6SLeonard Michlmayr 
48736873fa0dSEric Sandeen 		start_blk = start >> inode->i_sb->s_blocksize_bits;
4874aca92ff6SLeonard Michlmayr 		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4875f17722f9SLukas Czerner 		if (last_blk >= EXT_MAX_BLOCKS)
4876f17722f9SLukas Czerner 			last_blk = EXT_MAX_BLOCKS-1;
4877aca92ff6SLeonard Michlmayr 		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
48786873fa0dSEric Sandeen 
48796873fa0dSEric Sandeen 		/*
488091dd8c11SLukas Czerner 		 * Walk the extent tree gathering extent information
488191dd8c11SLukas Czerner 		 * and pushing extents back to the user.
48826873fa0dSEric Sandeen 		 */
488391dd8c11SLukas Czerner 		error = ext4_fill_fiemap_extents(inode, start_blk,
488491dd8c11SLukas Czerner 						 len_blks, fieinfo);
48856873fa0dSEric Sandeen 	}
4886107a7bd3STheodore Ts'o 	ext4_es_lru_add(inode);
48876873fa0dSEric Sandeen 	return error;
48886873fa0dSEric Sandeen }
4889