xref: /openbmc/linux/fs/ext4/extents.c (revision 19008f6dfa16d23afcd09dceaa598bb6da8de4b1)
1a86c6181SAlex Tomas /*
2a86c6181SAlex Tomas  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3a86c6181SAlex Tomas  * Written by Alex Tomas <alex@clusterfs.com>
4a86c6181SAlex Tomas  *
5a86c6181SAlex Tomas  * Architecture independence:
6a86c6181SAlex Tomas  *   Copyright (c) 2005, Bull S.A.
7a86c6181SAlex Tomas  *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
8a86c6181SAlex Tomas  *
9a86c6181SAlex Tomas  * This program is free software; you can redistribute it and/or modify
10a86c6181SAlex Tomas  * it under the terms of the GNU General Public License version 2 as
11a86c6181SAlex Tomas  * published by the Free Software Foundation.
12a86c6181SAlex Tomas  *
13a86c6181SAlex Tomas  * This program is distributed in the hope that it will be useful,
14a86c6181SAlex Tomas  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15a86c6181SAlex Tomas  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16a86c6181SAlex Tomas  * GNU General Public License for more details.
17a86c6181SAlex Tomas  *
18a86c6181SAlex Tomas  * You should have received a copy of the GNU General Public License
19a86c6181SAlex Tomas  * along with this program; if not, write to the Free Software
20a86c6181SAlex Tomas  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
21a86c6181SAlex Tomas  */
22a86c6181SAlex Tomas 
23a86c6181SAlex Tomas /*
24a86c6181SAlex Tomas  * Extents support for EXT4
25a86c6181SAlex Tomas  *
26a86c6181SAlex Tomas  * TODO:
27a86c6181SAlex Tomas  *   - ext4*_error() should be used in some situations
28a86c6181SAlex Tomas  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
29a86c6181SAlex Tomas  *   - smart tree reduction
30a86c6181SAlex Tomas  */
31a86c6181SAlex Tomas 
32a86c6181SAlex Tomas #include <linux/fs.h>
33a86c6181SAlex Tomas #include <linux/time.h>
34cd02ff0bSMingming Cao #include <linux/jbd2.h>
35a86c6181SAlex Tomas #include <linux/highuid.h>
36a86c6181SAlex Tomas #include <linux/pagemap.h>
37a86c6181SAlex Tomas #include <linux/quotaops.h>
38a86c6181SAlex Tomas #include <linux/string.h>
39a86c6181SAlex Tomas #include <linux/slab.h>
40a86c6181SAlex Tomas #include <asm/uaccess.h>
416873fa0dSEric Sandeen #include <linux/fiemap.h>
423dcf5451SChristoph Hellwig #include "ext4_jbd2.h"
434a092d73STheodore Ts'o #include "ext4_extents.h"
44f19d5870STao Ma #include "xattr.h"
45a86c6181SAlex Tomas 
460562e0baSJiaying Zhang #include <trace/events/ext4.h>
470562e0baSJiaying Zhang 
485f95d21fSLukas Czerner /*
495f95d21fSLukas Czerner  * Flags used by extent splitting.
505f95d21fSLukas Czerner  */
515f95d21fSLukas Czerner #define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
525f95d21fSLukas Czerner 					due to ENOSPC */
53556615dcSLukas Czerner #define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
54556615dcSLukas Czerner #define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */
555f95d21fSLukas Czerner 
56dee1f973SDmitry Monakhov #define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
57dee1f973SDmitry Monakhov #define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
58dee1f973SDmitry Monakhov 
597ac5990dSDarrick J. Wong static __le32 ext4_extent_block_csum(struct inode *inode,
607ac5990dSDarrick J. Wong 				     struct ext4_extent_header *eh)
617ac5990dSDarrick J. Wong {
627ac5990dSDarrick J. Wong 	struct ext4_inode_info *ei = EXT4_I(inode);
637ac5990dSDarrick J. Wong 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
647ac5990dSDarrick J. Wong 	__u32 csum;
657ac5990dSDarrick J. Wong 
667ac5990dSDarrick J. Wong 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
677ac5990dSDarrick J. Wong 			   EXT4_EXTENT_TAIL_OFFSET(eh));
687ac5990dSDarrick J. Wong 	return cpu_to_le32(csum);
697ac5990dSDarrick J. Wong }
707ac5990dSDarrick J. Wong 
717ac5990dSDarrick J. Wong static int ext4_extent_block_csum_verify(struct inode *inode,
727ac5990dSDarrick J. Wong 					 struct ext4_extent_header *eh)
737ac5990dSDarrick J. Wong {
747ac5990dSDarrick J. Wong 	struct ext4_extent_tail *et;
757ac5990dSDarrick J. Wong 
767ac5990dSDarrick J. Wong 	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
777ac5990dSDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
787ac5990dSDarrick J. Wong 		return 1;
797ac5990dSDarrick J. Wong 
807ac5990dSDarrick J. Wong 	et = find_ext4_extent_tail(eh);
817ac5990dSDarrick J. Wong 	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
827ac5990dSDarrick J. Wong 		return 0;
837ac5990dSDarrick J. Wong 	return 1;
847ac5990dSDarrick J. Wong }
857ac5990dSDarrick J. Wong 
867ac5990dSDarrick J. Wong static void ext4_extent_block_csum_set(struct inode *inode,
877ac5990dSDarrick J. Wong 				       struct ext4_extent_header *eh)
887ac5990dSDarrick J. Wong {
897ac5990dSDarrick J. Wong 	struct ext4_extent_tail *et;
907ac5990dSDarrick J. Wong 
917ac5990dSDarrick J. Wong 	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
927ac5990dSDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
937ac5990dSDarrick J. Wong 		return;
947ac5990dSDarrick J. Wong 
957ac5990dSDarrick J. Wong 	et = find_ext4_extent_tail(eh);
967ac5990dSDarrick J. Wong 	et->et_checksum = ext4_extent_block_csum(inode, eh);
977ac5990dSDarrick J. Wong }
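/*
 * Editor's note (not part of the original file), assuming 4K blocks and
 * the usual 12-byte header and entry sizes: EXT4_EXTENT_TAIL_OFFSET()
 * places the tail right after the last possible entry slot, so
 *
 *	eh_max      = (4096 - 12) / 12 = 340 entries
 *	tail offset = 12 + 340 * 12    = 4092
 *
 * leaving exactly 4 bytes for the __le32 et_checksum; the checksum above
 * therefore covers bytes [0, 4092) of the block.
 */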
987ac5990dSDarrick J. Wong 
99d583fb87SAllison Henderson static int ext4_split_extent(handle_t *handle,
100d583fb87SAllison Henderson 				struct inode *inode,
101d583fb87SAllison Henderson 				struct ext4_ext_path *path,
102d583fb87SAllison Henderson 				struct ext4_map_blocks *map,
103d583fb87SAllison Henderson 				int split_flag,
104d583fb87SAllison Henderson 				int flags);
105d583fb87SAllison Henderson 
1065f95d21fSLukas Czerner static int ext4_split_extent_at(handle_t *handle,
1075f95d21fSLukas Czerner 			     struct inode *inode,
1085f95d21fSLukas Czerner 			     struct ext4_ext_path *path,
1095f95d21fSLukas Czerner 			     ext4_lblk_t split,
1105f95d21fSLukas Czerner 			     int split_flag,
1115f95d21fSLukas Czerner 			     int flags);
1125f95d21fSLukas Czerner 
11391dd8c11SLukas Czerner static int ext4_find_delayed_extent(struct inode *inode,
11469eb33dcSZheng Liu 				    struct extent_status *newes);
11591dd8c11SLukas Czerner 
116487caeefSJan Kara static int ext4_ext_truncate_extend_restart(handle_t *handle,
117487caeefSJan Kara 					    struct inode *inode,
118487caeefSJan Kara 					    int needed)
119a86c6181SAlex Tomas {
120a86c6181SAlex Tomas 	int err;
121a86c6181SAlex Tomas 
1220390131bSFrank Mayhar 	if (!ext4_handle_valid(handle))
1230390131bSFrank Mayhar 		return 0;
124a86c6181SAlex Tomas 	if (handle->h_buffer_credits > needed)
1259102e4faSShen Feng 		return 0;
1269102e4faSShen Feng 	err = ext4_journal_extend(handle, needed);
1270123c939STheodore Ts'o 	if (err <= 0)
1289102e4faSShen Feng 		return err;
129487caeefSJan Kara 	err = ext4_truncate_restart_trans(handle, inode, needed);
1300617b83fSDmitry Monakhov 	if (err == 0)
1310617b83fSDmitry Monakhov 		err = -EAGAIN;
132487caeefSJan Kara 
133487caeefSJan Kara 	return err;
134a86c6181SAlex Tomas }
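/*
 * Editor's note (not part of the original file): -EAGAIN from the helper
 * above signals that the transaction was restarted rather than merely
 * extended, so any extent path the caller holds may now be stale and must
 * be looked up again before further use.
 */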
135a86c6181SAlex Tomas 
136a86c6181SAlex Tomas /*
137a86c6181SAlex Tomas  * could return:
138a86c6181SAlex Tomas  *  - EROFS
139a86c6181SAlex Tomas  *  - ENOMEM
140a86c6181SAlex Tomas  */
141a86c6181SAlex Tomas static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
142a86c6181SAlex Tomas 				struct ext4_ext_path *path)
143a86c6181SAlex Tomas {
144a86c6181SAlex Tomas 	if (path->p_bh) {
145a86c6181SAlex Tomas 		/* path points to block */
1465d601255Sliang xie 		BUFFER_TRACE(path->p_bh, "get_write_access");
147a86c6181SAlex Tomas 		return ext4_journal_get_write_access(handle, path->p_bh);
148a86c6181SAlex Tomas 	}
149a86c6181SAlex Tomas 	/* path points to leaf/index in inode body */
150a86c6181SAlex Tomas 	/* we use in-core data, no need to protect them */
151a86c6181SAlex Tomas 	return 0;
152a86c6181SAlex Tomas }
153a86c6181SAlex Tomas 
154a86c6181SAlex Tomas /*
155a86c6181SAlex Tomas  * could return:
156a86c6181SAlex Tomas  *  - EROFS
157a86c6181SAlex Tomas  *  - ENOMEM
158a86c6181SAlex Tomas  *  - EIO
159a86c6181SAlex Tomas  */
1602656497bSDarrick J. Wong int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
1612656497bSDarrick J. Wong 		     struct inode *inode, struct ext4_ext_path *path)
162a86c6181SAlex Tomas {
163a86c6181SAlex Tomas 	int err;
1644b1f1660SDmitry Monakhov 
1654b1f1660SDmitry Monakhov 	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
166a86c6181SAlex Tomas 	if (path->p_bh) {
1677ac5990dSDarrick J. Wong 		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
168a86c6181SAlex Tomas 		/* path points to block */
1699ea7a0dfSTheodore Ts'o 		err = __ext4_handle_dirty_metadata(where, line, handle,
1709ea7a0dfSTheodore Ts'o 						   inode, path->p_bh);
171a86c6181SAlex Tomas 	} else {
172a86c6181SAlex Tomas 		/* path points to leaf/index in inode body */
173a86c6181SAlex Tomas 		err = ext4_mark_inode_dirty(handle, inode);
174a86c6181SAlex Tomas 	}
175a86c6181SAlex Tomas 	return err;
176a86c6181SAlex Tomas }
177a86c6181SAlex Tomas 
178f65e6fbaSAlex Tomas static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
179a86c6181SAlex Tomas 			      struct ext4_ext_path *path,
180725d26d3SAneesh Kumar K.V 			      ext4_lblk_t block)
181a86c6181SAlex Tomas {
182a86c6181SAlex Tomas 	if (path) {
18381fdbb4aSYongqiang Yang 		int depth = path->p_depth;
184a86c6181SAlex Tomas 		struct ext4_extent *ex;
185a86c6181SAlex Tomas 
186ad4fb9caSKazuya Mio 		/*
187ad4fb9caSKazuya Mio 		 * Try to predict block placement assuming that we are
188ad4fb9caSKazuya Mio 		 * filling in a file which will eventually be
189ad4fb9caSKazuya Mio 		 * non-sparse --- i.e., in the case of libbfd writing
190ad4fb9caSKazuya Mio 		 * an ELF object's sections out-of-order but in a way
191ad4fb9caSKazuya Mio 		 * that eventually results in a contiguous object or
192ad4fb9caSKazuya Mio 		 * executable file, or some database extending a table
193ad4fb9caSKazuya Mio 		 * space file.  However, this is actually somewhat
194ad4fb9caSKazuya Mio 		 * non-ideal if we are writing a sparse file such as
195ad4fb9caSKazuya Mio 		 * qemu or KVM writing a raw image file that is going
196ad4fb9caSKazuya Mio 		 * to stay fairly sparse, since it will end up
197ad4fb9caSKazuya Mio 		 * fragmenting the file system's free space.  Maybe we
198ad4fb9caSKazuya Mio 		 * should have some heuristics or some way to allow
199ad4fb9caSKazuya Mio 		 * userspace to pass a hint to the file system,
200b8d6568aSTao Ma 		 * especially if the latter case turns out to be
201ad4fb9caSKazuya Mio 		 * common.
202ad4fb9caSKazuya Mio 		 */
2037e028976SAvantika Mathur 		ex = path[depth].p_ext;
204ad4fb9caSKazuya Mio 		if (ex) {
205ad4fb9caSKazuya Mio 			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
206ad4fb9caSKazuya Mio 			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
207ad4fb9caSKazuya Mio 
208ad4fb9caSKazuya Mio 			if (block > ext_block)
209ad4fb9caSKazuya Mio 				return ext_pblk + (block - ext_block);
210ad4fb9caSKazuya Mio 			else
211ad4fb9caSKazuya Mio 				return ext_pblk - (ext_block - block);
212ad4fb9caSKazuya Mio 		}
213a86c6181SAlex Tomas 
214d0d856e8SRandy Dunlap 		/* it looks like the index is empty;
215d0d856e8SRandy Dunlap 		 * try to find the starting block from the index itself */
216a86c6181SAlex Tomas 		if (path[depth].p_bh)
217a86c6181SAlex Tomas 			return path[depth].p_bh->b_blocknr;
218a86c6181SAlex Tomas 	}
219a86c6181SAlex Tomas 
220a86c6181SAlex Tomas 	/* OK. use inode's group */
221f86186b4SEric Sandeen 	return ext4_inode_to_goal_block(inode);
222a86c6181SAlex Tomas }
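/*
 * Worked example (editor's illustration): if the extent left of the
 * insertion point maps logical block 100 to physical block 5000, then the
 * goal for logical block 110 is 5000 + (110 - 100) = 5010, and the goal
 * for logical block 90 is 5000 - (100 - 90) = 4990, keeping the file's
 * blocks clustered on disk.
 */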
223a86c6181SAlex Tomas 
224654b4908SAneesh Kumar K.V /*
225654b4908SAneesh Kumar K.V  * Allocation for a metadata block
226654b4908SAneesh Kumar K.V  */
227f65e6fbaSAlex Tomas static ext4_fsblk_t
228654b4908SAneesh Kumar K.V ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
229a86c6181SAlex Tomas 			struct ext4_ext_path *path,
23055f020dbSAllison Henderson 			struct ext4_extent *ex, int *err, unsigned int flags)
231a86c6181SAlex Tomas {
232f65e6fbaSAlex Tomas 	ext4_fsblk_t goal, newblock;
233a86c6181SAlex Tomas 
234a86c6181SAlex Tomas 	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
23555f020dbSAllison Henderson 	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
23655f020dbSAllison Henderson 					NULL, err);
237a86c6181SAlex Tomas 	return newblock;
238a86c6181SAlex Tomas }
239a86c6181SAlex Tomas 
24055ad63bfSTheodore Ts'o static inline int ext4_ext_space_block(struct inode *inode, int check)
241a86c6181SAlex Tomas {
242a86c6181SAlex Tomas 	int size;
243a86c6181SAlex Tomas 
244a86c6181SAlex Tomas 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
245a86c6181SAlex Tomas 			/ sizeof(struct ext4_extent);
246bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
24702dc62fbSYongqiang Yang 	if (!check && size > 6)
248a86c6181SAlex Tomas 		size = 6;
249a86c6181SAlex Tomas #endif
250a86c6181SAlex Tomas 	return size;
251a86c6181SAlex Tomas }
252a86c6181SAlex Tomas 
25355ad63bfSTheodore Ts'o static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
254a86c6181SAlex Tomas {
255a86c6181SAlex Tomas 	int size;
256a86c6181SAlex Tomas 
257a86c6181SAlex Tomas 	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
258a86c6181SAlex Tomas 			/ sizeof(struct ext4_extent_idx);
259bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
26002dc62fbSYongqiang Yang 	if (!check && size > 5)
261a86c6181SAlex Tomas 		size = 5;
262a86c6181SAlex Tomas #endif
263a86c6181SAlex Tomas 	return size;
264a86c6181SAlex Tomas }
265a86c6181SAlex Tomas 
26655ad63bfSTheodore Ts'o static inline int ext4_ext_space_root(struct inode *inode, int check)
267a86c6181SAlex Tomas {
268a86c6181SAlex Tomas 	int size;
269a86c6181SAlex Tomas 
270a86c6181SAlex Tomas 	size = sizeof(EXT4_I(inode)->i_data);
271a86c6181SAlex Tomas 	size -= sizeof(struct ext4_extent_header);
272a86c6181SAlex Tomas 	size /= sizeof(struct ext4_extent);
273bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
27402dc62fbSYongqiang Yang 	if (!check && size > 3)
275a86c6181SAlex Tomas 		size = 3;
276a86c6181SAlex Tomas #endif
277a86c6181SAlex Tomas 	return size;
278a86c6181SAlex Tomas }
279a86c6181SAlex Tomas 
28055ad63bfSTheodore Ts'o static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
281a86c6181SAlex Tomas {
282a86c6181SAlex Tomas 	int size;
283a86c6181SAlex Tomas 
284a86c6181SAlex Tomas 	size = sizeof(EXT4_I(inode)->i_data);
285a86c6181SAlex Tomas 	size -= sizeof(struct ext4_extent_header);
286a86c6181SAlex Tomas 	size /= sizeof(struct ext4_extent_idx);
287bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
28802dc62fbSYongqiang Yang 	if (!check && size > 4)
289a86c6181SAlex Tomas 		size = 4;
290a86c6181SAlex Tomas #endif
291a86c6181SAlex Tomas 	return size;
292a86c6181SAlex Tomas }
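/*
 * Worked example (editor's illustration, assuming 4K blocks): the extent
 * header, struct ext4_extent and struct ext4_extent_idx are all 12 bytes,
 * so a full tree block holds (4096 - 12) / 12 = 340 entries, while the
 * root in the inode's 60-byte i_data area holds (60 - 12) / 12 = 4
 * entries.  The AGGRESSIVE_TEST caps above shrink these limits so deep
 * trees can be exercised on small files.
 */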
293a86c6181SAlex Tomas 
294fcf6b1b7SDmitry Monakhov static inline int
295fcf6b1b7SDmitry Monakhov ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
296fcf6b1b7SDmitry Monakhov 			   struct ext4_ext_path *path, ext4_lblk_t lblk,
297fcf6b1b7SDmitry Monakhov 			   int nofail)
298fcf6b1b7SDmitry Monakhov {
299fcf6b1b7SDmitry Monakhov 	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
300fcf6b1b7SDmitry Monakhov 
301fcf6b1b7SDmitry Monakhov 	return ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
302fcf6b1b7SDmitry Monakhov 			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
303fcf6b1b7SDmitry Monakhov 			EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
304fcf6b1b7SDmitry Monakhov 			(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
305fcf6b1b7SDmitry Monakhov }
306fcf6b1b7SDmitry Monakhov 
307d2a17637SMingming Cao /*
308d2a17637SMingming Cao  * Calculate the number of metadata blocks needed
309d2a17637SMingming Cao  * to allocate one new block at @lblock
310d2a17637SMingming Cao  * Worst case is one block per extent
311d2a17637SMingming Cao  */
31201f49d0bSTheodore Ts'o int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
313d2a17637SMingming Cao {
3149d0be502STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
31581fdbb4aSYongqiang Yang 	int idxs;
316d2a17637SMingming Cao 
3179d0be502STheodore Ts'o 	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
3189d0be502STheodore Ts'o 		/ sizeof(struct ext4_extent_idx));
319d2a17637SMingming Cao 
320d2a17637SMingming Cao 	/*
3219d0be502STheodore Ts'o 	 * If the new delayed allocation block is contiguous with the
3229d0be502STheodore Ts'o 	 * previous da block, it can share index blocks with the
3239d0be502STheodore Ts'o 	 * previous block, so we only need to allocate a new index
3249d0be502STheodore Ts'o 	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
3259d0be502STheodore Ts'o 	 * an additional index block, and at idxs**3 blocks, yet
3269d0be502STheodore Ts'o 	 * another index block.
327d2a17637SMingming Cao 	 */
3289d0be502STheodore Ts'o 	if (ei->i_da_metadata_calc_len &&
3299d0be502STheodore Ts'o 	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
33081fdbb4aSYongqiang Yang 		int num = 0;
33181fdbb4aSYongqiang Yang 
3329d0be502STheodore Ts'o 		if ((ei->i_da_metadata_calc_len % idxs) == 0)
3339d0be502STheodore Ts'o 			num++;
3349d0be502STheodore Ts'o 		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
3359d0be502STheodore Ts'o 			num++;
3369d0be502STheodore Ts'o 		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
3379d0be502STheodore Ts'o 			num++;
3389d0be502STheodore Ts'o 			ei->i_da_metadata_calc_len = 0;
3399d0be502STheodore Ts'o 		} else
3409d0be502STheodore Ts'o 			ei->i_da_metadata_calc_len++;
3419d0be502STheodore Ts'o 		ei->i_da_metadata_calc_last_lblock++;
342d2a17637SMingming Cao 		return num;
343d2a17637SMingming Cao 	}
344d2a17637SMingming Cao 
3459d0be502STheodore Ts'o 	/*
3469d0be502STheodore Ts'o 	 * In the worst case we need a new set of index blocks at
3479d0be502STheodore Ts'o 	 * every level of the inode's extent tree.
3489d0be502STheodore Ts'o 	 */
3499d0be502STheodore Ts'o 	ei->i_da_metadata_calc_len = 1;
3509d0be502STheodore Ts'o 	ei->i_da_metadata_calc_last_lblock = lblock;
3519d0be502STheodore Ts'o 	return ext_depth(inode) + 1;
3529d0be502STheodore Ts'o }
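/*
 * Worked example (editor's illustration, assuming 4K blocks, so
 * idxs = 340): within a contiguous run of delayed-allocation blocks, most
 * blocks charge no new metadata; every 340th block charges one extra
 * index block, every 340*340th block one more, and so on, matching the
 * modulo tests above.
 */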
3539d0be502STheodore Ts'o 
354c29c0ae7SAlex Tomas static int
355c29c0ae7SAlex Tomas ext4_ext_max_entries(struct inode *inode, int depth)
356c29c0ae7SAlex Tomas {
357c29c0ae7SAlex Tomas 	int max;
358c29c0ae7SAlex Tomas 
359c29c0ae7SAlex Tomas 	if (depth == ext_depth(inode)) {
360c29c0ae7SAlex Tomas 		if (depth == 0)
36155ad63bfSTheodore Ts'o 			max = ext4_ext_space_root(inode, 1);
362c29c0ae7SAlex Tomas 		else
36355ad63bfSTheodore Ts'o 			max = ext4_ext_space_root_idx(inode, 1);
364c29c0ae7SAlex Tomas 	} else {
365c29c0ae7SAlex Tomas 		if (depth == 0)
36655ad63bfSTheodore Ts'o 			max = ext4_ext_space_block(inode, 1);
367c29c0ae7SAlex Tomas 		else
36855ad63bfSTheodore Ts'o 			max = ext4_ext_space_block_idx(inode, 1);
369c29c0ae7SAlex Tomas 	}
370c29c0ae7SAlex Tomas 
371c29c0ae7SAlex Tomas 	return max;
372c29c0ae7SAlex Tomas }
373c29c0ae7SAlex Tomas 
37456b19868SAneesh Kumar K.V static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
37556b19868SAneesh Kumar K.V {
376bf89d16fSTheodore Ts'o 	ext4_fsblk_t block = ext4_ext_pblock(ext);
37756b19868SAneesh Kumar K.V 	int len = ext4_ext_get_actual_len(ext);
3785946d089SEryu Guan 	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
3795946d089SEryu Guan 	ext4_lblk_t last = lblock + len - 1;
380e84a26ceSTheodore Ts'o 
3815946d089SEryu Guan 	if (lblock > last)
38231d4f3a2STheodore Ts'o 		return 0;
3836fd058f7STheodore Ts'o 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
38456b19868SAneesh Kumar K.V }
38556b19868SAneesh Kumar K.V 
38656b19868SAneesh Kumar K.V static int ext4_valid_extent_idx(struct inode *inode,
38756b19868SAneesh Kumar K.V 				struct ext4_extent_idx *ext_idx)
38856b19868SAneesh Kumar K.V {
389bf89d16fSTheodore Ts'o 	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
390e84a26ceSTheodore Ts'o 
3916fd058f7STheodore Ts'o 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
39256b19868SAneesh Kumar K.V }
39356b19868SAneesh Kumar K.V 
39456b19868SAneesh Kumar K.V static int ext4_valid_extent_entries(struct inode *inode,
39556b19868SAneesh Kumar K.V 				struct ext4_extent_header *eh,
39656b19868SAneesh Kumar K.V 				int depth)
39756b19868SAneesh Kumar K.V {
39856b19868SAneesh Kumar K.V 	unsigned short entries;
39956b19868SAneesh Kumar K.V 	if (eh->eh_entries == 0)
40056b19868SAneesh Kumar K.V 		return 1;
40156b19868SAneesh Kumar K.V 
40256b19868SAneesh Kumar K.V 	entries = le16_to_cpu(eh->eh_entries);
40356b19868SAneesh Kumar K.V 
40456b19868SAneesh Kumar K.V 	if (depth == 0) {
40556b19868SAneesh Kumar K.V 		/* leaf entries */
40681fdbb4aSYongqiang Yang 		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
4075946d089SEryu Guan 		struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
4085946d089SEryu Guan 		ext4_fsblk_t pblock = 0;
4095946d089SEryu Guan 		ext4_lblk_t lblock = 0;
4105946d089SEryu Guan 		ext4_lblk_t prev = 0;
4115946d089SEryu Guan 		int len = 0;
41256b19868SAneesh Kumar K.V 		while (entries) {
41356b19868SAneesh Kumar K.V 			if (!ext4_valid_extent(inode, ext))
41456b19868SAneesh Kumar K.V 				return 0;
4155946d089SEryu Guan 
4165946d089SEryu Guan 			/* Check for overlapping extents */
4175946d089SEryu Guan 			lblock = le32_to_cpu(ext->ee_block);
4185946d089SEryu Guan 			len = ext4_ext_get_actual_len(ext);
4195946d089SEryu Guan 			if ((lblock <= prev) && prev) {
4205946d089SEryu Guan 				pblock = ext4_ext_pblock(ext);
4215946d089SEryu Guan 				es->s_last_error_block = cpu_to_le64(pblock);
4225946d089SEryu Guan 				return 0;
4235946d089SEryu Guan 			}
42456b19868SAneesh Kumar K.V 			ext++;
42556b19868SAneesh Kumar K.V 			entries--;
4265946d089SEryu Guan 			prev = lblock + len - 1;
42756b19868SAneesh Kumar K.V 		}
42856b19868SAneesh Kumar K.V 	} else {
42981fdbb4aSYongqiang Yang 		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
43056b19868SAneesh Kumar K.V 		while (entries) {
43156b19868SAneesh Kumar K.V 			if (!ext4_valid_extent_idx(inode, ext_idx))
43256b19868SAneesh Kumar K.V 				return 0;
43356b19868SAneesh Kumar K.V 			ext_idx++;
43456b19868SAneesh Kumar K.V 			entries--;
43556b19868SAneesh Kumar K.V 		}
43656b19868SAneesh Kumar K.V 	}
43756b19868SAneesh Kumar K.V 	return 1;
43856b19868SAneesh Kumar K.V }
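/*
 * Editor's note on the overlap check above: prev holds the last logical
 * block of the preceding extent, so a leaf containing extents [0..9] and
 * [9..12] trips the (lblock <= prev) test (9 <= 9) and is rejected as
 * corrupt.
 */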
43956b19868SAneesh Kumar K.V 
440c398eda0STheodore Ts'o static int __ext4_ext_check(const char *function, unsigned int line,
441c398eda0STheodore Ts'o 			    struct inode *inode, struct ext4_extent_header *eh,
442c349179bSTheodore Ts'o 			    int depth, ext4_fsblk_t pblk)
443c29c0ae7SAlex Tomas {
444c29c0ae7SAlex Tomas 	const char *error_msg;
445c29c0ae7SAlex Tomas 	int max = 0;
446c29c0ae7SAlex Tomas 
447c29c0ae7SAlex Tomas 	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
448c29c0ae7SAlex Tomas 		error_msg = "invalid magic";
449c29c0ae7SAlex Tomas 		goto corrupted;
450c29c0ae7SAlex Tomas 	}
451c29c0ae7SAlex Tomas 	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
452c29c0ae7SAlex Tomas 		error_msg = "unexpected eh_depth";
453c29c0ae7SAlex Tomas 		goto corrupted;
454c29c0ae7SAlex Tomas 	}
455c29c0ae7SAlex Tomas 	if (unlikely(eh->eh_max == 0)) {
456c29c0ae7SAlex Tomas 		error_msg = "invalid eh_max";
457c29c0ae7SAlex Tomas 		goto corrupted;
458c29c0ae7SAlex Tomas 	}
459c29c0ae7SAlex Tomas 	max = ext4_ext_max_entries(inode, depth);
460c29c0ae7SAlex Tomas 	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
461c29c0ae7SAlex Tomas 		error_msg = "too large eh_max";
462c29c0ae7SAlex Tomas 		goto corrupted;
463c29c0ae7SAlex Tomas 	}
464c29c0ae7SAlex Tomas 	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
465c29c0ae7SAlex Tomas 		error_msg = "invalid eh_entries";
466c29c0ae7SAlex Tomas 		goto corrupted;
467c29c0ae7SAlex Tomas 	}
46856b19868SAneesh Kumar K.V 	if (!ext4_valid_extent_entries(inode, eh, depth)) {
46956b19868SAneesh Kumar K.V 		error_msg = "invalid extent entries";
47056b19868SAneesh Kumar K.V 		goto corrupted;
47156b19868SAneesh Kumar K.V 	}
4727ac5990dSDarrick J. Wong 	/* Verify checksum on non-root extent tree nodes */
4737ac5990dSDarrick J. Wong 	if (ext_depth(inode) != depth &&
4747ac5990dSDarrick J. Wong 	    !ext4_extent_block_csum_verify(inode, eh)) {
4757ac5990dSDarrick J. Wong 		error_msg = "extent tree corrupted";
4767ac5990dSDarrick J. Wong 		goto corrupted;
4777ac5990dSDarrick J. Wong 	}
478c29c0ae7SAlex Tomas 	return 0;
479c29c0ae7SAlex Tomas 
480c29c0ae7SAlex Tomas corrupted:
481c398eda0STheodore Ts'o 	ext4_error_inode(inode, function, line, 0,
482c349179bSTheodore Ts'o 			 "pblk %llu bad header/extent: %s - magic %x, "
483c29c0ae7SAlex Tomas 			 "entries %u, max %u(%u), depth %u(%u)",
484c349179bSTheodore Ts'o 			 (unsigned long long) pblk, error_msg,
485c349179bSTheodore Ts'o 			 le16_to_cpu(eh->eh_magic),
486c29c0ae7SAlex Tomas 			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
487c29c0ae7SAlex Tomas 			 max, le16_to_cpu(eh->eh_depth), depth);
488c29c0ae7SAlex Tomas 	return -EIO;
489c29c0ae7SAlex Tomas }
490c29c0ae7SAlex Tomas 
491c349179bSTheodore Ts'o #define ext4_ext_check(inode, eh, depth, pblk)			\
492c349179bSTheodore Ts'o 	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
493c29c0ae7SAlex Tomas 
4947a262f7cSAneesh Kumar K.V int ext4_ext_check_inode(struct inode *inode)
4957a262f7cSAneesh Kumar K.V {
496c349179bSTheodore Ts'o 	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
4977a262f7cSAneesh Kumar K.V }
4987a262f7cSAneesh Kumar K.V 
4997d7ea89eSTheodore Ts'o static struct buffer_head *
5007d7ea89eSTheodore Ts'o __read_extent_tree_block(const char *function, unsigned int line,
501107a7bd3STheodore Ts'o 			 struct inode *inode, ext4_fsblk_t pblk, int depth,
502107a7bd3STheodore Ts'o 			 int flags)
503f8489128SDarrick J. Wong {
5047d7ea89eSTheodore Ts'o 	struct buffer_head		*bh;
5057d7ea89eSTheodore Ts'o 	int				err;
506f8489128SDarrick J. Wong 
5077d7ea89eSTheodore Ts'o 	bh = sb_getblk(inode->i_sb, pblk);
5087d7ea89eSTheodore Ts'o 	if (unlikely(!bh))
5097d7ea89eSTheodore Ts'o 		return ERR_PTR(-ENOMEM);
5107d7ea89eSTheodore Ts'o 
5117d7ea89eSTheodore Ts'o 	if (!bh_uptodate_or_lock(bh)) {
5127d7ea89eSTheodore Ts'o 		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
5137d7ea89eSTheodore Ts'o 		err = bh_submit_read(bh);
5147d7ea89eSTheodore Ts'o 		if (err < 0)
5157d7ea89eSTheodore Ts'o 			goto errout;
5167d7ea89eSTheodore Ts'o 	}
5177869a4a6STheodore Ts'o 	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
5187d7ea89eSTheodore Ts'o 		return bh;
5197d7ea89eSTheodore Ts'o 	err = __ext4_ext_check(function, line, inode,
520c349179bSTheodore Ts'o 			       ext_block_hdr(bh), depth, pblk);
5217d7ea89eSTheodore Ts'o 	if (err)
5227d7ea89eSTheodore Ts'o 		goto errout;
523f8489128SDarrick J. Wong 	set_buffer_verified(bh);
524107a7bd3STheodore Ts'o 	/*
525107a7bd3STheodore Ts'o 	 * If this is a leaf block, cache all of its entries
526107a7bd3STheodore Ts'o 	 */
527107a7bd3STheodore Ts'o 	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
528107a7bd3STheodore Ts'o 		struct ext4_extent_header *eh = ext_block_hdr(bh);
529107a7bd3STheodore Ts'o 		struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
530107a7bd3STheodore Ts'o 		ext4_lblk_t prev = 0;
531107a7bd3STheodore Ts'o 		int i;
532107a7bd3STheodore Ts'o 
533107a7bd3STheodore Ts'o 		for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
534107a7bd3STheodore Ts'o 			unsigned int status = EXTENT_STATUS_WRITTEN;
535107a7bd3STheodore Ts'o 			ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
536107a7bd3STheodore Ts'o 			int len = ext4_ext_get_actual_len(ex);
537107a7bd3STheodore Ts'o 
538107a7bd3STheodore Ts'o 			if (prev && (prev != lblk))
539107a7bd3STheodore Ts'o 				ext4_es_cache_extent(inode, prev,
540107a7bd3STheodore Ts'o 						     lblk - prev, ~0,
541107a7bd3STheodore Ts'o 						     EXTENT_STATUS_HOLE);
542107a7bd3STheodore Ts'o 
543556615dcSLukas Czerner 			if (ext4_ext_is_unwritten(ex))
544107a7bd3STheodore Ts'o 				status = EXTENT_STATUS_UNWRITTEN;
545107a7bd3STheodore Ts'o 			ext4_es_cache_extent(inode, lblk, len,
546107a7bd3STheodore Ts'o 					     ext4_ext_pblock(ex), status);
547107a7bd3STheodore Ts'o 			prev = lblk + len;
548107a7bd3STheodore Ts'o 		}
549107a7bd3STheodore Ts'o 	}
5507d7ea89eSTheodore Ts'o 	return bh;
5517d7ea89eSTheodore Ts'o errout:
5527d7ea89eSTheodore Ts'o 	put_bh(bh);
5537d7ea89eSTheodore Ts'o 	return ERR_PTR(err);
5547d7ea89eSTheodore Ts'o 
555f8489128SDarrick J. Wong }
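/*
 * Editor's illustration of the leaf-caching loop above: a leaf holding
 * extents {lblk 0, len 10} and {lblk 50, len 5} caches both extents and
 * also caches the gap [10..49] as a hole, so later lookups in that range
 * can be answered from the extent status tree without re-reading the leaf.
 */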
556f8489128SDarrick J. Wong 
557107a7bd3STheodore Ts'o #define read_extent_tree_block(inode, pblk, depth, flags)		\
558107a7bd3STheodore Ts'o 	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
559107a7bd3STheodore Ts'o 				 (depth), (flags))
560f8489128SDarrick J. Wong 
5617869a4a6STheodore Ts'o /*
5627869a4a6STheodore Ts'o  * This function is called to cache a file's extent information in the
5637869a4a6STheodore Ts'o  * extent status tree
5647869a4a6STheodore Ts'o  */
5657869a4a6STheodore Ts'o int ext4_ext_precache(struct inode *inode)
5667869a4a6STheodore Ts'o {
5677869a4a6STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
5687869a4a6STheodore Ts'o 	struct ext4_ext_path *path = NULL;
5697869a4a6STheodore Ts'o 	struct buffer_head *bh;
5707869a4a6STheodore Ts'o 	int i = 0, depth, ret = 0;
5717869a4a6STheodore Ts'o 
5727869a4a6STheodore Ts'o 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5737869a4a6STheodore Ts'o 		return 0;	/* not an extent-mapped inode */
5747869a4a6STheodore Ts'o 
5757869a4a6STheodore Ts'o 	down_read(&ei->i_data_sem);
5767869a4a6STheodore Ts'o 	depth = ext_depth(inode);
5777869a4a6STheodore Ts'o 
5787869a4a6STheodore Ts'o 	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
5797869a4a6STheodore Ts'o 		       GFP_NOFS);
5807869a4a6STheodore Ts'o 	if (path == NULL) {
5817869a4a6STheodore Ts'o 		up_read(&ei->i_data_sem);
5827869a4a6STheodore Ts'o 		return -ENOMEM;
5837869a4a6STheodore Ts'o 	}
5847869a4a6STheodore Ts'o 
5857869a4a6STheodore Ts'o 	/* Don't cache anything if there are no external extent blocks */
5867869a4a6STheodore Ts'o 	if (depth == 0)
5877869a4a6STheodore Ts'o 		goto out;
5887869a4a6STheodore Ts'o 	path[0].p_hdr = ext_inode_hdr(inode);
5897869a4a6STheodore Ts'o 	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
5907869a4a6STheodore Ts'o 	if (ret)
5917869a4a6STheodore Ts'o 		goto out;
5927869a4a6STheodore Ts'o 	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
5937869a4a6STheodore Ts'o 	while (i >= 0) {
5947869a4a6STheodore Ts'o 		/*
5957869a4a6STheodore Ts'o 		 * If this is a leaf block or we've reached the end of
5967869a4a6STheodore Ts'o 		 * the index block, go up
5977869a4a6STheodore Ts'o 		 */
5987869a4a6STheodore Ts'o 		if ((i == depth) ||
5997869a4a6STheodore Ts'o 		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
6007869a4a6STheodore Ts'o 			brelse(path[i].p_bh);
6017869a4a6STheodore Ts'o 			path[i].p_bh = NULL;
6027869a4a6STheodore Ts'o 			i--;
6037869a4a6STheodore Ts'o 			continue;
6047869a4a6STheodore Ts'o 		}
6057869a4a6STheodore Ts'o 		bh = read_extent_tree_block(inode,
6067869a4a6STheodore Ts'o 					    ext4_idx_pblock(path[i].p_idx++),
6077869a4a6STheodore Ts'o 					    depth - i - 1,
6087869a4a6STheodore Ts'o 					    EXT4_EX_FORCE_CACHE);
6097869a4a6STheodore Ts'o 		if (IS_ERR(bh)) {
6107869a4a6STheodore Ts'o 			ret = PTR_ERR(bh);
6117869a4a6STheodore Ts'o 			break;
6127869a4a6STheodore Ts'o 		}
6137869a4a6STheodore Ts'o 		i++;
6147869a4a6STheodore Ts'o 		path[i].p_bh = bh;
6157869a4a6STheodore Ts'o 		path[i].p_hdr = ext_block_hdr(bh);
6167869a4a6STheodore Ts'o 		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
6177869a4a6STheodore Ts'o 	}
6187869a4a6STheodore Ts'o 	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
6197869a4a6STheodore Ts'o out:
6207869a4a6STheodore Ts'o 	up_read(&ei->i_data_sem);
6217869a4a6STheodore Ts'o 	ext4_ext_drop_refs(path);
6227869a4a6STheodore Ts'o 	kfree(path);
6237869a4a6STheodore Ts'o 	return ret;
6247869a4a6STheodore Ts'o }
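/*
 * Editor's note: the loop above is an iterative depth-first walk that uses
 * path[] as an explicit stack; i tracks the current level, and a level is
 * popped (brelse + i--) once its last index has been visited.  This
 * function is reachable from userspace via the EXT4_IOC_PRECACHE_EXTENTS
 * ioctl in the usual wiring, though that wiring is the editor's assumption
 * here.
 */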
6257869a4a6STheodore Ts'o 
626a86c6181SAlex Tomas #ifdef EXT_DEBUG
627a86c6181SAlex Tomas static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
628a86c6181SAlex Tomas {
629a86c6181SAlex Tomas 	int k, l = path->p_depth;
630a86c6181SAlex Tomas 
631a86c6181SAlex Tomas 	ext_debug("path:");
632a86c6181SAlex Tomas 	for (k = 0; k <= l; k++, path++) {
633a86c6181SAlex Tomas 		if (path->p_idx) {
6342ae02107SMingming Cao 		  ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
635bf89d16fSTheodore Ts'o 			    ext4_idx_pblock(path->p_idx));
636a86c6181SAlex Tomas 		} else if (path->p_ext) {
637553f9008SMingming 			ext_debug("  %d:[%d]%d:%llu ",
638a86c6181SAlex Tomas 				  le32_to_cpu(path->p_ext->ee_block),
639556615dcSLukas Czerner 				  ext4_ext_is_unwritten(path->p_ext),
640a2df2a63SAmit Arora 				  ext4_ext_get_actual_len(path->p_ext),
641bf89d16fSTheodore Ts'o 				  ext4_ext_pblock(path->p_ext));
642a86c6181SAlex Tomas 		} else
643a86c6181SAlex Tomas 			ext_debug("  []");
644a86c6181SAlex Tomas 	}
645a86c6181SAlex Tomas 	ext_debug("\n");
646a86c6181SAlex Tomas }
647a86c6181SAlex Tomas 
648a86c6181SAlex Tomas static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
649a86c6181SAlex Tomas {
650a86c6181SAlex Tomas 	int depth = ext_depth(inode);
651a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
652a86c6181SAlex Tomas 	struct ext4_extent *ex;
653a86c6181SAlex Tomas 	int i;
654a86c6181SAlex Tomas 
655a86c6181SAlex Tomas 	if (!path)
656a86c6181SAlex Tomas 		return;
657a86c6181SAlex Tomas 
658a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
659a86c6181SAlex Tomas 	ex = EXT_FIRST_EXTENT(eh);
660a86c6181SAlex Tomas 
661553f9008SMingming 	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
662553f9008SMingming 
663a86c6181SAlex Tomas 	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
664553f9008SMingming 		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
665556615dcSLukas Czerner 			  ext4_ext_is_unwritten(ex),
666bf89d16fSTheodore Ts'o 			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
667a86c6181SAlex Tomas 	}
668a86c6181SAlex Tomas 	ext_debug("\n");
669a86c6181SAlex Tomas }
6701b16da77SYongqiang Yang 
6711b16da77SYongqiang Yang static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
6721b16da77SYongqiang Yang 			ext4_fsblk_t newblock, int level)
6731b16da77SYongqiang Yang {
6741b16da77SYongqiang Yang 	int depth = ext_depth(inode);
6751b16da77SYongqiang Yang 	struct ext4_extent *ex;
6761b16da77SYongqiang Yang 
6771b16da77SYongqiang Yang 	if (depth != level) {
6781b16da77SYongqiang Yang 		struct ext4_extent_idx *idx;
6791b16da77SYongqiang Yang 		idx = path[level].p_idx;
6801b16da77SYongqiang Yang 		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
6811b16da77SYongqiang Yang 			ext_debug("%d: move %d:%llu in new index %llu\n", level,
6821b16da77SYongqiang Yang 					le32_to_cpu(idx->ei_block),
6831b16da77SYongqiang Yang 					ext4_idx_pblock(idx),
6841b16da77SYongqiang Yang 					newblock);
6851b16da77SYongqiang Yang 			idx++;
6861b16da77SYongqiang Yang 		}
6871b16da77SYongqiang Yang 
6881b16da77SYongqiang Yang 		return;
6891b16da77SYongqiang Yang 	}
6901b16da77SYongqiang Yang 
6911b16da77SYongqiang Yang 	ex = path[depth].p_ext;
6921b16da77SYongqiang Yang 	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
6931b16da77SYongqiang Yang 		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
6941b16da77SYongqiang Yang 				le32_to_cpu(ex->ee_block),
6951b16da77SYongqiang Yang 				ext4_ext_pblock(ex),
696556615dcSLukas Czerner 				ext4_ext_is_unwritten(ex),
6971b16da77SYongqiang Yang 				ext4_ext_get_actual_len(ex),
6981b16da77SYongqiang Yang 				newblock);
6991b16da77SYongqiang Yang 		ex++;
7001b16da77SYongqiang Yang 	}
7011b16da77SYongqiang Yang }
7021b16da77SYongqiang Yang 
703a86c6181SAlex Tomas #else
704a86c6181SAlex Tomas #define ext4_ext_show_path(inode, path)
705a86c6181SAlex Tomas #define ext4_ext_show_leaf(inode, path)
7061b16da77SYongqiang Yang #define ext4_ext_show_move(inode, path, newblock, level)
707a86c6181SAlex Tomas #endif
708a86c6181SAlex Tomas 
709b35905c1SAneesh Kumar K.V void ext4_ext_drop_refs(struct ext4_ext_path *path)
710a86c6181SAlex Tomas {
711a86c6181SAlex Tomas 	int depth = path->p_depth;
712a86c6181SAlex Tomas 	int i;
713a86c6181SAlex Tomas 
714a86c6181SAlex Tomas 	for (i = 0; i <= depth; i++, path++)
715a86c6181SAlex Tomas 		if (path->p_bh) {
716a86c6181SAlex Tomas 			brelse(path->p_bh);
717a86c6181SAlex Tomas 			path->p_bh = NULL;
718a86c6181SAlex Tomas 		}
719a86c6181SAlex Tomas }
720a86c6181SAlex Tomas 
721a86c6181SAlex Tomas /*
722d0d856e8SRandy Dunlap  * ext4_ext_binsearch_idx:
723d0d856e8SRandy Dunlap  * binary search for the closest index of the given block
724c29c0ae7SAlex Tomas  * the header must be checked before calling this
725a86c6181SAlex Tomas  */
726a86c6181SAlex Tomas static void
727725d26d3SAneesh Kumar K.V ext4_ext_binsearch_idx(struct inode *inode,
728725d26d3SAneesh Kumar K.V 			struct ext4_ext_path *path, ext4_lblk_t block)
729a86c6181SAlex Tomas {
730a86c6181SAlex Tomas 	struct ext4_extent_header *eh = path->p_hdr;
731a86c6181SAlex Tomas 	struct ext4_extent_idx *r, *l, *m;
732a86c6181SAlex Tomas 
733a86c6181SAlex Tomas 
734bba90743SEric Sandeen 	ext_debug("binsearch for %u(idx):  ", block);
735a86c6181SAlex Tomas 
736a86c6181SAlex Tomas 	l = EXT_FIRST_INDEX(eh) + 1;
737e9f410b1SDmitry Monakhov 	r = EXT_LAST_INDEX(eh);
738a86c6181SAlex Tomas 	while (l <= r) {
739a86c6181SAlex Tomas 		m = l + (r - l) / 2;
740a86c6181SAlex Tomas 		if (block < le32_to_cpu(m->ei_block))
741a86c6181SAlex Tomas 			r = m - 1;
742a86c6181SAlex Tomas 		else
743a86c6181SAlex Tomas 			l = m + 1;
74426d535edSDmitry Monakhov 		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
74526d535edSDmitry Monakhov 				m, le32_to_cpu(m->ei_block),
74626d535edSDmitry Monakhov 				r, le32_to_cpu(r->ei_block));
747a86c6181SAlex Tomas 	}
748a86c6181SAlex Tomas 
749a86c6181SAlex Tomas 	path->p_idx = l - 1;
7504a3c3a51SZheng Liu 	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
751bf89d16fSTheodore Ts'o 		  ext4_idx_pblock(path->p_idx));
752a86c6181SAlex Tomas 
753a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH
754a86c6181SAlex Tomas 	{
755a86c6181SAlex Tomas 		struct ext4_extent_idx *chix, *ix;
756a86c6181SAlex Tomas 		int k;
757a86c6181SAlex Tomas 
758a86c6181SAlex Tomas 		chix = ix = EXT_FIRST_INDEX(eh);
759a86c6181SAlex Tomas 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
760a86c6181SAlex Tomas 		  if (k != 0 &&
761a86c6181SAlex Tomas 		      le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
7624776004fSTheodore Ts'o 				printk(KERN_DEBUG "k=%d, ix=0x%p, "
7634776004fSTheodore Ts'o 				       "first=0x%p\n", k,
764a86c6181SAlex Tomas 				       ix, EXT_FIRST_INDEX(eh));
7654776004fSTheodore Ts'o 				printk(KERN_DEBUG "%u <= %u\n",
766a86c6181SAlex Tomas 				       le32_to_cpu(ix->ei_block),
767a86c6181SAlex Tomas 				       le32_to_cpu(ix[-1].ei_block));
768a86c6181SAlex Tomas 			}
769a86c6181SAlex Tomas 			BUG_ON(k && le32_to_cpu(ix->ei_block)
770a86c6181SAlex Tomas 					   <= le32_to_cpu(ix[-1].ei_block));
771a86c6181SAlex Tomas 			if (block < le32_to_cpu(ix->ei_block))
772a86c6181SAlex Tomas 				break;
773a86c6181SAlex Tomas 			chix = ix;
774a86c6181SAlex Tomas 		}
775a86c6181SAlex Tomas 		BUG_ON(chix != path->p_idx);
776a86c6181SAlex Tomas 	}
777a86c6181SAlex Tomas #endif
778a86c6181SAlex Tomas 
779a86c6181SAlex Tomas }
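/*
 * Worked example (editor's illustration): for indexes starting at logical
 * blocks {0, 100, 200}, a search for block 150 moves l past the 100 entry
 * and stops, so path->p_idx = l - 1 selects the index at 100 -- the
 * rightmost entry whose ei_block does not exceed the target block.
 */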
780a86c6181SAlex Tomas 
781a86c6181SAlex Tomas /*
782d0d856e8SRandy Dunlap  * ext4_ext_binsearch:
783d0d856e8SRandy Dunlap  * binary search for closest extent of the given block
784c29c0ae7SAlex Tomas  * the header must be checked before calling this
785a86c6181SAlex Tomas  */
786a86c6181SAlex Tomas static void
787725d26d3SAneesh Kumar K.V ext4_ext_binsearch(struct inode *inode,
788725d26d3SAneesh Kumar K.V 		struct ext4_ext_path *path, ext4_lblk_t block)
789a86c6181SAlex Tomas {
790a86c6181SAlex Tomas 	struct ext4_extent_header *eh = path->p_hdr;
791a86c6181SAlex Tomas 	struct ext4_extent *r, *l, *m;
792a86c6181SAlex Tomas 
793a86c6181SAlex Tomas 	if (eh->eh_entries == 0) {
794a86c6181SAlex Tomas 		/*
795d0d856e8SRandy Dunlap 		 * this leaf is empty:
796a86c6181SAlex Tomas 		 * we get such a leaf in split/add case
797a86c6181SAlex Tomas 		 */
798a86c6181SAlex Tomas 		return;
799a86c6181SAlex Tomas 	}
800a86c6181SAlex Tomas 
801bba90743SEric Sandeen 	ext_debug("binsearch for %u:  ", block);
802a86c6181SAlex Tomas 
803a86c6181SAlex Tomas 	l = EXT_FIRST_EXTENT(eh) + 1;
804e9f410b1SDmitry Monakhov 	r = EXT_LAST_EXTENT(eh);
805a86c6181SAlex Tomas 
806a86c6181SAlex Tomas 	while (l <= r) {
807a86c6181SAlex Tomas 		m = l + (r - l) / 2;
808a86c6181SAlex Tomas 		if (block < le32_to_cpu(m->ee_block))
809a86c6181SAlex Tomas 			r = m - 1;
810a86c6181SAlex Tomas 		else
811a86c6181SAlex Tomas 			l = m + 1;
81226d535edSDmitry Monakhov 		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
81326d535edSDmitry Monakhov 				m, le32_to_cpu(m->ee_block),
81426d535edSDmitry Monakhov 				r, le32_to_cpu(r->ee_block));
815a86c6181SAlex Tomas 	}
816a86c6181SAlex Tomas 
817a86c6181SAlex Tomas 	path->p_ext = l - 1;
818553f9008SMingming 	ext_debug("  -> %d:%llu:[%d]%d ",
819a86c6181SAlex Tomas 			le32_to_cpu(path->p_ext->ee_block),
820bf89d16fSTheodore Ts'o 			ext4_ext_pblock(path->p_ext),
821556615dcSLukas Czerner 			ext4_ext_is_unwritten(path->p_ext),
822a2df2a63SAmit Arora 			ext4_ext_get_actual_len(path->p_ext));
823a86c6181SAlex Tomas 
824a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH
825a86c6181SAlex Tomas 	{
826a86c6181SAlex Tomas 		struct ext4_extent *chex, *ex;
827a86c6181SAlex Tomas 		int k;
828a86c6181SAlex Tomas 
829a86c6181SAlex Tomas 		chex = ex = EXT_FIRST_EXTENT(eh);
830a86c6181SAlex Tomas 		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
831a86c6181SAlex Tomas 			BUG_ON(k && le32_to_cpu(ex->ee_block)
832a86c6181SAlex Tomas 					  <= le32_to_cpu(ex[-1].ee_block));
833a86c6181SAlex Tomas 			if (block < le32_to_cpu(ex->ee_block))
834a86c6181SAlex Tomas 				break;
835a86c6181SAlex Tomas 			chex = ex;
836a86c6181SAlex Tomas 		}
837a86c6181SAlex Tomas 		BUG_ON(chex != path->p_ext);
838a86c6181SAlex Tomas 	}
839a86c6181SAlex Tomas #endif
840a86c6181SAlex Tomas 
841a86c6181SAlex Tomas }
842a86c6181SAlex Tomas 
843a86c6181SAlex Tomas int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
844a86c6181SAlex Tomas {
845a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
846a86c6181SAlex Tomas 
847a86c6181SAlex Tomas 	eh = ext_inode_hdr(inode);
848a86c6181SAlex Tomas 	eh->eh_depth = 0;
849a86c6181SAlex Tomas 	eh->eh_entries = 0;
850a86c6181SAlex Tomas 	eh->eh_magic = EXT4_EXT_MAGIC;
85155ad63bfSTheodore Ts'o 	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
852a86c6181SAlex Tomas 	ext4_mark_inode_dirty(handle, inode);
853a86c6181SAlex Tomas 	return 0;
854a86c6181SAlex Tomas }
855a86c6181SAlex Tomas 
856a86c6181SAlex Tomas struct ext4_ext_path *
857725d26d3SAneesh Kumar K.V ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
858107a7bd3STheodore Ts'o 		     struct ext4_ext_path *path, int flags)
859a86c6181SAlex Tomas {
860a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
861a86c6181SAlex Tomas 	struct buffer_head *bh;
862a86c6181SAlex Tomas 	short int depth, i, ppos = 0, alloc = 0;
863860d21e2STheodore Ts'o 	int ret;
864a86c6181SAlex Tomas 
865a86c6181SAlex Tomas 	eh = ext_inode_hdr(inode);
866c29c0ae7SAlex Tomas 	depth = ext_depth(inode);
867a86c6181SAlex Tomas 
868a86c6181SAlex Tomas 	/* account possible depth increase */
869a86c6181SAlex Tomas 	if (!path) {
8705d4958f9SAvantika Mathur 		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
871a86c6181SAlex Tomas 				GFP_NOFS);
872*19008f6dSTheodore Ts'o 		if (unlikely(!path))
873a86c6181SAlex Tomas 			return ERR_PTR(-ENOMEM);
874a86c6181SAlex Tomas 		alloc = 1;
875a86c6181SAlex Tomas 	}
876a86c6181SAlex Tomas 	path[0].p_hdr = eh;
8771973adcbSShen Feng 	path[0].p_bh = NULL;
878a86c6181SAlex Tomas 
879c29c0ae7SAlex Tomas 	i = depth;
880a86c6181SAlex Tomas 	/* walk through the tree */
881a86c6181SAlex Tomas 	while (i) {
882a86c6181SAlex Tomas 		ext_debug("depth %d: num %d, max %d\n",
883a86c6181SAlex Tomas 			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
884c29c0ae7SAlex Tomas 
885a86c6181SAlex Tomas 		ext4_ext_binsearch_idx(inode, path + ppos, block);
886bf89d16fSTheodore Ts'o 		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
887a86c6181SAlex Tomas 		path[ppos].p_depth = i;
888a86c6181SAlex Tomas 		path[ppos].p_ext = NULL;
889a86c6181SAlex Tomas 
890107a7bd3STheodore Ts'o 		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
891107a7bd3STheodore Ts'o 					    flags);
892*19008f6dSTheodore Ts'o 		if (unlikely(IS_ERR(bh))) {
8937d7ea89eSTheodore Ts'o 			ret = PTR_ERR(bh);
894a86c6181SAlex Tomas 			goto err;
895860d21e2STheodore Ts'o 		}
8967d7ea89eSTheodore Ts'o 
897a86c6181SAlex Tomas 		eh = ext_block_hdr(bh);
898a86c6181SAlex Tomas 		ppos++;
899273df556SFrank Mayhar 		if (unlikely(ppos > depth)) {
900273df556SFrank Mayhar 			put_bh(bh);
901273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
902273df556SFrank Mayhar 					 "ppos %d > depth %d", ppos, depth);
903860d21e2STheodore Ts'o 			ret = -EIO;
904273df556SFrank Mayhar 			goto err;
905273df556SFrank Mayhar 		}
906a86c6181SAlex Tomas 		path[ppos].p_bh = bh;
907a86c6181SAlex Tomas 		path[ppos].p_hdr = eh;
908a86c6181SAlex Tomas 	}
909a86c6181SAlex Tomas 
910a86c6181SAlex Tomas 	path[ppos].p_depth = i;
911a86c6181SAlex Tomas 	path[ppos].p_ext = NULL;
912a86c6181SAlex Tomas 	path[ppos].p_idx = NULL;
913a86c6181SAlex Tomas 
914a86c6181SAlex Tomas 	/* find extent */
915a86c6181SAlex Tomas 	ext4_ext_binsearch(inode, path + ppos, block);
9161973adcbSShen Feng 	/* if not an empty leaf */
9171973adcbSShen Feng 	if (path[ppos].p_ext)
918bf89d16fSTheodore Ts'o 		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
919a86c6181SAlex Tomas 
920a86c6181SAlex Tomas 	ext4_ext_show_path(inode, path);
921a86c6181SAlex Tomas 
922a86c6181SAlex Tomas 	return path;
923a86c6181SAlex Tomas 
924a86c6181SAlex Tomas err:
925a86c6181SAlex Tomas 	ext4_ext_drop_refs(path);
926a86c6181SAlex Tomas 	if (alloc)
927a86c6181SAlex Tomas 		kfree(path);
928860d21e2STheodore Ts'o 	return ERR_PTR(ret);
929a86c6181SAlex Tomas }
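/*
 * Editor's note on the returned path: path[0] describes the root header in
 * the inode body and path[depth] the leaf level, so callers read the found
 * extent from path[ext_depth(inode)].p_ext and must drop the buffer
 * references with ext4_ext_drop_refs() when finished, as the error path
 * above does.
 */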
930a86c6181SAlex Tomas 
931a86c6181SAlex Tomas /*
932d0d856e8SRandy Dunlap  * ext4_ext_insert_index:
933d0d856e8SRandy Dunlap  * insert new index [@logical;@ptr] into the block at @curp;
934d0d856e8SRandy Dunlap  * check where to insert: before @curp or after @curp
935a86c6181SAlex Tomas  */
9361f109d5aSTheodore Ts'o static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
937a86c6181SAlex Tomas 				 struct ext4_ext_path *curp,
938f65e6fbaSAlex Tomas 				 int logical, ext4_fsblk_t ptr)
939a86c6181SAlex Tomas {
940a86c6181SAlex Tomas 	struct ext4_extent_idx *ix;
941a86c6181SAlex Tomas 	int len, err;
942a86c6181SAlex Tomas 
9437e028976SAvantika Mathur 	err = ext4_ext_get_access(handle, inode, curp);
9447e028976SAvantika Mathur 	if (err)
945a86c6181SAlex Tomas 		return err;
946a86c6181SAlex Tomas 
947273df556SFrank Mayhar 	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
948273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode,
949273df556SFrank Mayhar 				 "logical %d == ei_block %d!",
950273df556SFrank Mayhar 				 logical, le32_to_cpu(curp->p_idx->ei_block));
951273df556SFrank Mayhar 		return -EIO;
952273df556SFrank Mayhar 	}
953d4620315SRobin Dong 
954d4620315SRobin Dong 	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
955d4620315SRobin Dong 			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
956d4620315SRobin Dong 		EXT4_ERROR_INODE(inode,
957d4620315SRobin Dong 				 "eh_entries %d >= eh_max %d!",
958d4620315SRobin Dong 				 le16_to_cpu(curp->p_hdr->eh_entries),
959d4620315SRobin Dong 				 le16_to_cpu(curp->p_hdr->eh_max));
960d4620315SRobin Dong 		return -EIO;
961d4620315SRobin Dong 	}
962d4620315SRobin Dong 
963a86c6181SAlex Tomas 	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
964a86c6181SAlex Tomas 		/* insert after */
96580e675f9SEric Gouriou 		ext_debug("insert new index %d after: %llu\n", logical, ptr);
966a86c6181SAlex Tomas 		ix = curp->p_idx + 1;
967a86c6181SAlex Tomas 	} else {
968a86c6181SAlex Tomas 		/* insert before */
96980e675f9SEric Gouriou 		ext_debug("insert new index %d before: %llu\n", logical, ptr);
970a86c6181SAlex Tomas 		ix = curp->p_idx;
971a86c6181SAlex Tomas 	}
972a86c6181SAlex Tomas 
97380e675f9SEric Gouriou 	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
97480e675f9SEric Gouriou 	BUG_ON(len < 0);
97580e675f9SEric Gouriou 	if (len > 0) {
97680e675f9SEric Gouriou 		ext_debug("insert new index %d: "
97780e675f9SEric Gouriou 				"move %d indices from 0x%p to 0x%p\n",
97880e675f9SEric Gouriou 				logical, len, ix, ix + 1);
97980e675f9SEric Gouriou 		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
98080e675f9SEric Gouriou 	}
98180e675f9SEric Gouriou 
982f472e026STao Ma 	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
983f472e026STao Ma 		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
984f472e026STao Ma 		return -EIO;
985f472e026STao Ma 	}
986f472e026STao Ma 
987a86c6181SAlex Tomas 	ix->ei_block = cpu_to_le32(logical);
988f65e6fbaSAlex Tomas 	ext4_idx_store_pblock(ix, ptr);
989e8546d06SMarcin Slusarz 	le16_add_cpu(&curp->p_hdr->eh_entries, 1);
990a86c6181SAlex Tomas 
991273df556SFrank Mayhar 	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
992273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
993273df556SFrank Mayhar 		return -EIO;
994273df556SFrank Mayhar 	}
995a86c6181SAlex Tomas 
996a86c6181SAlex Tomas 	err = ext4_ext_dirty(handle, inode, curp);
997a86c6181SAlex Tomas 	ext4_std_error(inode->i_sb, err);
998a86c6181SAlex Tomas 
999a86c6181SAlex Tomas 	return err;
1000a86c6181SAlex Tomas }
1001a86c6181SAlex Tomas 
1002a86c6181SAlex Tomas /*
1003d0d856e8SRandy Dunlap  * ext4_ext_split:
1004d0d856e8SRandy Dunlap  * inserts new subtree into the path, using free index entry
1005d0d856e8SRandy Dunlap  * at depth @at:
1006a86c6181SAlex Tomas  * - allocates all needed blocks (new leaf and all intermediate index blocks)
1007a86c6181SAlex Tomas  * - makes decision where to split
1008d0d856e8SRandy Dunlap  * - moves remaining extents and index entries (right to the split point)
1009a86c6181SAlex Tomas  *   into the newly allocated blocks
1010d0d856e8SRandy Dunlap  * - initializes subtree
1011a86c6181SAlex Tomas  */
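/*
 * Editor's illustration: @at is the tree level that still has a free index
 * slot.  The function allocates depth - at new blocks in total: one new
 * leaf plus a new index block for each level strictly between @at and the
 * leaf, and then hooks the new subtree into level @at with a single index
 * insert.
 */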
1012a86c6181SAlex Tomas static int ext4_ext_split(handle_t *handle, struct inode *inode,
101355f020dbSAllison Henderson 			  unsigned int flags,
1014a86c6181SAlex Tomas 			  struct ext4_ext_path *path,
1015a86c6181SAlex Tomas 			  struct ext4_extent *newext, int at)
1016a86c6181SAlex Tomas {
1017a86c6181SAlex Tomas 	struct buffer_head *bh = NULL;
1018a86c6181SAlex Tomas 	int depth = ext_depth(inode);
1019a86c6181SAlex Tomas 	struct ext4_extent_header *neh;
1020a86c6181SAlex Tomas 	struct ext4_extent_idx *fidx;
1021a86c6181SAlex Tomas 	int i = at, k, m, a;
1022f65e6fbaSAlex Tomas 	ext4_fsblk_t newblock, oldblock;
1023a86c6181SAlex Tomas 	__le32 border;
1024f65e6fbaSAlex Tomas 	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
1025a86c6181SAlex Tomas 	int err = 0;
1026a86c6181SAlex Tomas 
1027a86c6181SAlex Tomas 	/* make decision: where to split? */
1028d0d856e8SRandy Dunlap 	/* FIXME: now decision is simplest: at current extent */
1029a86c6181SAlex Tomas 
1030d0d856e8SRandy Dunlap 	/* if current leaf will be split, then we should use
1031a86c6181SAlex Tomas 	 * border from split point */
1032273df556SFrank Mayhar 	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1033273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
1034273df556SFrank Mayhar 		return -EIO;
1035273df556SFrank Mayhar 	}
1036a86c6181SAlex Tomas 	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1037a86c6181SAlex Tomas 		border = path[depth].p_ext[1].ee_block;
1038d0d856e8SRandy Dunlap 		ext_debug("leaf will be split."
1039a86c6181SAlex Tomas 				" next leaf starts at %d\n",
1040a86c6181SAlex Tomas 				  le32_to_cpu(border));
1041a86c6181SAlex Tomas 	} else {
1042a86c6181SAlex Tomas 		border = newext->ee_block;
1043a86c6181SAlex Tomas 		ext_debug("leaf will be added."
1044a86c6181SAlex Tomas 				" next leaf starts at %d\n",
1045a86c6181SAlex Tomas 				le32_to_cpu(border));
1046a86c6181SAlex Tomas 	}
1047a86c6181SAlex Tomas 
1048a86c6181SAlex Tomas 	/*
1049d0d856e8SRandy Dunlap 	 * If an error occurs, we stop processing
1050d0d856e8SRandy Dunlap 	 * and mark the filesystem read-only. The index won't
1051a86c6181SAlex Tomas 	 * be inserted and the tree will remain in a consistent
1052d0d856e8SRandy Dunlap 	 * state. The next mount will repair the buffers too.
1053a86c6181SAlex Tomas 	 */
1054a86c6181SAlex Tomas 
1055a86c6181SAlex Tomas 	/*
1056d0d856e8SRandy Dunlap 	 * Get array to track all allocated blocks.
1057d0d856e8SRandy Dunlap 	 * We need this to handle errors and free the
1058d0d856e8SRandy Dunlap 	 * blocks on failure.
1059a86c6181SAlex Tomas 	 */
10605d4958f9SAvantika Mathur 	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
1061a86c6181SAlex Tomas 	if (!ablocks)
1062a86c6181SAlex Tomas 		return -ENOMEM;
1063a86c6181SAlex Tomas 
1064a86c6181SAlex Tomas 	/* allocate all needed blocks */
1065a86c6181SAlex Tomas 	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
1066a86c6181SAlex Tomas 	for (a = 0; a < depth - at; a++) {
1067654b4908SAneesh Kumar K.V 		newblock = ext4_ext_new_meta_block(handle, inode, path,
106855f020dbSAllison Henderson 						   newext, &err, flags);
1069a86c6181SAlex Tomas 		if (newblock == 0)
1070a86c6181SAlex Tomas 			goto cleanup;
1071a86c6181SAlex Tomas 		ablocks[a] = newblock;
1072a86c6181SAlex Tomas 	}
1073a86c6181SAlex Tomas 
1074a86c6181SAlex Tomas 	/* initialize new leaf */
1075a86c6181SAlex Tomas 	newblock = ablocks[--a];
1076273df556SFrank Mayhar 	if (unlikely(newblock == 0)) {
1077273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "newblock == 0!");
1078273df556SFrank Mayhar 		err = -EIO;
1079273df556SFrank Mayhar 		goto cleanup;
1080273df556SFrank Mayhar 	}
1081a86c6181SAlex Tomas 	bh = sb_getblk(inode->i_sb, newblock);
1082aebf0243SWang Shilong 	if (unlikely(!bh)) {
1083860d21e2STheodore Ts'o 		err = -ENOMEM;
1084a86c6181SAlex Tomas 		goto cleanup;
1085a86c6181SAlex Tomas 	}
1086a86c6181SAlex Tomas 	lock_buffer(bh);
1087a86c6181SAlex Tomas 
10887e028976SAvantika Mathur 	err = ext4_journal_get_create_access(handle, bh);
10897e028976SAvantika Mathur 	if (err)
1090a86c6181SAlex Tomas 		goto cleanup;
1091a86c6181SAlex Tomas 
1092a86c6181SAlex Tomas 	neh = ext_block_hdr(bh);
1093a86c6181SAlex Tomas 	neh->eh_entries = 0;
109455ad63bfSTheodore Ts'o 	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1095a86c6181SAlex Tomas 	neh->eh_magic = EXT4_EXT_MAGIC;
1096a86c6181SAlex Tomas 	neh->eh_depth = 0;
1097a86c6181SAlex Tomas 
1098d0d856e8SRandy Dunlap 	/* move remainder of path[depth] to the new leaf */
1099273df556SFrank Mayhar 	if (unlikely(path[depth].p_hdr->eh_entries !=
1100273df556SFrank Mayhar 		     path[depth].p_hdr->eh_max)) {
1101273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
1102273df556SFrank Mayhar 				 path[depth].p_hdr->eh_entries,
1103273df556SFrank Mayhar 				 path[depth].p_hdr->eh_max);
1104273df556SFrank Mayhar 		err = -EIO;
1105273df556SFrank Mayhar 		goto cleanup;
1106273df556SFrank Mayhar 	}
1107a86c6181SAlex Tomas 	/* start copy from next extent */
11081b16da77SYongqiang Yang 	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
11091b16da77SYongqiang Yang 	ext4_ext_show_move(inode, path, newblock, depth);
1110a86c6181SAlex Tomas 	if (m) {
11111b16da77SYongqiang Yang 		struct ext4_extent *ex;
11121b16da77SYongqiang Yang 		ex = EXT_FIRST_EXTENT(neh);
11131b16da77SYongqiang Yang 		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1114e8546d06SMarcin Slusarz 		le16_add_cpu(&neh->eh_entries, m);
1115a86c6181SAlex Tomas 	}
1116a86c6181SAlex Tomas 
11177ac5990dSDarrick J. Wong 	ext4_extent_block_csum_set(inode, neh);
1118a86c6181SAlex Tomas 	set_buffer_uptodate(bh);
1119a86c6181SAlex Tomas 	unlock_buffer(bh);
1120a86c6181SAlex Tomas 
11210390131bSFrank Mayhar 	err = ext4_handle_dirty_metadata(handle, inode, bh);
11227e028976SAvantika Mathur 	if (err)
1123a86c6181SAlex Tomas 		goto cleanup;
1124a86c6181SAlex Tomas 	brelse(bh);
1125a86c6181SAlex Tomas 	bh = NULL;
1126a86c6181SAlex Tomas 
1127a86c6181SAlex Tomas 	/* correct old leaf */
1128a86c6181SAlex Tomas 	if (m) {
11297e028976SAvantika Mathur 		err = ext4_ext_get_access(handle, inode, path + depth);
11307e028976SAvantika Mathur 		if (err)
1131a86c6181SAlex Tomas 			goto cleanup;
1132e8546d06SMarcin Slusarz 		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
11337e028976SAvantika Mathur 		err = ext4_ext_dirty(handle, inode, path + depth);
11347e028976SAvantika Mathur 		if (err)
1135a86c6181SAlex Tomas 			goto cleanup;
1136a86c6181SAlex Tomas 
1137a86c6181SAlex Tomas 	}
1138a86c6181SAlex Tomas 
1139a86c6181SAlex Tomas 	/* create intermediate indexes */
1140a86c6181SAlex Tomas 	k = depth - at - 1;
1141273df556SFrank Mayhar 	if (unlikely(k < 0)) {
1142273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
1143273df556SFrank Mayhar 		err = -EIO;
1144273df556SFrank Mayhar 		goto cleanup;
1145273df556SFrank Mayhar 	}
1146a86c6181SAlex Tomas 	if (k)
1147a86c6181SAlex Tomas 		ext_debug("create %d intermediate indices\n", k);
1148a86c6181SAlex Tomas 	/* insert new index into current index block */
1149a86c6181SAlex Tomas 	/* the current depth is stored in variable i */
1150a86c6181SAlex Tomas 	i = depth - 1;
1151a86c6181SAlex Tomas 	while (k--) {
1152a86c6181SAlex Tomas 		oldblock = newblock;
1153a86c6181SAlex Tomas 		newblock = ablocks[--a];
1154bba90743SEric Sandeen 		bh = sb_getblk(inode->i_sb, newblock);
1155aebf0243SWang Shilong 		if (unlikely(!bh)) {
1156860d21e2STheodore Ts'o 			err = -ENOMEM;
1157a86c6181SAlex Tomas 			goto cleanup;
1158a86c6181SAlex Tomas 		}
1159a86c6181SAlex Tomas 		lock_buffer(bh);
1160a86c6181SAlex Tomas 
11617e028976SAvantika Mathur 		err = ext4_journal_get_create_access(handle, bh);
11627e028976SAvantika Mathur 		if (err)
1163a86c6181SAlex Tomas 			goto cleanup;
1164a86c6181SAlex Tomas 
1165a86c6181SAlex Tomas 		neh = ext_block_hdr(bh);
1166a86c6181SAlex Tomas 		neh->eh_entries = cpu_to_le16(1);
1167a86c6181SAlex Tomas 		neh->eh_magic = EXT4_EXT_MAGIC;
116855ad63bfSTheodore Ts'o 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1169a86c6181SAlex Tomas 		neh->eh_depth = cpu_to_le16(depth - i);
1170a86c6181SAlex Tomas 		fidx = EXT_FIRST_INDEX(neh);
1171a86c6181SAlex Tomas 		fidx->ei_block = border;
1172f65e6fbaSAlex Tomas 		ext4_idx_store_pblock(fidx, oldblock);
1173a86c6181SAlex Tomas 
1174bba90743SEric Sandeen 		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
1175bba90743SEric Sandeen 				i, newblock, le32_to_cpu(border), oldblock);
1176a86c6181SAlex Tomas 
11771b16da77SYongqiang Yang 		/* move remainder of path[i] to the new index block */
1178273df556SFrank Mayhar 		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1179273df556SFrank Mayhar 					EXT_LAST_INDEX(path[i].p_hdr))) {
1180273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
1181273df556SFrank Mayhar 					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1182273df556SFrank Mayhar 					 le32_to_cpu(path[i].p_ext->ee_block));
1183273df556SFrank Mayhar 			err = -EIO;
1184273df556SFrank Mayhar 			goto cleanup;
1185273df556SFrank Mayhar 		}
11861b16da77SYongqiang Yang 		/* start copy indexes */
11871b16da77SYongqiang Yang 		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
11881b16da77SYongqiang Yang 		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
11891b16da77SYongqiang Yang 				EXT_MAX_INDEX(path[i].p_hdr));
11901b16da77SYongqiang Yang 		ext4_ext_show_move(inode, path, newblock, i);
1191a86c6181SAlex Tomas 		if (m) {
11921b16da77SYongqiang Yang 			memmove(++fidx, path[i].p_idx,
1193a86c6181SAlex Tomas 				sizeof(struct ext4_extent_idx) * m);
1194e8546d06SMarcin Slusarz 			le16_add_cpu(&neh->eh_entries, m);
1195a86c6181SAlex Tomas 		}
11967ac5990dSDarrick J. Wong 		ext4_extent_block_csum_set(inode, neh);
1197a86c6181SAlex Tomas 		set_buffer_uptodate(bh);
1198a86c6181SAlex Tomas 		unlock_buffer(bh);
1199a86c6181SAlex Tomas 
12000390131bSFrank Mayhar 		err = ext4_handle_dirty_metadata(handle, inode, bh);
12017e028976SAvantika Mathur 		if (err)
1202a86c6181SAlex Tomas 			goto cleanup;
1203a86c6181SAlex Tomas 		brelse(bh);
1204a86c6181SAlex Tomas 		bh = NULL;
1205a86c6181SAlex Tomas 
1206a86c6181SAlex Tomas 		/* correct old index */
1207a86c6181SAlex Tomas 		if (m) {
1208a86c6181SAlex Tomas 			err = ext4_ext_get_access(handle, inode, path + i);
1209a86c6181SAlex Tomas 			if (err)
1210a86c6181SAlex Tomas 				goto cleanup;
1211e8546d06SMarcin Slusarz 			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1212a86c6181SAlex Tomas 			err = ext4_ext_dirty(handle, inode, path + i);
1213a86c6181SAlex Tomas 			if (err)
1214a86c6181SAlex Tomas 				goto cleanup;
1215a86c6181SAlex Tomas 		}
1216a86c6181SAlex Tomas 
1217a86c6181SAlex Tomas 		i--;
1218a86c6181SAlex Tomas 	}
1219a86c6181SAlex Tomas 
1220a86c6181SAlex Tomas 	/* insert new index */
1221a86c6181SAlex Tomas 	err = ext4_ext_insert_index(handle, inode, path + at,
1222a86c6181SAlex Tomas 				    le32_to_cpu(border), newblock);
1223a86c6181SAlex Tomas 
1224a86c6181SAlex Tomas cleanup:
1225a86c6181SAlex Tomas 	if (bh) {
1226a86c6181SAlex Tomas 		if (buffer_locked(bh))
1227a86c6181SAlex Tomas 			unlock_buffer(bh);
1228a86c6181SAlex Tomas 		brelse(bh);
1229a86c6181SAlex Tomas 	}
1230a86c6181SAlex Tomas 
1231a86c6181SAlex Tomas 	if (err) {
1232a86c6181SAlex Tomas 		/* free all allocated blocks in error case */
1233a86c6181SAlex Tomas 		for (i = 0; i < depth; i++) {
1234a86c6181SAlex Tomas 			if (!ablocks[i])
1235a86c6181SAlex Tomas 				continue;
12367dc57615SPeter Huewe 			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1237e6362609STheodore Ts'o 					 EXT4_FREE_BLOCKS_METADATA);
1238a86c6181SAlex Tomas 		}
1239a86c6181SAlex Tomas 	}
1240a86c6181SAlex Tomas 	kfree(ablocks);
1241a86c6181SAlex Tomas 
1242a86c6181SAlex Tomas 	return err;
1243a86c6181SAlex Tomas }
1244a86c6181SAlex Tomas 
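/*
 * A worked example of the split bookkeeping above, using hypothetical
 * values: suppose the full leaf at path[depth] holds extents e0..e3
 * (eh_entries == eh_max == 4) and p_ext points at e1.  Then
 *
 *	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++
 *	  = &e3 - &e1 = 2
 *
 * so e2 and e3 are memmove()d into the freshly allocated leaf, the old
 * leaf's eh_entries drops by 2, and a new index entry for the border
 * block is inserted at level @at.
 */
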
1245a86c6181SAlex Tomas /*
1246d0d856e8SRandy Dunlap  * ext4_ext_grow_indepth:
1247d0d856e8SRandy Dunlap  * implements tree growing procedure:
1248a86c6181SAlex Tomas  * - allocates new block
1249a86c6181SAlex Tomas  * - moves top-level data (index block or leaf) into the new block
1250d0d856e8SRandy Dunlap  * - initializes new top-level, creating index that points to the
1251a86c6181SAlex Tomas  *   just created block
1252a86c6181SAlex Tomas  */
1253a86c6181SAlex Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
125455f020dbSAllison Henderson 				 unsigned int flags,
1255a86c6181SAlex Tomas 				 struct ext4_extent *newext)
1256a86c6181SAlex Tomas {
1257a86c6181SAlex Tomas 	struct ext4_extent_header *neh;
1258a86c6181SAlex Tomas 	struct buffer_head *bh;
1259f65e6fbaSAlex Tomas 	ext4_fsblk_t newblock;
1260a86c6181SAlex Tomas 	int err = 0;
1261a86c6181SAlex Tomas 
12621939dd84SDmitry Monakhov 	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
126355f020dbSAllison Henderson 		newext, &err, flags);
1264a86c6181SAlex Tomas 	if (newblock == 0)
1265a86c6181SAlex Tomas 		return err;
1266a86c6181SAlex Tomas 
1267a86c6181SAlex Tomas 	bh = sb_getblk(inode->i_sb, newblock);
1268aebf0243SWang Shilong 	if (unlikely(!bh))
1269860d21e2STheodore Ts'o 		return -ENOMEM;
1270a86c6181SAlex Tomas 	lock_buffer(bh);
1271a86c6181SAlex Tomas 
12727e028976SAvantika Mathur 	err = ext4_journal_get_create_access(handle, bh);
12737e028976SAvantika Mathur 	if (err) {
1274a86c6181SAlex Tomas 		unlock_buffer(bh);
1275a86c6181SAlex Tomas 		goto out;
1276a86c6181SAlex Tomas 	}
1277a86c6181SAlex Tomas 
1278a86c6181SAlex Tomas 	/* move top-level index/leaf into new block */
12791939dd84SDmitry Monakhov 	memmove(bh->b_data, EXT4_I(inode)->i_data,
12801939dd84SDmitry Monakhov 		sizeof(EXT4_I(inode)->i_data));
1281a86c6181SAlex Tomas 
1282a86c6181SAlex Tomas 	/* set size of new block */
1283a86c6181SAlex Tomas 	neh = ext_block_hdr(bh);
1284a86c6181SAlex Tomas 	/* old root could have indexes or leaves
1285a86c6181SAlex Tomas 	 * so calculate eh_max the right way */
1286a86c6181SAlex Tomas 	if (ext_depth(inode))
128755ad63bfSTheodore Ts'o 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1288a86c6181SAlex Tomas 	else
128955ad63bfSTheodore Ts'o 		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1290a86c6181SAlex Tomas 	neh->eh_magic = EXT4_EXT_MAGIC;
12917ac5990dSDarrick J. Wong 	ext4_extent_block_csum_set(inode, neh);
1292a86c6181SAlex Tomas 	set_buffer_uptodate(bh);
1293a86c6181SAlex Tomas 	unlock_buffer(bh);
1294a86c6181SAlex Tomas 
12950390131bSFrank Mayhar 	err = ext4_handle_dirty_metadata(handle, inode, bh);
12967e028976SAvantika Mathur 	if (err)
1297a86c6181SAlex Tomas 		goto out;
1298a86c6181SAlex Tomas 
12991939dd84SDmitry Monakhov 	/* Update top-level index: num,max,pointer */
1300a86c6181SAlex Tomas 	neh = ext_inode_hdr(inode);
13011939dd84SDmitry Monakhov 	neh->eh_entries = cpu_to_le16(1);
13021939dd84SDmitry Monakhov 	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
13031939dd84SDmitry Monakhov 	if (neh->eh_depth == 0) {
13041939dd84SDmitry Monakhov 		/* Root extent block becomes index block */
13051939dd84SDmitry Monakhov 		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
13061939dd84SDmitry Monakhov 		EXT_FIRST_INDEX(neh)->ei_block =
13071939dd84SDmitry Monakhov 			EXT_FIRST_EXTENT(neh)->ee_block;
13081939dd84SDmitry Monakhov 	}
13092ae02107SMingming Cao 	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
1310a86c6181SAlex Tomas 		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
13115a0790c2SAndi Kleen 		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1312bf89d16fSTheodore Ts'o 		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1313a86c6181SAlex Tomas 
1314ba39ebb6SWei Yongjun 	le16_add_cpu(&neh->eh_depth, 1);
13151939dd84SDmitry Monakhov 	ext4_mark_inode_dirty(handle, inode);
1316a86c6181SAlex Tomas out:
1317a86c6181SAlex Tomas 	brelse(bh);
1318a86c6181SAlex Tomas 
1319a86c6181SAlex Tomas 	return err;
1320a86c6181SAlex Tomas }
1321a86c6181SAlex Tomas 
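/*
 * A sketch of the state change performed above, for a hypothetical
 * tree of depth 1: the in-inode root (an index node) is copied
 * verbatim into the new metadata block, and the root is then rewritten
 * as a one-entry index whose single entry points at that copy, with
 * eh_depth bumped from 1 to 2.  A depth-0 root (a leaf) is handled the
 * same way, except that eh_max is recomputed for an index node and
 * ei_block is taken from the first extent of the old leaf.
 */
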
1322a86c6181SAlex Tomas /*
1323d0d856e8SRandy Dunlap  * ext4_ext_create_new_leaf:
1324d0d856e8SRandy Dunlap  * finds an empty index and adds a new leaf.
1325d0d856e8SRandy Dunlap  * if no free index is found, the tree is grown in depth instead.
1326a86c6181SAlex Tomas  */
1327a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1328107a7bd3STheodore Ts'o 				    unsigned int mb_flags,
1329107a7bd3STheodore Ts'o 				    unsigned int gb_flags,
1330a86c6181SAlex Tomas 				    struct ext4_ext_path *path,
1331a86c6181SAlex Tomas 				    struct ext4_extent *newext)
1332a86c6181SAlex Tomas {
1333a86c6181SAlex Tomas 	struct ext4_ext_path *curp;
1334a86c6181SAlex Tomas 	int depth, i, err = 0;
1335a86c6181SAlex Tomas 
1336a86c6181SAlex Tomas repeat:
1337a86c6181SAlex Tomas 	i = depth = ext_depth(inode);
1338a86c6181SAlex Tomas 
1339a86c6181SAlex Tomas 	/* walk up the tree and look for a free index entry */
1340a86c6181SAlex Tomas 	curp = path + depth;
1341a86c6181SAlex Tomas 	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1342a86c6181SAlex Tomas 		i--;
1343a86c6181SAlex Tomas 		curp--;
1344a86c6181SAlex Tomas 	}
1345a86c6181SAlex Tomas 
1346d0d856e8SRandy Dunlap 	/* we use an already allocated block for the index block,
1347d0d856e8SRandy Dunlap 	 * so subsequent data blocks should be contiguous */
1348a86c6181SAlex Tomas 	if (EXT_HAS_FREE_INDEX(curp)) {
1349a86c6181SAlex Tomas 		/* if we found an index with a free entry, use that
1350a86c6181SAlex Tomas 		 * entry: create the needed subtree and add the new leaf */
1351107a7bd3STheodore Ts'o 		err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1352787e0981SShen Feng 		if (err)
1353787e0981SShen Feng 			goto out;
1354a86c6181SAlex Tomas 
1355a86c6181SAlex Tomas 		/* refill path */
1356a86c6181SAlex Tomas 		ext4_ext_drop_refs(path);
1357a86c6181SAlex Tomas 		path = ext4_ext_find_extent(inode,
1358725d26d3SAneesh Kumar K.V 				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1359107a7bd3STheodore Ts'o 				    path, gb_flags);
1360a86c6181SAlex Tomas 		if (IS_ERR(path))
1361a86c6181SAlex Tomas 			err = PTR_ERR(path);
1362a86c6181SAlex Tomas 	} else {
1363a86c6181SAlex Tomas 		/* tree is full, time to grow in depth */
1364107a7bd3STheodore Ts'o 		err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext);
1365a86c6181SAlex Tomas 		if (err)
1366a86c6181SAlex Tomas 			goto out;
1367a86c6181SAlex Tomas 
1368a86c6181SAlex Tomas 		/* refill path */
1369a86c6181SAlex Tomas 		ext4_ext_drop_refs(path);
1370a86c6181SAlex Tomas 		path = ext4_ext_find_extent(inode,
1371725d26d3SAneesh Kumar K.V 				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1372107a7bd3STheodore Ts'o 				    path, gb_flags);
1373a86c6181SAlex Tomas 		if (IS_ERR(path)) {
1374a86c6181SAlex Tomas 			err = PTR_ERR(path);
1375a86c6181SAlex Tomas 			goto out;
1376a86c6181SAlex Tomas 		}
1377a86c6181SAlex Tomas 
1378a86c6181SAlex Tomas 		/*
1379d0d856e8SRandy Dunlap 		 * only the first grow (depth 0 -> 1) produces free space;
1380d0d856e8SRandy Dunlap 		 * in all other cases we have to split the grown tree
1381a86c6181SAlex Tomas 		 */
1382a86c6181SAlex Tomas 		depth = ext_depth(inode);
1383a86c6181SAlex Tomas 		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1384d0d856e8SRandy Dunlap 			/* now we need to split */
1385a86c6181SAlex Tomas 			goto repeat;
1386a86c6181SAlex Tomas 		}
1387a86c6181SAlex Tomas 	}
1388a86c6181SAlex Tomas 
1389a86c6181SAlex Tomas out:
1390a86c6181SAlex Tomas 	return err;
1391a86c6181SAlex Tomas }
1392a86c6181SAlex Tomas 
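/*
 * A worked example with a hypothetical depth-2 tree: path[2] is the
 * full leaf, path[1] a full index block, and path[0] the root with a
 * free slot.  The walk above stops at i == 0, so ext4_ext_split() is
 * asked to split at the root, allocating depth - at == 2 new blocks
 * (one index block plus one leaf).  Only when every level including
 * the root is full does the else branch grow the tree in depth and
 * retry.
 */
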
1393a86c6181SAlex Tomas /*
13941988b51eSAlex Tomas  * search for the closest allocated block to the left of *logical
13951988b51eSAlex Tomas  * and return it at @logical, with its physical address at @phys.
13961988b51eSAlex Tomas  * if *logical is the smallest allocated block, the function
13971988b51eSAlex Tomas  * returns 0 at @phys.
13981988b51eSAlex Tomas  * the return value contains 0 (success) or an error code.
13991988b51eSAlex Tomas  */
14001f109d5aSTheodore Ts'o static int ext4_ext_search_left(struct inode *inode,
14011f109d5aSTheodore Ts'o 				struct ext4_ext_path *path,
14021988b51eSAlex Tomas 				ext4_lblk_t *logical, ext4_fsblk_t *phys)
14031988b51eSAlex Tomas {
14041988b51eSAlex Tomas 	struct ext4_extent_idx *ix;
14051988b51eSAlex Tomas 	struct ext4_extent *ex;
1406b939e376SAneesh Kumar K.V 	int depth, ee_len;
14071988b51eSAlex Tomas 
1408273df556SFrank Mayhar 	if (unlikely(path == NULL)) {
1409273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1410273df556SFrank Mayhar 		return -EIO;
1411273df556SFrank Mayhar 	}
14121988b51eSAlex Tomas 	depth = path->p_depth;
14131988b51eSAlex Tomas 	*phys = 0;
14141988b51eSAlex Tomas 
14151988b51eSAlex Tomas 	if (depth == 0 && path->p_ext == NULL)
14161988b51eSAlex Tomas 		return 0;
14171988b51eSAlex Tomas 
14181988b51eSAlex Tomas 	/* usually the extent in the path covers blocks smaller
14191988b51eSAlex Tomas 	 * than *logical, but it can be that the extent is the
14201988b51eSAlex Tomas 	 * first one in the file */
14211988b51eSAlex Tomas 
14221988b51eSAlex Tomas 	ex = path[depth].p_ext;
1423b939e376SAneesh Kumar K.V 	ee_len = ext4_ext_get_actual_len(ex);
14241988b51eSAlex Tomas 	if (*logical < le32_to_cpu(ex->ee_block)) {
1425273df556SFrank Mayhar 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1426273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
1427273df556SFrank Mayhar 					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1428273df556SFrank Mayhar 					 *logical, le32_to_cpu(ex->ee_block));
1429273df556SFrank Mayhar 			return -EIO;
1430273df556SFrank Mayhar 		}
14311988b51eSAlex Tomas 		while (--depth >= 0) {
14321988b51eSAlex Tomas 			ix = path[depth].p_idx;
1433273df556SFrank Mayhar 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1434273df556SFrank Mayhar 				EXT4_ERROR_INODE(inode,
1435273df556SFrank Mayhar 				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
14366ee3b212STao Ma 				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1437273df556SFrank Mayhar 				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
14386ee3b212STao Ma 		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1439273df556SFrank Mayhar 				  depth);
1440273df556SFrank Mayhar 				return -EIO;
1441273df556SFrank Mayhar 			}
14421988b51eSAlex Tomas 		}
14431988b51eSAlex Tomas 		return 0;
14441988b51eSAlex Tomas 	}
14451988b51eSAlex Tomas 
1446273df556SFrank Mayhar 	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1447273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode,
1448273df556SFrank Mayhar 				 "logical %d < ee_block %d + ee_len %d!",
1449273df556SFrank Mayhar 				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1450273df556SFrank Mayhar 		return -EIO;
1451273df556SFrank Mayhar 	}
14521988b51eSAlex Tomas 
1453b939e376SAneesh Kumar K.V 	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1454bf89d16fSTheodore Ts'o 	*phys = ext4_ext_pblock(ex) + ee_len - 1;
14551988b51eSAlex Tomas 	return 0;
14561988b51eSAlex Tomas }
14571988b51eSAlex Tomas 
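/*
 * Example with hypothetical values: if the extent in the path is
 * { ee_block = 200, ee_len = 8 } starting at physical block 1000 and
 * *logical == 210, the sanity checks above pass (210 >= 200 + 8) and
 * the last block of that extent is handed back:
 *
 *	*logical = 200 + 8 - 1 = 207,	*phys = 1000 + 8 - 1 = 1007
 */
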
14581988b51eSAlex Tomas /*
14591988b51eSAlex Tomas  * search for the closest allocated block to the right of *logical
14601988b51eSAlex Tomas  * and return it at @logical, with its physical address at @phys.
1461df3ab170STao Ma  * if *logical is the largest allocated block, the function
14621988b51eSAlex Tomas  * returns 0 at @phys.
14631988b51eSAlex Tomas  * the return value contains 0 (success) or an error code.
14641988b51eSAlex Tomas  */
14651f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode,
14661f109d5aSTheodore Ts'o 				 struct ext4_ext_path *path,
14674d33b1efSTheodore Ts'o 				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
14684d33b1efSTheodore Ts'o 				 struct ext4_extent **ret_ex)
14691988b51eSAlex Tomas {
14701988b51eSAlex Tomas 	struct buffer_head *bh = NULL;
14711988b51eSAlex Tomas 	struct ext4_extent_header *eh;
14721988b51eSAlex Tomas 	struct ext4_extent_idx *ix;
14731988b51eSAlex Tomas 	struct ext4_extent *ex;
14741988b51eSAlex Tomas 	ext4_fsblk_t block;
1475395a87bfSEric Sandeen 	int depth;	/* Note, NOT eh_depth; depth from top of tree */
1476395a87bfSEric Sandeen 	int ee_len;
14771988b51eSAlex Tomas 
1478273df556SFrank Mayhar 	if (unlikely(path == NULL)) {
1479273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1480273df556SFrank Mayhar 		return -EIO;
1481273df556SFrank Mayhar 	}
14821988b51eSAlex Tomas 	depth = path->p_depth;
14831988b51eSAlex Tomas 	*phys = 0;
14841988b51eSAlex Tomas 
14851988b51eSAlex Tomas 	if (depth == 0 && path->p_ext == NULL)
14861988b51eSAlex Tomas 		return 0;
14871988b51eSAlex Tomas 
14881988b51eSAlex Tomas 	/* usually the extent in the path covers blocks smaller
14891988b51eSAlex Tomas 	 * than *logical, but it can be that the extent is the
14901988b51eSAlex Tomas 	 * first one in the file */
14911988b51eSAlex Tomas 
14921988b51eSAlex Tomas 	ex = path[depth].p_ext;
1493b939e376SAneesh Kumar K.V 	ee_len = ext4_ext_get_actual_len(ex);
14941988b51eSAlex Tomas 	if (*logical < le32_to_cpu(ex->ee_block)) {
1495273df556SFrank Mayhar 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1496273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
1497273df556SFrank Mayhar 					 "first_extent(path[%d].p_hdr) != ex",
1498273df556SFrank Mayhar 					 depth);
1499273df556SFrank Mayhar 			return -EIO;
1500273df556SFrank Mayhar 		}
15011988b51eSAlex Tomas 		while (--depth >= 0) {
15021988b51eSAlex Tomas 			ix = path[depth].p_idx;
1503273df556SFrank Mayhar 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1504273df556SFrank Mayhar 				EXT4_ERROR_INODE(inode,
1505273df556SFrank Mayhar 						 "ix != EXT_FIRST_INDEX *logical %d!",
1506273df556SFrank Mayhar 						 *logical);
1507273df556SFrank Mayhar 				return -EIO;
1508273df556SFrank Mayhar 			}
15091988b51eSAlex Tomas 		}
15104d33b1efSTheodore Ts'o 		goto found_extent;
15111988b51eSAlex Tomas 	}
15121988b51eSAlex Tomas 
1513273df556SFrank Mayhar 	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1514273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode,
1515273df556SFrank Mayhar 				 "logical %d < ee_block %d + ee_len %d!",
1516273df556SFrank Mayhar 				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1517273df556SFrank Mayhar 		return -EIO;
1518273df556SFrank Mayhar 	}
15191988b51eSAlex Tomas 
15201988b51eSAlex Tomas 	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
15211988b51eSAlex Tomas 		/* next allocated block in this leaf */
15221988b51eSAlex Tomas 		ex++;
15234d33b1efSTheodore Ts'o 		goto found_extent;
15241988b51eSAlex Tomas 	}
15251988b51eSAlex Tomas 
15261988b51eSAlex Tomas 	/* go up and search for index to the right */
15271988b51eSAlex Tomas 	while (--depth >= 0) {
15281988b51eSAlex Tomas 		ix = path[depth].p_idx;
15291988b51eSAlex Tomas 		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
153025f1ee3aSWu Fengguang 			goto got_index;
15311988b51eSAlex Tomas 	}
15321988b51eSAlex Tomas 
153325f1ee3aSWu Fengguang 	/* we've gone up to the root and found no index to the right */
15341988b51eSAlex Tomas 	return 0;
15351988b51eSAlex Tomas 
153625f1ee3aSWu Fengguang got_index:
15371988b51eSAlex Tomas 	/* we've found an index to the right, let's
15381988b51eSAlex Tomas 	 * follow it and find the closest allocated
15391988b51eSAlex Tomas 	 * block to the right */
15401988b51eSAlex Tomas 	ix++;
1541bf89d16fSTheodore Ts'o 	block = ext4_idx_pblock(ix);
15421988b51eSAlex Tomas 	while (++depth < path->p_depth) {
1543395a87bfSEric Sandeen 		/* subtract from p_depth to get proper eh_depth */
15447d7ea89eSTheodore Ts'o 		bh = read_extent_tree_block(inode, block,
1545107a7bd3STheodore Ts'o 					    path->p_depth - depth, 0);
15467d7ea89eSTheodore Ts'o 		if (IS_ERR(bh))
15477d7ea89eSTheodore Ts'o 			return PTR_ERR(bh);
15487d7ea89eSTheodore Ts'o 		eh = ext_block_hdr(bh);
15491988b51eSAlex Tomas 		ix = EXT_FIRST_INDEX(eh);
1550bf89d16fSTheodore Ts'o 		block = ext4_idx_pblock(ix);
15511988b51eSAlex Tomas 		put_bh(bh);
15521988b51eSAlex Tomas 	}
15531988b51eSAlex Tomas 
1554107a7bd3STheodore Ts'o 	bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
15557d7ea89eSTheodore Ts'o 	if (IS_ERR(bh))
15567d7ea89eSTheodore Ts'o 		return PTR_ERR(bh);
15571988b51eSAlex Tomas 	eh = ext_block_hdr(bh);
15581988b51eSAlex Tomas 	ex = EXT_FIRST_EXTENT(eh);
15594d33b1efSTheodore Ts'o found_extent:
15601988b51eSAlex Tomas 	*logical = le32_to_cpu(ex->ee_block);
1561bf89d16fSTheodore Ts'o 	*phys = ext4_ext_pblock(ex);
15624d33b1efSTheodore Ts'o 	*ret_ex = ex;
15634d33b1efSTheodore Ts'o 	if (bh)
15641988b51eSAlex Tomas 		put_bh(bh);
15651988b51eSAlex Tomas 	return 0;
15661988b51eSAlex Tomas }
15671988b51eSAlex Tomas 
15681988b51eSAlex Tomas /*
1569d0d856e8SRandy Dunlap  * ext4_ext_next_allocated_block:
1570f17722f9SLukas Czerner  * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1571d0d856e8SRandy Dunlap  * NOTE: it considers the block number from an index entry as
1572d0d856e8SRandy Dunlap  * an allocated block. Thus, index entries have to be consistent
1573d0d856e8SRandy Dunlap  * with leaves.
1574a86c6181SAlex Tomas  */
1575fcf6b1b7SDmitry Monakhov ext4_lblk_t
1576a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1577a86c6181SAlex Tomas {
1578a86c6181SAlex Tomas 	int depth;
1579a86c6181SAlex Tomas 
1580a86c6181SAlex Tomas 	BUG_ON(path == NULL);
1581a86c6181SAlex Tomas 	depth = path->p_depth;
1582a86c6181SAlex Tomas 
1583a86c6181SAlex Tomas 	if (depth == 0 && path->p_ext == NULL)
1584f17722f9SLukas Czerner 		return EXT_MAX_BLOCKS;
1585a86c6181SAlex Tomas 
1586a86c6181SAlex Tomas 	while (depth >= 0) {
1587a86c6181SAlex Tomas 		if (depth == path->p_depth) {
1588a86c6181SAlex Tomas 			/* leaf */
15896f8ff537SCurt Wohlgemuth 			if (path[depth].p_ext &&
15906f8ff537SCurt Wohlgemuth 				path[depth].p_ext !=
1591a86c6181SAlex Tomas 					EXT_LAST_EXTENT(path[depth].p_hdr))
1592a86c6181SAlex Tomas 			  return le32_to_cpu(path[depth].p_ext[1].ee_block);
1593a86c6181SAlex Tomas 		} else {
1594a86c6181SAlex Tomas 			/* index */
1595a86c6181SAlex Tomas 			if (path[depth].p_idx !=
1596a86c6181SAlex Tomas 					EXT_LAST_INDEX(path[depth].p_hdr))
1597a86c6181SAlex Tomas 			  return le32_to_cpu(path[depth].p_idx[1].ei_block);
1598a86c6181SAlex Tomas 		}
1599a86c6181SAlex Tomas 		depth--;
1600a86c6181SAlex Tomas 	}
1601a86c6181SAlex Tomas 
1602f17722f9SLukas Czerner 	return EXT_MAX_BLOCKS;
1603a86c6181SAlex Tomas }
1604a86c6181SAlex Tomas 
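/*
 * Example walk for a hypothetical depth-1 path: if path[1].p_ext is
 * the last extent of its leaf, the leaf level yields nothing and the
 * loop drops to the index level; if path[0].p_idx is not the last
 * index there, the first block of the next subtree,
 * path[0].p_idx[1].ei_block, is returned.  Only when every level is
 * exhausted does the function return EXT_MAX_BLOCKS.
 */
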
1605a86c6181SAlex Tomas /*
1606d0d856e8SRandy Dunlap  * ext4_ext_next_leaf_block:
1607f17722f9SLukas Czerner  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1608a86c6181SAlex Tomas  */
16095718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1610a86c6181SAlex Tomas {
1611a86c6181SAlex Tomas 	int depth;
1612a86c6181SAlex Tomas 
1613a86c6181SAlex Tomas 	BUG_ON(path == NULL);
1614a86c6181SAlex Tomas 	depth = path->p_depth;
1615a86c6181SAlex Tomas 
1616a86c6181SAlex Tomas 	/* zero-tree has no leaf blocks at all */
1617a86c6181SAlex Tomas 	if (depth == 0)
1618f17722f9SLukas Czerner 		return EXT_MAX_BLOCKS;
1619a86c6181SAlex Tomas 
1620a86c6181SAlex Tomas 	/* go to index block */
1621a86c6181SAlex Tomas 	depth--;
1622a86c6181SAlex Tomas 
1623a86c6181SAlex Tomas 	while (depth >= 0) {
1624a86c6181SAlex Tomas 		if (path[depth].p_idx !=
1625a86c6181SAlex Tomas 				EXT_LAST_INDEX(path[depth].p_hdr))
1626725d26d3SAneesh Kumar K.V 			return (ext4_lblk_t)
1627725d26d3SAneesh Kumar K.V 				le32_to_cpu(path[depth].p_idx[1].ei_block);
1628a86c6181SAlex Tomas 		depth--;
1629a86c6181SAlex Tomas 	}
1630a86c6181SAlex Tomas 
1631f17722f9SLukas Czerner 	return EXT_MAX_BLOCKS;
1632a86c6181SAlex Tomas }
1633a86c6181SAlex Tomas 
1634a86c6181SAlex Tomas /*
1635d0d856e8SRandy Dunlap  * ext4_ext_correct_indexes:
1636d0d856e8SRandy Dunlap  * if a leaf gets modified and the modified extent is first in the leaf,
1637d0d856e8SRandy Dunlap  * then we have to correct all indexes above.
1638a86c6181SAlex Tomas  * TODO: do we need to correct the tree in all cases?
1639a86c6181SAlex Tomas  */
16401d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1641a86c6181SAlex Tomas 				struct ext4_ext_path *path)
1642a86c6181SAlex Tomas {
1643a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
1644a86c6181SAlex Tomas 	int depth = ext_depth(inode);
1645a86c6181SAlex Tomas 	struct ext4_extent *ex;
1646a86c6181SAlex Tomas 	__le32 border;
1647a86c6181SAlex Tomas 	int k, err = 0;
1648a86c6181SAlex Tomas 
1649a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
1650a86c6181SAlex Tomas 	ex = path[depth].p_ext;
1651273df556SFrank Mayhar 
1652273df556SFrank Mayhar 	if (unlikely(ex == NULL || eh == NULL)) {
1653273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode,
1654273df556SFrank Mayhar 				 "ex %p == NULL or eh %p == NULL", ex, eh);
1655273df556SFrank Mayhar 		return -EIO;
1656273df556SFrank Mayhar 	}
1657a86c6181SAlex Tomas 
1658a86c6181SAlex Tomas 	if (depth == 0) {
1659a86c6181SAlex Tomas 		/* there is no tree at all */
1660a86c6181SAlex Tomas 		return 0;
1661a86c6181SAlex Tomas 	}
1662a86c6181SAlex Tomas 
1663a86c6181SAlex Tomas 	if (ex != EXT_FIRST_EXTENT(eh)) {
1664a86c6181SAlex Tomas 		/* we correct tree if first leaf got modified only */
1665a86c6181SAlex Tomas 		return 0;
1666a86c6181SAlex Tomas 	}
1667a86c6181SAlex Tomas 
1668a86c6181SAlex Tomas 	/*
1669d0d856e8SRandy Dunlap 	 * TODO: we need correction if border is smaller than current one
1670a86c6181SAlex Tomas 	 */
1671a86c6181SAlex Tomas 	k = depth - 1;
1672a86c6181SAlex Tomas 	border = path[depth].p_ext->ee_block;
16737e028976SAvantika Mathur 	err = ext4_ext_get_access(handle, inode, path + k);
16747e028976SAvantika Mathur 	if (err)
1675a86c6181SAlex Tomas 		return err;
1676a86c6181SAlex Tomas 	path[k].p_idx->ei_block = border;
16777e028976SAvantika Mathur 	err = ext4_ext_dirty(handle, inode, path + k);
16787e028976SAvantika Mathur 	if (err)
1679a86c6181SAlex Tomas 		return err;
1680a86c6181SAlex Tomas 
1681a86c6181SAlex Tomas 	while (k--) {
1682a86c6181SAlex Tomas 		/* change all left-side indexes */
1683a86c6181SAlex Tomas 		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1684a86c6181SAlex Tomas 			break;
16857e028976SAvantika Mathur 		err = ext4_ext_get_access(handle, inode, path + k);
16867e028976SAvantika Mathur 		if (err)
1687a86c6181SAlex Tomas 			break;
1688a86c6181SAlex Tomas 		path[k].p_idx->ei_block = border;
16897e028976SAvantika Mathur 		err = ext4_ext_dirty(handle, inode, path + k);
16907e028976SAvantika Mathur 		if (err)
1691a86c6181SAlex Tomas 			break;
1692a86c6181SAlex Tomas 	}
1693a86c6181SAlex Tomas 
1694a86c6181SAlex Tomas 	return err;
1695a86c6181SAlex Tomas }
1696a86c6181SAlex Tomas 
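/*
 * Worked example with hypothetical values: if an operation lowers the
 * first extent of a leaf from ee_block 100 to 96, the code above
 * rewrites the parent index entry to ei_block = 96 and keeps walking
 * toward the root for as long as the child entry is the first one in
 * its block, so every ancestor index that named block 100 now names 96.
 */
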
1697748de673SAkira Fujita int
1698a86c6181SAlex Tomas ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1699a86c6181SAlex Tomas 				struct ext4_extent *ex2)
1700a86c6181SAlex Tomas {
1701da0169b3SEric Sandeen 	unsigned short ext1_ee_len, ext2_ee_len;
1702a2df2a63SAmit Arora 
1703a2df2a63SAmit Arora 	/*
1704ec22ba8eSDmitry Monakhov 	 * Make sure that both extents are initialized. We don't merge
1705556615dcSLukas Czerner 	 * unwritten extents so that we can be sure that the end_io code has
1706ec22ba8eSDmitry Monakhov 	 * the extent that was written properly split out and that conversion
1707ec22ba8eSDmitry Monakhov 	 * to initialized is trivial.
1708a2df2a63SAmit Arora 	 */
1709556615dcSLukas Czerner 	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1710a2df2a63SAmit Arora 		return 0;
1711a2df2a63SAmit Arora 
1712a2df2a63SAmit Arora 	ext1_ee_len = ext4_ext_get_actual_len(ex1);
1713a2df2a63SAmit Arora 	ext2_ee_len = ext4_ext_get_actual_len(ex2);
1714a2df2a63SAmit Arora 
1715a2df2a63SAmit Arora 	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
171663f57933SAndrew Morton 			le32_to_cpu(ex2->ee_block))
1717a86c6181SAlex Tomas 		return 0;
1718a86c6181SAlex Tomas 
1719471d4011SSuparna Bhattacharya 	/*
1720471d4011SSuparna Bhattacharya 	 * To allow future support for preallocated extents to be added
1721471d4011SSuparna Bhattacharya 	 * as an RO_COMPAT feature, refuse to merge two extents if
1722d0d856e8SRandy Dunlap 	 * this can result in the top bit of ee_len being set.
1723471d4011SSuparna Bhattacharya 	 */
1724da0169b3SEric Sandeen 	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1725471d4011SSuparna Bhattacharya 		return 0;
1726556615dcSLukas Czerner 	if (ext4_ext_is_unwritten(ex1) &&
1727a9b82415SDarrick J. Wong 	    (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
1728a9b82415SDarrick J. Wong 	     atomic_read(&EXT4_I(inode)->i_unwritten) ||
1729556615dcSLukas Czerner 	     (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
1730a9b82415SDarrick J. Wong 		return 0;
1731bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
1732b939e376SAneesh Kumar K.V 	if (ext1_ee_len >= 4)
1733a86c6181SAlex Tomas 		return 0;
1734a86c6181SAlex Tomas #endif
1735a86c6181SAlex Tomas 
1736bf89d16fSTheodore Ts'o 	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1737a86c6181SAlex Tomas 		return 1;
1738a86c6181SAlex Tomas 	return 0;
1739a86c6181SAlex Tomas }
1740a86c6181SAlex Tomas 
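/*
 * Worked example with hypothetical extents: ex1 = { ee_block = 100,
 * ee_len = 8 } at physical block 500 and ex2 = { ee_block = 108,
 * ee_len = 4 } at physical block 508 are both initialized, logically
 * contiguous (100 + 8 == 108), physically contiguous (500 + 8 == 508),
 * and 8 + 4 stays below EXT_INIT_MAX_LEN, so the function returns 1
 * and the pair may be collapsed into { ee_block = 100, ee_len = 12 }.
 */
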
1741a86c6181SAlex Tomas /*
174256055d3aSAmit Arora  * This function tries to merge the "ex" extent with the next extent in the
174356055d3aSAmit Arora  * tree. It always tries to merge towards the right. If you want to merge
174456055d3aSAmit Arora  * towards the left, pass "ex - 1" as argument instead of "ex".
174556055d3aSAmit Arora  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
174656055d3aSAmit Arora  * 1 if they got merged.
174756055d3aSAmit Arora  */
1748197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode,
174956055d3aSAmit Arora 				 struct ext4_ext_path *path,
175056055d3aSAmit Arora 				 struct ext4_extent *ex)
175156055d3aSAmit Arora {
175256055d3aSAmit Arora 	struct ext4_extent_header *eh;
175356055d3aSAmit Arora 	unsigned int depth, len;
1754556615dcSLukas Czerner 	int merge_done = 0, unwritten;
175556055d3aSAmit Arora 
175656055d3aSAmit Arora 	depth = ext_depth(inode);
175756055d3aSAmit Arora 	BUG_ON(path[depth].p_hdr == NULL);
175856055d3aSAmit Arora 	eh = path[depth].p_hdr;
175956055d3aSAmit Arora 
176056055d3aSAmit Arora 	while (ex < EXT_LAST_EXTENT(eh)) {
176156055d3aSAmit Arora 		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
176256055d3aSAmit Arora 			break;
176356055d3aSAmit Arora 		/* merge with next extent! */
1764556615dcSLukas Czerner 		unwritten = ext4_ext_is_unwritten(ex);
176556055d3aSAmit Arora 		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
176656055d3aSAmit Arora 				+ ext4_ext_get_actual_len(ex + 1));
1767556615dcSLukas Czerner 		if (unwritten)
1768556615dcSLukas Czerner 			ext4_ext_mark_unwritten(ex);
176956055d3aSAmit Arora 
177056055d3aSAmit Arora 		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
177156055d3aSAmit Arora 			len = (EXT_LAST_EXTENT(eh) - ex - 1)
177256055d3aSAmit Arora 				* sizeof(struct ext4_extent);
177356055d3aSAmit Arora 			memmove(ex + 1, ex + 2, len);
177456055d3aSAmit Arora 		}
1775e8546d06SMarcin Slusarz 		le16_add_cpu(&eh->eh_entries, -1);
177656055d3aSAmit Arora 		merge_done = 1;
177756055d3aSAmit Arora 		WARN_ON(eh->eh_entries == 0);
177856055d3aSAmit Arora 		if (!eh->eh_entries)
177924676da4STheodore Ts'o 			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
178056055d3aSAmit Arora 	}
178156055d3aSAmit Arora 
178256055d3aSAmit Arora 	return merge_done;
178356055d3aSAmit Arora }
178456055d3aSAmit Arora 
178556055d3aSAmit Arora /*
1786ecb94f5fSTheodore Ts'o  * This function does a very simple check to see if we can collapse
1787ecb94f5fSTheodore Ts'o  * an extent tree with a single extent tree leaf block into the inode.
1788ecb94f5fSTheodore Ts'o  */
1789ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle,
1790ecb94f5fSTheodore Ts'o 				     struct inode *inode,
1791ecb94f5fSTheodore Ts'o 				     struct ext4_ext_path *path)
1792ecb94f5fSTheodore Ts'o {
1793ecb94f5fSTheodore Ts'o 	size_t s;
1794ecb94f5fSTheodore Ts'o 	unsigned max_root = ext4_ext_space_root(inode, 0);
1795ecb94f5fSTheodore Ts'o 	ext4_fsblk_t blk;
1796ecb94f5fSTheodore Ts'o 
1797ecb94f5fSTheodore Ts'o 	if ((path[0].p_depth != 1) ||
1798ecb94f5fSTheodore Ts'o 	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1799ecb94f5fSTheodore Ts'o 	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1800ecb94f5fSTheodore Ts'o 		return;
1801ecb94f5fSTheodore Ts'o 
1802ecb94f5fSTheodore Ts'o 	/*
1803ecb94f5fSTheodore Ts'o 	 * We need to modify the block allocation bitmap and the block
1804ecb94f5fSTheodore Ts'o 	 * group descriptor to release the extent tree block.  If we
1805ecb94f5fSTheodore Ts'o 	 * can't get the journal credits, give up.
1806ecb94f5fSTheodore Ts'o 	 */
1807ecb94f5fSTheodore Ts'o 	if (ext4_journal_extend(handle, 2))
1808ecb94f5fSTheodore Ts'o 		return;
1809ecb94f5fSTheodore Ts'o 
1810ecb94f5fSTheodore Ts'o 	/*
1811ecb94f5fSTheodore Ts'o 	 * Copy the extent data up to the inode
1812ecb94f5fSTheodore Ts'o 	 */
1813ecb94f5fSTheodore Ts'o 	blk = ext4_idx_pblock(path[0].p_idx);
1814ecb94f5fSTheodore Ts'o 	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1815ecb94f5fSTheodore Ts'o 		sizeof(struct ext4_extent_idx);
1816ecb94f5fSTheodore Ts'o 	s += sizeof(struct ext4_extent_header);
1817ecb94f5fSTheodore Ts'o 
1818ecb94f5fSTheodore Ts'o 	memcpy(path[0].p_hdr, path[1].p_hdr, s);
1819ecb94f5fSTheodore Ts'o 	path[0].p_depth = 0;
1820ecb94f5fSTheodore Ts'o 	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1821ecb94f5fSTheodore Ts'o 		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1822ecb94f5fSTheodore Ts'o 	path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1823ecb94f5fSTheodore Ts'o 
1824ecb94f5fSTheodore Ts'o 	brelse(path[1].p_bh);
1825ecb94f5fSTheodore Ts'o 	ext4_free_blocks(handle, inode, NULL, blk, 1,
182671d4f7d0STheodore Ts'o 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1827ecb94f5fSTheodore Ts'o }
1828ecb94f5fSTheodore Ts'o 
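/*
 * A sketch of the size check above, with hypothetical on-disk numbers:
 * the in-inode root typically holds max_root == 4 entries, so a
 * depth-1 tree whose single leaf carries three extents qualifies;
 * s = 3 * 12 bytes of entries plus the 12-byte header is memcpy()d
 * over the root, p_depth becomes 0, and the now-unused leaf block is
 * returned to the allocator.
 */
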
1829ecb94f5fSTheodore Ts'o /*
1830197217a5SYongqiang Yang  * This function tries to merge the @ex extent with its neighbours in the
1831197217a5SYongqiang Yang  * tree, trying a merge to the left first and then to the right.
1832197217a5SYongqiang Yang  */
1833ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle,
1834ecb94f5fSTheodore Ts'o 				  struct inode *inode,
1835197217a5SYongqiang Yang 				  struct ext4_ext_path *path,
1836197217a5SYongqiang Yang 				  struct ext4_extent *ex) {
1837197217a5SYongqiang Yang 	struct ext4_extent_header *eh;
1838197217a5SYongqiang Yang 	unsigned int depth;
1839197217a5SYongqiang Yang 	int merge_done = 0;
1840197217a5SYongqiang Yang 
1841197217a5SYongqiang Yang 	depth = ext_depth(inode);
1842197217a5SYongqiang Yang 	BUG_ON(path[depth].p_hdr == NULL);
1843197217a5SYongqiang Yang 	eh = path[depth].p_hdr;
1844197217a5SYongqiang Yang 
1845197217a5SYongqiang Yang 	if (ex > EXT_FIRST_EXTENT(eh))
1846197217a5SYongqiang Yang 		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1847197217a5SYongqiang Yang 
1848197217a5SYongqiang Yang 	if (!merge_done)
1849ecb94f5fSTheodore Ts'o 		(void) ext4_ext_try_to_merge_right(inode, path, ex);
1850197217a5SYongqiang Yang 
1851ecb94f5fSTheodore Ts'o 	ext4_ext_try_to_merge_up(handle, inode, path);
1852197217a5SYongqiang Yang }
1853197217a5SYongqiang Yang 
1854197217a5SYongqiang Yang /*
185525d14f98SAmit Arora  * check if a portion of the "newext" extent overlaps with an
185625d14f98SAmit Arora  * existing extent.
185725d14f98SAmit Arora  *
185825d14f98SAmit Arora  * If there is an overlap discovered, it updates the length of newext
185925d14f98SAmit Arora  * such that there will be no overlap, and then returns 1.
186025d14f98SAmit Arora  * If there is no overlap found, it returns 0.
186125d14f98SAmit Arora  */
18624d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
18634d33b1efSTheodore Ts'o 					   struct inode *inode,
186425d14f98SAmit Arora 					   struct ext4_extent *newext,
186525d14f98SAmit Arora 					   struct ext4_ext_path *path)
186625d14f98SAmit Arora {
1867725d26d3SAneesh Kumar K.V 	ext4_lblk_t b1, b2;
186825d14f98SAmit Arora 	unsigned int depth, len1;
186925d14f98SAmit Arora 	unsigned int ret = 0;
187025d14f98SAmit Arora 
187125d14f98SAmit Arora 	b1 = le32_to_cpu(newext->ee_block);
1872a2df2a63SAmit Arora 	len1 = ext4_ext_get_actual_len(newext);
187325d14f98SAmit Arora 	depth = ext_depth(inode);
187425d14f98SAmit Arora 	if (!path[depth].p_ext)
187525d14f98SAmit Arora 		goto out;
1876f5a44db5STheodore Ts'o 	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
187725d14f98SAmit Arora 
187825d14f98SAmit Arora 	/*
187925d14f98SAmit Arora 	 * get the next allocated block if the extent in the path
188025d14f98SAmit Arora 	 * is before the requested block(s)
188125d14f98SAmit Arora 	 */
188225d14f98SAmit Arora 	if (b2 < b1) {
188325d14f98SAmit Arora 		b2 = ext4_ext_next_allocated_block(path);
1884f17722f9SLukas Czerner 		if (b2 == EXT_MAX_BLOCKS)
188525d14f98SAmit Arora 			goto out;
1886f5a44db5STheodore Ts'o 		b2 = EXT4_LBLK_CMASK(sbi, b2);
188725d14f98SAmit Arora 	}
188825d14f98SAmit Arora 
1889725d26d3SAneesh Kumar K.V 	/* check for wrap through zero on extent logical start block */
189025d14f98SAmit Arora 	if (b1 + len1 < b1) {
1891f17722f9SLukas Czerner 		len1 = EXT_MAX_BLOCKS - b1;
189225d14f98SAmit Arora 		newext->ee_len = cpu_to_le16(len1);
189325d14f98SAmit Arora 		ret = 1;
189425d14f98SAmit Arora 	}
189525d14f98SAmit Arora 
189625d14f98SAmit Arora 	/* check for overlap */
189725d14f98SAmit Arora 	if (b1 + len1 > b2) {
189825d14f98SAmit Arora 		newext->ee_len = cpu_to_le16(b2 - b1);
189925d14f98SAmit Arora 		ret = 1;
190025d14f98SAmit Arora 	}
190125d14f98SAmit Arora out:
190225d14f98SAmit Arora 	return ret;
190325d14f98SAmit Arora }
190425d14f98SAmit Arora 
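/*
 * Clamping example with hypothetical values: for newext =
 * { ee_block = 100, ee_len = 50 } and the next allocated block found
 * at b2 = 120, the overlap test 100 + 50 > 120 fires, ee_len is
 * trimmed to 120 - 100 = 20, and 1 is returned so the caller knows
 * the requested mapping was shortened.
 */
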
190525d14f98SAmit Arora /*
1906d0d856e8SRandy Dunlap  * ext4_ext_insert_extent:
1907d0d856e8SRandy Dunlap  * tries to merge the requested extent into an existing extent or
1908d0d856e8SRandy Dunlap  * inserts the requested extent as a new one into the tree,
1909d0d856e8SRandy Dunlap  * creating a new leaf in the no-space case.
1910a86c6181SAlex Tomas  */
1911a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1912a86c6181SAlex Tomas 				struct ext4_ext_path *path,
1913107a7bd3STheodore Ts'o 				struct ext4_extent *newext, int gb_flags)
1914a86c6181SAlex Tomas {
1915a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
1916a86c6181SAlex Tomas 	struct ext4_extent *ex, *fex;
1917a86c6181SAlex Tomas 	struct ext4_extent *nearex; /* nearest extent */
1918a86c6181SAlex Tomas 	struct ext4_ext_path *npath = NULL;
1919725d26d3SAneesh Kumar K.V 	int depth, len, err;
1920725d26d3SAneesh Kumar K.V 	ext4_lblk_t next;
1921556615dcSLukas Czerner 	int mb_flags = 0, unwritten;
1922a86c6181SAlex Tomas 
1923273df556SFrank Mayhar 	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1924273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1925273df556SFrank Mayhar 		return -EIO;
1926273df556SFrank Mayhar 	}
1927a86c6181SAlex Tomas 	depth = ext_depth(inode);
1928a86c6181SAlex Tomas 	ex = path[depth].p_ext;
1929be8981beSLukas Czerner 	eh = path[depth].p_hdr;
1930273df556SFrank Mayhar 	if (unlikely(path[depth].p_hdr == NULL)) {
1931273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1932273df556SFrank Mayhar 		return -EIO;
1933273df556SFrank Mayhar 	}
1934a86c6181SAlex Tomas 
1935a86c6181SAlex Tomas 	/* try to insert block into found extent and return */
1936107a7bd3STheodore Ts'o 	if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1937be8981beSLukas Czerner 
1938be8981beSLukas Czerner 		/*
1939be8981beSLukas Czerner 		 * Try to see whether we should rather test the extent to the
1940be8981beSLukas Czerner 		 * right of ex, or the one to the left of ex. This is because
1941be8981beSLukas Czerner 		 * ext4_ext_find_extent() can return either the extent on the
1942be8981beSLukas Czerner 		 * left or on the right of the searched position. This
1943be8981beSLukas Czerner 		 * will make merging more effective.
1944be8981beSLukas Czerner 		 */
1945be8981beSLukas Czerner 		if (ex < EXT_LAST_EXTENT(eh) &&
1946be8981beSLukas Czerner 		    (le32_to_cpu(ex->ee_block) +
1947be8981beSLukas Czerner 		    ext4_ext_get_actual_len(ex) <
1948be8981beSLukas Czerner 		    le32_to_cpu(newext->ee_block))) {
1949be8981beSLukas Czerner 			ex += 1;
1950be8981beSLukas Czerner 			goto prepend;
1951be8981beSLukas Czerner 		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
1952be8981beSLukas Czerner 			   (le32_to_cpu(newext->ee_block) +
1953be8981beSLukas Czerner 			   ext4_ext_get_actual_len(newext) <
1954be8981beSLukas Czerner 			   le32_to_cpu(ex->ee_block)))
1955be8981beSLukas Czerner 			ex -= 1;
1956be8981beSLukas Czerner 
1957be8981beSLukas Czerner 		/* Try to append newex to the ex */
1958be8981beSLukas Czerner 		if (ext4_can_extents_be_merged(inode, ex, newext)) {
1959be8981beSLukas Czerner 			ext_debug("append [%d]%d block to %u:[%d]%d"
1960be8981beSLukas Czerner 				  "(from %llu)\n",
1961556615dcSLukas Czerner 				  ext4_ext_is_unwritten(newext),
1962a2df2a63SAmit Arora 				  ext4_ext_get_actual_len(newext),
1963a86c6181SAlex Tomas 				  le32_to_cpu(ex->ee_block),
1964556615dcSLukas Czerner 				  ext4_ext_is_unwritten(ex),
1965bf89d16fSTheodore Ts'o 				  ext4_ext_get_actual_len(ex),
1966bf89d16fSTheodore Ts'o 				  ext4_ext_pblock(ex));
1967be8981beSLukas Czerner 			err = ext4_ext_get_access(handle, inode,
1968be8981beSLukas Czerner 						  path + depth);
19697e028976SAvantika Mathur 			if (err)
1970a86c6181SAlex Tomas 				return err;
1971556615dcSLukas Czerner 			unwritten = ext4_ext_is_unwritten(ex);
1972a2df2a63SAmit Arora 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1973a2df2a63SAmit Arora 					+ ext4_ext_get_actual_len(newext));
1974556615dcSLukas Czerner 			if (unwritten)
1975556615dcSLukas Czerner 				ext4_ext_mark_unwritten(ex);
1976a86c6181SAlex Tomas 			eh = path[depth].p_hdr;
1977a86c6181SAlex Tomas 			nearex = ex;
1978a86c6181SAlex Tomas 			goto merge;
1979a86c6181SAlex Tomas 		}
1980a86c6181SAlex Tomas 
1981be8981beSLukas Czerner prepend:
1982be8981beSLukas Czerner 		/* Try to prepend newex to the ex */
1983be8981beSLukas Czerner 		if (ext4_can_extents_be_merged(inode, newext, ex)) {
1984be8981beSLukas Czerner 			ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
1985be8981beSLukas Czerner 				  "(from %llu)\n",
1986be8981beSLukas Czerner 				  le32_to_cpu(newext->ee_block),
1987556615dcSLukas Czerner 				  ext4_ext_is_unwritten(newext),
1988be8981beSLukas Czerner 				  ext4_ext_get_actual_len(newext),
1989be8981beSLukas Czerner 				  le32_to_cpu(ex->ee_block),
1990556615dcSLukas Czerner 				  ext4_ext_is_unwritten(ex),
1991be8981beSLukas Czerner 				  ext4_ext_get_actual_len(ex),
1992be8981beSLukas Czerner 				  ext4_ext_pblock(ex));
1993be8981beSLukas Czerner 			err = ext4_ext_get_access(handle, inode,
1994be8981beSLukas Czerner 						  path + depth);
1995be8981beSLukas Czerner 			if (err)
1996be8981beSLukas Czerner 				return err;
1997be8981beSLukas Czerner 
1998556615dcSLukas Czerner 			unwritten = ext4_ext_is_unwritten(ex);
1999be8981beSLukas Czerner 			ex->ee_block = newext->ee_block;
2000be8981beSLukas Czerner 			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2001be8981beSLukas Czerner 			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2002be8981beSLukas Czerner 					+ ext4_ext_get_actual_len(newext));
2003556615dcSLukas Czerner 			if (unwritten)
2004556615dcSLukas Czerner 				ext4_ext_mark_unwritten(ex);
2005be8981beSLukas Czerner 			eh = path[depth].p_hdr;
2006be8981beSLukas Czerner 			nearex = ex;
2007be8981beSLukas Czerner 			goto merge;
2008be8981beSLukas Czerner 		}
2009be8981beSLukas Czerner 	}
2010be8981beSLukas Czerner 
2011a86c6181SAlex Tomas 	depth = ext_depth(inode);
2012a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
2013a86c6181SAlex Tomas 	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2014a86c6181SAlex Tomas 		goto has_space;
2015a86c6181SAlex Tomas 
2016a86c6181SAlex Tomas 	/* probably next leaf has space for us? */
2017a86c6181SAlex Tomas 	fex = EXT_LAST_EXTENT(eh);
2018598dbdf2SRobin Dong 	next = EXT_MAX_BLOCKS;
2019598dbdf2SRobin Dong 	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
20205718789dSRobin Dong 		next = ext4_ext_next_leaf_block(path);
2021598dbdf2SRobin Dong 	if (next != EXT_MAX_BLOCKS) {
202232de6756SYongqiang Yang 		ext_debug("next leaf block - %u\n", next);
2023a86c6181SAlex Tomas 		BUG_ON(npath != NULL);
2024107a7bd3STheodore Ts'o 		npath = ext4_ext_find_extent(inode, next, NULL, 0);
2025a86c6181SAlex Tomas 		if (IS_ERR(npath))
2026a86c6181SAlex Tomas 			return PTR_ERR(npath);
2027a86c6181SAlex Tomas 		BUG_ON(npath->p_depth != path->p_depth);
2028a86c6181SAlex Tomas 		eh = npath[depth].p_hdr;
2029a86c6181SAlex Tomas 		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
203025985edcSLucas De Marchi 			ext_debug("next leaf isn't full(%d)\n",
2031a86c6181SAlex Tomas 				  le16_to_cpu(eh->eh_entries));
2032a86c6181SAlex Tomas 			path = npath;
2033ffb505ffSRobin Dong 			goto has_space;
2034a86c6181SAlex Tomas 		}
2035a86c6181SAlex Tomas 		ext_debug("next leaf has no free space(%d,%d)\n",
2036a86c6181SAlex Tomas 			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2037a86c6181SAlex Tomas 	}
2038a86c6181SAlex Tomas 
2039a86c6181SAlex Tomas 	/*
2040d0d856e8SRandy Dunlap 	 * There is no free space in the found leaf.
2041d0d856e8SRandy Dunlap 	 * We're gonna add a new leaf in the tree.
2042a86c6181SAlex Tomas 	 */
2043107a7bd3STheodore Ts'o 	if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2044107a7bd3STheodore Ts'o 		mb_flags = EXT4_MB_USE_RESERVED;
2045107a7bd3STheodore Ts'o 	err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2046107a7bd3STheodore Ts'o 				       path, newext);
2047a86c6181SAlex Tomas 	if (err)
2048a86c6181SAlex Tomas 		goto cleanup;
2049a86c6181SAlex Tomas 	depth = ext_depth(inode);
2050a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
2051a86c6181SAlex Tomas 
2052a86c6181SAlex Tomas has_space:
2053a86c6181SAlex Tomas 	nearex = path[depth].p_ext;
2054a86c6181SAlex Tomas 
20557e028976SAvantika Mathur 	err = ext4_ext_get_access(handle, inode, path + depth);
20567e028976SAvantika Mathur 	if (err)
2057a86c6181SAlex Tomas 		goto cleanup;
2058a86c6181SAlex Tomas 
2059a86c6181SAlex Tomas 	if (!nearex) {
2060a86c6181SAlex Tomas 		/* there is no extent in this leaf, create first one */
206132de6756SYongqiang Yang 		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
2062a86c6181SAlex Tomas 				le32_to_cpu(newext->ee_block),
2063bf89d16fSTheodore Ts'o 				ext4_ext_pblock(newext),
2064556615dcSLukas Czerner 				ext4_ext_is_unwritten(newext),
2065a2df2a63SAmit Arora 				ext4_ext_get_actual_len(newext));
206680e675f9SEric Gouriou 		nearex = EXT_FIRST_EXTENT(eh);
2067a86c6181SAlex Tomas 	} else {
206880e675f9SEric Gouriou 		if (le32_to_cpu(newext->ee_block)
206980e675f9SEric Gouriou 			   > le32_to_cpu(nearex->ee_block)) {
207080e675f9SEric Gouriou 			/* Insert after */
207132de6756SYongqiang Yang 			ext_debug("insert %u:%llu:[%d]%d after: "
207232de6756SYongqiang Yang 					"nearest %p\n",
2073a86c6181SAlex Tomas 					le32_to_cpu(newext->ee_block),
2074bf89d16fSTheodore Ts'o 					ext4_ext_pblock(newext),
2075556615dcSLukas Czerner 					ext4_ext_is_unwritten(newext),
2076a2df2a63SAmit Arora 					ext4_ext_get_actual_len(newext),
207780e675f9SEric Gouriou 					nearex);
207880e675f9SEric Gouriou 			nearex++;
207980e675f9SEric Gouriou 		} else {
208080e675f9SEric Gouriou 			/* Insert before */
208180e675f9SEric Gouriou 			BUG_ON(newext->ee_block == nearex->ee_block);
208332de6756SYongqiang Yang 			ext_debug("insert %u:%llu:[%d]%d before: "
208332de6756SYongqiang Yang 					"nearest %p\n",
208480e675f9SEric Gouriou 					le32_to_cpu(newext->ee_block),
208580e675f9SEric Gouriou 					ext4_ext_pblock(newext),
2086556615dcSLukas Czerner 					ext4_ext_is_unwritten(newext),
208780e675f9SEric Gouriou 					ext4_ext_get_actual_len(newext),
208880e675f9SEric Gouriou 					nearex);
208980e675f9SEric Gouriou 		}
209080e675f9SEric Gouriou 		len = EXT_LAST_EXTENT(eh) - nearex + 1;
209180e675f9SEric Gouriou 		if (len > 0) {
209232de6756SYongqiang Yang 			ext_debug("insert %u:%llu:[%d]%d: "
209380e675f9SEric Gouriou 					"move %d extents from 0x%p to 0x%p\n",
209480e675f9SEric Gouriou 					le32_to_cpu(newext->ee_block),
209580e675f9SEric Gouriou 					ext4_ext_pblock(newext),
2096556615dcSLukas Czerner 					ext4_ext_is_unwritten(newext),
209780e675f9SEric Gouriou 					ext4_ext_get_actual_len(newext),
209880e675f9SEric Gouriou 					len, nearex, nearex + 1);
209980e675f9SEric Gouriou 			memmove(nearex + 1, nearex,
210080e675f9SEric Gouriou 				len * sizeof(struct ext4_extent));
210180e675f9SEric Gouriou 		}
2102a86c6181SAlex Tomas 	}
2103a86c6181SAlex Tomas 
2104e8546d06SMarcin Slusarz 	le16_add_cpu(&eh->eh_entries, 1);
210580e675f9SEric Gouriou 	path[depth].p_ext = nearex;
2106a86c6181SAlex Tomas 	nearex->ee_block = newext->ee_block;
2107bf89d16fSTheodore Ts'o 	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2108a86c6181SAlex Tomas 	nearex->ee_len = newext->ee_len;
2109a86c6181SAlex Tomas 
2110a86c6181SAlex Tomas merge:
2111e7bcf823SHaiboLiu 	/* try to merge extents */
2112107a7bd3STheodore Ts'o 	if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2113ecb94f5fSTheodore Ts'o 		ext4_ext_try_to_merge(handle, inode, path, nearex);
2114a86c6181SAlex Tomas 
2115a86c6181SAlex Tomas 
2116a86c6181SAlex Tomas 	/* time to correct all indexes above */
2117a86c6181SAlex Tomas 	err = ext4_ext_correct_indexes(handle, inode, path);
2118a86c6181SAlex Tomas 	if (err)
2119a86c6181SAlex Tomas 		goto cleanup;
2120a86c6181SAlex Tomas 
2121ecb94f5fSTheodore Ts'o 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2122a86c6181SAlex Tomas 
2123a86c6181SAlex Tomas cleanup:
2124a86c6181SAlex Tomas 	if (npath) {
2125a86c6181SAlex Tomas 		ext4_ext_drop_refs(npath);
2126a86c6181SAlex Tomas 		kfree(npath);
2127a86c6181SAlex Tomas 	}
2128a86c6181SAlex Tomas 	return err;
2129a86c6181SAlex Tomas }
2130a86c6181SAlex Tomas 
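/*
 * The paths above, traced with hypothetical numbers: inserting
 * newext = { ee_block = 108, ee_len = 4 } while ex covers blocks
 * 100..107 takes the append branch and ex simply grows to ee_len = 12;
 * an insert past the last extent of a full leaf first probes the next
 * leaf and only then falls back to ext4_ext_create_new_leaf(); and a
 * plain insert into a leaf with room memmove()s the later extents up
 * by one slot before the new entry is written at nearex.
 */
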
213191dd8c11SLukas Czerner static int ext4_fill_fiemap_extents(struct inode *inode,
213291dd8c11SLukas Czerner 				    ext4_lblk_t block, ext4_lblk_t num,
213391dd8c11SLukas Czerner 				    struct fiemap_extent_info *fieinfo)
21346873fa0dSEric Sandeen {
21356873fa0dSEric Sandeen 	struct ext4_ext_path *path = NULL;
21366873fa0dSEric Sandeen 	struct ext4_extent *ex;
213769eb33dcSZheng Liu 	struct extent_status es;
213891dd8c11SLukas Czerner 	ext4_lblk_t next, next_del, start = 0, end = 0;
21396873fa0dSEric Sandeen 	ext4_lblk_t last = block + num;
214091dd8c11SLukas Czerner 	int exists, depth = 0, err = 0;
214191dd8c11SLukas Czerner 	unsigned int flags = 0;
214291dd8c11SLukas Czerner 	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
21436873fa0dSEric Sandeen 
2144f17722f9SLukas Czerner 	while (block < last && block != EXT_MAX_BLOCKS) {
21456873fa0dSEric Sandeen 		num = last - block;
21466873fa0dSEric Sandeen 		/* find extent for this block */
2147fab3a549STheodore Ts'o 		down_read(&EXT4_I(inode)->i_data_sem);
214891dd8c11SLukas Czerner 
214991dd8c11SLukas Czerner 		if (path && ext_depth(inode) != depth) {
215091dd8c11SLukas Czerner 			/* depth was changed. we have to realloc path */
215191dd8c11SLukas Czerner 			kfree(path);
215291dd8c11SLukas Czerner 			path = NULL;
215391dd8c11SLukas Czerner 		}
215491dd8c11SLukas Czerner 
2155107a7bd3STheodore Ts'o 		path = ext4_ext_find_extent(inode, block, path, 0);
21566873fa0dSEric Sandeen 		if (IS_ERR(path)) {
215791dd8c11SLukas Czerner 			up_read(&EXT4_I(inode)->i_data_sem);
21586873fa0dSEric Sandeen 			err = PTR_ERR(path);
21596873fa0dSEric Sandeen 			path = NULL;
21606873fa0dSEric Sandeen 			break;
21616873fa0dSEric Sandeen 		}
21626873fa0dSEric Sandeen 
21636873fa0dSEric Sandeen 		depth = ext_depth(inode);
2164273df556SFrank Mayhar 		if (unlikely(path[depth].p_hdr == NULL)) {
216591dd8c11SLukas Czerner 			up_read(&EXT4_I(inode)->i_data_sem);
2166273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2167273df556SFrank Mayhar 			err = -EIO;
2168273df556SFrank Mayhar 			break;
2169273df556SFrank Mayhar 		}
21706873fa0dSEric Sandeen 		ex = path[depth].p_ext;
21716873fa0dSEric Sandeen 		next = ext4_ext_next_allocated_block(path);
217291dd8c11SLukas Czerner 		ext4_ext_drop_refs(path);
21736873fa0dSEric Sandeen 
217491dd8c11SLukas Czerner 		flags = 0;
21756873fa0dSEric Sandeen 		exists = 0;
21766873fa0dSEric Sandeen 		if (!ex) {
21776873fa0dSEric Sandeen 			/* there is no extent yet, so try to allocate
21786873fa0dSEric Sandeen 			 * all requested space */
21796873fa0dSEric Sandeen 			start = block;
21806873fa0dSEric Sandeen 			end = block + num;
21816873fa0dSEric Sandeen 		} else if (le32_to_cpu(ex->ee_block) > block) {
21826873fa0dSEric Sandeen 			/* need to allocate space before found extent */
21836873fa0dSEric Sandeen 			start = block;
21846873fa0dSEric Sandeen 			end = le32_to_cpu(ex->ee_block);
21856873fa0dSEric Sandeen 			if (block + num < end)
21866873fa0dSEric Sandeen 				end = block + num;
21876873fa0dSEric Sandeen 		} else if (block >= le32_to_cpu(ex->ee_block)
21886873fa0dSEric Sandeen 					+ ext4_ext_get_actual_len(ex)) {
21896873fa0dSEric Sandeen 			/* need to allocate space after found extent */
21906873fa0dSEric Sandeen 			start = block;
21916873fa0dSEric Sandeen 			end = block + num;
21926873fa0dSEric Sandeen 			if (end >= next)
21936873fa0dSEric Sandeen 				end = next;
21946873fa0dSEric Sandeen 		} else if (block >= le32_to_cpu(ex->ee_block)) {
21956873fa0dSEric Sandeen 			/*
21966873fa0dSEric Sandeen 			 * some part of the requested space is covered
21976873fa0dSEric Sandeen 			 * by the found extent
21986873fa0dSEric Sandeen 			 */
21996873fa0dSEric Sandeen 			start = block;
22006873fa0dSEric Sandeen 			end = le32_to_cpu(ex->ee_block)
22016873fa0dSEric Sandeen 				+ ext4_ext_get_actual_len(ex);
22026873fa0dSEric Sandeen 			if (block + num < end)
22036873fa0dSEric Sandeen 				end = block + num;
22046873fa0dSEric Sandeen 			exists = 1;
22056873fa0dSEric Sandeen 		} else {
22066873fa0dSEric Sandeen 			BUG();
22076873fa0dSEric Sandeen 		}
22086873fa0dSEric Sandeen 		BUG_ON(end <= start);
22096873fa0dSEric Sandeen 
22106873fa0dSEric Sandeen 		if (!exists) {
221169eb33dcSZheng Liu 			es.es_lblk = start;
221269eb33dcSZheng Liu 			es.es_len = end - start;
221369eb33dcSZheng Liu 			es.es_pblk = 0;
22146873fa0dSEric Sandeen 		} else {
221569eb33dcSZheng Liu 			es.es_lblk = le32_to_cpu(ex->ee_block);
221669eb33dcSZheng Liu 			es.es_len = ext4_ext_get_actual_len(ex);
221769eb33dcSZheng Liu 			es.es_pblk = ext4_ext_pblock(ex);
2218556615dcSLukas Czerner 			if (ext4_ext_is_unwritten(ex))
221991dd8c11SLukas Czerner 				flags |= FIEMAP_EXTENT_UNWRITTEN;
22206873fa0dSEric Sandeen 		}
22216873fa0dSEric Sandeen 
222291dd8c11SLukas Czerner 		/*
222369eb33dcSZheng Liu 		 * Find delayed extent and update es accordingly. We call
222469eb33dcSZheng Liu 		 * it even in !exists case to find out whether es is the
222591dd8c11SLukas Czerner 		 * last existing extent or not.
222691dd8c11SLukas Czerner 		 */
222769eb33dcSZheng Liu 		next_del = ext4_find_delayed_extent(inode, &es);
222891dd8c11SLukas Czerner 		if (!exists && next_del) {
222991dd8c11SLukas Czerner 			exists = 1;
223072dac95dSJie Liu 			flags |= (FIEMAP_EXTENT_DELALLOC |
223172dac95dSJie Liu 				  FIEMAP_EXTENT_UNKNOWN);
223291dd8c11SLukas Czerner 		}
223391dd8c11SLukas Czerner 		up_read(&EXT4_I(inode)->i_data_sem);
223491dd8c11SLukas Czerner 
223569eb33dcSZheng Liu 		if (unlikely(es.es_len == 0)) {
223669eb33dcSZheng Liu 			EXT4_ERROR_INODE(inode, "es.es_len == 0");
2237273df556SFrank Mayhar 			err = -EIO;
2238273df556SFrank Mayhar 			break;
2239273df556SFrank Mayhar 		}
22406873fa0dSEric Sandeen 
2241f7fec032SZheng Liu 		/*
2242f7fec032SZheng Liu 		 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
2243f7fec032SZheng Liu 		 * We need to check next == EXT_MAX_BLOCKS because an extent
2244f7fec032SZheng Liu 		 * can carry both unwritten and delayed status: when a range
2245f7fec032SZheng Liu 		 * that was delayed allocated is later allocated by fallocate,
2246f7fec032SZheng Liu 		 * the status tree will track both states in a single extent.
2247f7fec032SZheng Liu 		 *
2248f7fec032SZheng Liu 		 * So we can return an unwritten and delayed extent whose
2249f7fec032SZheng Liu 		 * block is equal to 'next'.
2251f7fec032SZheng Liu 		 */
2252f7fec032SZheng Liu 		if (next == next_del && next == EXT_MAX_BLOCKS) {
225391dd8c11SLukas Czerner 			flags |= FIEMAP_EXTENT_LAST;
225491dd8c11SLukas Czerner 			if (unlikely(next_del != EXT_MAX_BLOCKS ||
225591dd8c11SLukas Czerner 				     next != EXT_MAX_BLOCKS)) {
225691dd8c11SLukas Czerner 				EXT4_ERROR_INODE(inode,
225791dd8c11SLukas Czerner 						 "next extent == %u, next "
225891dd8c11SLukas Czerner 						 "delalloc extent = %u",
225991dd8c11SLukas Czerner 						 next, next_del);
226091dd8c11SLukas Czerner 				err = -EIO;
226191dd8c11SLukas Czerner 				break;
226291dd8c11SLukas Czerner 			}
226391dd8c11SLukas Czerner 		}
226491dd8c11SLukas Czerner 
226591dd8c11SLukas Czerner 		if (exists) {
226691dd8c11SLukas Czerner 			err = fiemap_fill_next_extent(fieinfo,
226769eb33dcSZheng Liu 				(__u64)es.es_lblk << blksize_bits,
226869eb33dcSZheng Liu 				(__u64)es.es_pblk << blksize_bits,
226969eb33dcSZheng Liu 				(__u64)es.es_len << blksize_bits,
227091dd8c11SLukas Czerner 				flags);
22716873fa0dSEric Sandeen 			if (err < 0)
22726873fa0dSEric Sandeen 				break;
227391dd8c11SLukas Czerner 			if (err == 1) {
22746873fa0dSEric Sandeen 				err = 0;
22756873fa0dSEric Sandeen 				break;
22766873fa0dSEric Sandeen 			}
22776873fa0dSEric Sandeen 		}
22786873fa0dSEric Sandeen 
227969eb33dcSZheng Liu 		block = es.es_lblk + es.es_len;
22806873fa0dSEric Sandeen 	}
22816873fa0dSEric Sandeen 
22826873fa0dSEric Sandeen 	if (path) {
22836873fa0dSEric Sandeen 		ext4_ext_drop_refs(path);
22846873fa0dSEric Sandeen 		kfree(path);
22856873fa0dSEric Sandeen 	}
22866873fa0dSEric Sandeen 
22876873fa0dSEric Sandeen 	return err;
22886873fa0dSEric Sandeen }
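
/*
 * A minimal sketch of the unit conversion used when filling fiemap
 * records above (illustrative only; EXT4_EXT_EXAMPLES is a hypothetical
 * guard, not a real kernel config option): fiemap reports byte ranges,
 * so the logical block, physical block and length are each shifted left
 * by the filesystem block size bits before being handed to
 * fiemap_fill_next_extent().
 */
#ifdef EXT4_EXT_EXAMPLES
static void ext4_ext_example_fiemap_units(struct extent_status *es,
					  int blksize_bits)
{
	/* a 4k filesystem has blksize_bits == 12 */
	__u64 byte_off = (__u64)es->es_lblk << blksize_bits;
	__u64 byte_len = (__u64)es->es_len << blksize_bits;

	ext_debug("fiemap range: %llu + %llu bytes\n",
		  (unsigned long long)byte_off, (unsigned long long)byte_len);
}
#endif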
22896873fa0dSEric Sandeen 
2290a86c6181SAlex Tomas /*
2291d0d856e8SRandy Dunlap  * ext4_ext_put_gap_in_cache:
2292d0d856e8SRandy Dunlap  * calculate boundaries of the gap that the requested block fits into
2293a86c6181SAlex Tomas  * and cache this gap
2294a86c6181SAlex Tomas  */
229509b88252SAvantika Mathur static void
2296a86c6181SAlex Tomas ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2297725d26d3SAneesh Kumar K.V 				ext4_lblk_t block)
2298a86c6181SAlex Tomas {
2299a86c6181SAlex Tomas 	int depth = ext_depth(inode);
230027b1b228SAndi Shyti 	unsigned long len = 0;
230127b1b228SAndi Shyti 	ext4_lblk_t lblock = 0;
2302a86c6181SAlex Tomas 	struct ext4_extent *ex;
2303a86c6181SAlex Tomas 
2304a86c6181SAlex Tomas 	ex = path[depth].p_ext;
2305a86c6181SAlex Tomas 	if (ex == NULL) {
230669eb33dcSZheng Liu 		/*
230769eb33dcSZheng Liu 		 * there is no extent yet, so gap is [0;-] and we
230869eb33dcSZheng Liu 		 * don't cache it
230969eb33dcSZheng Liu 		 */
2310a86c6181SAlex Tomas 		ext_debug("cache gap(whole file):");
2311a86c6181SAlex Tomas 	} else if (block < le32_to_cpu(ex->ee_block)) {
2312a86c6181SAlex Tomas 		lblock = block;
2313a86c6181SAlex Tomas 		len = le32_to_cpu(ex->ee_block) - block;
2314bba90743SEric Sandeen 		ext_debug("cache gap(before): %u [%u:%u]",
2315bba90743SEric Sandeen 				block,
2316bba90743SEric Sandeen 				le32_to_cpu(ex->ee_block),
2317bba90743SEric Sandeen 				 ext4_ext_get_actual_len(ex));
2318d100eef2SZheng Liu 		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2319d100eef2SZheng Liu 			ext4_es_insert_extent(inode, lblock, len, ~0,
2320d100eef2SZheng Liu 					      EXTENT_STATUS_HOLE);
2321a86c6181SAlex Tomas 	} else if (block >= le32_to_cpu(ex->ee_block)
2322a2df2a63SAmit Arora 			+ ext4_ext_get_actual_len(ex)) {
2323725d26d3SAneesh Kumar K.V 		ext4_lblk_t next;
2324a86c6181SAlex Tomas 		lblock = le32_to_cpu(ex->ee_block)
2325a2df2a63SAmit Arora 			+ ext4_ext_get_actual_len(ex);
2326725d26d3SAneesh Kumar K.V 
2327725d26d3SAneesh Kumar K.V 		next = ext4_ext_next_allocated_block(path);
2328bba90743SEric Sandeen 		ext_debug("cache gap(after): [%u:%u] %u",
2329bba90743SEric Sandeen 				le32_to_cpu(ex->ee_block),
2330bba90743SEric Sandeen 				ext4_ext_get_actual_len(ex),
2331bba90743SEric Sandeen 				block);
2332725d26d3SAneesh Kumar K.V 		BUG_ON(next == lblock);
2333725d26d3SAneesh Kumar K.V 		len = next - lblock;
2334d100eef2SZheng Liu 		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2335d100eef2SZheng Liu 			ext4_es_insert_extent(inode, lblock, len, ~0,
2336d100eef2SZheng Liu 					      EXTENT_STATUS_HOLE);
2337a86c6181SAlex Tomas 	} else {
2338a86c6181SAlex Tomas 		BUG();
2339a86c6181SAlex Tomas 	}
2340a86c6181SAlex Tomas 
2341bba90743SEric Sandeen 	ext_debug(" -> %u:%lu\n", lblock, len);
2342a86c6181SAlex Tomas }
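
/*
 * Sketch of the "gap before the first extent" case handled above
 * (illustrative only; the EXT4_EXT_EXAMPLES guard and the block numbers
 * are assumptions): if the leaf's first extent starts at logical block
 * 100 and the lookup was for block 40, the hole [40, 100) is inserted
 * into the extent status tree so repeated lookups in that range can be
 * answered without walking the extent tree again.
 */
#ifdef EXT4_EXT_EXAMPLES
static void ext4_ext_example_cache_gap(struct inode *inode)
{
	ext4_lblk_t lblock = 40;	/* requested block */
	ext4_lblk_t len = 100 - 40;	/* gap runs up to the first extent */

	/* only cache the hole if no part of it is delayed allocated */
	if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
		ext4_es_insert_extent(inode, lblock, len, ~0,
				      EXTENT_STATUS_HOLE);
}
#endif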
2343a86c6181SAlex Tomas 
2344a86c6181SAlex Tomas /*
2345d0d856e8SRandy Dunlap  * ext4_ext_rm_idx:
2346d0d856e8SRandy Dunlap  * removes index from the index block.
2347a86c6181SAlex Tomas  */
23481d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2349c36575e6SForrest Liu 			struct ext4_ext_path *path, int depth)
2350a86c6181SAlex Tomas {
2351a86c6181SAlex Tomas 	int err;
2352f65e6fbaSAlex Tomas 	ext4_fsblk_t leaf;
2353a86c6181SAlex Tomas 
2354a86c6181SAlex Tomas 	/* free index block */
2355c36575e6SForrest Liu 	depth--;
2356c36575e6SForrest Liu 	path = path + depth;
2357bf89d16fSTheodore Ts'o 	leaf = ext4_idx_pblock(path->p_idx);
2358273df556SFrank Mayhar 	if (unlikely(path->p_hdr->eh_entries == 0)) {
2359273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2360273df556SFrank Mayhar 		return -EIO;
2361273df556SFrank Mayhar 	}
23627e028976SAvantika Mathur 	err = ext4_ext_get_access(handle, inode, path);
23637e028976SAvantika Mathur 	if (err)
2364a86c6181SAlex Tomas 		return err;
23650e1147b0SRobin Dong 
23660e1147b0SRobin Dong 	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
23670e1147b0SRobin Dong 		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
23680e1147b0SRobin Dong 		len *= sizeof(struct ext4_extent_idx);
23690e1147b0SRobin Dong 		memmove(path->p_idx, path->p_idx + 1, len);
23700e1147b0SRobin Dong 	}
23710e1147b0SRobin Dong 
2372e8546d06SMarcin Slusarz 	le16_add_cpu(&path->p_hdr->eh_entries, -1);
23737e028976SAvantika Mathur 	err = ext4_ext_dirty(handle, inode, path);
23747e028976SAvantika Mathur 	if (err)
2375a86c6181SAlex Tomas 		return err;
23762ae02107SMingming Cao 	ext_debug("index is empty, remove it, free block %llu\n", leaf);
2377d8990240SAditya Kali 	trace_ext4_ext_rm_idx(inode, leaf);
2378d8990240SAditya Kali 
23797dc57615SPeter Huewe 	ext4_free_blocks(handle, inode, NULL, leaf, 1,
2380e6362609STheodore Ts'o 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2381c36575e6SForrest Liu 
2382c36575e6SForrest Liu 	while (--depth >= 0) {
2383c36575e6SForrest Liu 		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2384c36575e6SForrest Liu 			break;
2385c36575e6SForrest Liu 		path--;
2386c36575e6SForrest Liu 		err = ext4_ext_get_access(handle, inode, path);
2387c36575e6SForrest Liu 		if (err)
2388c36575e6SForrest Liu 			break;
2389c36575e6SForrest Liu 		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2390c36575e6SForrest Liu 		err = ext4_ext_dirty(handle, inode, path);
2391c36575e6SForrest Liu 		if (err)
2392c36575e6SForrest Liu 			break;
2393c36575e6SForrest Liu 	}
2394a86c6181SAlex Tomas 	return err;
2395a86c6181SAlex Tomas }
2396a86c6181SAlex Tomas 
2397a86c6181SAlex Tomas /*
2398ee12b630SMingming Cao  * ext4_ext_calc_credits_for_single_extent:
2399ee12b630SMingming Cao  * This routine returns the maximum number of credits needed to insert
2400ee12b630SMingming Cao  * an extent into the extent tree.
2401ee12b630SMingming Cao  * When passing the actual path, the caller should calculate the credits
2402ee12b630SMingming Cao  * under i_data_sem.
2403a86c6181SAlex Tomas  */
2404525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2405a86c6181SAlex Tomas 						struct ext4_ext_path *path)
2406a86c6181SAlex Tomas {
2407a86c6181SAlex Tomas 	if (path) {
2408ee12b630SMingming Cao 		int depth = ext_depth(inode);
2409f3bd1f3fSMingming Cao 		int ret = 0;
2410ee12b630SMingming Cao 
2411a86c6181SAlex Tomas 		/* probably there is space in leaf? */
2412a86c6181SAlex Tomas 		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2413ee12b630SMingming Cao 				< le16_to_cpu(path[depth].p_hdr->eh_max)) {
2414ee12b630SMingming Cao 
2415ee12b630SMingming Cao 			/*
2416ee12b630SMingming Cao 			 *  There is some space in the leaf, so no
2417ee12b630SMingming Cao 			 *  need to account for the leaf block credit.
2418ee12b630SMingming Cao 			 *
2419ee12b630SMingming Cao 			 *  Bitmaps, block group descriptor blocks
2420df3ab170STao Ma 			 *  and other metadata blocks still need to be
2421ee12b630SMingming Cao 			 *  accounted for.
2422ee12b630SMingming Cao 			 */
2423525f4ed8SMingming Cao 			/* 1 bitmap, 1 block group descriptor */
2424ee12b630SMingming Cao 			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
24255887e98bSAneesh Kumar K.V 			return ret;
2426ee12b630SMingming Cao 		}
2427ee12b630SMingming Cao 	}
2428ee12b630SMingming Cao 
2429525f4ed8SMingming Cao 	return ext4_chunk_trans_blocks(inode, nrblocks);
2430a86c6181SAlex Tomas }
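
/*
 * Sketch of how a caller might size a transaction with the helper above
 * (illustrative only; EXT4_EXT_EXAMPLES is a hypothetical guard and
 * error handling is elided): without a path in hand the worst case is
 * assumed, so the estimate is safe to compute before taking i_data_sem.
 */
#ifdef EXT4_EXT_EXAMPLES
static handle_t *ext4_ext_example_start_handle(struct inode *inode,
					       int nrblocks)
{
	int credits = ext4_ext_calc_credits_for_single_extent(inode,
							      nrblocks, NULL);

	/* reserve enough journal credits for the extent insertion */
	return ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
}
#endif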
2431a86c6181SAlex Tomas 
2432a86c6181SAlex Tomas /*
2433fffb2739SJan Kara  * How many index/leaf blocks need to change/allocate to add @extents extents?
2434ee12b630SMingming Cao  *
2435fffb2739SJan Kara  * If we add a single extent, then in the worst case, one index/leaf
2436fffb2739SJan Kara  * block per tree level needs to be changed in case the tree splits.
2437ee12b630SMingming Cao  *
2438fffb2739SJan Kara  * If more extents are inserted, they could cause the whole tree to split
2439fffb2739SJan Kara  * more than once, but this is really rare.
2440a86c6181SAlex Tomas  */
2441fffb2739SJan Kara int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2442ee12b630SMingming Cao {
2443ee12b630SMingming Cao 	int index;
2444f19d5870STao Ma 	int depth;
2445f19d5870STao Ma 
2446f19d5870STao Ma 	/* If we are converting the inline data, only one is needed here. */
2447f19d5870STao Ma 	if (ext4_has_inline_data(inode))
2448f19d5870STao Ma 		return 1;
2449f19d5870STao Ma 
2450f19d5870STao Ma 	depth = ext_depth(inode);
2451a86c6181SAlex Tomas 
2452fffb2739SJan Kara 	if (extents <= 1)
2453ee12b630SMingming Cao 		index = depth * 2;
2454ee12b630SMingming Cao 	else
2455ee12b630SMingming Cao 		index = depth * 3;
2456a86c6181SAlex Tomas 
2457ee12b630SMingming Cao 	return index;
2458a86c6181SAlex Tomas }
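
/*
 * Worked example for the index credit estimate above (illustrative only,
 * behind the hypothetical EXT4_EXT_EXAMPLES guard; assumes a
 * non-inline-data inode whose tree has depth 2): a single extent can
 * dirty one index/leaf block per level on a split, giving 2 * 2 = 4,
 * while multiple extents can split the tree more than once, giving
 * 2 * 3 = 6.
 */
#ifdef EXT4_EXT_EXAMPLES
static void ext4_ext_example_index_credits(struct inode *inode)
{
	int one = ext4_ext_index_trans_blocks(inode, 1);	/* depth * 2 */
	int many = ext4_ext_index_trans_blocks(inode, 8);	/* depth * 3 */

	ext_debug("index credits: single=%d multiple=%d\n", one, many);
}
#endif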
2459a86c6181SAlex Tomas 
2460981250caSTheodore Ts'o static inline int get_default_free_blocks_flags(struct inode *inode)
2461981250caSTheodore Ts'o {
2462981250caSTheodore Ts'o 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2463981250caSTheodore Ts'o 		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2464981250caSTheodore Ts'o 	else if (ext4_should_journal_data(inode))
2465981250caSTheodore Ts'o 		return EXT4_FREE_BLOCKS_FORGET;
2466981250caSTheodore Ts'o 	return 0;
2467981250caSTheodore Ts'o }
2468981250caSTheodore Ts'o 
2469a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2470a86c6181SAlex Tomas 			      struct ext4_extent *ex,
2471d23142c6SLukas Czerner 			      long long *partial_cluster,
2472725d26d3SAneesh Kumar K.V 			      ext4_lblk_t from, ext4_lblk_t to)
2473a86c6181SAlex Tomas {
24740aa06000STheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2475a2df2a63SAmit Arora 	unsigned short ee_len =  ext4_ext_get_actual_len(ex);
24760aa06000STheodore Ts'o 	ext4_fsblk_t pblk;
2477981250caSTheodore Ts'o 	int flags = get_default_free_blocks_flags(inode);
247818888cf0SAndrey Sidorov 
24790aa06000STheodore Ts'o 	/*
24800aa06000STheodore Ts'o 	 * For bigalloc file systems, we never free a partial cluster
24810aa06000STheodore Ts'o 	 * at the beginning of the extent.  Instead, we make a note
24820aa06000STheodore Ts'o 	 * that we tried freeing the cluster, and check to see if we
24830aa06000STheodore Ts'o 	 * need to free it on a subsequent call to ext4_remove_blocks,
24840aa06000STheodore Ts'o 	 * or at the end of the ext4_truncate() operation.
24850aa06000STheodore Ts'o 	 */
24860aa06000STheodore Ts'o 	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
24870aa06000STheodore Ts'o 
2488d8990240SAditya Kali 	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
24890aa06000STheodore Ts'o 	/*
24900aa06000STheodore Ts'o 	 * If we have a partial cluster, and it's different from the
24910aa06000STheodore Ts'o 	 * cluster of the last block, we need to explicitly free the
24920aa06000STheodore Ts'o 	 * partial cluster here.
24930aa06000STheodore Ts'o 	 */
24940aa06000STheodore Ts'o 	pblk = ext4_ext_pblock(ex) + ee_len - 1;
2495d23142c6SLukas Czerner 	if ((*partial_cluster > 0) &&
2496d23142c6SLukas Czerner 	    (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
24970aa06000STheodore Ts'o 		ext4_free_blocks(handle, inode, NULL,
24980aa06000STheodore Ts'o 				 EXT4_C2B(sbi, *partial_cluster),
24990aa06000STheodore Ts'o 				 sbi->s_cluster_ratio, flags);
25000aa06000STheodore Ts'o 		*partial_cluster = 0;
25010aa06000STheodore Ts'o 	}
25020aa06000STheodore Ts'o 
2503a86c6181SAlex Tomas #ifdef EXTENTS_STATS
2504a86c6181SAlex Tomas 	{
2505a86c6181SAlex Tomas 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2506a86c6181SAlex Tomas 		spin_lock(&sbi->s_ext_stats_lock);
2507a86c6181SAlex Tomas 		sbi->s_ext_blocks += ee_len;
2508a86c6181SAlex Tomas 		sbi->s_ext_extents++;
2509a86c6181SAlex Tomas 		if (ee_len < sbi->s_ext_min)
2510a86c6181SAlex Tomas 			sbi->s_ext_min = ee_len;
2511a86c6181SAlex Tomas 		if (ee_len > sbi->s_ext_max)
2512a86c6181SAlex Tomas 			sbi->s_ext_max = ee_len;
2513a86c6181SAlex Tomas 		if (ext_depth(inode) > sbi->s_depth_max)
2514a86c6181SAlex Tomas 			sbi->s_depth_max = ext_depth(inode);
2515a86c6181SAlex Tomas 		spin_unlock(&sbi->s_ext_stats_lock);
2516a86c6181SAlex Tomas 	}
2517a86c6181SAlex Tomas #endif
2518a86c6181SAlex Tomas 	if (from >= le32_to_cpu(ex->ee_block)
2519a2df2a63SAmit Arora 	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2520a86c6181SAlex Tomas 		/* tail removal */
2521725d26d3SAneesh Kumar K.V 		ext4_lblk_t num;
2522d23142c6SLukas Czerner 		unsigned int unaligned;
2523725d26d3SAneesh Kumar K.V 
2524a2df2a63SAmit Arora 		num = le32_to_cpu(ex->ee_block) + ee_len - from;
25250aa06000STheodore Ts'o 		pblk = ext4_ext_pblock(ex) + ee_len - num;
2526d23142c6SLukas Czerner 		/*
2527d23142c6SLukas Czerner 		 * Usually we want to free the partial cluster at the end
2528d23142c6SLukas Czerner 		 * of the extent, except when the cluster is still used by
2529d23142c6SLukas Czerner 		 * another extent (partial_cluster is negative).
2530d23142c6SLukas Czerner 		 */
2531d23142c6SLukas Czerner 		if (*partial_cluster < 0 &&
2532d23142c6SLukas Czerner 		    -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
2533d23142c6SLukas Czerner 			flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2534d23142c6SLukas Czerner 
2535d23142c6SLukas Czerner 		ext_debug("free last %u blocks starting %llu partial %lld\n",
2536d23142c6SLukas Czerner 			  num, pblk, *partial_cluster);
25370aa06000STheodore Ts'o 		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
25380aa06000STheodore Ts'o 		/*
25390aa06000STheodore Ts'o 		 * If the block range to be freed didn't start at the
25400aa06000STheodore Ts'o 		 * beginning of a cluster, and we removed the entire
2541d23142c6SLukas Czerner 		 * extent and the cluster is not used by any other extent,
2542d23142c6SLukas Czerner 		 * save the partial cluster here, since we might need to
2543d23142c6SLukas Czerner 		 * delete if we determine that the truncate operation has
2544d23142c6SLukas Czerner 		 * removed all of the blocks in the cluster.
2545d23142c6SLukas Czerner 		 *
2546d23142c6SLukas Czerner 		 * On the other hand, if we did not manage to free the whole
2547d23142c6SLukas Czerner 		 * extent, we have to mark the cluster as used (store negative
2548d23142c6SLukas Czerner 		 * cluster number in partial_cluster).
25490aa06000STheodore Ts'o 		 */
2550f5a44db5STheodore Ts'o 		unaligned = EXT4_PBLK_COFF(sbi, pblk);
2551d23142c6SLukas Czerner 		if (unaligned && (ee_len == num) &&
2552d23142c6SLukas Czerner 		    (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
25530aa06000STheodore Ts'o 			*partial_cluster = EXT4_B2C(sbi, pblk);
2554d23142c6SLukas Czerner 		else if (unaligned)
2555d23142c6SLukas Czerner 			*partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
2556d23142c6SLukas Czerner 		else if (*partial_cluster > 0)
25570aa06000STheodore Ts'o 			*partial_cluster = 0;
255878fb9cdfSLukas Czerner 	} else
255978fb9cdfSLukas Czerner 		ext4_error(sbi->s_sb, "strange request: removal(2) "
2560725d26d3SAneesh Kumar K.V 			   "%u-%u from %u:%u\n",
2561a2df2a63SAmit Arora 			   from, to, le32_to_cpu(ex->ee_block), ee_len);
2562a86c6181SAlex Tomas 	return 0;
2563a86c6181SAlex Tomas }
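
/*
 * Sketch of the partial_cluster sign convention used above (illustrative
 * only; EXT4_EXT_EXAMPLES is a hypothetical guard and the block number
 * is an assumption): a positive value names a cluster that is a
 * candidate for freeing once we know no other extent maps into it; a
 * negative value marks the cluster as still in use so it is never freed.
 */
#ifdef EXT4_EXT_EXAMPLES
static void ext4_ext_example_partial_cluster(struct ext4_sb_info *sbi)
{
	ext4_fsblk_t pblk = 1022;	/* last freed block, mid-cluster */
	long long partial_cluster;

	/* whole extent freed but cluster tail unaligned: maybe free later */
	partial_cluster = EXT4_B2C(sbi, pblk);
	ext_debug("candidate cluster %lld\n", partial_cluster);

	/* cluster shared with a neighbouring extent: remember as in use */
	partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
	ext_debug("in-use cluster %lld\n", partial_cluster);
}
#endif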
2564a86c6181SAlex Tomas 
2565d583fb87SAllison Henderson 
2566d583fb87SAllison Henderson /*
2567d583fb87SAllison Henderson  * ext4_ext_rm_leaf() removes the extents associated with the
2568d583fb87SAllison Henderson  * blocks appearing between "start" and "end", and splits the extents
2569d583fb87SAllison Henderson  * if "start" and "end" appear in the same extent.
2570d583fb87SAllison Henderson  *
2571d583fb87SAllison Henderson  * @handle: The journal handle
2572d583fb87SAllison Henderson  * @inode:  The file's inode
2573d583fb87SAllison Henderson  * @path:   The path to the leaf
2574d23142c6SLukas Czerner  * @partial_cluster: The cluster which we'll have to free if all extents
2575d23142c6SLukas Czerner  *                   have been released from it. It becomes negative when
2576d23142c6SLukas Czerner  *                   the cluster is still used.
2577d583fb87SAllison Henderson  * @start:  The first block to remove
2578d583fb87SAllison Henderson  * @end:   The last block to remove
2579d583fb87SAllison Henderson  */
2580a86c6181SAlex Tomas static int
2581a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2582d23142c6SLukas Czerner 		 struct ext4_ext_path *path,
2583d23142c6SLukas Czerner 		 long long *partial_cluster,
25840aa06000STheodore Ts'o 		 ext4_lblk_t start, ext4_lblk_t end)
2585a86c6181SAlex Tomas {
25860aa06000STheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2587a86c6181SAlex Tomas 	int err = 0, correct_index = 0;
2588a86c6181SAlex Tomas 	int depth = ext_depth(inode), credits;
2589a86c6181SAlex Tomas 	struct ext4_extent_header *eh;
2590750c9c47SDmitry Monakhov 	ext4_lblk_t a, b;
2591725d26d3SAneesh Kumar K.V 	unsigned num;
2592725d26d3SAneesh Kumar K.V 	ext4_lblk_t ex_ee_block;
2593a86c6181SAlex Tomas 	unsigned short ex_ee_len;
2594556615dcSLukas Czerner 	unsigned unwritten = 0;
2595a86c6181SAlex Tomas 	struct ext4_extent *ex;
2596d23142c6SLukas Czerner 	ext4_fsblk_t pblk;
2597a86c6181SAlex Tomas 
2598c29c0ae7SAlex Tomas 	/* the header must be checked already in ext4_ext_remove_space() */
25995f95d21fSLukas Czerner 	ext_debug("truncate since %u in leaf to %u\n", start, end);
2600a86c6181SAlex Tomas 	if (!path[depth].p_hdr)
2601a86c6181SAlex Tomas 		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2602a86c6181SAlex Tomas 	eh = path[depth].p_hdr;
2603273df556SFrank Mayhar 	if (unlikely(path[depth].p_hdr == NULL)) {
2604273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2605273df556SFrank Mayhar 		return -EIO;
2606273df556SFrank Mayhar 	}
2607a86c6181SAlex Tomas 	/* find where to start removing */
26086ae06ff5SAshish Sangwan 	ex = path[depth].p_ext;
26096ae06ff5SAshish Sangwan 	if (!ex)
2610a86c6181SAlex Tomas 		ex = EXT_LAST_EXTENT(eh);
2611a86c6181SAlex Tomas 
2612a86c6181SAlex Tomas 	ex_ee_block = le32_to_cpu(ex->ee_block);
2613a2df2a63SAmit Arora 	ex_ee_len = ext4_ext_get_actual_len(ex);
2614a86c6181SAlex Tomas 
2615c0634493SEric Whitney 	/*
2616c0634493SEric Whitney 	 * If we're starting with an extent other than the last one in the
2617c0634493SEric Whitney 	 * node, we need to see if it shares a cluster with the extent to
2618c0634493SEric Whitney 	 * the right (towards the end of the file). If its leftmost cluster
2619c0634493SEric Whitney 	 * is this extent's rightmost cluster and it is not cluster aligned,
2620c0634493SEric Whitney 	 * we'll mark it as a partial that is not to be deallocated.
2621c0634493SEric Whitney 	 */
2622c0634493SEric Whitney 
2623c0634493SEric Whitney 	if (ex != EXT_LAST_EXTENT(eh)) {
2624c0634493SEric Whitney 		ext4_fsblk_t current_pblk, right_pblk;
2625c0634493SEric Whitney 		long long current_cluster, right_cluster;
2626c0634493SEric Whitney 
2627c0634493SEric Whitney 		current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2628c0634493SEric Whitney 		current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
2629c0634493SEric Whitney 		right_pblk = ext4_ext_pblock(ex + 1);
2630c0634493SEric Whitney 		right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
2631c0634493SEric Whitney 		if (current_cluster == right_cluster &&
2632c0634493SEric Whitney 			EXT4_PBLK_COFF(sbi, right_pblk))
2633c0634493SEric Whitney 			*partial_cluster = -right_cluster;
2634c0634493SEric Whitney 	}
2635c0634493SEric Whitney 
2636d8990240SAditya Kali 	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2637d8990240SAditya Kali 
2638a86c6181SAlex Tomas 	while (ex >= EXT_FIRST_EXTENT(eh) &&
2639a86c6181SAlex Tomas 			ex_ee_block + ex_ee_len > start) {
2640a41f2071SAneesh Kumar K.V 
2641556615dcSLukas Czerner 		if (ext4_ext_is_unwritten(ex))
2642556615dcSLukas Czerner 			unwritten = 1;
2643a41f2071SAneesh Kumar K.V 		else
2644556615dcSLukas Czerner 			unwritten = 0;
2645a41f2071SAneesh Kumar K.V 
2646553f9008SMingming 		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2647556615dcSLukas Czerner 			  unwritten, ex_ee_len);
2648a86c6181SAlex Tomas 		path[depth].p_ext = ex;
2649a86c6181SAlex Tomas 
2650a86c6181SAlex Tomas 		a = ex_ee_block > start ? ex_ee_block : start;
2651d583fb87SAllison Henderson 		b = ex_ee_block+ex_ee_len - 1 < end ?
2652d583fb87SAllison Henderson 			ex_ee_block+ex_ee_len - 1 : end;
2653a86c6181SAlex Tomas 
2654a86c6181SAlex Tomas 		ext_debug("  border %u:%u\n", a, b);
2655a86c6181SAlex Tomas 
2656d583fb87SAllison Henderson 		/* If this extent is beyond the end of the hole, skip it */
26575f95d21fSLukas Czerner 		if (end < ex_ee_block) {
2658d23142c6SLukas Czerner 			/*
2659d23142c6SLukas Czerner 			 * We're going to skip this extent and move to another,
2660d23142c6SLukas Czerner 			 * so if this extent is not cluster aligned we have
2661d23142c6SLukas Czerner 			 * to mark the current cluster as used to avoid
2662d23142c6SLukas Czerner 			 * accidentally freeing it later on
2663d23142c6SLukas Czerner 			 */
2664d23142c6SLukas Czerner 			pblk = ext4_ext_pblock(ex);
2665f5a44db5STheodore Ts'o 			if (EXT4_PBLK_COFF(sbi, pblk))
2666d23142c6SLukas Czerner 				*partial_cluster =
2667d23142c6SLukas Czerner 					-((long long)EXT4_B2C(sbi, pblk));
2668d583fb87SAllison Henderson 			ex--;
2669d583fb87SAllison Henderson 			ex_ee_block = le32_to_cpu(ex->ee_block);
2670d583fb87SAllison Henderson 			ex_ee_len = ext4_ext_get_actual_len(ex);
2671d583fb87SAllison Henderson 			continue;
2672750c9c47SDmitry Monakhov 		} else if (b != ex_ee_block + ex_ee_len - 1) {
2673dc1841d6SLukas Czerner 			EXT4_ERROR_INODE(inode,
2674dc1841d6SLukas Czerner 					 "can not handle truncate %u:%u "
2675dc1841d6SLukas Czerner 					 "on extent %u:%u",
2676dc1841d6SLukas Czerner 					 start, end, ex_ee_block,
2677dc1841d6SLukas Czerner 					 ex_ee_block + ex_ee_len - 1);
2678d583fb87SAllison Henderson 			err = -EIO;
2679d583fb87SAllison Henderson 			goto out;
2680a86c6181SAlex Tomas 		} else if (a != ex_ee_block) {
2681a86c6181SAlex Tomas 			/* remove tail of the extent */
2682750c9c47SDmitry Monakhov 			num = a - ex_ee_block;
2683a86c6181SAlex Tomas 		} else {
2684a86c6181SAlex Tomas 			/* remove whole extent: excellent! */
2685a86c6181SAlex Tomas 			num = 0;
2686d583fb87SAllison Henderson 		}
268734071da7STheodore Ts'o 		/*
268834071da7STheodore Ts'o 		 * 3 for leaf, sb, and inode plus 2 (bmap and group
268934071da7STheodore Ts'o 		 * descriptor) for each block group; assume two block
269034071da7STheodore Ts'o 		 * groups plus ex_ee_len/blocks_per_block_group for
269134071da7STheodore Ts'o 		 * the worst case
269234071da7STheodore Ts'o 		 */
269334071da7STheodore Ts'o 		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2694a86c6181SAlex Tomas 		if (ex == EXT_FIRST_EXTENT(eh)) {
2695a86c6181SAlex Tomas 			correct_index = 1;
2696a86c6181SAlex Tomas 			credits += (ext_depth(inode)) + 1;
2697a86c6181SAlex Tomas 		}
26985aca07ebSDmitry Monakhov 		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2699a86c6181SAlex Tomas 
2700487caeefSJan Kara 		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
27019102e4faSShen Feng 		if (err)
2702a86c6181SAlex Tomas 			goto out;
2703a86c6181SAlex Tomas 
2704a86c6181SAlex Tomas 		err = ext4_ext_get_access(handle, inode, path + depth);
2705a86c6181SAlex Tomas 		if (err)
2706a86c6181SAlex Tomas 			goto out;
2707a86c6181SAlex Tomas 
27080aa06000STheodore Ts'o 		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
27090aa06000STheodore Ts'o 					 a, b);
2710a86c6181SAlex Tomas 		if (err)
2711a86c6181SAlex Tomas 			goto out;
2712a86c6181SAlex Tomas 
2713750c9c47SDmitry Monakhov 		if (num == 0)
2714d0d856e8SRandy Dunlap 			/* this extent is removed; mark slot entirely unused */
2715f65e6fbaSAlex Tomas 			ext4_ext_store_pblock(ex, 0);
2716a86c6181SAlex Tomas 
2717a86c6181SAlex Tomas 		ex->ee_len = cpu_to_le16(num);
2718749269faSAmit Arora 		/*
2719556615dcSLukas Czerner 		 * Do not mark unwritten if all the blocks in the
2720749269faSAmit Arora 		 * extent have been removed.
2721749269faSAmit Arora 		 */
2722556615dcSLukas Czerner 		if (unwritten && num)
2723556615dcSLukas Czerner 			ext4_ext_mark_unwritten(ex);
2724d583fb87SAllison Henderson 		/*
2725d583fb87SAllison Henderson 		 * If the extent was completely released,
2726d583fb87SAllison Henderson 		 * we need to remove it from the leaf
2727d583fb87SAllison Henderson 		 */
2728d583fb87SAllison Henderson 		if (num == 0) {
2729f17722f9SLukas Czerner 			if (end != EXT_MAX_BLOCKS - 1) {
2730d583fb87SAllison Henderson 				/*
2731d583fb87SAllison Henderson 				 * For hole punching, we need to scoot all the
2732d583fb87SAllison Henderson 				 * extents up when an extent is removed so that
2733d583fb87SAllison Henderson 				 * we don't have blank extents in the middle
2734d583fb87SAllison Henderson 				 */
2735d583fb87SAllison Henderson 				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2736d583fb87SAllison Henderson 					sizeof(struct ext4_extent));
2737d583fb87SAllison Henderson 
2738d583fb87SAllison Henderson 				/* Now get rid of the one at the end */
2739d583fb87SAllison Henderson 				memset(EXT_LAST_EXTENT(eh), 0,
2740d583fb87SAllison Henderson 					sizeof(struct ext4_extent));
2741d583fb87SAllison Henderson 			}
2742d583fb87SAllison Henderson 			le16_add_cpu(&eh->eh_entries, -1);
2743d23142c6SLukas Czerner 		} else if (*partial_cluster > 0)
27440aa06000STheodore Ts'o 			*partial_cluster = 0;
2745d583fb87SAllison Henderson 
2746750c9c47SDmitry Monakhov 		err = ext4_ext_dirty(handle, inode, path + depth);
2747750c9c47SDmitry Monakhov 		if (err)
2748750c9c47SDmitry Monakhov 			goto out;
2749750c9c47SDmitry Monakhov 
2750bf52c6f7SYongqiang Yang 		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2751bf89d16fSTheodore Ts'o 				ext4_ext_pblock(ex));
2752a86c6181SAlex Tomas 		ex--;
2753a86c6181SAlex Tomas 		ex_ee_block = le32_to_cpu(ex->ee_block);
2754a2df2a63SAmit Arora 		ex_ee_len = ext4_ext_get_actual_len(ex);
2755a86c6181SAlex Tomas 	}
2756a86c6181SAlex Tomas 
2757a86c6181SAlex Tomas 	if (correct_index && eh->eh_entries)
2758a86c6181SAlex Tomas 		err = ext4_ext_correct_indexes(handle, inode, path);
2759a86c6181SAlex Tomas 
27600aa06000STheodore Ts'o 	/*
2761ad6599abSEric Whitney 	 * If there's a partial cluster and at least one extent remains in
2762ad6599abSEric Whitney 	 * the leaf, free the partial cluster if it isn't shared with the
2763ad6599abSEric Whitney 	 * current extent.  If there's a partial cluster and no extents
2764ad6599abSEric Whitney 	 * remain in the leaf, it can't be freed here.  It can only be
2765ad6599abSEric Whitney 	 * freed when it's possible to determine if it's not shared with
2766ad6599abSEric Whitney 	 * any other extent - when the next leaf is processed or when space
2767ad6599abSEric Whitney 	 * removal is complete.
27680aa06000STheodore Ts'o 	 */
2769ad6599abSEric Whitney 	if (*partial_cluster > 0 && eh->eh_entries &&
27700aa06000STheodore Ts'o 	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
27710aa06000STheodore Ts'o 	     *partial_cluster)) {
2772981250caSTheodore Ts'o 		int flags = get_default_free_blocks_flags(inode);
27730aa06000STheodore Ts'o 
27740aa06000STheodore Ts'o 		ext4_free_blocks(handle, inode, NULL,
27750aa06000STheodore Ts'o 				 EXT4_C2B(sbi, *partial_cluster),
27760aa06000STheodore Ts'o 				 sbi->s_cluster_ratio, flags);
27770aa06000STheodore Ts'o 		*partial_cluster = 0;
27780aa06000STheodore Ts'o 	}
27790aa06000STheodore Ts'o 
2780a86c6181SAlex Tomas 	/* if this leaf is free, then we should
2781a86c6181SAlex Tomas 	 * remove it from the index block above */
2782a86c6181SAlex Tomas 	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2783c36575e6SForrest Liu 		err = ext4_ext_rm_idx(handle, inode, path, depth);
2784a86c6181SAlex Tomas 
2785a86c6181SAlex Tomas out:
2786a86c6181SAlex Tomas 	return err;
2787a86c6181SAlex Tomas }
2788a86c6181SAlex Tomas 
2789a86c6181SAlex Tomas /*
2790d0d856e8SRandy Dunlap  * ext4_ext_more_to_rm:
2791d0d856e8SRandy Dunlap  * returns 1 if the current index has to be freed (even partially)
2792a86c6181SAlex Tomas  */
279309b88252SAvantika Mathur static int
2794a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path)
2795a86c6181SAlex Tomas {
2796a86c6181SAlex Tomas 	BUG_ON(path->p_idx == NULL);
2797a86c6181SAlex Tomas 
2798a86c6181SAlex Tomas 	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2799a86c6181SAlex Tomas 		return 0;
2800a86c6181SAlex Tomas 
2801a86c6181SAlex Tomas 	/*
2802d0d856e8SRandy Dunlap 	 * if truncation at a deeper level happened, it wasn't partial,
2803a86c6181SAlex Tomas 	 * so we have to consider the current index for truncation
2804a86c6181SAlex Tomas 	 */
2805a86c6181SAlex Tomas 	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2806a86c6181SAlex Tomas 		return 0;
2807a86c6181SAlex Tomas 	return 1;
2808a86c6181SAlex Tomas }
2809a86c6181SAlex Tomas 
281026a4c0c6STheodore Ts'o int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
28115f95d21fSLukas Czerner 			  ext4_lblk_t end)
2812a86c6181SAlex Tomas {
2813a86c6181SAlex Tomas 	struct super_block *sb = inode->i_sb;
2814a86c6181SAlex Tomas 	int depth = ext_depth(inode);
2815968dee77SAshish Sangwan 	struct ext4_ext_path *path = NULL;
2816d23142c6SLukas Czerner 	long long partial_cluster = 0;
2817a86c6181SAlex Tomas 	handle_t *handle;
28186f2080e6SDmitry Monakhov 	int i = 0, err = 0;
2819a86c6181SAlex Tomas 
28205f95d21fSLukas Czerner 	ext_debug("truncate since %u to %u\n", start, end);
2821a86c6181SAlex Tomas 
2822a86c6181SAlex Tomas 	/* probably the first extent we free will be the last in the block */
28239924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2824a86c6181SAlex Tomas 	if (IS_ERR(handle))
2825a86c6181SAlex Tomas 		return PTR_ERR(handle);
2826a86c6181SAlex Tomas 
28270617b83fSDmitry Monakhov again:
282861801325SLukas Czerner 	trace_ext4_ext_remove_space(inode, start, end, depth);
2829d8990240SAditya Kali 
2830a86c6181SAlex Tomas 	/*
28315f95d21fSLukas Czerner 	 * Check if we are removing extents inside the extent tree. If that
28325f95d21fSLukas Czerner 	 * is the case, we are going to punch a hole inside the extent tree
28335f95d21fSLukas Czerner 	 * so we have to check whether we need to split the extent covering
28345f95d21fSLukas Czerner 	 * the last block to remove so we can easily remove the part of it
28355f95d21fSLukas Czerner 	 * in ext4_ext_rm_leaf().
28365f95d21fSLukas Czerner 	 */
28375f95d21fSLukas Czerner 	if (end < EXT_MAX_BLOCKS - 1) {
28385f95d21fSLukas Czerner 		struct ext4_extent *ex;
28395f95d21fSLukas Czerner 		ext4_lblk_t ee_block;
28405f95d21fSLukas Czerner 
28415f95d21fSLukas Czerner 		/* find extent for this block */
2842107a7bd3STheodore Ts'o 		path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
28435f95d21fSLukas Czerner 		if (IS_ERR(path)) {
28445f95d21fSLukas Czerner 			ext4_journal_stop(handle);
28455f95d21fSLukas Czerner 			return PTR_ERR(path);
28465f95d21fSLukas Czerner 		}
28475f95d21fSLukas Czerner 		depth = ext_depth(inode);
28486f2080e6SDmitry Monakhov 		/* A leaf can be missing only if the inode has no blocks at all */
28495f95d21fSLukas Czerner 		ex = path[depth].p_ext;
2850968dee77SAshish Sangwan 		if (!ex) {
28516f2080e6SDmitry Monakhov 			if (depth) {
28526f2080e6SDmitry Monakhov 				EXT4_ERROR_INODE(inode,
28536f2080e6SDmitry Monakhov 						 "path[%d].p_hdr == NULL",
28546f2080e6SDmitry Monakhov 						 depth);
28556f2080e6SDmitry Monakhov 				err = -EIO;
28566f2080e6SDmitry Monakhov 			}
28576f2080e6SDmitry Monakhov 			goto out;
2858968dee77SAshish Sangwan 		}
28595f95d21fSLukas Czerner 
28605f95d21fSLukas Czerner 		ee_block = le32_to_cpu(ex->ee_block);
28615f95d21fSLukas Czerner 
28625f95d21fSLukas Czerner 		/*
28635f95d21fSLukas Czerner 		 * See if the last block is inside the extent; if so, split
28645f95d21fSLukas Czerner 		 * the extent at 'end' block so we can easily remove the
28655f95d21fSLukas Czerner 		 * tail of the first part of the split extent in
28665f95d21fSLukas Czerner 		 * ext4_ext_rm_leaf().
28675f95d21fSLukas Czerner 		 */
28685f95d21fSLukas Czerner 		if (end >= ee_block &&
28695f95d21fSLukas Czerner 		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
28705f95d21fSLukas Czerner 			/*
28715f95d21fSLukas Czerner 			 * Split the extent in two so that 'end' is the last
287227dd4385SLukas Czerner 			 * block in the first new extent. Also, we should not
287327dd4385SLukas Czerner 			 * fail to remove space due to ENOSPC, so try to use a
287427dd4385SLukas Czerner 			 * reserved block if that happens.
28755f95d21fSLukas Czerner 			 */
2876fcf6b1b7SDmitry Monakhov 			err = ext4_force_split_extent_at(handle, inode, path,
2877fcf6b1b7SDmitry Monakhov 							 end + 1, 1);
28785f95d21fSLukas Czerner 			if (err < 0)
28795f95d21fSLukas Czerner 				goto out;
28805f95d21fSLukas Czerner 		}
28815f95d21fSLukas Czerner 	}
28825f95d21fSLukas Czerner 	/*
2883d0d856e8SRandy Dunlap 	 * We start scanning from the right side, freeing all the blocks
2884d0d856e8SRandy Dunlap 	 * after i_size and walking into the tree depth-wise.
2885a86c6181SAlex Tomas 	 */
28860617b83fSDmitry Monakhov 	depth = ext_depth(inode);
2887968dee77SAshish Sangwan 	if (path) {
2888968dee77SAshish Sangwan 		int k = i = depth;
2889968dee77SAshish Sangwan 		while (--k > 0)
2890968dee77SAshish Sangwan 			path[k].p_block =
2891968dee77SAshish Sangwan 				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2892968dee77SAshish Sangwan 	} else {
2893968dee77SAshish Sangwan 		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2894968dee77SAshish Sangwan 			       GFP_NOFS);
2895a86c6181SAlex Tomas 		if (path == NULL) {
2896a86c6181SAlex Tomas 			ext4_journal_stop(handle);
2897a86c6181SAlex Tomas 			return -ENOMEM;
2898a86c6181SAlex Tomas 		}
28990617b83fSDmitry Monakhov 		path[0].p_depth = depth;
2900a86c6181SAlex Tomas 		path[0].p_hdr = ext_inode_hdr(inode);
290189a4e48fSTheodore Ts'o 		i = 0;
29025f95d21fSLukas Czerner 
2903c349179bSTheodore Ts'o 		if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2904a86c6181SAlex Tomas 			err = -EIO;
2905a86c6181SAlex Tomas 			goto out;
2906a86c6181SAlex Tomas 		}
2907968dee77SAshish Sangwan 	}
2908968dee77SAshish Sangwan 	err = 0;
2909a86c6181SAlex Tomas 
2910a86c6181SAlex Tomas 	while (i >= 0 && err == 0) {
2911a86c6181SAlex Tomas 		if (i == depth) {
2912a86c6181SAlex Tomas 			/* this is leaf block */
2913d583fb87SAllison Henderson 			err = ext4_ext_rm_leaf(handle, inode, path,
29140aa06000STheodore Ts'o 					       &partial_cluster, start,
29155f95d21fSLukas Czerner 					       end);
2916d0d856e8SRandy Dunlap 			/* root level has p_bh == NULL, brelse() eats this */
2917a86c6181SAlex Tomas 			brelse(path[i].p_bh);
2918a86c6181SAlex Tomas 			path[i].p_bh = NULL;
2919a86c6181SAlex Tomas 			i--;
2920a86c6181SAlex Tomas 			continue;
2921a86c6181SAlex Tomas 		}
2922a86c6181SAlex Tomas 
2923a86c6181SAlex Tomas 		/* this is index block */
2924a86c6181SAlex Tomas 		if (!path[i].p_hdr) {
2925a86c6181SAlex Tomas 			ext_debug("initialize header\n");
2926a86c6181SAlex Tomas 			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2927a86c6181SAlex Tomas 		}
2928a86c6181SAlex Tomas 
2929a86c6181SAlex Tomas 		if (!path[i].p_idx) {
2930d0d856e8SRandy Dunlap 			/* this level hasn't been touched yet */
2931a86c6181SAlex Tomas 			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2932a86c6181SAlex Tomas 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2933a86c6181SAlex Tomas 			ext_debug("init index ptr: hdr 0x%p, num %d\n",
2934a86c6181SAlex Tomas 				  path[i].p_hdr,
2935a86c6181SAlex Tomas 				  le16_to_cpu(path[i].p_hdr->eh_entries));
2936a86c6181SAlex Tomas 		} else {
2937d0d856e8SRandy Dunlap 			/* we were already here, see at next index */
2938a86c6181SAlex Tomas 			path[i].p_idx--;
2939a86c6181SAlex Tomas 		}
2940a86c6181SAlex Tomas 
2941a86c6181SAlex Tomas 		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2942a86c6181SAlex Tomas 				i, EXT_FIRST_INDEX(path[i].p_hdr),
2943a86c6181SAlex Tomas 				path[i].p_idx);
2944a86c6181SAlex Tomas 		if (ext4_ext_more_to_rm(path + i)) {
2945c29c0ae7SAlex Tomas 			struct buffer_head *bh;
2946a86c6181SAlex Tomas 			/* go to the next level */
29472ae02107SMingming Cao 			ext_debug("move to level %d (block %llu)\n",
2948bf89d16fSTheodore Ts'o 				  i + 1, ext4_idx_pblock(path[i].p_idx));
2949a86c6181SAlex Tomas 			memset(path + i + 1, 0, sizeof(*path));
29507d7ea89eSTheodore Ts'o 			bh = read_extent_tree_block(inode,
2951107a7bd3STheodore Ts'o 				ext4_idx_pblock(path[i].p_idx), depth - i - 1,
2952107a7bd3STheodore Ts'o 				EXT4_EX_NOCACHE);
29537d7ea89eSTheodore Ts'o 			if (IS_ERR(bh)) {
2954a86c6181SAlex Tomas 				/* should we reset i_size? */
29557d7ea89eSTheodore Ts'o 				err = PTR_ERR(bh);
2956a86c6181SAlex Tomas 				break;
2957a86c6181SAlex Tomas 			}
295876828c88STheodore Ts'o 			/* Yield here to deal with large extent trees.
295976828c88STheodore Ts'o 			 * Should be a no-op if we did IO above. */
296076828c88STheodore Ts'o 			cond_resched();
2961c29c0ae7SAlex Tomas 			if (WARN_ON(i + 1 > depth)) {
2962c29c0ae7SAlex Tomas 				err = -EIO;
2963c29c0ae7SAlex Tomas 				break;
2964c29c0ae7SAlex Tomas 			}
2965c29c0ae7SAlex Tomas 			path[i + 1].p_bh = bh;
2966a86c6181SAlex Tomas 
2967d0d856e8SRandy Dunlap 			/* save actual number of indexes since this
2968d0d856e8SRandy Dunlap 			 * number is changed at the next iteration */
2969a86c6181SAlex Tomas 			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2970a86c6181SAlex Tomas 			i++;
2971a86c6181SAlex Tomas 		} else {
2972d0d856e8SRandy Dunlap 			/* we finished processing this index, go up */
2973a86c6181SAlex Tomas 			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2974d0d856e8SRandy Dunlap 				/* index is empty, remove it;
2975a86c6181SAlex Tomas 				 * handle must already be prepared by the
2976a86c6181SAlex Tomas 				 * leaf truncation above */
2977c36575e6SForrest Liu 				err = ext4_ext_rm_idx(handle, inode, path, i);
2978a86c6181SAlex Tomas 			}
2979d0d856e8SRandy Dunlap 			/* root level has p_bh == NULL, brelse() eats this */
2980a86c6181SAlex Tomas 			brelse(path[i].p_bh);
2981a86c6181SAlex Tomas 			path[i].p_bh = NULL;
2982a86c6181SAlex Tomas 			i--;
2983a86c6181SAlex Tomas 			ext_debug("return to level %d\n", i);
2984a86c6181SAlex Tomas 		}
2985a86c6181SAlex Tomas 	}
2986a86c6181SAlex Tomas 
298761801325SLukas Czerner 	trace_ext4_ext_remove_space_done(inode, start, end, depth,
298861801325SLukas Czerner 			partial_cluster, path->p_hdr->eh_entries);
2989d8990240SAditya Kali 
29907b415bf6SAditya Kali 	/* If we still have something in the partial cluster and we have removed
29917b415bf6SAditya Kali 	 * even the first extent, then we should free the blocks in the partial
29927b415bf6SAditya Kali 	 * cluster as well. */
2993d23142c6SLukas Czerner 	if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
2994981250caSTheodore Ts'o 		int flags = get_default_free_blocks_flags(inode);
29957b415bf6SAditya Kali 
29967b415bf6SAditya Kali 		ext4_free_blocks(handle, inode, NULL,
29977b415bf6SAditya Kali 				 EXT4_C2B(EXT4_SB(sb), partial_cluster),
29987b415bf6SAditya Kali 				 EXT4_SB(sb)->s_cluster_ratio, flags);
29997b415bf6SAditya Kali 		partial_cluster = 0;
30007b415bf6SAditya Kali 	}
30017b415bf6SAditya Kali 
3002a86c6181SAlex Tomas 	/* TODO: flexible tree reduction should be here */
3003a86c6181SAlex Tomas 	if (path->p_hdr->eh_entries == 0) {
3004a86c6181SAlex Tomas 		/*
3005d0d856e8SRandy Dunlap 		 * truncating to zero freed the whole tree,
3006d0d856e8SRandy Dunlap 		 * so we need to correct eh_depth
3007a86c6181SAlex Tomas 		 */
3008a86c6181SAlex Tomas 		err = ext4_ext_get_access(handle, inode, path);
3009a86c6181SAlex Tomas 		if (err == 0) {
3010a86c6181SAlex Tomas 			ext_inode_hdr(inode)->eh_depth = 0;
3011a86c6181SAlex Tomas 			ext_inode_hdr(inode)->eh_max =
301255ad63bfSTheodore Ts'o 				cpu_to_le16(ext4_ext_space_root(inode, 0));
3013a86c6181SAlex Tomas 			err = ext4_ext_dirty(handle, inode, path);
3014a86c6181SAlex Tomas 		}
3015a86c6181SAlex Tomas 	}
3016a86c6181SAlex Tomas out:
3017a86c6181SAlex Tomas 	ext4_ext_drop_refs(path);
3018a86c6181SAlex Tomas 	kfree(path);
3019968dee77SAshish Sangwan 	if (err == -EAGAIN) {
3020968dee77SAshish Sangwan 		path = NULL;
30210617b83fSDmitry Monakhov 		goto again;
3022968dee77SAshish Sangwan 	}
3023a86c6181SAlex Tomas 	ext4_journal_stop(handle);
3024a86c6181SAlex Tomas 
3025a86c6181SAlex Tomas 	return err;
3026a86c6181SAlex Tomas }
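
/*
 * Typical call pattern for ext4_ext_remove_space() (a sketch behind the
 * hypothetical EXT4_EXT_EXAMPLES guard; error handling is elided):
 * truncate removes everything from the first block past the new EOF to
 * the end of the logical address space, while punch hole passes a
 * bounded [start, end] range instead.
 */
#ifdef EXT4_EXT_EXAMPLES
static int ext4_ext_example_truncate_tail(struct inode *inode,
					  ext4_lblk_t new_eof_block)
{
	/* free every extent at or beyond the new end of file */
	return ext4_ext_remove_space(inode, new_eof_block,
				     EXT_MAX_BLOCKS - 1);
}
#endif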
3027a86c6181SAlex Tomas 
3028a86c6181SAlex Tomas /*
3029a86c6181SAlex Tomas  * called at mount time
3030a86c6181SAlex Tomas  */
3031a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb)
3032a86c6181SAlex Tomas {
3033a86c6181SAlex Tomas 	/*
3034a86c6181SAlex Tomas 	 * possible initialization would be here
3035a86c6181SAlex Tomas 	 */
3036a86c6181SAlex Tomas 
303783982b6fSTheodore Ts'o 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
303890576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
303992b97816STheodore Ts'o 		printk(KERN_INFO "EXT4-fs: file extents enabled"
3040bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
304192b97816STheodore Ts'o 		       ", aggressive tests"
3042a86c6181SAlex Tomas #endif
3043a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH
304492b97816STheodore Ts'o 		       ", check binsearch"
3045a86c6181SAlex Tomas #endif
3046a86c6181SAlex Tomas #ifdef EXTENTS_STATS
304792b97816STheodore Ts'o 		       ", stats"
3048a86c6181SAlex Tomas #endif
304992b97816STheodore Ts'o 		       "\n");
305090576c0bSTheodore Ts'o #endif
3051a86c6181SAlex Tomas #ifdef EXTENTS_STATS
3052a86c6181SAlex Tomas 		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3053a86c6181SAlex Tomas 		EXT4_SB(sb)->s_ext_min = 1 << 30;
3054a86c6181SAlex Tomas 		EXT4_SB(sb)->s_ext_max = 0;
3055a86c6181SAlex Tomas #endif
3056a86c6181SAlex Tomas 	}
3057a86c6181SAlex Tomas }
3058a86c6181SAlex Tomas 
3059a86c6181SAlex Tomas /*
3060a86c6181SAlex Tomas  * called at umount time
3061a86c6181SAlex Tomas  */
3062a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb)
3063a86c6181SAlex Tomas {
306483982b6fSTheodore Ts'o 	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
3065a86c6181SAlex Tomas 		return;
3066a86c6181SAlex Tomas 
3067a86c6181SAlex Tomas #ifdef EXTENTS_STATS
3068a86c6181SAlex Tomas 	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3069a86c6181SAlex Tomas 		struct ext4_sb_info *sbi = EXT4_SB(sb);
3070a86c6181SAlex Tomas 		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3071a86c6181SAlex Tomas 			sbi->s_ext_blocks, sbi->s_ext_extents,
3072a86c6181SAlex Tomas 			sbi->s_ext_blocks / sbi->s_ext_extents);
3073a86c6181SAlex Tomas 		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3074a86c6181SAlex Tomas 			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3075a86c6181SAlex Tomas 	}
3076a86c6181SAlex Tomas #endif
3077a86c6181SAlex Tomas }
3078a86c6181SAlex Tomas 
3079d7b2a00cSZheng Liu static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3080d7b2a00cSZheng Liu {
3081d7b2a00cSZheng Liu 	ext4_lblk_t  ee_block;
3082d7b2a00cSZheng Liu 	ext4_fsblk_t ee_pblock;
3083d7b2a00cSZheng Liu 	unsigned int ee_len;
3084d7b2a00cSZheng Liu 
3085d7b2a00cSZheng Liu 	ee_block  = le32_to_cpu(ex->ee_block);
3086d7b2a00cSZheng Liu 	ee_len    = ext4_ext_get_actual_len(ex);
3087d7b2a00cSZheng Liu 	ee_pblock = ext4_ext_pblock(ex);
3088d7b2a00cSZheng Liu 
3089d7b2a00cSZheng Liu 	if (ee_len == 0)
3090d7b2a00cSZheng Liu 		return 0;
3091d7b2a00cSZheng Liu 
3092d7b2a00cSZheng Liu 	return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3093d7b2a00cSZheng Liu 				     EXTENT_STATUS_WRITTEN);
3094d7b2a00cSZheng Liu }
3095d7b2a00cSZheng Liu 
3096093a088bSAneesh Kumar K.V /* FIXME!! we need to try to merge to left or right after zero-out  */
3097093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3098093a088bSAneesh Kumar K.V {
30992407518dSLukas Czerner 	ext4_fsblk_t ee_pblock;
31002407518dSLukas Czerner 	unsigned int ee_len;
3101b720303dSJing Zhang 	int ret;
3102093a088bSAneesh Kumar K.V 
3103093a088bSAneesh Kumar K.V 	ee_len    = ext4_ext_get_actual_len(ex);
3104bf89d16fSTheodore Ts'o 	ee_pblock = ext4_ext_pblock(ex);
3105093a088bSAneesh Kumar K.V 
3106a107e5a3STheodore Ts'o 	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
31072407518dSLukas Czerner 	if (ret > 0)
31082407518dSLukas Czerner 		ret = 0;
3109093a088bSAneesh Kumar K.V 
31102407518dSLukas Czerner 	return ret;
3111093a088bSAneesh Kumar K.V }
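
/*
 * Sketch tying the two helpers above together (illustrative only;
 * EXT4_EXT_EXAMPLES is a hypothetical guard): after an extent's blocks
 * are zeroed on disk, the extent status tree is updated so the range is
 * tracked as written, mirroring what the split paths below do on the
 * ENOSPC fallback.
 */
#ifdef EXT4_EXT_EXAMPLES
static int ext4_ext_example_zeroout(struct inode *inode,
				    struct ext4_extent *ex)
{
	int err;

	err = ext4_ext_zeroout(inode, ex);	/* write zeroes to disk */
	if (!err)
		err = ext4_zeroout_es(inode, ex); /* record range as written */
	return err;
}
#endif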
3112093a088bSAneesh Kumar K.V 
311347ea3bb5SYongqiang Yang /*
311447ea3bb5SYongqiang Yang  * ext4_split_extent_at() splits an extent at given block.
311547ea3bb5SYongqiang Yang  *
311647ea3bb5SYongqiang Yang  * @handle: the journal handle
311747ea3bb5SYongqiang Yang  * @inode: the file inode
311847ea3bb5SYongqiang Yang  * @path: the path to the extent
311947ea3bb5SYongqiang Yang  * @split: the logical block where the extent is split.
312047ea3bb5SYongqiang Yang  * @split_flag: indicates whether the extent can be zeroed out if the
3121556615dcSLukas Czerner  *		 split fails, and the states (initialized or unwritten)
312147ea3bb5SYongqiang Yang  *		 of the new extents.
312247ea3bb5SYongqiang Yang  * @flags: flags used to insert the new extent into the extent tree.
312347ea3bb5SYongqiang Yang  *
312547ea3bb5SYongqiang Yang  * Splits extent [a, b] into two extents [a, @split) and [@split, b],
312647ea3bb5SYongqiang Yang  * the states of which are determined by split_flag.
312747ea3bb5SYongqiang Yang  *
312847ea3bb5SYongqiang Yang  * There are two cases:
312947ea3bb5SYongqiang Yang  *  a> the extent is split into two extents.
313047ea3bb5SYongqiang Yang  *  b> no split is needed, and the extent is just marked.
313147ea3bb5SYongqiang Yang  *
313247ea3bb5SYongqiang Yang  * return 0 on success.
313347ea3bb5SYongqiang Yang  */
313447ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle,
313547ea3bb5SYongqiang Yang 			     struct inode *inode,
313647ea3bb5SYongqiang Yang 			     struct ext4_ext_path *path,
313747ea3bb5SYongqiang Yang 			     ext4_lblk_t split,
313847ea3bb5SYongqiang Yang 			     int split_flag,
313947ea3bb5SYongqiang Yang 			     int flags)
314047ea3bb5SYongqiang Yang {
314147ea3bb5SYongqiang Yang 	ext4_fsblk_t newblock;
314247ea3bb5SYongqiang Yang 	ext4_lblk_t ee_block;
3143adb23551SZheng Liu 	struct ext4_extent *ex, newex, orig_ex, zero_ex;
314447ea3bb5SYongqiang Yang 	struct ext4_extent *ex2 = NULL;
314547ea3bb5SYongqiang Yang 	unsigned int ee_len, depth;
314647ea3bb5SYongqiang Yang 	int err = 0;
314747ea3bb5SYongqiang Yang 
3148dee1f973SDmitry Monakhov 	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3149dee1f973SDmitry Monakhov 	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3150dee1f973SDmitry Monakhov 
315147ea3bb5SYongqiang Yang 	ext_debug("ext4_split_extent_at: inode %lu, logical "
315247ea3bb5SYongqiang Yang 		"block %llu\n", inode->i_ino, (unsigned long long)split);
315347ea3bb5SYongqiang Yang 
315447ea3bb5SYongqiang Yang 	ext4_ext_show_leaf(inode, path);
315547ea3bb5SYongqiang Yang 
315647ea3bb5SYongqiang Yang 	depth = ext_depth(inode);
315747ea3bb5SYongqiang Yang 	ex = path[depth].p_ext;
315847ea3bb5SYongqiang Yang 	ee_block = le32_to_cpu(ex->ee_block);
315947ea3bb5SYongqiang Yang 	ee_len = ext4_ext_get_actual_len(ex);
316047ea3bb5SYongqiang Yang 	newblock = split - ee_block + ext4_ext_pblock(ex);
316147ea3bb5SYongqiang Yang 
316247ea3bb5SYongqiang Yang 	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3163556615dcSLukas Czerner 	BUG_ON(!ext4_ext_is_unwritten(ex) &&
3164357b66fdSDmitry Monakhov 	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
3165556615dcSLukas Czerner 			     EXT4_EXT_MARK_UNWRIT1 |
3166556615dcSLukas Czerner 			     EXT4_EXT_MARK_UNWRIT2));
316747ea3bb5SYongqiang Yang 
316847ea3bb5SYongqiang Yang 	err = ext4_ext_get_access(handle, inode, path + depth);
316947ea3bb5SYongqiang Yang 	if (err)
317047ea3bb5SYongqiang Yang 		goto out;
317147ea3bb5SYongqiang Yang 
317247ea3bb5SYongqiang Yang 	if (split == ee_block) {
317347ea3bb5SYongqiang Yang 		/*
317447ea3bb5SYongqiang Yang 		 * case b: block @split is the block that the extent begins with
317547ea3bb5SYongqiang Yang 		 * then we just change the state of the extent, and splitting
317647ea3bb5SYongqiang Yang 		 * is not needed.
317747ea3bb5SYongqiang Yang 		 */
3178556615dcSLukas Czerner 		if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3179556615dcSLukas Czerner 			ext4_ext_mark_unwritten(ex);
318047ea3bb5SYongqiang Yang 		else
318147ea3bb5SYongqiang Yang 			ext4_ext_mark_initialized(ex);
318247ea3bb5SYongqiang Yang 
318347ea3bb5SYongqiang Yang 		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3184ecb94f5fSTheodore Ts'o 			ext4_ext_try_to_merge(handle, inode, path, ex);
318547ea3bb5SYongqiang Yang 
3186ecb94f5fSTheodore Ts'o 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
318747ea3bb5SYongqiang Yang 		goto out;
318847ea3bb5SYongqiang Yang 	}
318947ea3bb5SYongqiang Yang 
319047ea3bb5SYongqiang Yang 	/* case a */
319147ea3bb5SYongqiang Yang 	memcpy(&orig_ex, ex, sizeof(orig_ex));
319247ea3bb5SYongqiang Yang 	ex->ee_len = cpu_to_le16(split - ee_block);
3193556615dcSLukas Czerner 	if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3194556615dcSLukas Czerner 		ext4_ext_mark_unwritten(ex);
319547ea3bb5SYongqiang Yang 
319647ea3bb5SYongqiang Yang 	/*
319747ea3bb5SYongqiang Yang 	 * path may lead to new leaf, not to original leaf any more
319847ea3bb5SYongqiang Yang 	 * after ext4_ext_insert_extent() returns,
319947ea3bb5SYongqiang Yang 	 */
320047ea3bb5SYongqiang Yang 	err = ext4_ext_dirty(handle, inode, path + depth);
320147ea3bb5SYongqiang Yang 	if (err)
320247ea3bb5SYongqiang Yang 		goto fix_extent_len;
320347ea3bb5SYongqiang Yang 
320447ea3bb5SYongqiang Yang 	ex2 = &newex;
320547ea3bb5SYongqiang Yang 	ex2->ee_block = cpu_to_le32(split);
320647ea3bb5SYongqiang Yang 	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
320747ea3bb5SYongqiang Yang 	ext4_ext_store_pblock(ex2, newblock);
3208556615dcSLukas Czerner 	if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3209556615dcSLukas Czerner 		ext4_ext_mark_unwritten(ex2);
321047ea3bb5SYongqiang Yang 
321147ea3bb5SYongqiang Yang 	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
321247ea3bb5SYongqiang Yang 	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3213dee1f973SDmitry Monakhov 		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3214adb23551SZheng Liu 			if (split_flag & EXT4_EXT_DATA_VALID1) {
3215dee1f973SDmitry Monakhov 				err = ext4_ext_zeroout(inode, ex2);
3216adb23551SZheng Liu 				zero_ex.ee_block = ex2->ee_block;
32178cde7ad1SZheng Liu 				zero_ex.ee_len = cpu_to_le16(
32188cde7ad1SZheng Liu 						ext4_ext_get_actual_len(ex2));
3219adb23551SZheng Liu 				ext4_ext_store_pblock(&zero_ex,
3220adb23551SZheng Liu 						      ext4_ext_pblock(ex2));
3221adb23551SZheng Liu 			} else {
3222dee1f973SDmitry Monakhov 				err = ext4_ext_zeroout(inode, ex);
3223adb23551SZheng Liu 				zero_ex.ee_block = ex->ee_block;
32248cde7ad1SZheng Liu 				zero_ex.ee_len = cpu_to_le16(
32258cde7ad1SZheng Liu 						ext4_ext_get_actual_len(ex));
3226adb23551SZheng Liu 				ext4_ext_store_pblock(&zero_ex,
3227adb23551SZheng Liu 						      ext4_ext_pblock(ex));
3228adb23551SZheng Liu 			}
3229adb23551SZheng Liu 		} else {
323047ea3bb5SYongqiang Yang 			err = ext4_ext_zeroout(inode, &orig_ex);
3231adb23551SZheng Liu 			zero_ex.ee_block = orig_ex.ee_block;
32328cde7ad1SZheng Liu 			zero_ex.ee_len = cpu_to_le16(
32338cde7ad1SZheng Liu 						ext4_ext_get_actual_len(&orig_ex));
3234adb23551SZheng Liu 			ext4_ext_store_pblock(&zero_ex,
3235adb23551SZheng Liu 					      ext4_ext_pblock(&orig_ex));
3236adb23551SZheng Liu 		}
3237dee1f973SDmitry Monakhov 
323847ea3bb5SYongqiang Yang 		if (err)
323947ea3bb5SYongqiang Yang 			goto fix_extent_len;
324047ea3bb5SYongqiang Yang 		/* update the extent length and mark as initialized */
3241af1584f5SAl Viro 		ex->ee_len = cpu_to_le16(ee_len);
3242ecb94f5fSTheodore Ts'o 		ext4_ext_try_to_merge(handle, inode, path, ex);
3243ecb94f5fSTheodore Ts'o 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3244adb23551SZheng Liu 		if (err)
3245adb23551SZheng Liu 			goto fix_extent_len;
3246adb23551SZheng Liu 
3247adb23551SZheng Liu 		/* update extent status tree */
3248d7b2a00cSZheng Liu 		err = ext4_zeroout_es(inode, &zero_ex);
3249adb23551SZheng Liu 
325047ea3bb5SYongqiang Yang 		goto out;
325147ea3bb5SYongqiang Yang 	} else if (err)
325247ea3bb5SYongqiang Yang 		goto fix_extent_len;
325347ea3bb5SYongqiang Yang 
325447ea3bb5SYongqiang Yang out:
325547ea3bb5SYongqiang Yang 	ext4_ext_show_leaf(inode, path);
325647ea3bb5SYongqiang Yang 	return err;
325747ea3bb5SYongqiang Yang 
325847ea3bb5SYongqiang Yang fix_extent_len:
325947ea3bb5SYongqiang Yang 	ex->ee_len = orig_ex.ee_len;
326029faed16SDmitry Monakhov 	ext4_ext_dirty(handle, inode, path + path->p_depth);
326147ea3bb5SYongqiang Yang 	return err;
326247ea3bb5SYongqiang Yang }
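
/*
 * Worked example for ext4_split_extent_at() (a sketch behind the
 * hypothetical EXT4_EXT_EXAMPLES guard; assumes the caller already holds
 * a path to an unwritten extent covering logical block 104): splitting
 * the unwritten extent [100, 110) at block 104 yields [100, 104) and
 * [104, 110); with EXT4_EXT_MARK_UNWRIT1 only the first half stays
 * unwritten, and EXT4_EXT_MAY_ZEROOUT allows falling back to a zeroout
 * if the split runs out of space.
 */
#ifdef EXT4_EXT_EXAMPLES
static int ext4_ext_example_split(handle_t *handle, struct inode *inode,
				  struct ext4_ext_path *path)
{
	return ext4_split_extent_at(handle, inode, path, 104,
				    EXT4_EXT_MARK_UNWRIT1 |
				    EXT4_EXT_MAY_ZEROOUT, 0);
}
#endif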
326347ea3bb5SYongqiang Yang 
326447ea3bb5SYongqiang Yang /*
326547ea3bb5SYongqiang Yang  * ext4_split_extent() splits an extent and marks the extent covered
326647ea3bb5SYongqiang Yang  * by @map as split_flag indicates.
326747ea3bb5SYongqiang Yang  *
326847ea3bb5SYongqiang Yang  * It may result in splitting the extent into multiple extents (up to three).
326947ea3bb5SYongqiang Yang  * There are three possibilities:
327047ea3bb5SYongqiang Yang  *   a> There is no split required.
327147ea3bb5SYongqiang Yang  *   b> Splits into two extents: the split happens at either end of the extent.
327247ea3bb5SYongqiang Yang  *   c> Splits into three extents: someone is splitting in the middle of the extent.
327347ea3bb5SYongqiang Yang  *
327447ea3bb5SYongqiang Yang  */
327547ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle,
327647ea3bb5SYongqiang Yang 			      struct inode *inode,
327747ea3bb5SYongqiang Yang 			      struct ext4_ext_path *path,
327847ea3bb5SYongqiang Yang 			      struct ext4_map_blocks *map,
327947ea3bb5SYongqiang Yang 			      int split_flag,
328047ea3bb5SYongqiang Yang 			      int flags)
328147ea3bb5SYongqiang Yang {
328247ea3bb5SYongqiang Yang 	ext4_lblk_t ee_block;
328347ea3bb5SYongqiang Yang 	struct ext4_extent *ex;
328447ea3bb5SYongqiang Yang 	unsigned int ee_len, depth;
328547ea3bb5SYongqiang Yang 	int err = 0;
3286556615dcSLukas Czerner 	int unwritten;
328747ea3bb5SYongqiang Yang 	int split_flag1, flags1;
32883a225670SZheng Liu 	int allocated = map->m_len;
328947ea3bb5SYongqiang Yang 
329047ea3bb5SYongqiang Yang 	depth = ext_depth(inode);
329147ea3bb5SYongqiang Yang 	ex = path[depth].p_ext;
329247ea3bb5SYongqiang Yang 	ee_block = le32_to_cpu(ex->ee_block);
329347ea3bb5SYongqiang Yang 	ee_len = ext4_ext_get_actual_len(ex);
3294556615dcSLukas Czerner 	unwritten = ext4_ext_is_unwritten(ex);
329547ea3bb5SYongqiang Yang 
329647ea3bb5SYongqiang Yang 	if (map->m_lblk + map->m_len < ee_block + ee_len) {
3297dee1f973SDmitry Monakhov 		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
329847ea3bb5SYongqiang Yang 		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3299556615dcSLukas Czerner 		if (unwritten)
3300556615dcSLukas Czerner 			split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3301556615dcSLukas Czerner 				       EXT4_EXT_MARK_UNWRIT2;
3302dee1f973SDmitry Monakhov 		if (split_flag & EXT4_EXT_DATA_VALID2)
3303dee1f973SDmitry Monakhov 			split_flag1 |= EXT4_EXT_DATA_VALID1;
330447ea3bb5SYongqiang Yang 		err = ext4_split_extent_at(handle, inode, path,
330547ea3bb5SYongqiang Yang 				map->m_lblk + map->m_len, split_flag1, flags1);
330693917411SYongqiang Yang 		if (err)
330793917411SYongqiang Yang 			goto out;
33083a225670SZheng Liu 	} else {
33093a225670SZheng Liu 		allocated = ee_len - (map->m_lblk - ee_block);
331047ea3bb5SYongqiang Yang 	}
3311357b66fdSDmitry Monakhov 	/*
3312357b66fdSDmitry Monakhov 	 * Update path is required because previous ext4_split_extent_at() may
3313357b66fdSDmitry Monakhov 	 * result in split of original leaf or extent zeroout.
3314357b66fdSDmitry Monakhov 	 */
331547ea3bb5SYongqiang Yang 	ext4_ext_drop_refs(path);
3316107a7bd3STheodore Ts'o 	path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
331747ea3bb5SYongqiang Yang 	if (IS_ERR(path))
331847ea3bb5SYongqiang Yang 		return PTR_ERR(path);
3319357b66fdSDmitry Monakhov 	depth = ext_depth(inode);
3320357b66fdSDmitry Monakhov 	ex = path[depth].p_ext;
3321a18ed359SDmitry Monakhov 	if (!ex) {
3322a18ed359SDmitry Monakhov 		EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3323a18ed359SDmitry Monakhov 				 (unsigned long) map->m_lblk);
3324a18ed359SDmitry Monakhov 		return -EIO;
3325a18ed359SDmitry Monakhov 	}
3326556615dcSLukas Czerner 	unwritten = ext4_ext_is_unwritten(ex);
3327357b66fdSDmitry Monakhov 	split_flag1 = 0;
332847ea3bb5SYongqiang Yang 
332947ea3bb5SYongqiang Yang 	if (map->m_lblk >= ee_block) {
3330357b66fdSDmitry Monakhov 		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3331556615dcSLukas Czerner 		if (unwritten) {
3332556615dcSLukas Czerner 			split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3333357b66fdSDmitry Monakhov 			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3334556615dcSLukas Czerner 						     EXT4_EXT_MARK_UNWRIT2);
3335357b66fdSDmitry Monakhov 		}
333647ea3bb5SYongqiang Yang 		err = ext4_split_extent_at(handle, inode, path,
333747ea3bb5SYongqiang Yang 				map->m_lblk, split_flag1, flags);
333847ea3bb5SYongqiang Yang 		if (err)
333947ea3bb5SYongqiang Yang 			goto out;
334047ea3bb5SYongqiang Yang 	}
334147ea3bb5SYongqiang Yang 
334247ea3bb5SYongqiang Yang 	ext4_ext_show_leaf(inode, path);
334347ea3bb5SYongqiang Yang out:
33443a225670SZheng Liu 	return err ? err : allocated;
334547ea3bb5SYongqiang Yang }
334647ea3bb5SYongqiang Yang 
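/*
 * Editorial sketch (not part of extents.c): a minimal, self-contained
 * model of the three split cases described above.  'ee_block'/'ee_len'
 * describe the existing extent, 'm_lblk'/'m_len' the region covered by
 * @map.  The helper name and plain unsigned types are hypothetical.
 */
static inline int split_case(unsigned int ee_block, unsigned int ee_len,
			     unsigned int m_lblk, unsigned int m_len)
{
	unsigned int m_end = m_lblk + m_len;
	unsigned int ee_end = ee_block + ee_len;

	if (m_lblk == ee_block && m_end == ee_end)
		return 1;	/* a> no split required */
	if (m_lblk == ee_block || m_end == ee_end)
		return 2;	/* b> split in two, at either end */
	return 3;		/* c> split in three, in the middle */
}
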
334756055d3aSAmit Arora /*
3348e35fd660STheodore Ts'o  * This function is called by ext4_ext_map_blocks() if someone tries to write
3349556615dcSLukas Czerner  * to an unwritten extent. It may result in splitting the unwritten
335056055d3aSAmit Arora  * extent into multiple extents (up to three - one initialized and two
3351556615dcSLukas Czerner  * unwritten).
335256055d3aSAmit Arora  * There are three possibilities:
335356055d3aSAmit Arora  *   a> There is no split required: Entire extent should be initialized
335456055d3aSAmit Arora  *   b> Splits in two extents: Write is happening at either end of the extent
335556055d3aSAmit Arora  *   c> Splits in three extents: Someone is writing in the middle of the extent
33566f91bc5fSEric Gouriou  *
33576f91bc5fSEric Gouriou  * Pre-conditions:
3358556615dcSLukas Czerner  *  - The extent pointed to by 'path' is unwritten.
33596f91bc5fSEric Gouriou  *  - The extent pointed to by 'path' contains a superset
33606f91bc5fSEric Gouriou  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
33616f91bc5fSEric Gouriou  *
33626f91bc5fSEric Gouriou  * Post-conditions on success:
33636f91bc5fSEric Gouriou  *  - the returned value is the number of blocks beyond map->m_lblk
33646f91bc5fSEric Gouriou  *    that are allocated and initialized.
33656f91bc5fSEric Gouriou  *    It is guaranteed to be >= map->m_len.
336656055d3aSAmit Arora  */
3367725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle,
3368725d26d3SAneesh Kumar K.V 					   struct inode *inode,
3369e35fd660STheodore Ts'o 					   struct ext4_map_blocks *map,
337027dd4385SLukas Czerner 					   struct ext4_ext_path *path,
337127dd4385SLukas Czerner 					   int flags)
337256055d3aSAmit Arora {
337367a5da56SZheng Liu 	struct ext4_sb_info *sbi;
33746f91bc5fSEric Gouriou 	struct ext4_extent_header *eh;
3375667eff35SYongqiang Yang 	struct ext4_map_blocks split_map;
3376667eff35SYongqiang Yang 	struct ext4_extent zero_ex;
3377bc2d9db4SLukas Czerner 	struct ext4_extent *ex, *abut_ex;
337821ca087aSDmitry Monakhov 	ext4_lblk_t ee_block, eof_block;
3379bc2d9db4SLukas Czerner 	unsigned int ee_len, depth, map_len = map->m_len;
3380bc2d9db4SLukas Czerner 	int allocated = 0, max_zeroout = 0;
338156055d3aSAmit Arora 	int err = 0;
3382667eff35SYongqiang Yang 	int split_flag = 0;
338321ca087aSDmitry Monakhov 
338421ca087aSDmitry Monakhov 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
338521ca087aSDmitry Monakhov 		"block %llu, max_blocks %u\n", inode->i_ino,
3386bc2d9db4SLukas Czerner 		(unsigned long long)map->m_lblk, map_len);
338721ca087aSDmitry Monakhov 
338867a5da56SZheng Liu 	sbi = EXT4_SB(inode->i_sb);
338921ca087aSDmitry Monakhov 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
339021ca087aSDmitry Monakhov 		inode->i_sb->s_blocksize_bits;
3391bc2d9db4SLukas Czerner 	if (eof_block < map->m_lblk + map_len)
3392bc2d9db4SLukas Czerner 		eof_block = map->m_lblk + map_len;
339356055d3aSAmit Arora 
339456055d3aSAmit Arora 	depth = ext_depth(inode);
33956f91bc5fSEric Gouriou 	eh = path[depth].p_hdr;
339656055d3aSAmit Arora 	ex = path[depth].p_ext;
339756055d3aSAmit Arora 	ee_block = le32_to_cpu(ex->ee_block);
339856055d3aSAmit Arora 	ee_len = ext4_ext_get_actual_len(ex);
3399adb23551SZheng Liu 	zero_ex.ee_len = 0;
340021ca087aSDmitry Monakhov 
34016f91bc5fSEric Gouriou 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
34026f91bc5fSEric Gouriou 
34036f91bc5fSEric Gouriou 	/* Pre-conditions */
3404556615dcSLukas Czerner 	BUG_ON(!ext4_ext_is_unwritten(ex));
34056f91bc5fSEric Gouriou 	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
34066f91bc5fSEric Gouriou 
34076f91bc5fSEric Gouriou 	/*
34086f91bc5fSEric Gouriou 	 * Attempt to transfer newly initialized blocks from the currently
3409556615dcSLukas Czerner 	 * unwritten extent to its neighbor. This is much cheaper
34106f91bc5fSEric Gouriou 	 * than an insertion followed by a merge as those involve costly
3411bc2d9db4SLukas Czerner 	 * memmove() calls. Transferring to the left is the common case in
3412bc2d9db4SLukas Czerner 	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3413bc2d9db4SLukas Czerner 	 * followed by append writes.
34146f91bc5fSEric Gouriou 	 *
34156f91bc5fSEric Gouriou 	 * Limitations of the current logic:
3416bc2d9db4SLukas Czerner 	 *  - L1: we do not deal with writes covering the whole extent.
34176f91bc5fSEric Gouriou 	 *    This would require removing the extent if the transfer
34186f91bc5fSEric Gouriou 	 *    is possible.
3419bc2d9db4SLukas Czerner 	 *  - L2: we only attempt to merge with an extent stored in the
34206f91bc5fSEric Gouriou 	 *    same extent tree node.
34216f91bc5fSEric Gouriou 	 */
3422bc2d9db4SLukas Czerner 	if ((map->m_lblk == ee_block) &&
3423bc2d9db4SLukas Czerner 		/* See if we can merge left */
3424bc2d9db4SLukas Czerner 		(map_len < ee_len) &&		/*L1*/
3425bc2d9db4SLukas Czerner 		(ex > EXT_FIRST_EXTENT(eh))) {	/*L2*/
34266f91bc5fSEric Gouriou 		ext4_lblk_t prev_lblk;
34276f91bc5fSEric Gouriou 		ext4_fsblk_t prev_pblk, ee_pblk;
3428bc2d9db4SLukas Czerner 		unsigned int prev_len;
34296f91bc5fSEric Gouriou 
3430bc2d9db4SLukas Czerner 		abut_ex = ex - 1;
3431bc2d9db4SLukas Czerner 		prev_lblk = le32_to_cpu(abut_ex->ee_block);
3432bc2d9db4SLukas Czerner 		prev_len = ext4_ext_get_actual_len(abut_ex);
3433bc2d9db4SLukas Czerner 		prev_pblk = ext4_ext_pblock(abut_ex);
34346f91bc5fSEric Gouriou 		ee_pblk = ext4_ext_pblock(ex);
34356f91bc5fSEric Gouriou 
34366f91bc5fSEric Gouriou 		/*
3437bc2d9db4SLukas Czerner 		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
34386f91bc5fSEric Gouriou 		 * upon those conditions:
3439bc2d9db4SLukas Czerner 		 * - C1: abut_ex is initialized,
3440bc2d9db4SLukas Czerner 		 * - C2: abut_ex is logically abutting ex,
3441bc2d9db4SLukas Czerner 		 * - C3: abut_ex is physically abutting ex,
3442bc2d9db4SLukas Czerner 		 * - C4: abut_ex can receive the additional blocks without
34436f91bc5fSEric Gouriou 		 *   overflowing the (initialized) length limit.
34446f91bc5fSEric Gouriou 		 */
3445556615dcSLukas Czerner 		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
34466f91bc5fSEric Gouriou 			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
34476f91bc5fSEric Gouriou 			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
3448bc2d9db4SLukas Czerner 			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
34496f91bc5fSEric Gouriou 			err = ext4_ext_get_access(handle, inode, path + depth);
34506f91bc5fSEric Gouriou 			if (err)
34516f91bc5fSEric Gouriou 				goto out;
34526f91bc5fSEric Gouriou 
34536f91bc5fSEric Gouriou 			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3454bc2d9db4SLukas Czerner 				map, ex, abut_ex);
34556f91bc5fSEric Gouriou 
3456bc2d9db4SLukas Czerner 			/* Shift the start of ex by 'map_len' blocks */
3457bc2d9db4SLukas Czerner 			ex->ee_block = cpu_to_le32(ee_block + map_len);
3458bc2d9db4SLukas Czerner 			ext4_ext_store_pblock(ex, ee_pblk + map_len);
3459bc2d9db4SLukas Czerner 			ex->ee_len = cpu_to_le16(ee_len - map_len);
3460556615dcSLukas Czerner 			ext4_ext_mark_unwritten(ex); /* Restore the flag */
34616f91bc5fSEric Gouriou 
3462bc2d9db4SLukas Czerner 			/* Extend abut_ex by 'map_len' blocks */
3463bc2d9db4SLukas Czerner 			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
34646f91bc5fSEric Gouriou 
3465bc2d9db4SLukas Czerner 			/* Result: number of initialized blocks past m_lblk */
3466bc2d9db4SLukas Czerner 			allocated = map_len;
3467bc2d9db4SLukas Czerner 		}
3468bc2d9db4SLukas Czerner 	} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3469bc2d9db4SLukas Czerner 		   (map_len < ee_len) &&	/*L1*/
3470bc2d9db4SLukas Czerner 		   ex < EXT_LAST_EXTENT(eh)) {	/*L2*/
3471bc2d9db4SLukas Czerner 		/* See if we can merge right */
3472bc2d9db4SLukas Czerner 		ext4_lblk_t next_lblk;
3473bc2d9db4SLukas Czerner 		ext4_fsblk_t next_pblk, ee_pblk;
3474bc2d9db4SLukas Czerner 		unsigned int next_len;
3475bc2d9db4SLukas Czerner 
3476bc2d9db4SLukas Czerner 		abut_ex = ex + 1;
3477bc2d9db4SLukas Czerner 		next_lblk = le32_to_cpu(abut_ex->ee_block);
3478bc2d9db4SLukas Czerner 		next_len = ext4_ext_get_actual_len(abut_ex);
3479bc2d9db4SLukas Czerner 		next_pblk = ext4_ext_pblock(abut_ex);
3480bc2d9db4SLukas Czerner 		ee_pblk = ext4_ext_pblock(ex);
3481bc2d9db4SLukas Czerner 
3482bc2d9db4SLukas Czerner 		/*
3483bc2d9db4SLukas Czerner 		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3484bc2d9db4SLukas Czerner 		 * upon those conditions:
3485bc2d9db4SLukas Czerner 		 * - C1: abut_ex is initialized,
3486bc2d9db4SLukas Czerner 		 * - C2: abut_ex is logically abutting ex,
3487bc2d9db4SLukas Czerner 		 * - C3: abut_ex is physically abutting ex,
3488bc2d9db4SLukas Czerner 		 * - C4: abut_ex can receive the additional blocks without
3489bc2d9db4SLukas Czerner 		 *   overflowing the (initialized) length limit.
3490bc2d9db4SLukas Czerner 		 */
3491556615dcSLukas Czerner 		if ((!ext4_ext_is_unwritten(abut_ex)) &&		/*C1*/
3492bc2d9db4SLukas Czerner 		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
3493bc2d9db4SLukas Czerner 		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
3494bc2d9db4SLukas Czerner 		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
3495bc2d9db4SLukas Czerner 			err = ext4_ext_get_access(handle, inode, path + depth);
3496bc2d9db4SLukas Czerner 			if (err)
3497bc2d9db4SLukas Czerner 				goto out;
3498bc2d9db4SLukas Czerner 
3499bc2d9db4SLukas Czerner 			trace_ext4_ext_convert_to_initialized_fastpath(inode,
3500bc2d9db4SLukas Czerner 				map, ex, abut_ex);
3501bc2d9db4SLukas Czerner 
3502bc2d9db4SLukas Czerner 			/* Shift the start of abut_ex by 'map_len' blocks */
3503bc2d9db4SLukas Czerner 			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3504bc2d9db4SLukas Czerner 			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3505bc2d9db4SLukas Czerner 			ex->ee_len = cpu_to_le16(ee_len - map_len);
3506556615dcSLukas Czerner 			ext4_ext_mark_unwritten(ex); /* Restore the flag */
3507bc2d9db4SLukas Czerner 
3508bc2d9db4SLukas Czerner 			/* Extend abut_ex by 'map_len' blocks */
3509bc2d9db4SLukas Czerner 			abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3510bc2d9db4SLukas Czerner 
3511bc2d9db4SLukas Czerner 			/* Result: number of initialized blocks past m_lblk */
3512bc2d9db4SLukas Czerner 			allocated = map_len;
3513bc2d9db4SLukas Czerner 		}
3514bc2d9db4SLukas Czerner 	}
3515bc2d9db4SLukas Czerner 	if (allocated) {
35166f91bc5fSEric Gouriou 		/* Mark the block containing both extents as dirty */
35176f91bc5fSEric Gouriou 		ext4_ext_dirty(handle, inode, path + depth);
35186f91bc5fSEric Gouriou 
35196f91bc5fSEric Gouriou 		/* Update path to point to the right extent */
3520bc2d9db4SLukas Czerner 		path[depth].p_ext = abut_ex;
35216f91bc5fSEric Gouriou 		goto out;
3522bc2d9db4SLukas Czerner 	} else
3523bc2d9db4SLukas Czerner 		allocated = ee_len - (map->m_lblk - ee_block);
35246f91bc5fSEric Gouriou 
3525667eff35SYongqiang Yang 	WARN_ON(map->m_lblk < ee_block);
352621ca087aSDmitry Monakhov 	/*
352721ca087aSDmitry Monakhov 	 * It is safe to convert an extent to initialized via explicit
35289e740568SYongqiang Yang 	 * zeroout only if the extent is fully inside i_size or new_size.
352921ca087aSDmitry Monakhov 	 */
3530667eff35SYongqiang Yang 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
353121ca087aSDmitry Monakhov 
353267a5da56SZheng Liu 	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
353367a5da56SZheng Liu 		max_zeroout = sbi->s_extent_max_zeroout_kb >>
35344f42f80aSLukas Czerner 			(inode->i_sb->s_blocksize_bits - 10);
353567a5da56SZheng Liu 
353667a5da56SZheng Liu 	/* If extent is less than s_extent_max_zeroout_kb, zeroout directly */
353767a5da56SZheng Liu 	if (max_zeroout && (ee_len <= max_zeroout)) {
3538667eff35SYongqiang Yang 		err = ext4_ext_zeroout(inode, ex);
35393977c965SAneesh Kumar K.V 		if (err)
354056055d3aSAmit Arora 			goto out;
3541adb23551SZheng Liu 		zero_ex.ee_block = ex->ee_block;
35428cde7ad1SZheng Liu 		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
3543adb23551SZheng Liu 		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
35449df5643aSAneesh Kumar K.V 
35459df5643aSAneesh Kumar K.V 		err = ext4_ext_get_access(handle, inode, path + depth);
35469df5643aSAneesh Kumar K.V 		if (err)
35479df5643aSAneesh Kumar K.V 			goto out;
3548667eff35SYongqiang Yang 		ext4_ext_mark_initialized(ex);
3549ecb94f5fSTheodore Ts'o 		ext4_ext_try_to_merge(handle, inode, path, ex);
3550ecb94f5fSTheodore Ts'o 		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
355156055d3aSAmit Arora 		goto out;
3552667eff35SYongqiang Yang 	}
3553093a088bSAneesh Kumar K.V 
3554667eff35SYongqiang Yang 	/*
3555667eff35SYongqiang Yang 	 * four cases:
3556667eff35SYongqiang Yang 	 * 1. split the extent into three extents.
3557667eff35SYongqiang Yang 	 * 2. split the extent into two extents, zeroout the first half.
3558667eff35SYongqiang Yang 	 * 3. split the extent into two extents, zeroout the second half.
3559667eff35SYongqiang Yang 	 * 4. split the extent into two extents without zeroout.
3560667eff35SYongqiang Yang 	 */
3561667eff35SYongqiang Yang 	split_map.m_lblk = map->m_lblk;
3562667eff35SYongqiang Yang 	split_map.m_len = map->m_len;
3563667eff35SYongqiang Yang 
356467a5da56SZheng Liu 	if (max_zeroout && (allocated > map->m_len)) {
356567a5da56SZheng Liu 		if (allocated <= max_zeroout) {
3566667eff35SYongqiang Yang 			/* case 3 */
3567667eff35SYongqiang Yang 			zero_ex.ee_block =
35689b940f8eSAllison Henderson 					 cpu_to_le32(map->m_lblk);
35699b940f8eSAllison Henderson 			zero_ex.ee_len = cpu_to_le16(allocated);
3570667eff35SYongqiang Yang 			ext4_ext_store_pblock(&zero_ex,
3571667eff35SYongqiang Yang 				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3572667eff35SYongqiang Yang 			err = ext4_ext_zeroout(inode, &zero_ex);
3573667eff35SYongqiang Yang 			if (err)
3574667eff35SYongqiang Yang 				goto out;
3575667eff35SYongqiang Yang 			split_map.m_lblk = map->m_lblk;
3576667eff35SYongqiang Yang 			split_map.m_len = allocated;
357767a5da56SZheng Liu 		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
3578667eff35SYongqiang Yang 			/* case 2 */
3579667eff35SYongqiang Yang 			if (map->m_lblk != ee_block) {
3580667eff35SYongqiang Yang 				zero_ex.ee_block = ex->ee_block;
3581667eff35SYongqiang Yang 				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3582667eff35SYongqiang Yang 							ee_block);
3583667eff35SYongqiang Yang 				ext4_ext_store_pblock(&zero_ex,
3584667eff35SYongqiang Yang 						      ext4_ext_pblock(ex));
3585667eff35SYongqiang Yang 				err = ext4_ext_zeroout(inode, &zero_ex);
3586667eff35SYongqiang Yang 				if (err)
3587667eff35SYongqiang Yang 					goto out;
3588667eff35SYongqiang Yang 			}
3589667eff35SYongqiang Yang 
3590667eff35SYongqiang Yang 			split_map.m_lblk = ee_block;
35919b940f8eSAllison Henderson 			split_map.m_len = map->m_lblk - ee_block + map->m_len;
35929b940f8eSAllison Henderson 			allocated = map->m_len;
3593667eff35SYongqiang Yang 		}
3594667eff35SYongqiang Yang 	}
3595667eff35SYongqiang Yang 
3596667eff35SYongqiang Yang 	allocated = ext4_split_extent(handle, inode, path,
359727dd4385SLukas Czerner 				      &split_map, split_flag, flags);
3598667eff35SYongqiang Yang 	if (allocated < 0)
3599667eff35SYongqiang Yang 		err = allocated;
3600667eff35SYongqiang Yang 
3601667eff35SYongqiang Yang out:
3602adb23551SZheng Liu 	/* If we have gotten a failure, don't zero out status tree */
3603adb23551SZheng Liu 	if (!err)
3604d7b2a00cSZheng Liu 		err = ext4_zeroout_es(inode, &zero_ex);
3605667eff35SYongqiang Yang 	return err ? err : allocated;
360656055d3aSAmit Arora }
360756055d3aSAmit Arora 
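/*
 * Editorial sketch (not part of extents.c): the max_zeroout arithmetic
 * used above, as a standalone helper.  s_extent_max_zeroout_kb is in
 * kilobytes; e.g. 32 KB with 4 KiB blocks (blocksize_bits == 12) gives
 * 32 >> (12 - 10) = 8 blocks, so unwritten extents of up to 8 blocks
 * are zeroed out in place instead of being split.
 */
static inline unsigned int max_zeroout_blocks(unsigned int zeroout_kb,
					      unsigned int blocksize_bits)
{
	/* kilobytes -> filesystem blocks: divide by (block size / 1 KB) */
	return zeroout_kb >> (blocksize_bits - 10);
}
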
3608c278bfecSAneesh Kumar K.V /*
3609e35fd660STheodore Ts'o  * This function is called by ext4_ext_map_blocks() from
36100031462bSMingming Cao  * ext4_get_blocks_dio_write() when a DIO write targets
3611556615dcSLukas Czerner  * an unwritten extent.
36120031462bSMingming Cao  *
3613556615dcSLukas Czerner  * Writing to an unwritten extent may result in splitting the unwritten
3614556615dcSLukas Czerner  * extent into multiple initialized/unwritten extents (up to three)
36150031462bSMingming Cao  * There are three possibilities:
3616556615dcSLukas Czerner  *   a> There is no split required: Entire extent should be unwritten
36170031462bSMingming Cao  *   b> Splits in two extents: Write is happening at either end of the extent
36180031462bSMingming Cao  *   c> Splits in three extents: Someone is writing in the middle of the extent
36190031462bSMingming Cao  *
3620b8a86845SLukas Czerner  * This works the same way in the case of initialized -> unwritten conversion.
3621b8a86845SLukas Czerner  *
36220031462bSMingming Cao  * One or more index blocks may be needed if the extent tree grows after
3623556615dcSLukas Czerner  * the unwritten extent is split. To prevent ENOSPC from occurring at IO
3624556615dcSLukas Czerner  * completion, we need to split the unwritten extent before the DIO is
3625556615dcSLukas Czerner  * submitted. The unwritten extent split at this time becomes (at most)
3626556615dcSLukas Czerner  * three unwritten extents. After the IO completes, the part that was
36270031462bSMingming Cao  * filled is converted to initialized by the end_io callback function
36280031462bSMingming Cao  * via ext4_convert_unwritten_extents().
3629ba230c3fSMingming  *
3630556615dcSLukas Czerner  * Returns the size of the unwritten extent to be written on success.
36310031462bSMingming Cao  */
3632b8a86845SLukas Czerner static int ext4_split_convert_extents(handle_t *handle,
36330031462bSMingming Cao 					struct inode *inode,
3634e35fd660STheodore Ts'o 					struct ext4_map_blocks *map,
36350031462bSMingming Cao 					struct ext4_ext_path *path,
36360031462bSMingming Cao 					int flags)
36370031462bSMingming Cao {
3638667eff35SYongqiang Yang 	ext4_lblk_t eof_block;
3639667eff35SYongqiang Yang 	ext4_lblk_t ee_block;
3640667eff35SYongqiang Yang 	struct ext4_extent *ex;
3641667eff35SYongqiang Yang 	unsigned int ee_len;
3642667eff35SYongqiang Yang 	int split_flag = 0, depth;
36430031462bSMingming Cao 
3644b8a86845SLukas Czerner 	ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
3645b8a86845SLukas Czerner 		  __func__, inode->i_ino,
3646e35fd660STheodore Ts'o 		  (unsigned long long)map->m_lblk, map->m_len);
364721ca087aSDmitry Monakhov 
364821ca087aSDmitry Monakhov 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
364921ca087aSDmitry Monakhov 		inode->i_sb->s_blocksize_bits;
3650e35fd660STheodore Ts'o 	if (eof_block < map->m_lblk + map->m_len)
3651e35fd660STheodore Ts'o 		eof_block = map->m_lblk + map->m_len;
36520031462bSMingming Cao 	/*
365321ca087aSDmitry Monakhov 	 * It is safe to convert an extent to initialized via explicit
365421ca087aSDmitry Monakhov 	 * zeroout only if the extent is fully inside i_size or new_size.
365521ca087aSDmitry Monakhov 	 */
3656667eff35SYongqiang Yang 	depth = ext_depth(inode);
36570031462bSMingming Cao 	ex = path[depth].p_ext;
3658667eff35SYongqiang Yang 	ee_block = le32_to_cpu(ex->ee_block);
3659667eff35SYongqiang Yang 	ee_len = ext4_ext_get_actual_len(ex);
36600031462bSMingming Cao 
3661b8a86845SLukas Czerner 	/* Convert to unwritten */
3662b8a86845SLukas Czerner 	if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3663b8a86845SLukas Czerner 		split_flag |= EXT4_EXT_DATA_VALID1;
3664b8a86845SLukas Czerner 	/* Convert to initialized */
3665b8a86845SLukas Czerner 	} else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3666b8a86845SLukas Czerner 		split_flag |= ee_block + ee_len <= eof_block ?
3667b8a86845SLukas Czerner 			      EXT4_EXT_MAY_ZEROOUT : 0;
3668556615dcSLukas Czerner 		split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3669b8a86845SLukas Czerner 	}
3670667eff35SYongqiang Yang 	flags |= EXT4_GET_BLOCKS_PRE_IO;
3671667eff35SYongqiang Yang 	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
36720031462bSMingming Cao }
3673197217a5SYongqiang Yang 
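/*
 * Editorial note (not part of extents.c): the two conversion directions
 * handled above compose split_flag as follows:
 *   written   -> unwritten : EXT4_EXT_DATA_VALID1
 *   unwritten -> written   : EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2,
 *                            plus EXT4_EXT_MAY_ZEROOUT when the extent
 *                            lies fully inside i_size (or new_size)
 * and both directions add EXT4_GET_BLOCKS_PRE_IO before calling
 * ext4_split_extent().
 */
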
3674b8a86845SLukas Czerner static int ext4_convert_initialized_extents(handle_t *handle,
3675b8a86845SLukas Czerner 					    struct inode *inode,
3676b8a86845SLukas Czerner 					    struct ext4_map_blocks *map,
3677b8a86845SLukas Czerner 					    struct ext4_ext_path *path)
3678b8a86845SLukas Czerner {
3679b8a86845SLukas Czerner 	struct ext4_extent *ex;
3680b8a86845SLukas Czerner 	ext4_lblk_t ee_block;
3681b8a86845SLukas Czerner 	unsigned int ee_len;
3682b8a86845SLukas Czerner 	int depth;
3683b8a86845SLukas Czerner 	int err = 0;
3684b8a86845SLukas Czerner 
3685b8a86845SLukas Czerner 	depth = ext_depth(inode);
3686b8a86845SLukas Czerner 	ex = path[depth].p_ext;
3687b8a86845SLukas Czerner 	ee_block = le32_to_cpu(ex->ee_block);
3688b8a86845SLukas Czerner 	ee_len = ext4_ext_get_actual_len(ex);
3689b8a86845SLukas Czerner 
3690b8a86845SLukas Czerner 	ext_debug("%s: inode %lu, logical "
3691b8a86845SLukas Czerner 		"block %llu, max_blocks %u\n", __func__, inode->i_ino,
3692b8a86845SLukas Czerner 		  (unsigned long long)ee_block, ee_len);
3693b8a86845SLukas Czerner 
3694b8a86845SLukas Czerner 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
3695b8a86845SLukas Czerner 		err = ext4_split_convert_extents(handle, inode, map, path,
3696b8a86845SLukas Czerner 				EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3697b8a86845SLukas Czerner 		if (err < 0)
3698b8a86845SLukas Czerner 			goto out;
3699b8a86845SLukas Czerner 		ext4_ext_drop_refs(path);
3700b8a86845SLukas Czerner 		path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3701b8a86845SLukas Czerner 		if (IS_ERR(path)) {
3702b8a86845SLukas Czerner 			err = PTR_ERR(path);
3703b8a86845SLukas Czerner 			goto out;
3704b8a86845SLukas Czerner 		}
3705b8a86845SLukas Czerner 		depth = ext_depth(inode);
3706b8a86845SLukas Czerner 		ex = path[depth].p_ext;
3707a18ed359SDmitry Monakhov 		if (!ex) {
3708a18ed359SDmitry Monakhov 			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3709a18ed359SDmitry Monakhov 					 (unsigned long) map->m_lblk);
3710a18ed359SDmitry Monakhov 			err = -EIO;
3711a18ed359SDmitry Monakhov 			goto out;
3712a18ed359SDmitry Monakhov 		}
3713b8a86845SLukas Czerner 	}
3714b8a86845SLukas Czerner 
3715b8a86845SLukas Czerner 	err = ext4_ext_get_access(handle, inode, path + depth);
3716b8a86845SLukas Czerner 	if (err)
3717b8a86845SLukas Czerner 		goto out;
3718556615dcSLukas Czerner 	/* first mark the extent as unwritten */
3719556615dcSLukas Czerner 	ext4_ext_mark_unwritten(ex);
3720b8a86845SLukas Czerner 
3721b8a86845SLukas Czerner 	/* note: ext4_ext_correct_indexes() isn't needed here because
3722b8a86845SLukas Czerner 	 * borders are not changed
3723b8a86845SLukas Czerner 	 */
3724b8a86845SLukas Czerner 	ext4_ext_try_to_merge(handle, inode, path, ex);
3725b8a86845SLukas Czerner 
3726b8a86845SLukas Czerner 	/* Mark modified extent as dirty */
3727b8a86845SLukas Czerner 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3728b8a86845SLukas Czerner out:
3729b8a86845SLukas Czerner 	ext4_ext_show_leaf(inode, path);
3730b8a86845SLukas Czerner 	return err;
3731b8a86845SLukas Czerner }
3732b8a86845SLukas Czerner 
3733b8a86845SLukas Czerner 
3734c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle,
37350031462bSMingming Cao 						struct inode *inode,
3736dee1f973SDmitry Monakhov 						struct ext4_map_blocks *map,
37370031462bSMingming Cao 						struct ext4_ext_path *path)
37380031462bSMingming Cao {
37390031462bSMingming Cao 	struct ext4_extent *ex;
3740dee1f973SDmitry Monakhov 	ext4_lblk_t ee_block;
3741dee1f973SDmitry Monakhov 	unsigned int ee_len;
37420031462bSMingming Cao 	int depth;
37430031462bSMingming Cao 	int err = 0;
37440031462bSMingming Cao 
37450031462bSMingming Cao 	depth = ext_depth(inode);
37460031462bSMingming Cao 	ex = path[depth].p_ext;
3747dee1f973SDmitry Monakhov 	ee_block = le32_to_cpu(ex->ee_block);
3748dee1f973SDmitry Monakhov 	ee_len = ext4_ext_get_actual_len(ex);
37490031462bSMingming Cao 
3750197217a5SYongqiang Yang 	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3751197217a5SYongqiang Yang 		"block %llu, max_blocks %u\n", inode->i_ino,
3752dee1f973SDmitry Monakhov 		  (unsigned long long)ee_block, ee_len);
3753dee1f973SDmitry Monakhov 
3754ff95ec22SDmitry Monakhov 	/* If the extent is larger than requested, it is a clear sign that we
3755ff95ec22SDmitry Monakhov 	 * still have some extent state machine issues left, so splitting the
3756ff95ec22SDmitry Monakhov 	 * extent is still required.
3757ff95ec22SDmitry Monakhov 	 * TODO: Once all related issues are fixed, this situation should be
3758ff95ec22SDmitry Monakhov 	 * illegal.
3759ff95ec22SDmitry Monakhov 	 */
3760dee1f973SDmitry Monakhov 	if (ee_block != map->m_lblk || ee_len > map->m_len) {
3761ff95ec22SDmitry Monakhov #ifdef EXT4_DEBUG
3762ff95ec22SDmitry Monakhov 		ext4_warning("Inode (%ld) finished: extent logical block %llu,"
3763ff95ec22SDmitry Monakhov 			     " len %u; IO logical block %llu, len %u\n",
3764ff95ec22SDmitry Monakhov 			     inode->i_ino, (unsigned long long)ee_block, ee_len,
3765ff95ec22SDmitry Monakhov 			     (unsigned long long)map->m_lblk, map->m_len);
3766ff95ec22SDmitry Monakhov #endif
3767b8a86845SLukas Czerner 		err = ext4_split_convert_extents(handle, inode, map, path,
3768dee1f973SDmitry Monakhov 						 EXT4_GET_BLOCKS_CONVERT);
3769dee1f973SDmitry Monakhov 		if (err < 0)
3770dee1f973SDmitry Monakhov 			goto out;
3771dee1f973SDmitry Monakhov 		ext4_ext_drop_refs(path);
3772107a7bd3STheodore Ts'o 		path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3773dee1f973SDmitry Monakhov 		if (IS_ERR(path)) {
3774dee1f973SDmitry Monakhov 			err = PTR_ERR(path);
3775dee1f973SDmitry Monakhov 			goto out;
3776dee1f973SDmitry Monakhov 		}
3777dee1f973SDmitry Monakhov 		depth = ext_depth(inode);
3778dee1f973SDmitry Monakhov 		ex = path[depth].p_ext;
3779dee1f973SDmitry Monakhov 	}
3780197217a5SYongqiang Yang 
37810031462bSMingming Cao 	err = ext4_ext_get_access(handle, inode, path + depth);
37820031462bSMingming Cao 	if (err)
37830031462bSMingming Cao 		goto out;
37840031462bSMingming Cao 	/* first mark the extent as initialized */
37850031462bSMingming Cao 	ext4_ext_mark_initialized(ex);
37860031462bSMingming Cao 
3787197217a5SYongqiang Yang 	/* note: ext4_ext_correct_indexes() isn't needed here because
3788197217a5SYongqiang Yang 	 * borders are not changed
37890031462bSMingming Cao 	 */
3790ecb94f5fSTheodore Ts'o 	ext4_ext_try_to_merge(handle, inode, path, ex);
3791197217a5SYongqiang Yang 
37920031462bSMingming Cao 	/* Mark modified extent as dirty */
3793ecb94f5fSTheodore Ts'o 	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
37940031462bSMingming Cao out:
37950031462bSMingming Cao 	ext4_ext_show_leaf(inode, path);
37960031462bSMingming Cao 	return err;
37970031462bSMingming Cao }
37980031462bSMingming Cao 
3799515f41c3SAneesh Kumar K.V static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3800515f41c3SAneesh Kumar K.V 			sector_t block, int count)
3801515f41c3SAneesh Kumar K.V {
3802515f41c3SAneesh Kumar K.V 	int i;
3803515f41c3SAneesh Kumar K.V 	for (i = 0; i < count; i++)
3804515f41c3SAneesh Kumar K.V 		unmap_underlying_metadata(bdev, block + i);
3805515f41c3SAneesh Kumar K.V }
3806515f41c3SAneesh Kumar K.V 
380758590b06STheodore Ts'o /*
380858590b06STheodore Ts'o  * Handle EOFBLOCKS_FL flag, clearing it if necessary
380958590b06STheodore Ts'o  */
381058590b06STheodore Ts'o static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3811d002ebf1SEric Sandeen 			      ext4_lblk_t lblk,
381258590b06STheodore Ts'o 			      struct ext4_ext_path *path,
381358590b06STheodore Ts'o 			      unsigned int len)
381458590b06STheodore Ts'o {
381558590b06STheodore Ts'o 	int i, depth;
381658590b06STheodore Ts'o 	struct ext4_extent_header *eh;
381765922cb5SSergey Senozhatsky 	struct ext4_extent *last_ex;
381858590b06STheodore Ts'o 
381958590b06STheodore Ts'o 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
382058590b06STheodore Ts'o 		return 0;
382158590b06STheodore Ts'o 
382258590b06STheodore Ts'o 	depth = ext_depth(inode);
382358590b06STheodore Ts'o 	eh = path[depth].p_hdr;
382458590b06STheodore Ts'o 
3825afcff5d8SLukas Czerner 	/*
3826afcff5d8SLukas Czerner 	 * We're going to remove EOFBLOCKS_FL entirely in the future, so we
3827afcff5d8SLukas Czerner 	 * do not care about this case anymore. Simply remove the flag
3828afcff5d8SLukas Czerner 	 * if there are no extents.
3829afcff5d8SLukas Czerner 	 */
3830afcff5d8SLukas Czerner 	if (unlikely(!eh->eh_entries))
3831afcff5d8SLukas Czerner 		goto out;
383258590b06STheodore Ts'o 	last_ex = EXT_LAST_EXTENT(eh);
383358590b06STheodore Ts'o 	/*
383458590b06STheodore Ts'o 	 * We should clear the EOFBLOCKS_FL flag if we are writing the
383558590b06STheodore Ts'o 	 * last block in the last extent in the file.  We test this by
383658590b06STheodore Ts'o 	 * first checking to see if the caller to
383758590b06STheodore Ts'o 	 * ext4_ext_get_blocks() was interested in the last block (or
383858590b06STheodore Ts'o 	 * a block beyond the last block) in the current extent.  If
383958590b06STheodore Ts'o 	 * this turns out to be false, we can bail out from this
384058590b06STheodore Ts'o 	 * function immediately.
384158590b06STheodore Ts'o 	 */
3842d002ebf1SEric Sandeen 	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
384358590b06STheodore Ts'o 	    ext4_ext_get_actual_len(last_ex))
384458590b06STheodore Ts'o 		return 0;
384558590b06STheodore Ts'o 	/*
384658590b06STheodore Ts'o 	 * If the caller does appear to be planning to write at or
384758590b06STheodore Ts'o 	 * beyond the end of the current extent, we then test to see
384858590b06STheodore Ts'o 	 * if the current extent is the last extent in the file, by
384958590b06STheodore Ts'o 	 * checking to make sure it was reached via the rightmost node
385058590b06STheodore Ts'o 	 * at each level of the tree.
385158590b06STheodore Ts'o 	 */
385258590b06STheodore Ts'o 	for (i = depth-1; i >= 0; i--)
385358590b06STheodore Ts'o 		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
385458590b06STheodore Ts'o 			return 0;
3855afcff5d8SLukas Czerner out:
385658590b06STheodore Ts'o 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
385758590b06STheodore Ts'o 	return ext4_mark_inode_dirty(handle, inode);
385858590b06STheodore Ts'o }
385958590b06STheodore Ts'o 
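/*
 * Editorial sketch (not part of extents.c): the rightmost-path test in
 * check_eofblocks_fl() above, in isolation.  An extent is the last one
 * in the file iff every level of 'path' points at the last index of its
 * node.  Hypothetical standalone form of the same loop:
 */
static inline int ext4_path_is_rightmost(struct ext4_ext_path *path, int depth)
{
	int i;

	for (i = depth - 1; i >= 0; i--)
		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
			return 0;
	return 1;
}
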
38607b415bf6SAditya Kali /**
38617b415bf6SAditya Kali  * ext4_find_delalloc_range: find a delayed-allocated block in the given range.
38627b415bf6SAditya Kali  *
38637d1b1fbcSZheng Liu  * Return 1 if there is a delalloc block in the range, otherwise 0.
38647b415bf6SAditya Kali  */
3865f7fec032SZheng Liu int ext4_find_delalloc_range(struct inode *inode,
38667b415bf6SAditya Kali 			     ext4_lblk_t lblk_start,
38677d1b1fbcSZheng Liu 			     ext4_lblk_t lblk_end)
38687b415bf6SAditya Kali {
38697d1b1fbcSZheng Liu 	struct extent_status es;
38707b415bf6SAditya Kali 
3871e30b5dcaSYan, Zheng 	ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
387206b0c886SZheng Liu 	if (es.es_len == 0)
38737d1b1fbcSZheng Liu 		return 0; /* there is no delayed extent in this tree */
387406b0c886SZheng Liu 	else if (es.es_lblk <= lblk_start &&
387506b0c886SZheng Liu 		 lblk_start < es.es_lblk + es.es_len)
38767b415bf6SAditya Kali 		return 1;
387706b0c886SZheng Liu 	else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
38787d1b1fbcSZheng Liu 		return 1;
38797b415bf6SAditya Kali 	else
38807b415bf6SAditya Kali 		return 0;
38817b415bf6SAditya Kali }
38827b415bf6SAditya Kali 
38837d1b1fbcSZheng Liu int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
38847b415bf6SAditya Kali {
38857b415bf6SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
38867b415bf6SAditya Kali 	ext4_lblk_t lblk_start, lblk_end;
3887f5a44db5STheodore Ts'o 	lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
38887b415bf6SAditya Kali 	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
38897b415bf6SAditya Kali 
38907d1b1fbcSZheng Liu 	return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
38917b415bf6SAditya Kali }
38927b415bf6SAditya Kali 
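/*
 * Editorial sketch (not part of extents.c): the overlap test performed
 * by ext4_find_delalloc_range() above, reduced to plain interval
 * arithmetic between a found extent [es_lblk, es_lblk + es_len) and the
 * query range [lblk_start, lblk_end].  Hypothetical standalone helper.
 */
static inline int delalloc_ranges_overlap(unsigned int es_lblk,
					  unsigned int es_len,
					  unsigned int lblk_start,
					  unsigned int lblk_end)
{
	if (es_len == 0)
		return 0;	/* no delayed extent found at all */
	if (es_lblk <= lblk_start && lblk_start < es_lblk + es_len)
		return 1;	/* extent covers the start of the range */
	if (lblk_start <= es_lblk && es_lblk <= lblk_end)
		return 1;	/* extent begins inside the range */
	return 0;
}
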
38937b415bf6SAditya Kali /**
38947b415bf6SAditya Kali  * Determines how many complete clusters (out of those specified by the 'map')
38957b415bf6SAditya Kali  * are under delalloc and were reserved quota for.
38967b415bf6SAditya Kali  * This function is called when we are writing out the blocks that were
38977b415bf6SAditya Kali  * originally written with their allocation delayed, but then the space was
38987b415bf6SAditya Kali  * allocated using fallocate() before the delayed allocation could be resolved.
38997b415bf6SAditya Kali  * The cases to look for are:
39007b415bf6SAditya Kali  * ('=' indicates delayed allocated blocks
39017b415bf6SAditya Kali  *  '-' indicates non-delayed allocated blocks)
39027b415bf6SAditya Kali  * (a) partial clusters towards beginning and/or end outside of allocated range
39037b415bf6SAditya Kali  *     are not delalloc'ed.
39047b415bf6SAditya Kali  *	Ex:
39057b415bf6SAditya Kali  *	|----c---=|====c====|====c====|===-c----|
39067b415bf6SAditya Kali  *	         |++++++ allocated ++++++|
39077b415bf6SAditya Kali  *	==> 4 complete clusters in above example
39087b415bf6SAditya Kali  *
39097b415bf6SAditya Kali  * (b) partial cluster (outside of allocated range) towards either end is
39107b415bf6SAditya Kali  *     marked for delayed allocation. In this case, we will exclude that
39117b415bf6SAditya Kali  *     cluster.
39127b415bf6SAditya Kali  *	Ex:
39137b415bf6SAditya Kali  *	|----====c========|========c========|
39147b415bf6SAditya Kali  *	     |++++++ allocated ++++++|
39157b415bf6SAditya Kali  *	==> 1 complete cluster in above example
39167b415bf6SAditya Kali  *
39177b415bf6SAditya Kali  *	Ex:
39187b415bf6SAditya Kali  *	|================c================|
39197b415bf6SAditya Kali  *            |++++++ allocated ++++++|
39207b415bf6SAditya Kali  *	==> 0 complete clusters in above example
39217b415bf6SAditya Kali  *
39227b415bf6SAditya Kali  * The ext4_da_update_reserve_space will be called only if we
39237b415bf6SAditya Kali  * determine here that there were some "entire" clusters that span
39247b415bf6SAditya Kali  * this 'allocated' range.
39257b415bf6SAditya Kali  * In the non-bigalloc case, this function will just end up returning num_blks
39267b415bf6SAditya Kali  * without ever calling ext4_find_delalloc_range.
39277b415bf6SAditya Kali  */
39287b415bf6SAditya Kali static unsigned int
39297b415bf6SAditya Kali get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
39307b415bf6SAditya Kali 			   unsigned int num_blks)
39317b415bf6SAditya Kali {
39327b415bf6SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
39337b415bf6SAditya Kali 	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
39347b415bf6SAditya Kali 	ext4_lblk_t lblk_from, lblk_to, c_offset;
39357b415bf6SAditya Kali 	unsigned int allocated_clusters = 0;
39367b415bf6SAditya Kali 
39377b415bf6SAditya Kali 	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
39387b415bf6SAditya Kali 	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
39397b415bf6SAditya Kali 
39407b415bf6SAditya Kali 	/* max possible clusters for this allocation */
39417b415bf6SAditya Kali 	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
39427b415bf6SAditya Kali 
3943d8990240SAditya Kali 	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3944d8990240SAditya Kali 
39457b415bf6SAditya Kali 	/* Check towards left side */
3946f5a44db5STheodore Ts'o 	c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
39477b415bf6SAditya Kali 	if (c_offset) {
3948f5a44db5STheodore Ts'o 		lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
39497b415bf6SAditya Kali 		lblk_to = lblk_from + c_offset - 1;
39507b415bf6SAditya Kali 
39517d1b1fbcSZheng Liu 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
39527b415bf6SAditya Kali 			allocated_clusters--;
39537b415bf6SAditya Kali 	}
39547b415bf6SAditya Kali 
39557b415bf6SAditya Kali 	/* Now check towards right. */
3956f5a44db5STheodore Ts'o 	c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
39577b415bf6SAditya Kali 	if (allocated_clusters && c_offset) {
39587b415bf6SAditya Kali 		lblk_from = lblk_start + num_blks;
39597b415bf6SAditya Kali 		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
39607b415bf6SAditya Kali 
39617d1b1fbcSZheng Liu 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
39627b415bf6SAditya Kali 			allocated_clusters--;
39637b415bf6SAditya Kali 	}
39647b415bf6SAditya Kali 
39657b415bf6SAditya Kali 	return allocated_clusters;
39667b415bf6SAditya Kali }
39677b415bf6SAditya Kali 
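/*
 * Editorial sketch (not part of extents.c): the candidate-cluster count
 * computed above, with a worked example.  For power-of-two clusters of
 * (1 << cluster_bits) blocks, e.g. cluster_bits == 2 (4 blocks/cluster),
 * lblk_start == 6 and num_blks == 17 span blocks 6..22, i.e. clusters
 * 1..5, giving 5 candidate clusters before the partial-end adjustments.
 */
static inline unsigned int candidate_clusters(unsigned int lblk_start,
					      unsigned int num_blks,
					      unsigned int cluster_bits)
{
	unsigned int first = lblk_start >> cluster_bits;
	unsigned int last = (lblk_start + num_blks - 1) >> cluster_bits;

	return last - first + 1;
}
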
39680031462bSMingming Cao static int
3969b8a86845SLukas Czerner ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode,
3970b8a86845SLukas Czerner 			struct ext4_map_blocks *map,
3971b8a86845SLukas Czerner 			struct ext4_ext_path *path, int flags,
3972b8a86845SLukas Czerner 			unsigned int allocated, ext4_fsblk_t newblock)
3973b8a86845SLukas Czerner {
3974b8a86845SLukas Czerner 	int ret = 0;
3975b8a86845SLukas Czerner 	int err = 0;
3976b8a86845SLukas Czerner 
3977b8a86845SLukas Czerner 	/*
3978b8a86845SLukas Czerner 	 * Make sure that the extent is no bigger than we support with
3979556615dcSLukas Czerner 	 * unwritten extents
3980b8a86845SLukas Czerner 	 */
3981556615dcSLukas Czerner 	if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3982556615dcSLukas Czerner 		map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3983b8a86845SLukas Czerner 
3984b8a86845SLukas Czerner 	ret = ext4_convert_initialized_extents(handle, inode, map,
3985b8a86845SLukas Czerner 						path);
3986b8a86845SLukas Czerner 	if (ret >= 0) {
3987b8a86845SLukas Czerner 		ext4_update_inode_fsync_trans(handle, inode, 1);
3988b8a86845SLukas Czerner 		err = check_eofblocks_fl(handle, inode, map->m_lblk,
3989b8a86845SLukas Czerner 					 path, map->m_len);
3990b8a86845SLukas Czerner 	} else
3991b8a86845SLukas Czerner 		err = ret;
3992b8a86845SLukas Czerner 	map->m_flags |= EXT4_MAP_UNWRITTEN;
3993b8a86845SLukas Czerner 	if (allocated > map->m_len)
3994b8a86845SLukas Czerner 		allocated = map->m_len;
3995b8a86845SLukas Czerner 	map->m_len = allocated;
3996b8a86845SLukas Czerner 
3997b8a86845SLukas Czerner 	return err ? err : allocated;
3998b8a86845SLukas Czerner }
3999b8a86845SLukas Czerner 
4000b8a86845SLukas Czerner static int
4001556615dcSLukas Czerner ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
4002e35fd660STheodore Ts'o 			struct ext4_map_blocks *map,
40030031462bSMingming Cao 			struct ext4_ext_path *path, int flags,
4004e35fd660STheodore Ts'o 			unsigned int allocated, ext4_fsblk_t newblock)
40050031462bSMingming Cao {
40060031462bSMingming Cao 	int ret = 0;
40070031462bSMingming Cao 	int err = 0;
4008f45ee3a1SDmitry Monakhov 	ext4_io_end_t *io = ext4_inode_aio(inode);
40090031462bSMingming Cao 
4010556615dcSLukas Czerner 	ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
401188635ca2SZheng Liu 		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
4012e35fd660STheodore Ts'o 		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
40130031462bSMingming Cao 		  flags, allocated);
40140031462bSMingming Cao 	ext4_ext_show_leaf(inode, path);
40150031462bSMingming Cao 
401627dd4385SLukas Czerner 	/*
4017556615dcSLukas Czerner 	 * When writing into unwritten space, we should not fail to
401827dd4385SLukas Czerner 	 * allocate metadata blocks for the new extent block if needed.
401927dd4385SLukas Czerner 	 */
402027dd4385SLukas Czerner 	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
402127dd4385SLukas Czerner 
4022556615dcSLukas Czerner 	trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
4023b5645534SZheng Liu 						    allocated, newblock);
4024d8990240SAditya Kali 
4025c7064ef1SJiaying Zhang 	/* get_block() before submitting the IO: split the extent */
4026c8b459f4SLukas Czerner 	if (flags & EXT4_GET_BLOCKS_PRE_IO) {
4027b8a86845SLukas Czerner 		ret = ext4_split_convert_extents(handle, inode, map,
4028b8a86845SLukas Czerner 					 path, flags | EXT4_GET_BLOCKS_CONVERT);
402982e54229SDmitry Monakhov 		if (ret <= 0)
403082e54229SDmitry Monakhov 			goto out;
40315f524950SMingming 		/*
40325f524950SMingming 		 * Flag the inode (non-AIO case) or the end_io struct (AIO
403325985edcSLucas De Marchi 		 * case) so that this IO is converted to written when it
40345f524950SMingming 		 * completes
40355f524950SMingming 		 */
40360edeb71dSTao Ma 		if (io)
40370edeb71dSTao Ma 			ext4_set_io_unwritten_flag(inode, io);
40380edeb71dSTao Ma 		else
403919f5fb7aSTheodore Ts'o 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
4040a25a4e1aSZheng Liu 		map->m_flags |= EXT4_MAP_UNWRITTEN;
40410031462bSMingming Cao 		goto out;
40420031462bSMingming Cao 	}
4043c7064ef1SJiaying Zhang 	/* IO end_io complete, convert the filled extent to written */
4044c8b459f4SLukas Czerner 	if (flags & EXT4_GET_BLOCKS_CONVERT) {
4045dee1f973SDmitry Monakhov 		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
40460031462bSMingming Cao 							path);
404758590b06STheodore Ts'o 		if (ret >= 0) {
4048b436b9beSJan Kara 			ext4_update_inode_fsync_trans(handle, inode, 1);
4049d002ebf1SEric Sandeen 			err = check_eofblocks_fl(handle, inode, map->m_lblk,
4050d002ebf1SEric Sandeen 						 path, map->m_len);
405158590b06STheodore Ts'o 		} else
405258590b06STheodore Ts'o 			err = ret;
4053cdee7843SZheng Liu 		map->m_flags |= EXT4_MAP_MAPPED;
405415cc1767SEric Whitney 		map->m_pblk = newblock;
4055cdee7843SZheng Liu 		if (allocated > map->m_len)
4056cdee7843SZheng Liu 			allocated = map->m_len;
4057cdee7843SZheng Liu 		map->m_len = allocated;
40580031462bSMingming Cao 		goto out2;
40590031462bSMingming Cao 	}
40600031462bSMingming Cao 	/* buffered IO case */
40610031462bSMingming Cao 	/*
40620031462bSMingming Cao 	 * repeated fallocate creation request:
40630031462bSMingming Cao 	 * we already have an unwritten extent
40640031462bSMingming Cao 	 */
4065556615dcSLukas Czerner 	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4066a25a4e1aSZheng Liu 		map->m_flags |= EXT4_MAP_UNWRITTEN;
40670031462bSMingming Cao 		goto map_out;
4068a25a4e1aSZheng Liu 	}
40690031462bSMingming Cao 
40700031462bSMingming Cao 	/* buffered READ or buffered write_begin() lookup */
40710031462bSMingming Cao 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
40720031462bSMingming Cao 		/*
40730031462bSMingming Cao 		 * We have blocks reserved already.  We
40740031462bSMingming Cao 		 * return allocated blocks so that delalloc
40750031462bSMingming Cao 		 * won't do block reservation for us.  But
40760031462bSMingming Cao 		 * the buffer head will be unmapped so that
40770031462bSMingming Cao 		 * a read from the block returns 0s.
40780031462bSMingming Cao 		 */
4079e35fd660STheodore Ts'o 		map->m_flags |= EXT4_MAP_UNWRITTEN;
40800031462bSMingming Cao 		goto out1;
40810031462bSMingming Cao 	}
40820031462bSMingming Cao 
40830031462bSMingming Cao 	/* buffered write, writepage time, convert*/
408427dd4385SLukas Czerner 	ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
4085a4e5d88bSDmitry Monakhov 	if (ret >= 0)
4086b436b9beSJan Kara 		ext4_update_inode_fsync_trans(handle, inode, 1);
40870031462bSMingming Cao out:
40880031462bSMingming Cao 	if (ret <= 0) {
40890031462bSMingming Cao 		err = ret;
40900031462bSMingming Cao 		goto out2;
40910031462bSMingming Cao 	} else
40920031462bSMingming Cao 		allocated = ret;
4093e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_NEW;
4094515f41c3SAneesh Kumar K.V 	/*
4095515f41c3SAneesh Kumar K.V 	 * if we allocated more blocks than requested,
4096515f41c3SAneesh Kumar K.V 	 * we need to make sure we unmap the extra blocks
4097515f41c3SAneesh Kumar K.V 	 * allocated. The blocks actually needed will get
4098515f41c3SAneesh Kumar K.V 	 * unmapped later when we find the buffer_head marked
4099515f41c3SAneesh Kumar K.V 	 * new.
4100515f41c3SAneesh Kumar K.V 	 */
4101e35fd660STheodore Ts'o 	if (allocated > map->m_len) {
4102515f41c3SAneesh Kumar K.V 		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
4103e35fd660STheodore Ts'o 					newblock + map->m_len,
4104e35fd660STheodore Ts'o 					allocated - map->m_len);
4105e35fd660STheodore Ts'o 		allocated = map->m_len;
4106515f41c3SAneesh Kumar K.V 	}
41073a225670SZheng Liu 	map->m_len = allocated;
41085f634d06SAneesh Kumar K.V 
41095f634d06SAneesh Kumar K.V 	/*
41105f634d06SAneesh Kumar K.V 	 * If we have done fallocate at an offset that is already
41115f634d06SAneesh Kumar K.V 	 * delayed allocated, we would have block reservation
41125f634d06SAneesh Kumar K.V 	 * and quota reservation done in the delayed write path.
41135f634d06SAneesh Kumar K.V 	 * But fallocate would have already updated the quota and block
41145f634d06SAneesh Kumar K.V 	 * count for this offset. So cancel these reservations.
41155f634d06SAneesh Kumar K.V 	 */
41167b415bf6SAditya Kali 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
41177b415bf6SAditya Kali 		unsigned int reserved_clusters;
41187b415bf6SAditya Kali 		reserved_clusters = get_reserved_cluster_alloc(inode,
41197b415bf6SAditya Kali 				map->m_lblk, map->m_len);
41207b415bf6SAditya Kali 		if (reserved_clusters)
41217b415bf6SAditya Kali 			ext4_da_update_reserve_space(inode,
41227b415bf6SAditya Kali 						     reserved_clusters,
41237b415bf6SAditya Kali 						     0);
41247b415bf6SAditya Kali 	}
41255f634d06SAneesh Kumar K.V 
41260031462bSMingming Cao map_out:
4127e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_MAPPED;
4128a4e5d88bSDmitry Monakhov 	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
4129a4e5d88bSDmitry Monakhov 		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
4130a4e5d88bSDmitry Monakhov 					 map->m_len);
4131a4e5d88bSDmitry Monakhov 		if (err < 0)
4132a4e5d88bSDmitry Monakhov 			goto out2;
4133a4e5d88bSDmitry Monakhov 	}
41340031462bSMingming Cao out1:
4135e35fd660STheodore Ts'o 	if (allocated > map->m_len)
4136e35fd660STheodore Ts'o 		allocated = map->m_len;
41370031462bSMingming Cao 	ext4_ext_show_leaf(inode, path);
4138e35fd660STheodore Ts'o 	map->m_pblk = newblock;
4139e35fd660STheodore Ts'o 	map->m_len = allocated;
41400031462bSMingming Cao out2:
41410031462bSMingming Cao 	return err ? err : allocated;
41420031462bSMingming Cao }
414358590b06STheodore Ts'o 
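/*
 * Editorial summary (not part of extents.c) of the flag dispatch in
 * ext4_ext_handle_unwritten_extents() above:
 *   EXT4_GET_BLOCKS_PRE_IO      split before DIO submit, stay unwritten
 *   EXT4_GET_BLOCKS_CONVERT     end_io path: convert filled part to written
 *   EXT4_GET_BLOCKS_UNWRIT_EXT  repeated fallocate: keep unwritten mapping
 *   !EXT4_GET_BLOCKS_CREATE     lookup only: report unwritten and return
 *   otherwise                   buffered write: convert to initialized
 */
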
41440031462bSMingming Cao /*
41454d33b1efSTheodore Ts'o  * get_implied_cluster_alloc - check to see if the requested
41464d33b1efSTheodore Ts'o  * allocation (in the map structure) overlaps with a cluster already
41474d33b1efSTheodore Ts'o  * allocated in an extent.
4148d8990240SAditya Kali  *	@sb	The filesystem superblock structure
41494d33b1efSTheodore Ts'o  *	@map	The requested lblk->pblk mapping
41504d33b1efSTheodore Ts'o  *	@ex	The extent structure which might contain an implied
41514d33b1efSTheodore Ts'o  *			cluster allocation
41524d33b1efSTheodore Ts'o  *
41534d33b1efSTheodore Ts'o  * This function is called by ext4_ext_map_blocks() after we failed to
41544d33b1efSTheodore Ts'o  * find blocks that were already in the inode's extent tree.  Hence,
41554d33b1efSTheodore Ts'o  * we know that the beginning of the requested region cannot overlap
41564d33b1efSTheodore Ts'o  * the extent from the inode's extent tree.  There are three cases we
41574d33b1efSTheodore Ts'o  * want to catch.  The first is this case:
41584d33b1efSTheodore Ts'o  *
41594d33b1efSTheodore Ts'o  *		 |--- cluster # N--|
41604d33b1efSTheodore Ts'o  *    |--- extent ---|	|---- requested region ---|
41614d33b1efSTheodore Ts'o  *			|==========|
41624d33b1efSTheodore Ts'o  *
41634d33b1efSTheodore Ts'o  * The second case that we need to test for is this one:
41644d33b1efSTheodore Ts'o  *
41654d33b1efSTheodore Ts'o  *   |--------- cluster # N ----------------|
41664d33b1efSTheodore Ts'o  *	   |--- requested region --|   |------- extent ----|
41674d33b1efSTheodore Ts'o  *	   |=======================|
41684d33b1efSTheodore Ts'o  *
41694d33b1efSTheodore Ts'o  * The third case is when the requested region lies between two extents
41704d33b1efSTheodore Ts'o  * within the same cluster:
41714d33b1efSTheodore Ts'o  *          |------------- cluster # N-------------|
41724d33b1efSTheodore Ts'o  * |----- ex -----|                  |---- ex_right ----|
41734d33b1efSTheodore Ts'o  *                  |------ requested region ------|
41744d33b1efSTheodore Ts'o  *                  |================|
41754d33b1efSTheodore Ts'o  *
41764d33b1efSTheodore Ts'o  * In each of the above cases, we need to set map->m_pblk and
41774d33b1efSTheodore Ts'o  * map->m_len so that they correspond to the extent labelled
41784d33b1efSTheodore Ts'o  * "|====|" from cluster #N, since it is already in use for data in
41794d33b1efSTheodore Ts'o  * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
41804d33b1efSTheodore Ts'o  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
41814d33b1efSTheodore Ts'o  * as a new "allocated" block region.  Otherwise, we will return 0 and
41824d33b1efSTheodore Ts'o  * ext4_ext_map_blocks() will then allocate one or more new clusters
41834d33b1efSTheodore Ts'o  * by calling ext4_mb_new_blocks().
41844d33b1efSTheodore Ts'o  */
4185d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb,
41864d33b1efSTheodore Ts'o 				     struct ext4_map_blocks *map,
41874d33b1efSTheodore Ts'o 				     struct ext4_extent *ex,
41884d33b1efSTheodore Ts'o 				     struct ext4_ext_path *path)
41894d33b1efSTheodore Ts'o {
4190d8990240SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4191f5a44db5STheodore Ts'o 	ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
41924d33b1efSTheodore Ts'o 	ext4_lblk_t ex_cluster_start, ex_cluster_end;
419314d7f3efSCurt Wohlgemuth 	ext4_lblk_t rr_cluster_start;
41944d33b1efSTheodore Ts'o 	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
41954d33b1efSTheodore Ts'o 	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
41964d33b1efSTheodore Ts'o 	unsigned short ee_len = ext4_ext_get_actual_len(ex);
41974d33b1efSTheodore Ts'o 
41984d33b1efSTheodore Ts'o 	/* The extent passed in that we are trying to match */
41994d33b1efSTheodore Ts'o 	ex_cluster_start = EXT4_B2C(sbi, ee_block);
42004d33b1efSTheodore Ts'o 	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
42014d33b1efSTheodore Ts'o 
42024d33b1efSTheodore Ts'o 	/* The requested region passed into ext4_map_blocks() */
42034d33b1efSTheodore Ts'o 	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
42044d33b1efSTheodore Ts'o 
42054d33b1efSTheodore Ts'o 	if ((rr_cluster_start == ex_cluster_end) ||
42064d33b1efSTheodore Ts'o 	    (rr_cluster_start == ex_cluster_start)) {
42074d33b1efSTheodore Ts'o 		if (rr_cluster_start == ex_cluster_end)
42084d33b1efSTheodore Ts'o 			ee_start += ee_len - 1;
4209f5a44db5STheodore Ts'o 		map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
42104d33b1efSTheodore Ts'o 		map->m_len = min(map->m_len,
42114d33b1efSTheodore Ts'o 				 (unsigned) sbi->s_cluster_ratio - c_offset);
42124d33b1efSTheodore Ts'o 		/*
42134d33b1efSTheodore Ts'o 		 * Check for and handle this case:
42144d33b1efSTheodore Ts'o 		 *
42154d33b1efSTheodore Ts'o 		 *   |--------- cluster # N-------------|
42164d33b1efSTheodore Ts'o 		 *		       |------- extent ----|
42174d33b1efSTheodore Ts'o 		 *	   |--- requested region ---|
42184d33b1efSTheodore Ts'o 		 *	   |===========|
42194d33b1efSTheodore Ts'o 		 */
42204d33b1efSTheodore Ts'o 
42214d33b1efSTheodore Ts'o 		if (map->m_lblk < ee_block)
42224d33b1efSTheodore Ts'o 			map->m_len = min(map->m_len, ee_block - map->m_lblk);
42234d33b1efSTheodore Ts'o 
42244d33b1efSTheodore Ts'o 		/*
42254d33b1efSTheodore Ts'o 		 * Check for the case where there is already another allocated
42264d33b1efSTheodore Ts'o 		 * block to the right of 'ex' but before the end of the cluster.
42274d33b1efSTheodore Ts'o 		 *
42284d33b1efSTheodore Ts'o 		 *          |------------- cluster # N-------------|
42294d33b1efSTheodore Ts'o 		 * |----- ex -----|                  |---- ex_right ----|
42304d33b1efSTheodore Ts'o 		 *                  |------ requested region ------|
42314d33b1efSTheodore Ts'o 		 *                  |================|
42324d33b1efSTheodore Ts'o 		 */
42334d33b1efSTheodore Ts'o 		if (map->m_lblk > ee_block) {
42344d33b1efSTheodore Ts'o 			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
42354d33b1efSTheodore Ts'o 			map->m_len = min(map->m_len, next - map->m_lblk);
42364d33b1efSTheodore Ts'o 		}
4237d8990240SAditya Kali 
4238d8990240SAditya Kali 		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
42394d33b1efSTheodore Ts'o 		return 1;
42404d33b1efSTheodore Ts'o 	}
4241d8990240SAditya Kali 
4242d8990240SAditya Kali 	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
42434d33b1efSTheodore Ts'o 	return 0;
42444d33b1efSTheodore Ts'o }
42454d33b1efSTheodore Ts'o 
42464d33b1efSTheodore Ts'o 
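/*
 * Editorial sketch (not part of extents.c): the cluster arithmetic that
 * get_implied_cluster_alloc() relies on.  Assuming bigalloc clusters of
 * (1 << cluster_bits) blocks, EXT4_B2C, EXT4_LBLK_COFF and
 * EXT4_PBLK_CMASK reduce to the bit operations below.
 */
static inline unsigned int b2c(unsigned int blk, unsigned int cluster_bits)
{
	return blk >> cluster_bits;		/* block -> cluster number */
}

static inline unsigned int lblk_coff(unsigned int blk, unsigned int cluster_bits)
{
	return blk & ((1U << cluster_bits) - 1);	/* offset in cluster */
}

static inline unsigned long long pblk_cmask(unsigned long long pblk,
					    unsigned int cluster_bits)
{
	/* round a physical block down to the start of its cluster */
	return pblk & ~(unsigned long long)((1U << cluster_bits) - 1);
}
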
42474d33b1efSTheodore Ts'o /*
4248f5ab0d1fSMingming Cao  * Block allocation/map/preallocation routine for extent-based files
4249f5ab0d1fSMingming Cao  *
4250f5ab0d1fSMingming Cao  *
4251c278bfecSAneesh Kumar K.V  * Need to be called with
42520e855ac8SAneesh Kumar K.V  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
42530e855ac8SAneesh Kumar K.V  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4254f5ab0d1fSMingming Cao  *
4255f5ab0d1fSMingming Cao  * return > 0, number of blocks already mapped/allocated
4256f5ab0d1fSMingming Cao  *          if create == 0 and these are pre-allocated blocks
4257f5ab0d1fSMingming Cao  *          	buffer head is unmapped
4258f5ab0d1fSMingming Cao  *          otherwise blocks are mapped
4259f5ab0d1fSMingming Cao  *
4260f5ab0d1fSMingming Cao  * return = 0, if plain lookup failed (blocks have not been allocated)
4261f5ab0d1fSMingming Cao  *          buffer head is unmapped
4262f5ab0d1fSMingming Cao  *
4263f5ab0d1fSMingming Cao  * return < 0, error case.
4264c278bfecSAneesh Kumar K.V  */
4265e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4266e35fd660STheodore Ts'o 			struct ext4_map_blocks *map, int flags)
4267a86c6181SAlex Tomas {
4268a86c6181SAlex Tomas 	struct ext4_ext_path *path = NULL;
42694d33b1efSTheodore Ts'o 	struct ext4_extent newex, *ex, *ex2;
42704d33b1efSTheodore Ts'o 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
42710562e0baSJiaying Zhang 	ext4_fsblk_t newblock = 0;
4272ce37c429SEric Whitney 	int free_on_err = 0, err = 0, depth, ret;
42734d33b1efSTheodore Ts'o 	unsigned int allocated = 0, offset = 0;
427481fdbb4aSYongqiang Yang 	unsigned int allocated_clusters = 0;
4275c9de560dSAlex Tomas 	struct ext4_allocation_request ar;
4276f45ee3a1SDmitry Monakhov 	ext4_io_end_t *io = ext4_inode_aio(inode);
42774d33b1efSTheodore Ts'o 	ext4_lblk_t cluster_offset;
427882e54229SDmitry Monakhov 	int set_unwritten = 0;
4279a86c6181SAlex Tomas 
428084fe3befSMingming 	ext_debug("blocks %u/%u requested for inode %lu\n",
4281e35fd660STheodore Ts'o 		  map->m_lblk, map->m_len, inode->i_ino);
42820562e0baSJiaying Zhang 	trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4283a86c6181SAlex Tomas 
4284a86c6181SAlex Tomas 	/* find extent for this block */
4285107a7bd3STheodore Ts'o 	path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0);
4286a86c6181SAlex Tomas 	if (IS_ERR(path)) {
4287a86c6181SAlex Tomas 		err = PTR_ERR(path);
4288a86c6181SAlex Tomas 		path = NULL;
4289a86c6181SAlex Tomas 		goto out2;
4290a86c6181SAlex Tomas 	}
4291a86c6181SAlex Tomas 
4292a86c6181SAlex Tomas 	depth = ext_depth(inode);
4293a86c6181SAlex Tomas 
4294a86c6181SAlex Tomas 	/*
4295d0d856e8SRandy Dunlap 	 * a consistent leaf must not be empty;
4296d0d856e8SRandy Dunlap 	 * this situation is possible, though, _during_ tree modification;
4297a86c6181SAlex Tomas 	 * this is why an assert can't be put in ext4_ext_find_extent()
4298a86c6181SAlex Tomas 	 */
4299273df556SFrank Mayhar 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4300273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "bad extent address "
4301f70f362bSTheodore Ts'o 				 "lblock: %lu, depth: %d pblock %lld",
4302f70f362bSTheodore Ts'o 				 (unsigned long) map->m_lblk, depth,
4303f70f362bSTheodore Ts'o 				 path[depth].p_block);
4304034fb4c9SSurbhi Palande 		err = -EIO;
4305034fb4c9SSurbhi Palande 		goto out2;
4306034fb4c9SSurbhi Palande 	}
4307a86c6181SAlex Tomas 
43087e028976SAvantika Mathur 	ex = path[depth].p_ext;
43097e028976SAvantika Mathur 	if (ex) {
4310725d26d3SAneesh Kumar K.V 		ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4311bf89d16fSTheodore Ts'o 		ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4312a2df2a63SAmit Arora 		unsigned short ee_len;
4313471d4011SSuparna Bhattacharya 
4314b8a86845SLukas Czerner 
4315471d4011SSuparna Bhattacharya 		/*
4316556615dcSLukas Czerner 		 * unwritten extents are treated as holes, except that
431756055d3aSAmit Arora 		 * we split out initialized portions during a write.
4318471d4011SSuparna Bhattacharya 		 */
4319a2df2a63SAmit Arora 		ee_len = ext4_ext_get_actual_len(ex);
4320d8990240SAditya Kali 
4321d8990240SAditya Kali 		trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4322d8990240SAditya Kali 
4323d0d856e8SRandy Dunlap 		/* if the found extent covers the block, simply return it */
4324e35fd660STheodore Ts'o 		if (in_range(map->m_lblk, ee_block, ee_len)) {
4325e35fd660STheodore Ts'o 			newblock = map->m_lblk - ee_block + ee_start;
4326d0d856e8SRandy Dunlap 			/* number of remaining blocks in the extent */
4327e35fd660STheodore Ts'o 			allocated = ee_len - (map->m_lblk - ee_block);
4328e35fd660STheodore Ts'o 			ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
4329a86c6181SAlex Tomas 				  ee_block, ee_len, newblock);
433056055d3aSAmit Arora 
4331b8a86845SLukas Czerner 			/*
4332b8a86845SLukas Czerner 			 * If the extent is initialized check whether the
4333b8a86845SLukas Czerner 			 * caller wants to convert it to unwritten.
4334b8a86845SLukas Czerner 			 */
4335556615dcSLukas Czerner 			if ((!ext4_ext_is_unwritten(ex)) &&
4336b8a86845SLukas Czerner 			    (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4337b8a86845SLukas Czerner 				allocated = ext4_ext_convert_initialized_extent(
4338b8a86845SLukas Czerner 						handle, inode, map, path, flags,
4339b8a86845SLukas Czerner 						allocated, newblock);
4340b8a86845SLukas Czerner 				goto out2;
4341556615dcSLukas Czerner 			} else if (!ext4_ext_is_unwritten(ex))
4342a86c6181SAlex Tomas 				goto out;
434369eb33dcSZheng Liu 
4344556615dcSLukas Czerner 			ret = ext4_ext_handle_unwritten_extents(
4345e861304bSAllison Henderson 				handle, inode, map, path, flags,
4346e861304bSAllison Henderson 				allocated, newblock);
4347ce37c429SEric Whitney 			if (ret < 0)
4348ce37c429SEric Whitney 				err = ret;
4349ce37c429SEric Whitney 			else
4350ce37c429SEric Whitney 				allocated = ret;
435131cf0f2cSEric Whitney 			goto out2;
435256055d3aSAmit Arora 		}
4353a86c6181SAlex Tomas 	}
4354a86c6181SAlex Tomas 
43557b415bf6SAditya Kali 	if ((sbi->s_cluster_ratio > 1) &&
43567d1b1fbcSZheng Liu 	    ext4_find_delalloc_cluster(inode, map->m_lblk))
43577b415bf6SAditya Kali 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
43587b415bf6SAditya Kali 
4359a86c6181SAlex Tomas 	/*
4360d0d856e8SRandy Dunlap 	 * the requested block isn't allocated yet;
4361a86c6181SAlex Tomas 	 * we can't create it unless the create flag is set
4362a86c6181SAlex Tomas 	 */
4363c2177057STheodore Ts'o 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
436456055d3aSAmit Arora 		/*
436556055d3aSAmit Arora 		 * put the just-found gap into the cache to speed up
436656055d3aSAmit Arora 		 * subsequent requests
436756055d3aSAmit Arora 		 */
4368d100eef2SZheng Liu 		if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
4369e35fd660STheodore Ts'o 			ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4370a86c6181SAlex Tomas 		goto out2;
4371a86c6181SAlex Tomas 	}
43724d33b1efSTheodore Ts'o 
4373a86c6181SAlex Tomas 	/*
4374c2ea3fdeSTheodore Ts'o 	 * Okay, we need to do block allocation.
4375a86c6181SAlex Tomas 	 */
43767b415bf6SAditya Kali 	map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
43774d33b1efSTheodore Ts'o 	newex.ee_block = cpu_to_le32(map->m_lblk);
4378d0abafacSEric Whitney 	cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
43794d33b1efSTheodore Ts'o 
43804d33b1efSTheodore Ts'o 	/*
43814d33b1efSTheodore Ts'o 	 * If we are doing bigalloc, check to see if the extent returned
43824d33b1efSTheodore Ts'o 	 * by ext4_ext_find_extent() implies a cluster we can use.
43834d33b1efSTheodore Ts'o 	 */
43844d33b1efSTheodore Ts'o 	if (cluster_offset && ex &&
4385d8990240SAditya Kali 	    get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
43864d33b1efSTheodore Ts'o 		ar.len = allocated = map->m_len;
43874d33b1efSTheodore Ts'o 		newblock = map->m_pblk;
43887b415bf6SAditya Kali 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
43894d33b1efSTheodore Ts'o 		goto got_allocated_blocks;
43904d33b1efSTheodore Ts'o 	}
4391a86c6181SAlex Tomas 
4392c9de560dSAlex Tomas 	/* find neighbour allocated blocks */
4393e35fd660STheodore Ts'o 	ar.lleft = map->m_lblk;
4394c9de560dSAlex Tomas 	err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4395c9de560dSAlex Tomas 	if (err)
4396c9de560dSAlex Tomas 		goto out2;
4397e35fd660STheodore Ts'o 	ar.lright = map->m_lblk;
43984d33b1efSTheodore Ts'o 	ex2 = NULL;
43994d33b1efSTheodore Ts'o 	err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4400c9de560dSAlex Tomas 	if (err)
4401c9de560dSAlex Tomas 		goto out2;
440225d14f98SAmit Arora 
44034d33b1efSTheodore Ts'o 	/* Check if the extent after searching to the right implies a
44044d33b1efSTheodore Ts'o 	 * cluster we can use. */
44054d33b1efSTheodore Ts'o 	if ((sbi->s_cluster_ratio > 1) && ex2 &&
4406d8990240SAditya Kali 	    get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
44074d33b1efSTheodore Ts'o 		ar.len = allocated = map->m_len;
44084d33b1efSTheodore Ts'o 		newblock = map->m_pblk;
44097b415bf6SAditya Kali 		map->m_flags |= EXT4_MAP_FROM_CLUSTER;
44104d33b1efSTheodore Ts'o 		goto got_allocated_blocks;
44114d33b1efSTheodore Ts'o 	}
44124d33b1efSTheodore Ts'o 
4413749269faSAmit Arora 	/*
4414749269faSAmit Arora 	 * See if request is beyond maximum number of blocks we can have in
4415749269faSAmit Arora 	 * a single extent. For an initialized extent this limit is
4416556615dcSLukas Czerner 	 * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
4417556615dcSLukas Czerner 	 * EXT_UNWRITTEN_MAX_LEN.
4418749269faSAmit Arora 	 */
4419e35fd660STheodore Ts'o 	if (map->m_len > EXT_INIT_MAX_LEN &&
4420556615dcSLukas Czerner 	    !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4421e35fd660STheodore Ts'o 		map->m_len = EXT_INIT_MAX_LEN;
4422556615dcSLukas Czerner 	else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4423556615dcSLukas Czerner 		 (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4424556615dcSLukas Czerner 		map->m_len = EXT_UNWRITTEN_MAX_LEN;
4425749269faSAmit Arora 
4426e35fd660STheodore Ts'o 	/* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4427e35fd660STheodore Ts'o 	newex.ee_len = cpu_to_le16(map->m_len);
44284d33b1efSTheodore Ts'o 	err = ext4_ext_check_overlap(sbi, inode, &newex, path);
442925d14f98SAmit Arora 	if (err)
4430b939e376SAneesh Kumar K.V 		allocated = ext4_ext_get_actual_len(&newex);
443125d14f98SAmit Arora 	else
4432e35fd660STheodore Ts'o 		allocated = map->m_len;
4433c9de560dSAlex Tomas 
4434c9de560dSAlex Tomas 	/* allocate new block */
4435c9de560dSAlex Tomas 	ar.inode = inode;
4436e35fd660STheodore Ts'o 	ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4437e35fd660STheodore Ts'o 	ar.logical = map->m_lblk;
44384d33b1efSTheodore Ts'o 	/*
44394d33b1efSTheodore Ts'o 	 * We calculate the offset from the beginning of the cluster
44404d33b1efSTheodore Ts'o 	 * for the logical block number, since when we allocate a
44414d33b1efSTheodore Ts'o 	 * physical cluster, the physical block should start at the
44424d33b1efSTheodore Ts'o 	 * same offset from the beginning of the cluster.  This is
44434d33b1efSTheodore Ts'o 	 * needed so that future calls to get_implied_cluster_alloc()
44444d33b1efSTheodore Ts'o 	 * work correctly.
44454d33b1efSTheodore Ts'o 	 */
4446f5a44db5STheodore Ts'o 	offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
44474d33b1efSTheodore Ts'o 	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
44484d33b1efSTheodore Ts'o 	ar.goal -= offset;
44494d33b1efSTheodore Ts'o 	ar.logical -= offset;
4450c9de560dSAlex Tomas 	if (S_ISREG(inode->i_mode))
4451c9de560dSAlex Tomas 		ar.flags = EXT4_MB_HINT_DATA;
4452c9de560dSAlex Tomas 	else
4453c9de560dSAlex Tomas 		/* disable in-core preallocation for non-regular files */
4454c9de560dSAlex Tomas 		ar.flags = 0;
4455556b27abSVivek Haldar 	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4456556b27abSVivek Haldar 		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4457c9de560dSAlex Tomas 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
4458a86c6181SAlex Tomas 	if (!newblock)
4459a86c6181SAlex Tomas 		goto out2;
446084fe3befSMingming 	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4461498e5f24STheodore Ts'o 		  ar.goal, newblock, allocated);
44624d33b1efSTheodore Ts'o 	free_on_err = 1;
44637b415bf6SAditya Kali 	allocated_clusters = ar.len;
44644d33b1efSTheodore Ts'o 	ar.len = EXT4_C2B(sbi, ar.len) - offset;
44654d33b1efSTheodore Ts'o 	if (ar.len > allocated)
44664d33b1efSTheodore Ts'o 		ar.len = allocated;
4467a86c6181SAlex Tomas 
44684d33b1efSTheodore Ts'o got_allocated_blocks:
4469a86c6181SAlex Tomas 	/* try to insert new extent into found leaf and return */
44704d33b1efSTheodore Ts'o 	ext4_ext_store_pblock(&newex, newblock + offset);
4471c9de560dSAlex Tomas 	newex.ee_len = cpu_to_le16(ar.len);
4472556615dcSLukas Czerner 	/* Mark unwritten */
4473556615dcSLukas Czerner 	if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
4474556615dcSLukas Czerner 		ext4_ext_mark_unwritten(&newex);
4475a25a4e1aSZheng Liu 		map->m_flags |= EXT4_MAP_UNWRITTEN;
44768d5d02e6SMingming Cao 		/*
4477744692dcSJiaying Zhang 		 * An io_end structure is created for every IO write to an
4478556615dcSLukas Czerner 		 * unwritten extent. To avoid unnecessary conversion,
4479744692dcSJiaying Zhang 		 * here we flag only the IO that really needs the conversion.
44805f524950SMingming 		 * For the non-async direct IO case, flag the inode state
448125985edcSLucas De Marchi 		 * instead, so the conversion is performed when IO is done.
44828d5d02e6SMingming Cao 		 */
4483c8b459f4SLukas Czerner 		if (flags & EXT4_GET_BLOCKS_PRE_IO)
448482e54229SDmitry Monakhov 			set_unwritten = 1;
44858d5d02e6SMingming Cao 	}
4486c8d46e41SJiaying Zhang 
4487a4e5d88bSDmitry Monakhov 	err = 0;
4488a4e5d88bSDmitry Monakhov 	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4489a4e5d88bSDmitry Monakhov 		err = check_eofblocks_fl(handle, inode, map->m_lblk,
4490a4e5d88bSDmitry Monakhov 					 path, ar.len);
4491575a1d4bSJiaying Zhang 	if (!err)
4492575a1d4bSJiaying Zhang 		err = ext4_ext_insert_extent(handle, inode, path,
4493575a1d4bSJiaying Zhang 					     &newex, flags);
449482e54229SDmitry Monakhov 
449582e54229SDmitry Monakhov 	if (!err && set_unwritten) {
449682e54229SDmitry Monakhov 		if (io)
449782e54229SDmitry Monakhov 			ext4_set_io_unwritten_flag(inode, io);
449882e54229SDmitry Monakhov 		else
449982e54229SDmitry Monakhov 			ext4_set_inode_state(inode,
450082e54229SDmitry Monakhov 					     EXT4_STATE_DIO_UNWRITTEN);
450182e54229SDmitry Monakhov 	}
450282e54229SDmitry Monakhov 
45034d33b1efSTheodore Ts'o 	if (err && free_on_err) {
45047132de74SMaxim Patlasov 		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
45057132de74SMaxim Patlasov 			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4506315054f0SAlex Tomas 		/* free data blocks we just allocated */
4507c9de560dSAlex Tomas 		/* not a good idea to call discard here directly,
4508c9de560dSAlex Tomas 	 * but otherwise we'd need to call it on every free() */
4509c2ea3fdeSTheodore Ts'o 		ext4_discard_preallocations(inode);
4510c8e15130STheodore Ts'o 		ext4_free_blocks(handle, inode, NULL, newblock,
4511c8e15130STheodore Ts'o 				 EXT4_C2B(sbi, allocated_clusters), fb_flags);
4512a86c6181SAlex Tomas 		goto out2;
4513315054f0SAlex Tomas 	}
4514a86c6181SAlex Tomas 
4515a86c6181SAlex Tomas 	/* the previous routine could have used the block we allocated */
4516bf89d16fSTheodore Ts'o 	newblock = ext4_ext_pblock(&newex);
4517b939e376SAneesh Kumar K.V 	allocated = ext4_ext_get_actual_len(&newex);
4518e35fd660STheodore Ts'o 	if (allocated > map->m_len)
4519e35fd660STheodore Ts'o 		allocated = map->m_len;
4520e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_NEW;
4521a86c6181SAlex Tomas 
4522b436b9beSJan Kara 	/*
45235f634d06SAneesh Kumar K.V 	 * Update reserved blocks/metadata blocks after successful
45245f634d06SAneesh Kumar K.V 	 * block allocation which had been deferred till now.
45255f634d06SAneesh Kumar K.V 	 */
45267b415bf6SAditya Kali 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
452781fdbb4aSYongqiang Yang 		unsigned int reserved_clusters;
45287b415bf6SAditya Kali 		/*
452981fdbb4aSYongqiang Yang 		 * Check how many clusters we had reserved for this allocated range
45307b415bf6SAditya Kali 		 */
45317b415bf6SAditya Kali 		reserved_clusters = get_reserved_cluster_alloc(inode,
45327b415bf6SAditya Kali 						map->m_lblk, allocated);
45337b415bf6SAditya Kali 		if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
45347b415bf6SAditya Kali 			if (reserved_clusters) {
45357b415bf6SAditya Kali 				/*
45367b415bf6SAditya Kali 				 * We have clusters reserved for this range.
45377b415bf6SAditya Kali 				 * But since we are not doing actual allocation
45387b415bf6SAditya Kali 				 * and are simply using blocks from previously
45397b415bf6SAditya Kali 				 * allocated cluster, we should release the
45407b415bf6SAditya Kali 				 * reservation and not claim quota.
45417b415bf6SAditya Kali 				 */
45427b415bf6SAditya Kali 				ext4_da_update_reserve_space(inode,
45437b415bf6SAditya Kali 						reserved_clusters, 0);
45447b415bf6SAditya Kali 			}
45457b415bf6SAditya Kali 		} else {
45467b415bf6SAditya Kali 			BUG_ON(allocated_clusters < reserved_clusters);
45477b415bf6SAditya Kali 			if (reserved_clusters < allocated_clusters) {
45485356f261SAditya Kali 				struct ext4_inode_info *ei = EXT4_I(inode);
45497b415bf6SAditya Kali 				int reservation = allocated_clusters -
45507b415bf6SAditya Kali 						  reserved_clusters;
45517b415bf6SAditya Kali 				/*
45527b415bf6SAditya Kali 				 * It seems we claimed a few clusters outside of
45537b415bf6SAditya Kali 				 * the range of this allocation. We should give
45547b415bf6SAditya Kali 				 * it back to the reservation pool. This can
45557b415bf6SAditya Kali 				 * happen in the following case:
45567b415bf6SAditya Kali 				 *
45577b415bf6SAditya Kali 				 * * Suppose s_cluster_ratio is 4 (i.e., each
45587b415bf6SAditya Kali 				 *   cluster has 4 blocks). Thus, the clusters
45597b415bf6SAditya Kali 				 *   are [0-3],[4-7],[8-11]...
45607b415bf6SAditya Kali 				 * * First comes delayed allocation write for
45617b415bf6SAditya Kali 				 *   logical blocks 10 & 11. Since there were no
45627b415bf6SAditya Kali 				 *   previous delayed allocated blocks in the
45637b415bf6SAditya Kali 				 *   range [8-11], we would reserve 1 cluster
45647b415bf6SAditya Kali 				 *   for this write.
45657b415bf6SAditya Kali 				 * * Next comes write for logical blocks 3 to 8.
45667b415bf6SAditya Kali 				 *   In this case, we will reserve 2 clusters
45677b415bf6SAditya Kali 				 *   (for [0-3] and [4-7]; and not for [8-11] as
45687b415bf6SAditya Kali 				 *   that range already has delayed allocated blocks).
45697b415bf6SAditya Kali 				 *   Thus total reserved clusters now becomes 3.
45707b415bf6SAditya Kali 				 * * Now, during the delayed allocation writeout
45717b415bf6SAditya Kali 				 *   time, we will first write blocks [3-8] and
45727b415bf6SAditya Kali 				 *   allocate 3 clusters for writing these
45737b415bf6SAditya Kali 				 *   blocks. Also, we would claim all these
45747b415bf6SAditya Kali 				 *   three clusters above.
45757b415bf6SAditya Kali 				 * * Now when we come here to writeout the
45767b415bf6SAditya Kali 				 *   blocks [10-11], we would expect to claim
45777b415bf6SAditya Kali 				 *   the reservation of 1 cluster we had made
45787b415bf6SAditya Kali 				 *   (and we would claim it since there are no
45797b415bf6SAditya Kali 				 *   more delayed allocated blocks in the range
45807b415bf6SAditya Kali 				 *   [8-11]). But our reserved cluster count had
45817b415bf6SAditya Kali 				 *   already gone to 0.
45827b415bf6SAditya Kali 				 *
45837b415bf6SAditya Kali 				 *   Thus, at the final step above, when we determine
45847b415bf6SAditya Kali 				 *   that there are still some unwritten delayed
45857b415bf6SAditya Kali 				 *   allocated blocks outside of our current
45867b415bf6SAditya Kali 				 *   block range, we should increment the
45877b415bf6SAditya Kali 				 *   reserved clusters count so that when the
45887b415bf6SAditya Kali 				 *   remaining blocks finally get written, we
45897b415bf6SAditya Kali 				 *   could claim them.
45907b415bf6SAditya Kali 				 */
45915356f261SAditya Kali 				dquot_reserve_block(inode,
45925356f261SAditya Kali 						EXT4_C2B(sbi, reservation));
45935356f261SAditya Kali 				spin_lock(&ei->i_block_reservation_lock);
45945356f261SAditya Kali 				ei->i_reserved_data_blocks += reservation;
45955356f261SAditya Kali 				spin_unlock(&ei->i_block_reservation_lock);
45967b415bf6SAditya Kali 			}
4597232ec872SLukas Czerner 			/*
4598232ec872SLukas Czerner 			 * We will claim quota for all newly allocated blocks.
4599232ec872SLukas Czerner 			 * We're updating the reserved space *after* the
4600232ec872SLukas Czerner 			 * correction above so we do not accidentally free
4601232ec872SLukas Czerner 			 * all the metadata reservation because we might
4602232ec872SLukas Czerner 			 * actually need it later on.
4603232ec872SLukas Czerner 			 */
4604232ec872SLukas Czerner 			ext4_da_update_reserve_space(inode, allocated_clusters,
4605232ec872SLukas Czerner 							1);
46067b415bf6SAditya Kali 		}
46077b415bf6SAditya Kali 	}
46085f634d06SAneesh Kumar K.V 
46095f634d06SAneesh Kumar K.V 	/*
4610b436b9beSJan Kara 	 * Cache the extent and update transaction to commit on fdatasync only
4611556615dcSLukas Czerner 	 * when it is _not_ an unwritten extent.
4612b436b9beSJan Kara 	 */
4613556615dcSLukas Czerner 	if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4614b436b9beSJan Kara 		ext4_update_inode_fsync_trans(handle, inode, 1);
461569eb33dcSZheng Liu 	else
4616b436b9beSJan Kara 		ext4_update_inode_fsync_trans(handle, inode, 0);
4617a86c6181SAlex Tomas out:
4618e35fd660STheodore Ts'o 	if (allocated > map->m_len)
4619e35fd660STheodore Ts'o 		allocated = map->m_len;
4620a86c6181SAlex Tomas 	ext4_ext_show_leaf(inode, path);
4621e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_MAPPED;
4622e35fd660STheodore Ts'o 	map->m_pblk = newblock;
4623e35fd660STheodore Ts'o 	map->m_len = allocated;
4624a86c6181SAlex Tomas out2:
4625a86c6181SAlex Tomas 	if (path) {
4626a86c6181SAlex Tomas 		ext4_ext_drop_refs(path);
4627a86c6181SAlex Tomas 		kfree(path);
4628a86c6181SAlex Tomas 	}
4629e861304bSAllison Henderson 
463063b99968STheodore Ts'o 	trace_ext4_ext_map_blocks_exit(inode, flags, map,
463163b99968STheodore Ts'o 				       err ? err : allocated);
463263b99968STheodore Ts'o 	ext4_es_lru_add(inode);
46337877191cSLukas Czerner 	return err ? err : allocated;
4634a86c6181SAlex Tomas }
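
/*
 * Hedged sketch (not part of this file): how a hypothetical in-kernel
 * caller might act on the return convention documented above.  It
 * assumes the usual context described there -- a valid handle and
 * i_data_sem held for read, since no allocation is requested.
 */
static int __maybe_unused example_lookup_block(handle_t *handle,
					       struct inode *inode,
					       ext4_lblk_t lblk)
{
	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
	int ret = ext4_ext_map_blocks(handle, inode, &map, 0);

	if (ret > 0)		/* mapped: map.m_pblk/map.m_len are valid */
		return 0;
	if (ret == 0)		/* hole: the block has not been allocated */
		return -ENOENT;
	return ret;		/* ret < 0: propagate the error */
}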
4635a86c6181SAlex Tomas 
4636819c4920STheodore Ts'o void ext4_ext_truncate(handle_t *handle, struct inode *inode)
4637a86c6181SAlex Tomas {
4638a86c6181SAlex Tomas 	struct super_block *sb = inode->i_sb;
4639725d26d3SAneesh Kumar K.V 	ext4_lblk_t last_block;
4640a86c6181SAlex Tomas 	int err = 0;
4641a86c6181SAlex Tomas 
4642a86c6181SAlex Tomas 	/*
4643d0d856e8SRandy Dunlap 	 * TODO: optimization is possible here.
4644d0d856e8SRandy Dunlap 	 * Probably we need not scan at all,
4645d0d856e8SRandy Dunlap 	 * because page truncation is enough.
4646a86c6181SAlex Tomas 	 */
4647a86c6181SAlex Tomas 
4648a86c6181SAlex Tomas 	/* we have to know where to truncate from in the crash case */
4649a86c6181SAlex Tomas 	EXT4_I(inode)->i_disksize = inode->i_size;
4650a86c6181SAlex Tomas 	ext4_mark_inode_dirty(handle, inode);
4651a86c6181SAlex Tomas 
4652a86c6181SAlex Tomas 	last_block = (inode->i_size + sb->s_blocksize - 1)
4653a86c6181SAlex Tomas 			>> EXT4_BLOCK_SIZE_BITS(sb);
46548acd5e9bSTheodore Ts'o retry:
465551865fdaSZheng Liu 	err = ext4_es_remove_extent(inode, last_block,
465651865fdaSZheng Liu 				    EXT_MAX_BLOCKS - last_block);
465794eec0fcSTheodore Ts'o 	if (err == -ENOMEM) {
46588acd5e9bSTheodore Ts'o 		cond_resched();
46598acd5e9bSTheodore Ts'o 		congestion_wait(BLK_RW_ASYNC, HZ/50);
46608acd5e9bSTheodore Ts'o 		goto retry;
46618acd5e9bSTheodore Ts'o 	}
46628acd5e9bSTheodore Ts'o 	if (err) {
46638acd5e9bSTheodore Ts'o 		ext4_std_error(inode->i_sb, err);
46648acd5e9bSTheodore Ts'o 		return;
46658acd5e9bSTheodore Ts'o 	}
46665f95d21fSLukas Czerner 	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
46678acd5e9bSTheodore Ts'o 	ext4_std_error(inode->i_sb, err);
4668a86c6181SAlex Tomas }
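
/*
 * Worked example (illustrative, not kernel code) of the last_block
 * round-up in ext4_ext_truncate() above: with 4096-byte blocks
 * (blkbits == 12), i_size = 5000 still occupies block 1, so extent
 * removal must start at block 2, not at 5000 >> 12 == 1.
 */
static unsigned long long size_to_last_block(unsigned long long i_size,
					     unsigned int blkbits)
{
	return (i_size + (1ULL << blkbits) - 1) >> blkbits;
}
/* size_to_last_block(5000, 12) == 2 */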
4669a86c6181SAlex Tomas 
46700e8b6879SLukas Czerner static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4671c174e6d6SDmitry Monakhov 				  ext4_lblk_t len, loff_t new_size,
4672c174e6d6SDmitry Monakhov 				  int flags, int mode)
4673a2df2a63SAmit Arora {
4674496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
4675a2df2a63SAmit Arora 	handle_t *handle;
4676a2df2a63SAmit Arora 	int ret = 0;
4677a2df2a63SAmit Arora 	int ret2 = 0;
4678a2df2a63SAmit Arora 	int retries = 0;
46792ed88685STheodore Ts'o 	struct ext4_map_blocks map;
46800e8b6879SLukas Czerner 	unsigned int credits;
4681c174e6d6SDmitry Monakhov 	loff_t epos;
4682a2df2a63SAmit Arora 
46830e8b6879SLukas Czerner 	map.m_lblk = offset;
4684c174e6d6SDmitry Monakhov 	map.m_len = len;
46853c6fe770SGreg Harm 	/*
46863c6fe770SGreg Harm 	 * Don't normalize the request if it can fit in one extent so
46873c6fe770SGreg Harm 	 * that it doesn't get unnecessarily split into multiple
46883c6fe770SGreg Harm 	 * extents.
46893c6fe770SGreg Harm 	 */
4690556615dcSLukas Czerner 	if (len <= EXT_UNWRITTEN_MAX_LEN)
46913c6fe770SGreg Harm 		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
469260d4616fSDmitry Monakhov 
46930e8b6879SLukas Czerner 	/*
46940e8b6879SLukas Czerner 	 * credits to insert 1 extent into extent tree
46950e8b6879SLukas Czerner 	 */
46960e8b6879SLukas Czerner 	credits = ext4_chunk_trans_blocks(inode, len);
46970e8b6879SLukas Czerner 
4698a2df2a63SAmit Arora retry:
4699c174e6d6SDmitry Monakhov 	while (ret >= 0 && len) {
47009924a92aSTheodore Ts'o 		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
47019924a92aSTheodore Ts'o 					    credits);
4702a2df2a63SAmit Arora 		if (IS_ERR(handle)) {
4703a2df2a63SAmit Arora 			ret = PTR_ERR(handle);
4704a2df2a63SAmit Arora 			break;
4705a2df2a63SAmit Arora 		}
4706a4e5d88bSDmitry Monakhov 		ret = ext4_map_blocks(handle, inode, &map, flags);
4707221879c9SAneesh Kumar K.V 		if (ret <= 0) {
4708f282ac19SLukas Czerner 			ext4_debug("inode #%lu: block %u: len %u: "
4709b06acd38SLukas Czerner 				   "ext4_ext_map_blocks returned %d",
4710b06acd38SLukas Czerner 				   inode->i_ino, map.m_lblk,
4711b06acd38SLukas Czerner 				   map.m_len, ret);
4712a2df2a63SAmit Arora 			ext4_mark_inode_dirty(handle, inode);
4713a2df2a63SAmit Arora 			ret2 = ext4_journal_stop(handle);
4714a2df2a63SAmit Arora 			break;
4715a2df2a63SAmit Arora 		}
4716c174e6d6SDmitry Monakhov 		map.m_lblk += ret;
4717c174e6d6SDmitry Monakhov 		map.m_len = len = len - ret;
4718c174e6d6SDmitry Monakhov 		epos = (loff_t)map.m_lblk << inode->i_blkbits;
4719c174e6d6SDmitry Monakhov 		inode->i_ctime = ext4_current_time(inode);
4720c174e6d6SDmitry Monakhov 		if (new_size) {
4721c174e6d6SDmitry Monakhov 			if (epos > new_size)
4722c174e6d6SDmitry Monakhov 				epos = new_size;
4723c174e6d6SDmitry Monakhov 			if (ext4_update_inode_size(inode, epos) & 0x1)
4724c174e6d6SDmitry Monakhov 				inode->i_mtime = inode->i_ctime;
4725c174e6d6SDmitry Monakhov 		} else {
4726c174e6d6SDmitry Monakhov 			if (epos > inode->i_size)
4727c174e6d6SDmitry Monakhov 				ext4_set_inode_flag(inode,
4728c174e6d6SDmitry Monakhov 						    EXT4_INODE_EOFBLOCKS);
4729c174e6d6SDmitry Monakhov 		}
4730c174e6d6SDmitry Monakhov 		ext4_mark_inode_dirty(handle, inode);
4731a2df2a63SAmit Arora 		ret2 = ext4_journal_stop(handle);
4732a2df2a63SAmit Arora 		if (ret2)
4733a2df2a63SAmit Arora 			break;
4734a2df2a63SAmit Arora 	}
4735fd28784aSAneesh Kumar K.V 	if (ret == -ENOSPC &&
4736fd28784aSAneesh Kumar K.V 			ext4_should_retry_alloc(inode->i_sb, &retries)) {
4737fd28784aSAneesh Kumar K.V 		ret = 0;
4738a2df2a63SAmit Arora 		goto retry;
4739a2df2a63SAmit Arora 	}
4740f282ac19SLukas Czerner 
47410e8b6879SLukas Czerner 	return ret > 0 ? ret2 : ret;
47420e8b6879SLukas Czerner }
47430e8b6879SLukas Czerner 
4744b8a86845SLukas Czerner static long ext4_zero_range(struct file *file, loff_t offset,
4745b8a86845SLukas Czerner 			    loff_t len, int mode)
4746b8a86845SLukas Czerner {
4747b8a86845SLukas Czerner 	struct inode *inode = file_inode(file);
4748b8a86845SLukas Czerner 	handle_t *handle = NULL;
4749b8a86845SLukas Czerner 	unsigned int max_blocks;
4750b8a86845SLukas Czerner 	loff_t new_size = 0;
4751b8a86845SLukas Czerner 	int ret = 0;
4752b8a86845SLukas Czerner 	int flags;
475369dc9536SDmitry Monakhov 	int credits;
4754c174e6d6SDmitry Monakhov 	int partial_begin, partial_end;
4755b8a86845SLukas Czerner 	loff_t start, end;
4756b8a86845SLukas Czerner 	ext4_lblk_t lblk;
4757b8a86845SLukas Czerner 	struct address_space *mapping = inode->i_mapping;
4758b8a86845SLukas Czerner 	unsigned int blkbits = inode->i_blkbits;
4759b8a86845SLukas Czerner 
4760b8a86845SLukas Czerner 	trace_ext4_zero_range(inode, offset, len, mode);
4761b8a86845SLukas Czerner 
47626c5e73d3Sjon ernst 	if (!S_ISREG(inode->i_mode))
47636c5e73d3Sjon ernst 		return -EINVAL;
47646c5e73d3Sjon ernst 
4765e1ee60fdSNamjae Jeon 	/* Call ext4_force_commit to flush all data in case of data=journal. */
4766e1ee60fdSNamjae Jeon 	if (ext4_should_journal_data(inode)) {
4767e1ee60fdSNamjae Jeon 		ret = ext4_force_commit(inode->i_sb);
4768e1ee60fdSNamjae Jeon 		if (ret)
4769e1ee60fdSNamjae Jeon 			return ret;
4770e1ee60fdSNamjae Jeon 	}
4771e1ee60fdSNamjae Jeon 
4772b8a86845SLukas Czerner 	/*
4773b8a86845SLukas Czerner 	 * Write out all dirty pages to avoid race conditions
4774b8a86845SLukas Czerner 	 * Then release them.
4775b8a86845SLukas Czerner 	 */
4776b8a86845SLukas Czerner 	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4777b8a86845SLukas Czerner 		ret = filemap_write_and_wait_range(mapping, offset,
4778b8a86845SLukas Czerner 						   offset + len - 1);
4779b8a86845SLukas Czerner 		if (ret)
4780b8a86845SLukas Czerner 			return ret;
4781b8a86845SLukas Czerner 	}
4782b8a86845SLukas Czerner 
4783b8a86845SLukas Czerner 	/*
4784b8a86845SLukas Czerner 	 * Round up offset. This is not fallocate, we need to zero out
4785b8a86845SLukas Czerner 	 * blocks, so convert the interior block-aligned part of the range to
4786b8a86845SLukas Czerner 	 * unwritten and possibly manually zero out the unaligned parts of the
4787b8a86845SLukas Czerner 	 * range.
4788b8a86845SLukas Czerner 	 */
4789b8a86845SLukas Czerner 	start = round_up(offset, 1 << blkbits);
4790b8a86845SLukas Czerner 	end = round_down((offset + len), 1 << blkbits);
4791b8a86845SLukas Czerner 
4792b8a86845SLukas Czerner 	if (start < offset || end > offset + len)
4793b8a86845SLukas Czerner 		return -EINVAL;
4794c174e6d6SDmitry Monakhov 	partial_begin = offset & ((1 << blkbits) - 1);
4795c174e6d6SDmitry Monakhov 	partial_end = (offset + len) & ((1 << blkbits) - 1);
4796b8a86845SLukas Czerner 
4797b8a86845SLukas Czerner 	lblk = start >> blkbits;
4798b8a86845SLukas Czerner 	max_blocks = (end >> blkbits);
4799b8a86845SLukas Czerner 	if (max_blocks < lblk)
4800b8a86845SLukas Czerner 		max_blocks = 0;
4801b8a86845SLukas Czerner 	else
4802b8a86845SLukas Czerner 		max_blocks -= lblk;
4803b8a86845SLukas Czerner 
4804556615dcSLukas Czerner 	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
4805b8a86845SLukas Czerner 		EXT4_GET_BLOCKS_CONVERT_UNWRITTEN;
4806b8a86845SLukas Czerner 	if (mode & FALLOC_FL_KEEP_SIZE)
4807b8a86845SLukas Czerner 		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4808b8a86845SLukas Czerner 
4809b8a86845SLukas Czerner 	mutex_lock(&inode->i_mutex);
4810b8a86845SLukas Czerner 
4811b8a86845SLukas Czerner 	/*
4812b8a86845SLukas Czerner 	 * Indirect files do not support unwritten extents
4813b8a86845SLukas Czerner 	 */
4814b8a86845SLukas Czerner 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4815b8a86845SLukas Czerner 		ret = -EOPNOTSUPP;
4816b8a86845SLukas Czerner 		goto out_mutex;
4817b8a86845SLukas Czerner 	}
4818b8a86845SLukas Czerner 
4819b8a86845SLukas Czerner 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4820b8a86845SLukas Czerner 	     offset + len > i_size_read(inode)) {
4821b8a86845SLukas Czerner 		new_size = offset + len;
4822b8a86845SLukas Czerner 		ret = inode_newsize_ok(inode, new_size);
4823b8a86845SLukas Czerner 		if (ret)
4824b8a86845SLukas Czerner 			goto out_mutex;
4825b8a86845SLukas Czerner 		/*
4826b8a86845SLukas Czerner 		 * If we have a partial block after EOF we have to allocate
4827b8a86845SLukas Czerner 		 * the entire block.
4828b8a86845SLukas Czerner 		 */
4829c174e6d6SDmitry Monakhov 		if (partial_end)
4830b8a86845SLukas Czerner 			max_blocks += 1;
4831b8a86845SLukas Czerner 	}
4832b8a86845SLukas Czerner 
4833b8a86845SLukas Czerner 	if (max_blocks > 0) {
4834b8a86845SLukas Czerner 
4835b8a86845SLukas Czerner 		/* Now release the pages and zero the block-aligned part of pages */
4836b8a86845SLukas Czerner 		truncate_pagecache_range(inode, start, end - 1);
4837c174e6d6SDmitry Monakhov 		inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4838b8a86845SLukas Czerner 
4839b8a86845SLukas Czerner 		/* Wait for all existing dio workers; newcomers will block on i_mutex */
4840b8a86845SLukas Czerner 		ext4_inode_block_unlocked_dio(inode);
4841b8a86845SLukas Czerner 		inode_dio_wait(inode);
4842b8a86845SLukas Czerner 
4843b8a86845SLukas Czerner 		/*
4844b8a86845SLukas Czerner 		 * Remove entire range from the extent status tree.
4845b8a86845SLukas Czerner 		 */
4846b8a86845SLukas Czerner 		ret = ext4_es_remove_extent(inode, lblk, max_blocks);
4847b8a86845SLukas Czerner 		if (ret)
4848b8a86845SLukas Czerner 			goto out_dio;
4849b8a86845SLukas Czerner 
4850c174e6d6SDmitry Monakhov 		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4851c174e6d6SDmitry Monakhov 					     flags, mode);
4852b8a86845SLukas Czerner 		if (ret)
4853b8a86845SLukas Czerner 			goto out_dio;
4854b8a86845SLukas Czerner 	}
4855c174e6d6SDmitry Monakhov 	if (!partial_begin && !partial_end)
4856c174e6d6SDmitry Monakhov 		goto out_dio;
4857c174e6d6SDmitry Monakhov 
485869dc9536SDmitry Monakhov 	/*
485969dc9536SDmitry Monakhov 	 * In the worst case we have to write out two non-adjacent unwritten
486069dc9536SDmitry Monakhov 	 * blocks and update the inode
486169dc9536SDmitry Monakhov 	 */
486269dc9536SDmitry Monakhov 	credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
486369dc9536SDmitry Monakhov 	if (ext4_should_journal_data(inode))
486469dc9536SDmitry Monakhov 		credits += 2;
486569dc9536SDmitry Monakhov 	handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4866b8a86845SLukas Czerner 	if (IS_ERR(handle)) {
4867b8a86845SLukas Czerner 		ret = PTR_ERR(handle);
4868b8a86845SLukas Czerner 		ext4_std_error(inode->i_sb, ret);
4869b8a86845SLukas Czerner 		goto out_dio;
4870b8a86845SLukas Czerner 	}
4871b8a86845SLukas Czerner 
4872b8a86845SLukas Czerner 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4873e5b30416SLukas Czerner 	if (new_size) {
48744631dbf6SDmitry Monakhov 		ext4_update_inode_size(inode, new_size);
4875e5b30416SLukas Czerner 	} else {
4876b8a86845SLukas Czerner 		/*
4877b8a86845SLukas Czerner 		* Mark that we allocate beyond EOF so the subsequent truncate
4878b8a86845SLukas Czerner 		* can proceed even if the new size is the same as i_size.
4879b8a86845SLukas Czerner 		*/
4880b8a86845SLukas Czerner 		if ((offset + len) > i_size_read(inode))
4881b8a86845SLukas Czerner 			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4882b8a86845SLukas Czerner 	}
4883b8a86845SLukas Czerner 	ext4_mark_inode_dirty(handle, inode);
4884b8a86845SLukas Czerner 
4885b8a86845SLukas Czerner 	/* Zero out partial block at the edges of the range */
4886b8a86845SLukas Czerner 	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4887b8a86845SLukas Czerner 
4888b8a86845SLukas Czerner 	if (file->f_flags & O_SYNC)
4889b8a86845SLukas Czerner 		ext4_handle_sync(handle);
4890b8a86845SLukas Czerner 
4891b8a86845SLukas Czerner 	ext4_journal_stop(handle);
4892b8a86845SLukas Czerner out_dio:
4893b8a86845SLukas Czerner 	ext4_inode_resume_unlocked_dio(inode);
4894b8a86845SLukas Czerner out_mutex:
4895b8a86845SLukas Czerner 	mutex_unlock(&inode->i_mutex);
4896b8a86845SLukas Czerner 	return ret;
4897b8a86845SLukas Czerner }
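
/*
 * Userspace view (illustrative, not part of this file) of the zero-range
 * path above, driven through fallocate(2).  Assumes a libc and kernel
 * exposing FALLOC_FL_ZERO_RANGE; error handling is abbreviated.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, 1 << 20) < 0)
		return 1;
	/* zero 64KiB starting at offset 4096; i_size is unaffected */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, 4096, 64 << 10) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}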
4898b8a86845SLukas Czerner 
48990e8b6879SLukas Czerner /*
49000e8b6879SLukas Czerner  * Preallocate space for a file. This implements ext4's fallocate file
49010e8b6879SLukas Czerner  * operation, which gets called from the sys_fallocate system call.
49020e8b6879SLukas Czerner  * For block-mapped files, posix_fallocate should fall back to the method
49030e8b6879SLukas Czerner  * of writing zeroes to the required new blocks (the same behavior which is
49040e8b6879SLukas Czerner  * expected for file systems which do not support fallocate() system call).
49050e8b6879SLukas Czerner  */
49060e8b6879SLukas Czerner long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
49070e8b6879SLukas Czerner {
49080e8b6879SLukas Czerner 	struct inode *inode = file_inode(file);
49090e8b6879SLukas Czerner 	loff_t new_size = 0;
49100e8b6879SLukas Czerner 	unsigned int max_blocks;
49110e8b6879SLukas Czerner 	int ret = 0;
49120e8b6879SLukas Czerner 	int flags;
49130e8b6879SLukas Czerner 	ext4_lblk_t lblk;
49140e8b6879SLukas Czerner 	unsigned int blkbits = inode->i_blkbits;
49150e8b6879SLukas Czerner 
49160e8b6879SLukas Czerner 	/* Return error if mode is not supported */
49170e8b6879SLukas Czerner 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4918b8a86845SLukas Czerner 		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
49190e8b6879SLukas Czerner 		return -EOPNOTSUPP;
49200e8b6879SLukas Czerner 
49210e8b6879SLukas Czerner 	if (mode & FALLOC_FL_PUNCH_HOLE)
49220e8b6879SLukas Czerner 		return ext4_punch_hole(inode, offset, len);
49230e8b6879SLukas Czerner 
49240e8b6879SLukas Czerner 	ret = ext4_convert_inline_data(inode);
49250e8b6879SLukas Czerner 	if (ret)
49260e8b6879SLukas Czerner 		return ret;
49270e8b6879SLukas Czerner 
49280e8b6879SLukas Czerner 	/*
49290e8b6879SLukas Czerner 	 * currently supporting (pre)allocate mode for extent-based
49300e8b6879SLukas Czerner 	 * files _only_
49310e8b6879SLukas Czerner 	 */
49320e8b6879SLukas Czerner 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
49330e8b6879SLukas Czerner 		return -EOPNOTSUPP;
49340e8b6879SLukas Czerner 
493540c406c7STheodore Ts'o 	if (mode & FALLOC_FL_COLLAPSE_RANGE)
493640c406c7STheodore Ts'o 		return ext4_collapse_range(inode, offset, len);
493740c406c7STheodore Ts'o 
4938b8a86845SLukas Czerner 	if (mode & FALLOC_FL_ZERO_RANGE)
4939b8a86845SLukas Czerner 		return ext4_zero_range(file, offset, len, mode);
4940b8a86845SLukas Czerner 
49410e8b6879SLukas Czerner 	trace_ext4_fallocate_enter(inode, offset, len, mode);
49420e8b6879SLukas Czerner 	lblk = offset >> blkbits;
49430e8b6879SLukas Czerner 	/*
49440e8b6879SLukas Czerner 	 * We can't just convert len to max_blocks: e.g. blocksize = 4096,
49450e8b6879SLukas Czerner 	 * offset = 3072, len = 2048 spans two blocks, yet len >> blkbits == 0.
49460e8b6879SLukas Czerner 	 */
49470e8b6879SLukas Czerner 	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
49480e8b6879SLukas Czerner 		- lblk;
49490e8b6879SLukas Czerner 
4950556615dcSLukas Czerner 	flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
49510e8b6879SLukas Czerner 	if (mode & FALLOC_FL_KEEP_SIZE)
49520e8b6879SLukas Czerner 		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
49530e8b6879SLukas Czerner 
49540e8b6879SLukas Czerner 	mutex_lock(&inode->i_mutex);
49550e8b6879SLukas Czerner 
49560e8b6879SLukas Czerner 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
49570e8b6879SLukas Czerner 	     offset + len > i_size_read(inode)) {
49580e8b6879SLukas Czerner 		new_size = offset + len;
49590e8b6879SLukas Czerner 		ret = inode_newsize_ok(inode, new_size);
49600e8b6879SLukas Czerner 		if (ret)
49610e8b6879SLukas Czerner 			goto out;
49620e8b6879SLukas Czerner 	}
49630e8b6879SLukas Czerner 
4964c174e6d6SDmitry Monakhov 	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4965c174e6d6SDmitry Monakhov 				     flags, mode);
49660e8b6879SLukas Czerner 	if (ret)
49670e8b6879SLukas Czerner 		goto out;
49680e8b6879SLukas Czerner 
4969c174e6d6SDmitry Monakhov 	if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4970c174e6d6SDmitry Monakhov 		ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
4971c174e6d6SDmitry Monakhov 						EXT4_I(inode)->i_sync_tid);
4972f282ac19SLukas Czerner 	}
4973f282ac19SLukas Czerner out:
497455bd725aSAneesh Kumar K.V 	mutex_unlock(&inode->i_mutex);
49750e8b6879SLukas Czerner 	trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
49760e8b6879SLukas Czerner 	return ret;
4977a2df2a63SAmit Arora }
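
/*
 * Userspace view (illustrative): preallocating past EOF with
 * FALLOC_FL_KEEP_SIZE, which reaches the unwritten-extent allocation in
 * ext4_fallocate() above.  st_size stays put while st_blocks grows.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int fd = open("prealloc", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* reserve 8MiB of unwritten extents without moving EOF */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 8 << 20) == 0 &&
	    fstat(fd, &st) == 0)
		printf("size=%lld blocks=%lld\n",
		       (long long)st.st_size, (long long)st.st_blocks);
	close(fd);
	return 0;
}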
49786873fa0dSEric Sandeen 
49796873fa0dSEric Sandeen /*
49800031462bSMingming Cao  * This function converts a range of blocks to written extents.
49810031462bSMingming Cao  * The caller passes the start offset and the size;
49820031462bSMingming Cao  * all unwritten extents within this range will be converted to
49830031462bSMingming Cao  * written extents.
49840031462bSMingming Cao  *
49850031462bSMingming Cao  * This function is called from the direct IO end io callback
49860031462bSMingming Cao  * function, to convert the fallocated extents after IO is completed.
4987109f5565SMingming  * Returns 0 on success.
49880031462bSMingming Cao  */
49896b523df4SJan Kara int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
49906b523df4SJan Kara 				   loff_t offset, ssize_t len)
49910031462bSMingming Cao {
49920031462bSMingming Cao 	unsigned int max_blocks;
49930031462bSMingming Cao 	int ret = 0;
49940031462bSMingming Cao 	int ret2 = 0;
49952ed88685STheodore Ts'o 	struct ext4_map_blocks map;
49960031462bSMingming Cao 	unsigned int credits, blkbits = inode->i_blkbits;
49970031462bSMingming Cao 
49982ed88685STheodore Ts'o 	map.m_lblk = offset >> blkbits;
49990031462bSMingming Cao 	/*
50000031462bSMingming Cao 	 * We can't just convert len to max_blocks: e.g. blocksize = 4096,
50010031462bSMingming Cao 	 * offset = 3072, len = 2048 spans two blocks, yet len >> blkbits == 0.
50020031462bSMingming Cao 	 */
50032ed88685STheodore Ts'o 	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
50042ed88685STheodore Ts'o 		      map.m_lblk);
50050031462bSMingming Cao 	/*
50066b523df4SJan Kara 	 * This is somewhat ugly but the idea is clear: when a transaction is
50076b523df4SJan Kara 	 * reserved, everything goes into it. Otherwise we start several
50086b523df4SJan Kara 	 * smaller transactions, converting each extent separately.
50096b523df4SJan Kara 	 */
50106b523df4SJan Kara 	if (handle) {
50116b523df4SJan Kara 		handle = ext4_journal_start_reserved(handle,
50126b523df4SJan Kara 						     EXT4_HT_EXT_CONVERT);
50136b523df4SJan Kara 		if (IS_ERR(handle))
50146b523df4SJan Kara 			return PTR_ERR(handle);
50156b523df4SJan Kara 		credits = 0;
50166b523df4SJan Kara 	} else {
50176b523df4SJan Kara 		/*
50180031462bSMingming Cao 		 * credits to insert 1 extent into extent tree
50190031462bSMingming Cao 		 */
50200031462bSMingming Cao 		credits = ext4_chunk_trans_blocks(inode, max_blocks);
50216b523df4SJan Kara 	}
50220031462bSMingming Cao 	while (ret >= 0 && ret < max_blocks) {
50232ed88685STheodore Ts'o 		map.m_lblk += ret;
50242ed88685STheodore Ts'o 		map.m_len = (max_blocks -= ret);
50256b523df4SJan Kara 		if (credits) {
50266b523df4SJan Kara 			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
50276b523df4SJan Kara 						    credits);
50280031462bSMingming Cao 			if (IS_ERR(handle)) {
50290031462bSMingming Cao 				ret = PTR_ERR(handle);
50300031462bSMingming Cao 				break;
50310031462bSMingming Cao 			}
50326b523df4SJan Kara 		}
50332ed88685STheodore Ts'o 		ret = ext4_map_blocks(handle, inode, &map,
5034c7064ef1SJiaying Zhang 				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
5035b06acd38SLukas Czerner 		if (ret <= 0)
5036b06acd38SLukas Czerner 			ext4_warning(inode->i_sb,
5037b06acd38SLukas Czerner 				     "inode #%lu: block %u: len %u: "
503892b97816STheodore Ts'o 				     "ext4_ext_map_blocks returned %d",
5039b06acd38SLukas Czerner 				     inode->i_ino, map.m_lblk,
504092b97816STheodore Ts'o 				     map.m_len, ret);
50410031462bSMingming Cao 		ext4_mark_inode_dirty(handle, inode);
50426b523df4SJan Kara 		if (credits)
50430031462bSMingming Cao 			ret2 = ext4_journal_stop(handle);
50440031462bSMingming Cao 		if (ret <= 0 || ret2)
50450031462bSMingming Cao 			break;
50460031462bSMingming Cao 	}
50476b523df4SJan Kara 	if (!credits)
50486b523df4SJan Kara 		ret2 = ext4_journal_stop(handle);
50490031462bSMingming Cao 	return ret > 0 ? ret2 : ret;
50500031462bSMingming Cao }
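
/*
 * Worked example (illustrative, not kernel code) of the max_blocks
 * computation above: with blocksize = 4096 (blkbits == 12), offset = 3072
 * and len = 2048, the byte range [3072, 5120) touches blocks 0 and 1, so
 * two blocks need converting even though len >> blkbits == 0.
 */
static unsigned int range_to_blocks(unsigned long long offset,
				    unsigned long long len,
				    unsigned int blkbits)
{
	unsigned long long first = offset >> blkbits;
	unsigned long long last = (offset + len - 1) >> blkbits;

	return last - first + 1;	/* assumes len > 0 */
}
/* range_to_blocks(3072, 2048, 12) == 2 */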
50516d9c85ebSYongqiang Yang 
50520031462bSMingming Cao /*
505369eb33dcSZheng Liu  * If newes is not an existing extent (newes->es_pblk equals zero), find
505469eb33dcSZheng Liu  * the delayed extent at the start of newes, update newes accordingly and
505591dd8c11SLukas Czerner  * return the start of the next delayed extent.
505691dd8c11SLukas Czerner  *
505769eb33dcSZheng Liu  * If newes is an existing extent (newes->es_pblk is nonzero),
505891dd8c11SLukas Czerner  * return the start of the next delayed extent, or EXT_MAX_BLOCKS if no
505969eb33dcSZheng Liu  * delayed extent is found. Leave newes unmodified.
50606873fa0dSEric Sandeen  */
506191dd8c11SLukas Czerner static int ext4_find_delayed_extent(struct inode *inode,
506269eb33dcSZheng Liu 				    struct extent_status *newes)
50636873fa0dSEric Sandeen {
5064b3aff3e3SZheng Liu 	struct extent_status es;
5065be401363SZheng Liu 	ext4_lblk_t block, next_del;
50666873fa0dSEric Sandeen 
506769eb33dcSZheng Liu 	if (newes->es_pblk == 0) {
5068e30b5dcaSYan, Zheng 		ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
5069e30b5dcaSYan, Zheng 				newes->es_lblk + newes->es_len - 1, &es);
5070e30b5dcaSYan, Zheng 
50716d9c85ebSYongqiang Yang 		/*
507269eb33dcSZheng Liu 		 * No extent in the extent tree contains block @newes->es_pblk,
50736d9c85ebSYongqiang Yang 		 * so the block may lie in 1) a hole or 2) a delayed extent.
50746d9c85ebSYongqiang Yang 		 */
507506b0c886SZheng Liu 		if (es.es_len == 0)
5076b3aff3e3SZheng Liu 			/* A hole found. */
507791dd8c11SLukas Czerner 			return 0;
50786d9c85ebSYongqiang Yang 
507969eb33dcSZheng Liu 		if (es.es_lblk > newes->es_lblk) {
5080b3aff3e3SZheng Liu 			/* A hole found. */
508169eb33dcSZheng Liu 			newes->es_len = min(es.es_lblk - newes->es_lblk,
508269eb33dcSZheng Liu 					    newes->es_len);
508391dd8c11SLukas Czerner 			return 0;
50846873fa0dSEric Sandeen 		}
50856d9c85ebSYongqiang Yang 
508669eb33dcSZheng Liu 		newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
50876d9c85ebSYongqiang Yang 	}
50886873fa0dSEric Sandeen 
508969eb33dcSZheng Liu 	block = newes->es_lblk + newes->es_len;
5090e30b5dcaSYan, Zheng 	ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
5091be401363SZheng Liu 	if (es.es_len == 0)
5092be401363SZheng Liu 		next_del = EXT_MAX_BLOCKS;
5093be401363SZheng Liu 	else
5094be401363SZheng Liu 		next_del = es.es_lblk;
5095be401363SZheng Liu 
509691dd8c11SLukas Czerner 	return next_del;
50976873fa0dSEric Sandeen }

50986873fa0dSEric Sandeen /* fiemap flags we can handle are specified here */
50996873fa0dSEric Sandeen #define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
51006873fa0dSEric Sandeen 
51013a06d778SAneesh Kumar K.V static int ext4_xattr_fiemap(struct inode *inode,
51023a06d778SAneesh Kumar K.V 				struct fiemap_extent_info *fieinfo)
51036873fa0dSEric Sandeen {
51046873fa0dSEric Sandeen 	__u64 physical = 0;
51056873fa0dSEric Sandeen 	__u64 length;
51066873fa0dSEric Sandeen 	__u32 flags = FIEMAP_EXTENT_LAST;
51076873fa0dSEric Sandeen 	int blockbits = inode->i_sb->s_blocksize_bits;
51086873fa0dSEric Sandeen 	int error = 0;
51096873fa0dSEric Sandeen 
51106873fa0dSEric Sandeen 	/* in-inode? */
511119f5fb7aSTheodore Ts'o 	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
51126873fa0dSEric Sandeen 		struct ext4_iloc iloc;
51136873fa0dSEric Sandeen 		int offset;	/* offset of xattr in inode */
51146873fa0dSEric Sandeen 
51156873fa0dSEric Sandeen 		error = ext4_get_inode_loc(inode, &iloc);
51166873fa0dSEric Sandeen 		if (error)
51176873fa0dSEric Sandeen 			return error;
5118a60697f4SJan Kara 		physical = (__u64)iloc.bh->b_blocknr << blockbits;
51196873fa0dSEric Sandeen 		offset = EXT4_GOOD_OLD_INODE_SIZE +
51206873fa0dSEric Sandeen 				EXT4_I(inode)->i_extra_isize;
51216873fa0dSEric Sandeen 		physical += offset;
51226873fa0dSEric Sandeen 		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
51236873fa0dSEric Sandeen 		flags |= FIEMAP_EXTENT_DATA_INLINE;
5124fd2dd9fbSCurt Wohlgemuth 		brelse(iloc.bh);
51256873fa0dSEric Sandeen 	} else { /* external block */
5126a60697f4SJan Kara 		physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
51276873fa0dSEric Sandeen 		length = inode->i_sb->s_blocksize;
51286873fa0dSEric Sandeen 	}
51296873fa0dSEric Sandeen 
51306873fa0dSEric Sandeen 	if (physical)
51316873fa0dSEric Sandeen 		error = fiemap_fill_next_extent(fieinfo, 0, physical,
51326873fa0dSEric Sandeen 						length, flags);
51336873fa0dSEric Sandeen 	return (error < 0 ? error : 0);
51346873fa0dSEric Sandeen }
51356873fa0dSEric Sandeen 
51366873fa0dSEric Sandeen int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
51376873fa0dSEric Sandeen 		__u64 start, __u64 len)
51386873fa0dSEric Sandeen {
51396873fa0dSEric Sandeen 	ext4_lblk_t start_blk;
51406873fa0dSEric Sandeen 	int error = 0;
51416873fa0dSEric Sandeen 
514294191985STao Ma 	if (ext4_has_inline_data(inode)) {
514394191985STao Ma 		int has_inline = 1;
514494191985STao Ma 
514594191985STao Ma 		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
514694191985STao Ma 
514794191985STao Ma 		if (has_inline)
514894191985STao Ma 			return error;
514994191985STao Ma 	}
515094191985STao Ma 
51517869a4a6STheodore Ts'o 	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
51527869a4a6STheodore Ts'o 		error = ext4_ext_precache(inode);
51537869a4a6STheodore Ts'o 		if (error)
51547869a4a6STheodore Ts'o 			return error;
51557869a4a6STheodore Ts'o 	}
51567869a4a6STheodore Ts'o 
51576873fa0dSEric Sandeen 	/* fallback to generic here if not in extents fmt */
515812e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
51596873fa0dSEric Sandeen 		return generic_block_fiemap(inode, fieinfo, start, len,
51606873fa0dSEric Sandeen 			ext4_get_block);
51616873fa0dSEric Sandeen 
51626873fa0dSEric Sandeen 	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
51636873fa0dSEric Sandeen 		return -EBADR;
51646873fa0dSEric Sandeen 
51656873fa0dSEric Sandeen 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
51666873fa0dSEric Sandeen 		error = ext4_xattr_fiemap(inode, fieinfo);
51676873fa0dSEric Sandeen 	} else {
5168aca92ff6SLeonard Michlmayr 		ext4_lblk_t len_blks;
5169aca92ff6SLeonard Michlmayr 		__u64 last_blk;
5170aca92ff6SLeonard Michlmayr 
51716873fa0dSEric Sandeen 		start_blk = start >> inode->i_sb->s_blocksize_bits;
5172aca92ff6SLeonard Michlmayr 		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5173f17722f9SLukas Czerner 		if (last_blk >= EXT_MAX_BLOCKS)
5174f17722f9SLukas Czerner 			last_blk = EXT_MAX_BLOCKS-1;
5175aca92ff6SLeonard Michlmayr 		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
51766873fa0dSEric Sandeen 
51776873fa0dSEric Sandeen 		/*
517891dd8c11SLukas Czerner 		 * Walk the extent tree gathering extent information
517991dd8c11SLukas Czerner 		 * and pushing extents back to the user.
51806873fa0dSEric Sandeen 		 */
518191dd8c11SLukas Czerner 		error = ext4_fill_fiemap_extents(inode, start_blk,
518291dd8c11SLukas Czerner 						 len_blks, fieinfo);
51836873fa0dSEric Sandeen 	}
5184107a7bd3STheodore Ts'o 	ext4_es_lru_add(inode);
51856873fa0dSEric Sandeen 	return error;
51866873fa0dSEric Sandeen }
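
/*
 * Userspace view (illustrative): reading back the extent map that
 * ext4_fiemap() above serves, via the FS_IOC_FIEMAP ioctl.  Buffer
 * sizing and error handling are abbreviated.
 */
#include <fcntl.h>
#include <linux/fiemap.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned int i, n = 32;
	struct fiemap *fm;
	int fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);

	if (fd < 0)
		return 1;
	fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_length = ~0ULL;			/* whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = n;
	if (ioctl(fd, FS_IOC_FIEMAP, fm) == 0)
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("logical %llu physical %llu len %llu flags %#x\n",
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_physical,
			       (unsigned long long)fm->fm_extents[i].fe_length,
			       fm->fm_extents[i].fe_flags);
	free(fm);
	close(fd);
	return 0;
}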
51879eb79482SNamjae Jeon 
51889eb79482SNamjae Jeon /*
51899eb79482SNamjae Jeon  * ext4_access_path:
51909eb79482SNamjae Jeon  * Function to access the path buffer for marking it dirty.
51919eb79482SNamjae Jeon  * It also checks if there are sufficient credits left in the journal handle
51929eb79482SNamjae Jeon  * to update the path.
51939eb79482SNamjae Jeon  */
51949eb79482SNamjae Jeon static int
51959eb79482SNamjae Jeon ext4_access_path(handle_t *handle, struct inode *inode,
51969eb79482SNamjae Jeon 		struct ext4_ext_path *path)
51979eb79482SNamjae Jeon {
51989eb79482SNamjae Jeon 	int credits, err;
51999eb79482SNamjae Jeon 
52009eb79482SNamjae Jeon 	if (!ext4_handle_valid(handle))
52019eb79482SNamjae Jeon 		return 0;
52029eb79482SNamjae Jeon 
52039eb79482SNamjae Jeon 	/*
52049eb79482SNamjae Jeon 	 * Check if we need to extend the journal credits:
52059eb79482SNamjae Jeon 	 * 3 for leaf, sb, and inode plus 2 (bitmap and group
52069eb79482SNamjae Jeon 	 * descriptor) for each block group; assume two block
52079eb79482SNamjae Jeon 	 * groups
52089eb79482SNamjae Jeon 	 */
52099eb79482SNamjae Jeon 	if (handle->h_buffer_credits < 7) {
52109eb79482SNamjae Jeon 		credits = ext4_writepage_trans_blocks(inode);
52119eb79482SNamjae Jeon 		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
52129eb79482SNamjae Jeon 		/* EAGAIN is success */
52139eb79482SNamjae Jeon 		if (err && err != -EAGAIN)
52149eb79482SNamjae Jeon 			return err;
52159eb79482SNamjae Jeon 	}
52169eb79482SNamjae Jeon 
52179eb79482SNamjae Jeon 	err = ext4_ext_get_access(handle, inode, path);
52189eb79482SNamjae Jeon 	return err;
52199eb79482SNamjae Jeon }
52209eb79482SNamjae Jeon 
52219eb79482SNamjae Jeon /*
52229eb79482SNamjae Jeon  * ext4_ext_shift_path_extents:
52239eb79482SNamjae Jeon  * Shift the extents of a path structure lying between path[depth].p_ext
52249eb79482SNamjae Jeon  * and EXT_LAST_EXTENT(path[depth].p_hdr) downwards, by subtracting shift
52259eb79482SNamjae Jeon  * from the starting block of each extent.
52269eb79482SNamjae Jeon  */
52279eb79482SNamjae Jeon static int
52289eb79482SNamjae Jeon ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
52299eb79482SNamjae Jeon 			    struct inode *inode, handle_t *handle,
52309eb79482SNamjae Jeon 			    ext4_lblk_t *start)
52319eb79482SNamjae Jeon {
52329eb79482SNamjae Jeon 	int depth, err = 0;
52339eb79482SNamjae Jeon 	struct ext4_extent *ex_start, *ex_last;
52349eb79482SNamjae Jeon 	bool update = 0;
52359eb79482SNamjae Jeon 	depth = path->p_depth;
52369eb79482SNamjae Jeon 
52379eb79482SNamjae Jeon 	while (depth >= 0) {
52389eb79482SNamjae Jeon 		if (depth == path->p_depth) {
52399eb79482SNamjae Jeon 			ex_start = path[depth].p_ext;
52409eb79482SNamjae Jeon 			if (!ex_start)
52419eb79482SNamjae Jeon 				return -EIO;
52429eb79482SNamjae Jeon 
52439eb79482SNamjae Jeon 			ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
52449eb79482SNamjae Jeon 			if (!ex_last)
52459eb79482SNamjae Jeon 				return -EIO;
52469eb79482SNamjae Jeon 
52479eb79482SNamjae Jeon 			err = ext4_access_path(handle, inode, path + depth);
52489eb79482SNamjae Jeon 			if (err)
52499eb79482SNamjae Jeon 				goto out;
52509eb79482SNamjae Jeon 
52519eb79482SNamjae Jeon 			if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
52529eb79482SNamjae Jeon 				update = 1;
52539eb79482SNamjae Jeon 
5254847c6c42SZheng Liu 			*start = le32_to_cpu(ex_last->ee_block) +
52559eb79482SNamjae Jeon 				ext4_ext_get_actual_len(ex_last);
52569eb79482SNamjae Jeon 
52579eb79482SNamjae Jeon 			while (ex_start <= ex_last) {
5258847c6c42SZheng Liu 				le32_add_cpu(&ex_start->ee_block, -shift);
52596dd834efSLukas Czerner 				/* Try to merge to the left. */
52606dd834efSLukas Czerner 				if ((ex_start >
52616dd834efSLukas Czerner 				     EXT_FIRST_EXTENT(path[depth].p_hdr)) &&
52626dd834efSLukas Czerner 				    ext4_ext_try_to_merge_right(inode,
52639eb79482SNamjae Jeon 							path, ex_start - 1))
52649eb79482SNamjae Jeon 					ex_last--;
52656dd834efSLukas Czerner 				else
52669eb79482SNamjae Jeon 					ex_start++;
52679eb79482SNamjae Jeon 			}
52689eb79482SNamjae Jeon 			err = ext4_ext_dirty(handle, inode, path + depth);
52699eb79482SNamjae Jeon 			if (err)
52709eb79482SNamjae Jeon 				goto out;
52719eb79482SNamjae Jeon 
52729eb79482SNamjae Jeon 			if (--depth < 0 || !update)
52739eb79482SNamjae Jeon 				break;
52749eb79482SNamjae Jeon 		}
52759eb79482SNamjae Jeon 
52769eb79482SNamjae Jeon 		/* Update index too */
52779eb79482SNamjae Jeon 		err = ext4_access_path(handle, inode, path + depth);
52789eb79482SNamjae Jeon 		if (err)
52799eb79482SNamjae Jeon 			goto out;
52809eb79482SNamjae Jeon 
5281847c6c42SZheng Liu 		le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
52829eb79482SNamjae Jeon 		err = ext4_ext_dirty(handle, inode, path + depth);
52839eb79482SNamjae Jeon 		if (err)
52849eb79482SNamjae Jeon 			goto out;
52859eb79482SNamjae Jeon 
52869eb79482SNamjae Jeon 		/* we are done if current index is not a starting index */
52879eb79482SNamjae Jeon 		if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
52889eb79482SNamjae Jeon 			break;
52899eb79482SNamjae Jeon 
52909eb79482SNamjae Jeon 		depth--;
52919eb79482SNamjae Jeon 	}
52929eb79482SNamjae Jeon 
52939eb79482SNamjae Jeon out:
52949eb79482SNamjae Jeon 	return err;
52959eb79482SNamjae Jeon }
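
/*
 * Toy userspace model (illustrative, not kernel code) of the downward
 * shift performed above for collapse-range: each extent's logical start
 * moves down by 'shift', mirroring the le32_add_cpu(&ee_block, -shift)
 * loop, after a legality check in the spirit of the -EINVAL test in
 * ext4_ext_shift_extents() below.
 */
struct toy_extent { unsigned int lblk, len; };

static int toy_shift(struct toy_extent *ex, unsigned int nr,
		     unsigned int shift, unsigned int ex_end)
{
	unsigned int i;

	/* the hole between ex_end and the first extent must fit the shift */
	if (nr && ex[0].lblk - ex_end < shift)
		return -1;
	for (i = 0; i < nr; i++)
		ex[i].lblk -= shift;
	return 0;
}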
52969eb79482SNamjae Jeon 
52979eb79482SNamjae Jeon /*
52989eb79482SNamjae Jeon  * ext4_ext_shift_extents:
52999eb79482SNamjae Jeon  * All the extents which lie in the range from start to the last allocated
53009eb79482SNamjae Jeon  * block for the file are shifted downwards by shift blocks.
53019eb79482SNamjae Jeon  * On success, 0 is returned, error otherwise.
53029eb79482SNamjae Jeon  */
53039eb79482SNamjae Jeon static int
53049eb79482SNamjae Jeon ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
53059eb79482SNamjae Jeon 		       ext4_lblk_t start, ext4_lblk_t shift)
53069eb79482SNamjae Jeon {
53079eb79482SNamjae Jeon 	struct ext4_ext_path *path;
53089eb79482SNamjae Jeon 	int ret = 0, depth;
53099eb79482SNamjae Jeon 	struct ext4_extent *extent;
5310f8fb4f41SDmitry Monakhov 	ext4_lblk_t stop_block;
53119eb79482SNamjae Jeon 	ext4_lblk_t ex_start, ex_end;
53129eb79482SNamjae Jeon 
53139eb79482SNamjae Jeon 	/* Let path point to the last extent */
53149eb79482SNamjae Jeon 	path = ext4_ext_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
53159eb79482SNamjae Jeon 	if (IS_ERR(path))
53169eb79482SNamjae Jeon 		return PTR_ERR(path);
53179eb79482SNamjae Jeon 
53189eb79482SNamjae Jeon 	depth = path->p_depth;
53199eb79482SNamjae Jeon 	extent = path[depth].p_ext;
53209eb79482SNamjae Jeon 	if (!extent) {
53219eb79482SNamjae Jeon 		ext4_ext_drop_refs(path);
53229eb79482SNamjae Jeon 		kfree(path);
53239eb79482SNamjae Jeon 		return ret;
53249eb79482SNamjae Jeon 	}
53259eb79482SNamjae Jeon 
5326847c6c42SZheng Liu 	stop_block = le32_to_cpu(extent->ee_block) +
5327847c6c42SZheng Liu 			ext4_ext_get_actual_len(extent);
53289eb79482SNamjae Jeon 	ext4_ext_drop_refs(path);
53299eb79482SNamjae Jeon 	kfree(path);
53309eb79482SNamjae Jeon 
53319eb79482SNamjae Jeon 	/* Nothing to shift if the hole is at the end of the file */
53329eb79482SNamjae Jeon 	if (start >= stop_block)
53339eb79482SNamjae Jeon 		return ret;
53349eb79482SNamjae Jeon 
53359eb79482SNamjae Jeon 	/*
53369eb79482SNamjae Jeon 	 * Don't start shifting extents until we make sure the hole is big
53379eb79482SNamjae Jeon 	 * enough to accommodate the shift.
53389eb79482SNamjae Jeon 	 */
53399eb79482SNamjae Jeon 	path = ext4_ext_find_extent(inode, start - 1, NULL, 0);
53408dc79ec4SDmitry Monakhov 	if (IS_ERR(path))
53418dc79ec4SDmitry Monakhov 		return PTR_ERR(path);
53429eb79482SNamjae Jeon 	depth = path->p_depth;
53439eb79482SNamjae Jeon 	extent =  path[depth].p_ext;
53448dc79ec4SDmitry Monakhov 	if (extent) {
5345847c6c42SZheng Liu 		ex_start = le32_to_cpu(extent->ee_block);
5346847c6c42SZheng Liu 		ex_end = le32_to_cpu(extent->ee_block) +
5347847c6c42SZheng Liu 			ext4_ext_get_actual_len(extent);
53488dc79ec4SDmitry Monakhov 	} else {
53498dc79ec4SDmitry Monakhov 		ex_start = 0;
53508dc79ec4SDmitry Monakhov 		ex_end = 0;
53518dc79ec4SDmitry Monakhov 	}
53529eb79482SNamjae Jeon 	ext4_ext_drop_refs(path);
53539eb79482SNamjae Jeon 	kfree(path);
53549eb79482SNamjae Jeon 
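	/*
	 * Illustrative numbers (not from the source): if the extent to the
	 * left of the hole ends at block ex_end == 100 and the collapse
	 * starts at start == 120, the hole is 20 blocks wide; any
	 * shift > 20 would slide the shifted extents onto live data, so it
	 * is rejected below with -EINVAL.
	 */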
53559eb79482SNamjae Jeon 	if ((start == ex_start && shift > ex_start) ||
53569eb79482SNamjae Jeon 	    (shift > start - ex_end))
53579eb79482SNamjae Jeon 		return -EINVAL;
53589eb79482SNamjae Jeon 
53599eb79482SNamjae Jeon 	/* It's safe to start updating extents */
53609eb79482SNamjae Jeon 	while (start < stop_block) {
53619eb79482SNamjae Jeon 		path = ext4_ext_find_extent(inode, start, NULL, 0);
53629eb79482SNamjae Jeon 		if (IS_ERR(path))
53639eb79482SNamjae Jeon 			return PTR_ERR(path);
53649eb79482SNamjae Jeon 		depth = path->p_depth;
53659eb79482SNamjae Jeon 		extent = path[depth].p_ext;
5366a18ed359SDmitry Monakhov 		if (!extent) {
5367a18ed359SDmitry Monakhov 			EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5368a18ed359SDmitry Monakhov 					 (unsigned long) start);
5369a18ed359SDmitry Monakhov 			return -EIO;
5370a18ed359SDmitry Monakhov 		}
5371f8fb4f41SDmitry Monakhov 		if (start > le32_to_cpu(extent->ee_block)) {
53729eb79482SNamjae Jeon 			/* Hole, move to the next extent */
5373f8fb4f41SDmitry Monakhov 			if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5374f8fb4f41SDmitry Monakhov 				path[depth].p_ext++;
5375f8fb4f41SDmitry Monakhov 			} else {
5376f8fb4f41SDmitry Monakhov 				start = ext4_ext_next_allocated_block(path);
53779eb79482SNamjae Jeon 				ext4_ext_drop_refs(path);
53789eb79482SNamjae Jeon 				kfree(path);
5379f8fb4f41SDmitry Monakhov 				continue;
53809eb79482SNamjae Jeon 			}
53819eb79482SNamjae Jeon 		}
53829eb79482SNamjae Jeon 		ret = ext4_ext_shift_path_extents(path, shift, inode,
53839eb79482SNamjae Jeon 				handle, &start);
53849eb79482SNamjae Jeon 		ext4_ext_drop_refs(path);
53859eb79482SNamjae Jeon 		kfree(path);
53869eb79482SNamjae Jeon 		if (ret)
53879eb79482SNamjae Jeon 			break;
53889eb79482SNamjae Jeon 	}
53899eb79482SNamjae Jeon 
53909eb79482SNamjae Jeon 	return ret;
53919eb79482SNamjae Jeon }
53929eb79482SNamjae Jeon 
53939eb79482SNamjae Jeon /*
53949eb79482SNamjae Jeon  * ext4_collapse_range:
53959eb79482SNamjae Jeon  * This implements fallocate's collapse-range functionality for ext4.
53969eb79482SNamjae Jeon  * Returns 0 on success, non-zero on error.
53979eb79482SNamjae Jeon  */
53989eb79482SNamjae Jeon int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
53999eb79482SNamjae Jeon {
54009eb79482SNamjae Jeon 	struct super_block *sb = inode->i_sb;
54019eb79482SNamjae Jeon 	ext4_lblk_t punch_start, punch_stop;
54029eb79482SNamjae Jeon 	handle_t *handle;
54039eb79482SNamjae Jeon 	unsigned int credits;
5404a8680e0dSNamjae Jeon 	loff_t new_size, ioffset;
54059eb79482SNamjae Jeon 	int ret;
54069eb79482SNamjae Jeon 
54079eb79482SNamjae Jeon 	/* Collapse range works only on fs cluster size aligned offsets. */
5408ee98fa3aSNamjae Jeon 	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
5409ee98fa3aSNamjae Jeon 	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
54109eb79482SNamjae Jeon 		return -EINVAL;
54119eb79482SNamjae Jeon 
54129eb79482SNamjae Jeon 	if (!S_ISREG(inode->i_mode))
541386f1ca38STheodore Ts'o 		return -EINVAL;
54149eb79482SNamjae Jeon 
54159eb79482SNamjae Jeon 	trace_ext4_collapse_range(inode, offset, len);
54169eb79482SNamjae Jeon 
54179eb79482SNamjae Jeon 	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
54189eb79482SNamjae Jeon 	punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
54199eb79482SNamjae Jeon 
54201ce01c4aSNamjae Jeon 	/* Call ext4_force_commit to flush all data in case of data=journal. */
54211ce01c4aSNamjae Jeon 	if (ext4_should_journal_data(inode)) {
54221ce01c4aSNamjae Jeon 		ret = ext4_force_commit(inode->i_sb);
54231ce01c4aSNamjae Jeon 		if (ret)
54241ce01c4aSNamjae Jeon 			return ret;
54251ce01c4aSNamjae Jeon 	}
54261ce01c4aSNamjae Jeon 
5427a8680e0dSNamjae Jeon 	/*
5428a8680e0dSNamjae Jeon 	 * Round the offset down to a page-size boundary; this is needed
5429a8680e0dSNamjae Jeon 	 * when the page size is larger than the block size.
5430a8680e0dSNamjae Jeon 	 */
5431a8680e0dSNamjae Jeon 	ioffset = round_down(offset, PAGE_SIZE);
5432a8680e0dSNamjae Jeon 
54339eb79482SNamjae Jeon 	/* Write out all dirty pages */
5434a8680e0dSNamjae Jeon 	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
5435a8680e0dSNamjae Jeon 					   LLONG_MAX);
54369eb79482SNamjae Jeon 	if (ret)
54379eb79482SNamjae Jeon 		return ret;
54389eb79482SNamjae Jeon 
54399eb79482SNamjae Jeon 	/* Take mutex lock */
54409eb79482SNamjae Jeon 	mutex_lock(&inode->i_mutex);
54419eb79482SNamjae Jeon 
544223fffa92SLukas Czerner 	/*
544323fffa92SLukas Czerner 	 * The collapse range must not reach or overlap EOF; such a request
544423fffa92SLukas Czerner 	 * would effectively be a truncate operation.
544523fffa92SLukas Czerner 	 */
544623fffa92SLukas Czerner 	if (offset + len >= i_size_read(inode)) {
544723fffa92SLukas Czerner 		ret = -EINVAL;
544823fffa92SLukas Czerner 		goto out_mutex;
544923fffa92SLukas Czerner 	}
545023fffa92SLukas Czerner 
54519eb79482SNamjae Jeon 	/* Currently just for extent based files */
54529eb79482SNamjae Jeon 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
54539eb79482SNamjae Jeon 		ret = -EOPNOTSUPP;
54549eb79482SNamjae Jeon 		goto out_mutex;
54559eb79482SNamjae Jeon 	}
54569eb79482SNamjae Jeon 
5457a8680e0dSNamjae Jeon 	truncate_pagecache(inode, ioffset);
54589eb79482SNamjae Jeon 
54599eb79482SNamjae Jeon 	/* Wait for existing dio to complete */
54609eb79482SNamjae Jeon 	ext4_inode_block_unlocked_dio(inode);
54619eb79482SNamjae Jeon 	inode_dio_wait(inode);
54629eb79482SNamjae Jeon 
54639eb79482SNamjae Jeon 	credits = ext4_writepage_trans_blocks(inode);
54649eb79482SNamjae Jeon 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
54659eb79482SNamjae Jeon 	if (IS_ERR(handle)) {
54669eb79482SNamjae Jeon 		ret = PTR_ERR(handle);
54679eb79482SNamjae Jeon 		goto out_dio;
54689eb79482SNamjae Jeon 	}
54699eb79482SNamjae Jeon 
54709eb79482SNamjae Jeon 	down_write(&EXT4_I(inode)->i_data_sem);
54719eb79482SNamjae Jeon 	ext4_discard_preallocations(inode);
54729eb79482SNamjae Jeon 
54739eb79482SNamjae Jeon 	ret = ext4_es_remove_extent(inode, punch_start,
54742c1d2328SLukas Czerner 				    EXT_MAX_BLOCKS - punch_start);
54759eb79482SNamjae Jeon 	if (ret) {
54769eb79482SNamjae Jeon 		up_write(&EXT4_I(inode)->i_data_sem);
54779eb79482SNamjae Jeon 		goto out_stop;
54789eb79482SNamjae Jeon 	}
54799eb79482SNamjae Jeon 
54809eb79482SNamjae Jeon 	ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
54819eb79482SNamjae Jeon 	if (ret) {
54829eb79482SNamjae Jeon 		up_write(&EXT4_I(inode)->i_data_sem);
54839eb79482SNamjae Jeon 		goto out_stop;
54849eb79482SNamjae Jeon 	}
5485ef24f6c2SLukas Czerner 	ext4_discard_preallocations(inode);
54869eb79482SNamjae Jeon 
54879eb79482SNamjae Jeon 	ret = ext4_ext_shift_extents(inode, handle, punch_stop,
54889eb79482SNamjae Jeon 				     punch_stop - punch_start);
54899eb79482SNamjae Jeon 	if (ret) {
54909eb79482SNamjae Jeon 		up_write(&EXT4_I(inode)->i_data_sem);
54919eb79482SNamjae Jeon 		goto out_stop;
54929eb79482SNamjae Jeon 	}
54939eb79482SNamjae Jeon 
54949eb79482SNamjae Jeon 	new_size = i_size_read(inode) - len;
54959337d5d3SLukas Czerner 	i_size_write(inode, new_size);
54969eb79482SNamjae Jeon 	EXT4_I(inode)->i_disksize = new_size;
54979eb79482SNamjae Jeon 
54989eb79482SNamjae Jeon 	up_write(&EXT4_I(inode)->i_data_sem);
54999eb79482SNamjae Jeon 	if (IS_SYNC(inode))
55009eb79482SNamjae Jeon 		ext4_handle_sync(handle);
55019eb79482SNamjae Jeon 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
55029eb79482SNamjae Jeon 	ext4_mark_inode_dirty(handle, inode);
55039eb79482SNamjae Jeon 
55049eb79482SNamjae Jeon out_stop:
55059eb79482SNamjae Jeon 	ext4_journal_stop(handle);
55069eb79482SNamjae Jeon out_dio:
55079eb79482SNamjae Jeon 	ext4_inode_resume_unlocked_dio(inode);
55089eb79482SNamjae Jeon out_mutex:
55099eb79482SNamjae Jeon 	mutex_unlock(&inode->i_mutex);
55109eb79482SNamjae Jeon 	return ret;
55119eb79482SNamjae Jeon }
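/*
 * A minimal user-space sketch (illustrative, not part of this file):
 * collapse range is driven through fallocate(2) with
 * FALLOC_FL_COLLAPSE_RANGE; offset and len must be multiples of the
 * filesystem block size.  The file name and byte ranges below are
 * hypothetical, and the snippet is compiled out with #if 0.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("file.dat", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Drop bytes [4096, 12288) and shift the rest of the file down. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 8192) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
#endif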
5512fcf6b1b7SDmitry Monakhov 
5513fcf6b1b7SDmitry Monakhov /**
5514fcf6b1b7SDmitry Monakhov  * ext4_swap_extents - Swap extents between two inodes
5515fcf6b1b7SDmitry Monakhov  *
5516fcf6b1b7SDmitry Monakhov  * @inode1:	First inode
5517fcf6b1b7SDmitry Monakhov  * @inode2:	Second inode
5518fcf6b1b7SDmitry Monakhov  * @lblk1:	Start block for first inode
5519fcf6b1b7SDmitry Monakhov  * @lblk2:	Start block for second inode
5520fcf6b1b7SDmitry Monakhov  * @count:	Number of blocks to swap
5521fcf6b1b7SDmitry Monakhov  * @unwritten:	Mark second inode's extents as unwritten after swap
5522fcf6b1b7SDmitry Monakhov  * @erp:	Pointer to save error value
5523fcf6b1b7SDmitry Monakhov  *
5524fcf6b1b7SDmitry Monakhov  * This helper routine does exactly what its name promises: swap extents.
5525fcf6b1b7SDmitry Monakhov  * Everything else, such as page-cache locking consistency, bh mapping
5526fcf6b1b7SDmitry Monakhov  * consistency and copying of extent data, must be handled by the caller.
5527fcf6b1b7SDmitry Monakhov  * Locking:
5528fcf6b1b7SDmitry Monakhov  * 		i_mutex is held for both inodes
5529fcf6b1b7SDmitry Monakhov  * 		i_data_sem is locked for write for both inodes
5530fcf6b1b7SDmitry Monakhov  * Assumptions:
5531fcf6b1b7SDmitry Monakhov  *		All pages from requested range are locked for both inodes
5532fcf6b1b7SDmitry Monakhov  */
5533fcf6b1b7SDmitry Monakhov int
5534fcf6b1b7SDmitry Monakhov ext4_swap_extents(handle_t *handle, struct inode *inode1,
5535fcf6b1b7SDmitry Monakhov 		     struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5536fcf6b1b7SDmitry Monakhov 		  ext4_lblk_t count, int unwritten, int *erp)
5537fcf6b1b7SDmitry Monakhov {
5538fcf6b1b7SDmitry Monakhov 	struct ext4_ext_path *path1 = NULL;
5539fcf6b1b7SDmitry Monakhov 	struct ext4_ext_path *path2 = NULL;
5540fcf6b1b7SDmitry Monakhov 	int replaced_count = 0;
5541fcf6b1b7SDmitry Monakhov 
5542fcf6b1b7SDmitry Monakhov 	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5543fcf6b1b7SDmitry Monakhov 	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
5544fcf6b1b7SDmitry Monakhov 	BUG_ON(!mutex_is_locked(&inode1->i_mutex));
5545fcf6b1b7SDmitry Monakhov 	BUG_ON(!mutex_is_locked(&inode2->i_mutex));
5546fcf6b1b7SDmitry Monakhov 
5547fcf6b1b7SDmitry Monakhov 	*erp = ext4_es_remove_extent(inode1, lblk1, count);
5548*19008f6dSTheodore Ts'o 	if (unlikely(*erp))
5549fcf6b1b7SDmitry Monakhov 		return 0;
5550fcf6b1b7SDmitry Monakhov 	*erp = ext4_es_remove_extent(inode2, lblk2, count);
5551*19008f6dSTheodore Ts'o 	if (unlikely(*erp))
5552fcf6b1b7SDmitry Monakhov 		return 0;
5553fcf6b1b7SDmitry Monakhov 
5554fcf6b1b7SDmitry Monakhov 	while (count) {
5555fcf6b1b7SDmitry Monakhov 		struct ext4_extent *ex1, *ex2, tmp_ex;
5556fcf6b1b7SDmitry Monakhov 		ext4_lblk_t e1_blk, e2_blk;
5557fcf6b1b7SDmitry Monakhov 		int e1_len, e2_len, len;
5558fcf6b1b7SDmitry Monakhov 		int split = 0;
5559fcf6b1b7SDmitry Monakhov 
5560fcf6b1b7SDmitry Monakhov 		path1 = ext4_ext_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5561*19008f6dSTheodore Ts'o 		if (unlikely(IS_ERR(path1))) {
5562fcf6b1b7SDmitry Monakhov 			*erp = PTR_ERR(path1);
5563*19008f6dSTheodore Ts'o 			path1 = NULL;
5564*19008f6dSTheodore Ts'o 		finish:
5565*19008f6dSTheodore Ts'o 			count = 0;
5566*19008f6dSTheodore Ts'o 			goto repeat;
5567fcf6b1b7SDmitry Monakhov 		}
5568fcf6b1b7SDmitry Monakhov 		path2 = ext4_ext_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5569*19008f6dSTheodore Ts'o 		if (unlikely(IS_ERR(path2))) {
5570fcf6b1b7SDmitry Monakhov 			*erp = PTR_ERR(path2);
5571*19008f6dSTheodore Ts'o 			path2 = NULL;
5572*19008f6dSTheodore Ts'o 			goto finish;
5573fcf6b1b7SDmitry Monakhov 		}
5574fcf6b1b7SDmitry Monakhov 		ex1 = path1[path1->p_depth].p_ext;
5575fcf6b1b7SDmitry Monakhov 		ex2 = path2[path2->p_depth].p_ext;
5576fcf6b1b7SDmitry Monakhov 		/* Do we have something to swap? */
5577fcf6b1b7SDmitry Monakhov 		if (unlikely(!ex2 || !ex1))
5578*19008f6dSTheodore Ts'o 			goto finish;
5579fcf6b1b7SDmitry Monakhov 
5580fcf6b1b7SDmitry Monakhov 		e1_blk = le32_to_cpu(ex1->ee_block);
5581fcf6b1b7SDmitry Monakhov 		e2_blk = le32_to_cpu(ex2->ee_block);
5582fcf6b1b7SDmitry Monakhov 		e1_len = ext4_ext_get_actual_len(ex1);
5583fcf6b1b7SDmitry Monakhov 		e2_len = ext4_ext_get_actual_len(ex2);
5584fcf6b1b7SDmitry Monakhov 
5585fcf6b1b7SDmitry Monakhov 		/* Hole handling */
5586fcf6b1b7SDmitry Monakhov 		if (!in_range(lblk1, e1_blk, e1_len) ||
5587fcf6b1b7SDmitry Monakhov 		    !in_range(lblk2, e2_blk, e2_len)) {
5588fcf6b1b7SDmitry Monakhov 			ext4_lblk_t next1, next2;
5589fcf6b1b7SDmitry Monakhov 
5590fcf6b1b7SDmitry Monakhov 			/* If the hole is after the extent, go to the next extent */
5591fcf6b1b7SDmitry Monakhov 			next1 = ext4_ext_next_allocated_block(path1);
5592fcf6b1b7SDmitry Monakhov 			next2 = ext4_ext_next_allocated_block(path2);
5593fcf6b1b7SDmitry Monakhov 			/* If hole before extent, then shift to that extent */
5594fcf6b1b7SDmitry Monakhov 			if (e1_blk > lblk1)
5595fcf6b1b7SDmitry Monakhov 				next1 = e1_blk;
5596fcf6b1b7SDmitry Monakhov 			if (e2_blk > lblk2)
5597fcf6b1b7SDmitry Monakhov 				next2 = e2_blk;
5598fcf6b1b7SDmitry Monakhov 			/* Do we have something to swap */
5599fcf6b1b7SDmitry Monakhov 			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
5600*19008f6dSTheodore Ts'o 				goto finish;
5601fcf6b1b7SDmitry Monakhov 			/* Move to the rightmost boundary */
5602fcf6b1b7SDmitry Monakhov 			len = next1 - lblk1;
5603fcf6b1b7SDmitry Monakhov 			if (len < next2 - lblk2)
5604fcf6b1b7SDmitry Monakhov 				len = next2 - lblk2;
5605fcf6b1b7SDmitry Monakhov 			if (len > count)
5606fcf6b1b7SDmitry Monakhov 				len = count;
5607fcf6b1b7SDmitry Monakhov 			lblk1 += len;
5608fcf6b1b7SDmitry Monakhov 			lblk2 += len;
5609fcf6b1b7SDmitry Monakhov 			count -= len;
5610fcf6b1b7SDmitry Monakhov 			goto repeat;
5611fcf6b1b7SDmitry Monakhov 		}
5612fcf6b1b7SDmitry Monakhov 
5613fcf6b1b7SDmitry Monakhov 		/* Prepare left boundary */
5614fcf6b1b7SDmitry Monakhov 		if (e1_blk < lblk1) {
5615fcf6b1b7SDmitry Monakhov 			split = 1;
5616fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode1,
5617fcf6b1b7SDmitry Monakhov 						path1, lblk1, 0);
5618*19008f6dSTheodore Ts'o 			if (unlikely(*erp))
5619*19008f6dSTheodore Ts'o 				goto finish;
5620fcf6b1b7SDmitry Monakhov 		}
5621fcf6b1b7SDmitry Monakhov 		if (e2_blk < lblk2) {
5622fcf6b1b7SDmitry Monakhov 			split = 1;
5623fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode2,
5624fcf6b1b7SDmitry Monakhov 						path2,  lblk2, 0);
5625*19008f6dSTheodore Ts'o 			if (unlikely(*erp))
5626*19008f6dSTheodore Ts'o 				goto finish;
5627fcf6b1b7SDmitry Monakhov 		}
5628fcf6b1b7SDmitry Monakhov 		/* ext4_split_extent_at() may result in a leaf extent split;
5629fcf6b1b7SDmitry Monakhov 		 * the paths must be revalidated. */
5630fcf6b1b7SDmitry Monakhov 		if (split)
5631fcf6b1b7SDmitry Monakhov 			goto repeat;
5632fcf6b1b7SDmitry Monakhov 
5633fcf6b1b7SDmitry Monakhov 		/* Prepare right boundary */
5634fcf6b1b7SDmitry Monakhov 		len = count;
5635fcf6b1b7SDmitry Monakhov 		if (len > e1_blk + e1_len - lblk1)
5636fcf6b1b7SDmitry Monakhov 			len = e1_blk + e1_len - lblk1;
5637fcf6b1b7SDmitry Monakhov 		if (len > e2_blk + e2_len - lblk2)
5638fcf6b1b7SDmitry Monakhov 			len = e2_blk + e2_len - lblk2;
5639fcf6b1b7SDmitry Monakhov 
5640fcf6b1b7SDmitry Monakhov 		if (len != e1_len) {
5641fcf6b1b7SDmitry Monakhov 			split = 1;
5642fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode1,
5643fcf6b1b7SDmitry Monakhov 						path1, lblk1 + len, 0);
5644*19008f6dSTheodore Ts'o 			if (unlikely(*erp))
5645*19008f6dSTheodore Ts'o 				goto finish;
5646fcf6b1b7SDmitry Monakhov 		}
5647fcf6b1b7SDmitry Monakhov 		if (len != e2_len) {
5648fcf6b1b7SDmitry Monakhov 			split = 1;
5649fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode2,
5650fcf6b1b7SDmitry Monakhov 						path2, lblk2 + len, 0);
5651fcf6b1b7SDmitry Monakhov 			if (*erp)
5652*19008f6dSTheodore Ts'o 				goto finish;
5653fcf6b1b7SDmitry Monakhov 		}
5654fcf6b1b7SDmitry Monakhov 		/* ext4_split_extent_at() may result in a leaf extent split;
5655fcf6b1b7SDmitry Monakhov 		 * the paths must be revalidated. */
5656fcf6b1b7SDmitry Monakhov 		if (split)
5657fcf6b1b7SDmitry Monakhov 			goto repeat;
5658fcf6b1b7SDmitry Monakhov 
5659fcf6b1b7SDmitry Monakhov 		BUG_ON(e2_len != e1_len);
5660fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
5661*19008f6dSTheodore Ts'o 		if (unlikely(*erp))
5662*19008f6dSTheodore Ts'o 			goto finish;
5663fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
5664*19008f6dSTheodore Ts'o 		if (unlikely(*erp))
5665*19008f6dSTheodore Ts'o 			goto finish;
5666fcf6b1b7SDmitry Monakhov 
5667fcf6b1b7SDmitry Monakhov 		/* Both extents are fully inside the boundaries. Swap them now. */
5668fcf6b1b7SDmitry Monakhov 		tmp_ex = *ex1;
5669fcf6b1b7SDmitry Monakhov 		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5670fcf6b1b7SDmitry Monakhov 		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5671fcf6b1b7SDmitry Monakhov 		ex1->ee_len = cpu_to_le16(e2_len);
5672fcf6b1b7SDmitry Monakhov 		ex2->ee_len = cpu_to_le16(e1_len);
5673fcf6b1b7SDmitry Monakhov 		if (unwritten)
5674fcf6b1b7SDmitry Monakhov 			ext4_ext_mark_unwritten(ex2);
5675fcf6b1b7SDmitry Monakhov 		if (ext4_ext_is_unwritten(&tmp_ex))
5676fcf6b1b7SDmitry Monakhov 			ext4_ext_mark_unwritten(ex1);
5677fcf6b1b7SDmitry Monakhov 
5678fcf6b1b7SDmitry Monakhov 		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5679fcf6b1b7SDmitry Monakhov 		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5680fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_dirty(handle, inode2, path2 +
5681fcf6b1b7SDmitry Monakhov 				      path2->p_depth);
5682*19008f6dSTheodore Ts'o 		if (unlikely(*erp))
5683*19008f6dSTheodore Ts'o 			goto finish;
5684fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_dirty(handle, inode1, path1 +
5685fcf6b1b7SDmitry Monakhov 				      path1->p_depth);
5686fcf6b1b7SDmitry Monakhov 		/*
5687fcf6b1b7SDmitry Monakhov 		 * Looks scary, but the second inode already points to the new
5688fcf6b1b7SDmitry Monakhov 		 * blocks and was successfully dirtied. An error here can only
5689fcf6b1b7SDmitry Monakhov 		 * be a journal error, in which case the whole transaction will
5690fcf6b1b7SDmitry Monakhov 		 * be aborted anyway.
5691fcf6b1b7SDmitry Monakhov 		 */
5692*19008f6dSTheodore Ts'o 		if (unlikely(*erp))
5693*19008f6dSTheodore Ts'o 			goto finish;
5694fcf6b1b7SDmitry Monakhov 		lblk1 += len;
5695fcf6b1b7SDmitry Monakhov 		lblk2 += len;
5696fcf6b1b7SDmitry Monakhov 		replaced_count += len;
5697fcf6b1b7SDmitry Monakhov 		count -= len;
5698fcf6b1b7SDmitry Monakhov 
5699fcf6b1b7SDmitry Monakhov 	repeat:
5700fcf6b1b7SDmitry Monakhov 		if (path1) {
5701fcf6b1b7SDmitry Monakhov 			ext4_ext_drop_refs(path1);
5702fcf6b1b7SDmitry Monakhov 			kfree(path1);
5703fcf6b1b7SDmitry Monakhov 			path1 = NULL;
5704fcf6b1b7SDmitry Monakhov 		}
5705fcf6b1b7SDmitry Monakhov 		if (path2) {
5706fcf6b1b7SDmitry Monakhov 			ext4_ext_drop_refs(path2);
5707fcf6b1b7SDmitry Monakhov 			kfree(path2);
5708fcf6b1b7SDmitry Monakhov 			path2 = NULL;
5709fcf6b1b7SDmitry Monakhov 		}
5710fcf6b1b7SDmitry Monakhov 	}
5711fcf6b1b7SDmitry Monakhov 	return replaced_count;
5712fcf6b1b7SDmitry Monakhov }
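/*
 * Illustrative user-space sketch (not part of this file): the usual way
 * to exercise ext4_swap_extents() is the EXT4_IOC_MOVE_EXT ioctl used by
 * e4defrag, which moves @len blocks from the donor file into the
 * original file.  The struct layout and ioctl number below are restated
 * from fs/ext4/ext4.h as an assumption; the file names are hypothetical
 * and the snippet is compiled out with #if 0.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct move_extent {
	uint32_t reserved;	/* must be zero */
	uint32_t donor_fd;	/* fd of the donor file */
	uint64_t orig_start;	/* logical start of the original file, in blocks */
	uint64_t donor_start;	/* logical start of the donor file, in blocks */
	uint64_t len;		/* number of blocks to exchange */
	uint64_t moved_len;	/* filled in by the kernel */
};
#define EXT4_IOC_MOVE_EXT	_IOWR('f', 15, struct move_extent)

int main(void)
{
	struct move_extent me = { .orig_start = 0, .donor_start = 0, .len = 16 };
	int orig = open("orig.dat", O_RDWR);
	int donor = open("donor.dat", O_RDWR);

	if (orig < 0 || donor < 0) {
		perror("open");
		return 1;
	}
	me.donor_fd = donor;
	if (ioctl(orig, EXT4_IOC_MOVE_EXT, &me) < 0)
		perror("EXT4_IOC_MOVE_EXT");
	printf("moved %llu blocks\n", (unsigned long long)me.moved_len);
	close(donor);
	close(orig);
	return 0;
}
#endif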
5713