// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

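/*
 * Extent tree nodes that live in separate disk blocks (i.e. everything
 * but the root in the inode body) end with an ext4_extent_tail holding a
 * checksum.  The helpers below compute, verify and (re)set that checksum;
 * they are no-ops on filesystems without metadata checksums.
 */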
static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_rwsem. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

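/*
 * Release the buffer heads referenced at every level of an extent path
 * array.  ext4_free_ext_path() additionally frees the array itself; both
 * helpers tolerate a NULL path.
 */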
static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		brelse(path->p_bh);
		path->p_bh = NULL;
	}
}

void ext4_free_ext_path(struct ext4_ext_path *path)
{
	ext4_ext_drop_refs(path);
	kfree(path);
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * the transaction with 'restart_cred' credits. The function drops
 * i_data_sem while the transaction is being restarted and re-acquires it
 * once the transaction has been restarted.
 *
 * The function returns 0 on success, 1 if the transaction had to be
 * restarted, and < 0 in case of a fatal error.
 */
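/*
 * Sketch of the expected calling pattern (the names below are
 * illustrative, not from this file); i_data_sem must be held for
 * writing on entry:
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, needed_credits,
 *					  restart_credits, revoke_credits);
 *	if (err < 0)
 *		return err;	- fatal error
 *	if (err > 0)
 *		goto again;	- the transaction was restarted, so any
 *				  previously looked-up extent path is stale
 */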
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	int err = 0;

	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    path->p_bh, EXT4_JTR_NONE);
		/*
		 * The extent buffer's verified bit will be set again in
		 * __ext4_ext_dirty(). We could leave an inconsistent
		 * buffer if the extents updating procedure breaks off due
		 * to some error, so force it to be checked again.
		 */
		if (!err)
			clear_buffer_verified(path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
		/* Extents updating done, re-set verified flag */
		if (!err)
			set_buffer_verified(path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

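/*
 * Return a preferred physical block for an allocation at logical block
 * 'block': extrapolate from the extent recorded in the path if there is
 * one, else fall back to the index block's own location, and finally to
 * the inode's goal block group.
 */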
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file. However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space. Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

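/*
 * The four helpers below return how many entries fit in an extent tree
 * node: leaf extents vs. index entries, stored either in a full disk
 * block or in the in-inode root (i_data).  With AGGRESSIVE_TEST defined,
 * the capacities are artificially capped so that deep trees can be
 * exercised with small files.
 */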
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

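/*
 * Maximum number of entries allowed in a node at the given depth.  The
 * root node (the one whose depth equals the inode's tree depth) lives in
 * the inode body and is smaller than nodes stored in full disk blocks.
 */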
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
				     int depth)
{
	unsigned short entries;
	ext4_lblk_t lblock = 0;
	ext4_lblk_t cur = 0;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);

		/*
		 * The logical block in the first entry should equal the
		 * number in the index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext->ee_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			if (lblock < cur) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			cur = lblock + ext4_ext_get_actual_len(ext);
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		/*
		 * The logical block in the first entry should equal the
		 * number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext_idx->ei_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;

			/* Check for overlapping index extents */
			lblock = le32_to_cpu(ext_idx->ei_block);
			if (lblock < cur) {
				*pblk = ext4_idx_pblock(ext_idx);
				return 0;
			}
			ext_idx++;
			entries--;
			cur = lblock + 1;
		}
	}
	return 1;
}

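/*
 * Sanity-check a single extent tree node: magic, depth, entry counts
 * against the per-node maximum, the entries themselves, and (for nodes
 * stored outside the inode) the block checksum.  Failures are reported
 * via ext4_error_inode_err(); returns 0 or a negative errno.
 */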
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
		error_msg = "eh_entries is 0 but eh_depth is > 0";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

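/*
 * Populate the extent status tree from one leaf node: written and
 * unwritten extents are cached as such, and the gaps between consecutive
 * extents are cached as holes.
 */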
static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}

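/*
 * Read the node referenced by index entry 'idx' at tree level 'depth'.
 * Freshly read blocks are verified with __ext4_ext_check() and, for leaf
 * nodes (unless EXT4_EX_NOCACHE), their extents are cached in the extent
 * status tree.  Returns the buffer head or an ERR_PTR() value.
 */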
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, struct ext4_extent_idx *idx,
			 int depth, int flags)
{
	struct buffer_head *bh;
	int err;
	gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
	ext4_fsblk_t pblk;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	pblk = ext4_idx_pblock(idx);
	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL, false);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
			       depth, pblk, le32_to_cpu(idx->ei_block));
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);

}

#define read_extent_tree_block(inode, idx, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (idx),	\
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0; /* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode, path[i].p_idx++,
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_free_ext_path(path);
	return ret;
}

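/*
 * Debugging helpers: with EXT_DEBUG defined they dump paths, leaves and
 * entry moves via ext_debug(); otherwise they compile away to nothing.
 */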
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, " %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, " %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, " []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug(inode, "binsearch for %u(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));

		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_idx = l - 1;
	ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug(inode, "binsearch for %u: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));

		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_ext = l - 1;
	ext_debug(inode, " -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_unwritten(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

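/*
 * Initialize an empty extent tree in the inode body: depth 0, no
 * entries, and eh_max sized for the space available in i_data.
 */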
void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	eh->eh_generation = 0;
	ext4_mark_inode_dirty(handle, inode);
}

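/*
 * Walk the extent tree from the root down to the leaf covering 'block',
 * recording the node visited at each level in a path array.  A path
 * passed in via 'orig_path' is reused when it is deep enough; on error
 * the path is freed and *orig_path is cleared.
 */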
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
			       gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	if (orig_path)
		*orig_path = path;
	return path;

err:
	ext4_free_ext_path(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	gfp_t gfp_flags = GFP_NOFS;
	int err = 0;
	size_t ext_size = 0;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug(inode, "leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug(inode, "leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break off processing and mark the
	 * filesystem read-only: the index won't be inserted and the
	 * tree will stay in a consistent state. The next mount will
	 * repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
					     EXT4_JTR_NONE);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	neh->eh_generation = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

1190a86c6181SAlex Tomas }
1191a86c6181SAlex Tomas
1192a86c6181SAlex Tomas /* create intermediate indexes */
1193a86c6181SAlex Tomas k = depth - at - 1;
1194273df556SFrank Mayhar if (unlikely(k < 0)) {
1195273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "k %d < 0!", k);
11966a797d27SDarrick J. Wong err = -EFSCORRUPTED;
1197273df556SFrank Mayhar goto cleanup;
1198273df556SFrank Mayhar }
1199a86c6181SAlex Tomas if (k)
120070aa1554SRitesh Harjani ext_debug(inode, "create %d intermediate indices\n", k);
1201a86c6181SAlex Tomas /* insert new index into current index block */
1202a86c6181SAlex Tomas /* current depth stored in i var */
1203a86c6181SAlex Tomas i = depth - 1;
1204a86c6181SAlex Tomas while (k--) {
1205a86c6181SAlex Tomas oldblock = newblock;
1206a86c6181SAlex Tomas newblock = ablocks[--a];
1207bba90743SEric Sandeen bh = sb_getblk(inode->i_sb, newblock);
1208aebf0243SWang Shilong if (unlikely(!bh)) {
1209860d21e2STheodore Ts'o err = -ENOMEM;
1210a86c6181SAlex Tomas goto cleanup;
1211a86c6181SAlex Tomas }
1212a86c6181SAlex Tomas lock_buffer(bh);
1213a86c6181SAlex Tomas
1214188c299eSJan Kara err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1215188c299eSJan Kara EXT4_JTR_NONE);
12167e028976SAvantika Mathur if (err)
1217a86c6181SAlex Tomas goto cleanup;
1218a86c6181SAlex Tomas
1219a86c6181SAlex Tomas neh = ext_block_hdr(bh);
1220a86c6181SAlex Tomas neh->eh_entries = cpu_to_le16(1);
1221a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC;
122255ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1223a86c6181SAlex Tomas neh->eh_depth = cpu_to_le16(depth - i);
1224ce3aba43SAnirudh Rayabharam neh->eh_generation = 0;
1225a86c6181SAlex Tomas fidx = EXT_FIRST_INDEX(neh);
1226a86c6181SAlex Tomas fidx->ei_block = border;
1227f65e6fbaSAlex Tomas ext4_idx_store_pblock(fidx, oldblock);
1228a86c6181SAlex Tomas
122970aa1554SRitesh Harjani ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
1230bba90743SEric Sandeen i, newblock, le32_to_cpu(border), oldblock);
1231a86c6181SAlex Tomas
12321b16da77SYongqiang Yang /* move remainder of path[i] to the new index block */
1233273df556SFrank Mayhar if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1234273df556SFrank Mayhar EXT_LAST_INDEX(path[i].p_hdr))) {
1235273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1236273df556SFrank Mayhar "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1237273df556SFrank Mayhar le32_to_cpu(path[i].p_ext->ee_block));
12386a797d27SDarrick J. Wong err = -EFSCORRUPTED;
1239273df556SFrank Mayhar goto cleanup;
1240273df556SFrank Mayhar }
12411b16da77SYongqiang Yang /* start copy indexes */
12421b16da77SYongqiang Yang m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
124370aa1554SRitesh Harjani ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
12441b16da77SYongqiang Yang EXT_MAX_INDEX(path[i].p_hdr));
12451b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, i);
1246a86c6181SAlex Tomas if (m) {
12471b16da77SYongqiang Yang memmove(++fidx, path[i].p_idx,
1248a86c6181SAlex Tomas sizeof(struct ext4_extent_idx) * m);
1249e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m);
1250a86c6181SAlex Tomas }
1251592acbf1SSriram Rajagopalan /* zero out unused area in the extent block */
1252592acbf1SSriram Rajagopalan ext_size = sizeof(struct ext4_extent_header) +
1253592acbf1SSriram Rajagopalan (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
1254592acbf1SSriram Rajagopalan memset(bh->b_data + ext_size, 0,
1255592acbf1SSriram Rajagopalan inode->i_sb->s_blocksize - ext_size);
12567ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, neh);
1257a86c6181SAlex Tomas set_buffer_uptodate(bh);
1258a86c6181SAlex Tomas unlock_buffer(bh);
1259a86c6181SAlex Tomas
12600390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh);
12617e028976SAvantika Mathur if (err)
1262a86c6181SAlex Tomas goto cleanup;
1263a86c6181SAlex Tomas brelse(bh);
1264a86c6181SAlex Tomas bh = NULL;
1265a86c6181SAlex Tomas
1266a86c6181SAlex Tomas /* correct old index */
1267a86c6181SAlex Tomas if (m) {
1268a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + i);
1269a86c6181SAlex Tomas if (err)
1270a86c6181SAlex Tomas goto cleanup;
1271e8546d06SMarcin Slusarz le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1272a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path + i);
1273a86c6181SAlex Tomas if (err)
1274a86c6181SAlex Tomas goto cleanup;
1275a86c6181SAlex Tomas }
1276a86c6181SAlex Tomas
1277a86c6181SAlex Tomas i--;
1278a86c6181SAlex Tomas }
1279a86c6181SAlex Tomas
1280a86c6181SAlex Tomas /* insert new index */
1281a86c6181SAlex Tomas err = ext4_ext_insert_index(handle, inode, path + at,
1282a86c6181SAlex Tomas le32_to_cpu(border), newblock);
1283a86c6181SAlex Tomas
1284a86c6181SAlex Tomas cleanup:
1285a86c6181SAlex Tomas if (bh) {
1286a86c6181SAlex Tomas if (buffer_locked(bh))
1287a86c6181SAlex Tomas unlock_buffer(bh);
1288a86c6181SAlex Tomas brelse(bh);
1289a86c6181SAlex Tomas }
1290a86c6181SAlex Tomas
1291a86c6181SAlex Tomas if (err) {
1292a86c6181SAlex Tomas /* free all allocated blocks in error case */
1293a86c6181SAlex Tomas for (i = 0; i < depth; i++) {
1294a86c6181SAlex Tomas if (!ablocks[i])
1295a86c6181SAlex Tomas continue;
12967dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1297e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA);
1298a86c6181SAlex Tomas }
1299a86c6181SAlex Tomas }
1300a86c6181SAlex Tomas kfree(ablocks);
1301a86c6181SAlex Tomas
1302a86c6181SAlex Tomas return err;
1303a86c6181SAlex Tomas }
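
/*
 * Worked example for ext4_ext_split() above (editorial sketch, not part
 * of the original source): with a tree of depth 2 and a free index slot
 * found at level at = 1, the function allocates depth - at = 1 new
 * block, copies the extents after the split point into that new leaf,
 * and inserts a single index entry for it at level 1.  Calling it with
 * at = 0 would allocate two blocks instead (one intermediate index
 * block plus the new leaf, since k = depth - at - 1 = 1) and chain them
 * together before inserting the new index into the root.
 */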
1304a86c6181SAlex Tomas
1305a86c6181SAlex Tomas /*
1306d0d856e8SRandy Dunlap * ext4_ext_grow_indepth:
1307d0d856e8SRandy Dunlap * implements tree growing procedure:
1308a86c6181SAlex Tomas * - allocates new block
1309a86c6181SAlex Tomas * - moves top-level data (index block or leaf) into the new block
1310d0d856e8SRandy Dunlap * - initializes new top-level, creating index that points to the
1311a86c6181SAlex Tomas * just created block
1312a86c6181SAlex Tomas */
1313a86c6181SAlex Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1314be5cd90dSDmitry Monakhov unsigned int flags)
1315a86c6181SAlex Tomas {
1316a86c6181SAlex Tomas struct ext4_extent_header *neh;
1317a86c6181SAlex Tomas struct buffer_head *bh;
1318be5cd90dSDmitry Monakhov ext4_fsblk_t newblock, goal = 0;
1319be5cd90dSDmitry Monakhov struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
1320a86c6181SAlex Tomas int err = 0;
1321592acbf1SSriram Rajagopalan size_t ext_size = 0;
1322a86c6181SAlex Tomas
1323be5cd90dSDmitry Monakhov /* Try to prepend new index to old one */
1324be5cd90dSDmitry Monakhov if (ext_depth(inode))
1325be5cd90dSDmitry Monakhov goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
1326be5cd90dSDmitry Monakhov if (goal > le32_to_cpu(es->s_first_data_block)) {
1327be5cd90dSDmitry Monakhov flags |= EXT4_MB_HINT_TRY_GOAL;
1328be5cd90dSDmitry Monakhov goal--;
1329be5cd90dSDmitry Monakhov } else
1330be5cd90dSDmitry Monakhov goal = ext4_inode_to_goal_block(inode);
1331be5cd90dSDmitry Monakhov newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
1332be5cd90dSDmitry Monakhov NULL, &err);
1333a86c6181SAlex Tomas if (newblock == 0)
1334a86c6181SAlex Tomas return err;
1335a86c6181SAlex Tomas
1336c45653c3SNikolay Borisov bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1337aebf0243SWang Shilong if (unlikely(!bh))
1338860d21e2STheodore Ts'o return -ENOMEM;
1339a86c6181SAlex Tomas lock_buffer(bh);
1340a86c6181SAlex Tomas
1341188c299eSJan Kara err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1342188c299eSJan Kara EXT4_JTR_NONE);
13437e028976SAvantika Mathur if (err) {
1344a86c6181SAlex Tomas unlock_buffer(bh);
1345a86c6181SAlex Tomas goto out;
1346a86c6181SAlex Tomas }
1347a86c6181SAlex Tomas
1348592acbf1SSriram Rajagopalan ext_size = sizeof(EXT4_I(inode)->i_data);
1349a86c6181SAlex Tomas /* move top-level index/leaf into new block */
1350592acbf1SSriram Rajagopalan memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
1351592acbf1SSriram Rajagopalan /* zero out unused area in the extent block */
1352592acbf1SSriram Rajagopalan memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1353a86c6181SAlex Tomas
1354a86c6181SAlex Tomas /* set size of new block */
1355a86c6181SAlex Tomas neh = ext_block_hdr(bh);
1356a86c6181SAlex Tomas /* old root could have indexes or leaves
1357a86c6181SAlex Tomas * so calculate eh_max the right way */
1358a86c6181SAlex Tomas if (ext_depth(inode))
135955ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1360a86c6181SAlex Tomas else
136155ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1362a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC;
13637ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, neh);
1364a86c6181SAlex Tomas set_buffer_uptodate(bh);
13650caaefbaSyangerkun set_buffer_verified(bh);
1366a86c6181SAlex Tomas unlock_buffer(bh);
1367a86c6181SAlex Tomas
13680390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh);
13697e028976SAvantika Mathur if (err)
1370a86c6181SAlex Tomas goto out;
1371a86c6181SAlex Tomas
13721939dd84SDmitry Monakhov /* Update top-level index: num,max,pointer */
1373a86c6181SAlex Tomas neh = ext_inode_hdr(inode);
13741939dd84SDmitry Monakhov neh->eh_entries = cpu_to_le16(1);
13751939dd84SDmitry Monakhov ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
13761939dd84SDmitry Monakhov if (neh->eh_depth == 0) {
13771939dd84SDmitry Monakhov /* Root extent block becomes index block */
13781939dd84SDmitry Monakhov neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
13791939dd84SDmitry Monakhov EXT_FIRST_INDEX(neh)->ei_block =
13801939dd84SDmitry Monakhov EXT_FIRST_EXTENT(neh)->ee_block;
13811939dd84SDmitry Monakhov }
138270aa1554SRitesh Harjani ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
1383a86c6181SAlex Tomas le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
13845a0790c2SAndi Kleen le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1385bf89d16fSTheodore Ts'o ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1386a86c6181SAlex Tomas
1387ba39ebb6SWei Yongjun le16_add_cpu(&neh->eh_depth, 1);
13884209ae12SHarshad Shirwadkar err = ext4_mark_inode_dirty(handle, inode);
1389a86c6181SAlex Tomas out:
1390a86c6181SAlex Tomas brelse(bh);
1391a86c6181SAlex Tomas
1392a86c6181SAlex Tomas return err;
1393a86c6181SAlex Tomas }
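
/*
 * Editorial sketch (not in the original source): growing a depth-0
 * tree with ext4_ext_grow_indepth().  Before the call the root in
 * i_data is itself a leaf:
 *
 *	root: eh_depth = 0, extents E0..En
 *
 * Afterwards the old root contents live in the newly allocated block
 * and the root holds a single index pointing at it:
 *
 *	root: eh_depth = 1, eh_entries = 1, index -> newblock
 *	newblock: eh_depth = 0, extents E0..En
 */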
1394a86c6181SAlex Tomas
1395a86c6181SAlex Tomas /*
1396d0d856e8SRandy Dunlap * ext4_ext_create_new_leaf:
1397d0d856e8SRandy Dunlap * finds empty index and adds new leaf.
1398d0d856e8SRandy Dunlap * if no free index is found, then it requests in-depth growing.
1399a86c6181SAlex Tomas */
1400a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1401107a7bd3STheodore Ts'o unsigned int mb_flags,
1402107a7bd3STheodore Ts'o unsigned int gb_flags,
1403dfe50809STheodore Ts'o struct ext4_ext_path **ppath,
1404a86c6181SAlex Tomas struct ext4_extent *newext)
1405a86c6181SAlex Tomas {
1406dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
1407a86c6181SAlex Tomas struct ext4_ext_path *curp;
1408a86c6181SAlex Tomas int depth, i, err = 0;
1409a86c6181SAlex Tomas
1410a86c6181SAlex Tomas repeat:
1411a86c6181SAlex Tomas i = depth = ext_depth(inode);
1412a86c6181SAlex Tomas
1413a86c6181SAlex Tomas /* walk up to the tree and look for free index entry */
1414a86c6181SAlex Tomas curp = path + depth;
1415a86c6181SAlex Tomas while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1416a86c6181SAlex Tomas i--;
1417a86c6181SAlex Tomas curp--;
1418a86c6181SAlex Tomas }
1419a86c6181SAlex Tomas
1420d0d856e8SRandy Dunlap /* we use already allocated block for index block,
1421d0d856e8SRandy Dunlap * so subsequent data blocks should be contiguous */
1422a86c6181SAlex Tomas if (EXT_HAS_FREE_INDEX(curp)) {
1423a86c6181SAlex Tomas /* if we found index with free entry, then use that
1424a86c6181SAlex Tomas * entry: create all needed subtree and add new leaf */
1425107a7bd3STheodore Ts'o err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1426787e0981SShen Feng if (err)
1427787e0981SShen Feng goto out;
1428a86c6181SAlex Tomas
1429a86c6181SAlex Tomas /* refill path */
1430ed8a1a76STheodore Ts'o path = ext4_find_extent(inode,
1431725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1432dfe50809STheodore Ts'o ppath, gb_flags);
1433a86c6181SAlex Tomas if (IS_ERR(path))
1434a86c6181SAlex Tomas err = PTR_ERR(path);
1435a86c6181SAlex Tomas } else {
1436a86c6181SAlex Tomas /* tree is full, time to grow in depth */
1437be5cd90dSDmitry Monakhov err = ext4_ext_grow_indepth(handle, inode, mb_flags);
1438a86c6181SAlex Tomas if (err)
1439a86c6181SAlex Tomas goto out;
1440a86c6181SAlex Tomas
1441a86c6181SAlex Tomas /* refill path */
1442ed8a1a76STheodore Ts'o path = ext4_find_extent(inode,
1443725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1444dfe50809STheodore Ts'o ppath, gb_flags);
1445a86c6181SAlex Tomas if (IS_ERR(path)) {
1446a86c6181SAlex Tomas err = PTR_ERR(path);
1447a86c6181SAlex Tomas goto out;
1448a86c6181SAlex Tomas }
1449a86c6181SAlex Tomas
1450a86c6181SAlex Tomas /*
1451d0d856e8SRandy Dunlap * only first (depth 0 -> 1) produces free space;
1452d0d856e8SRandy Dunlap * in all other cases we have to split the grown tree
1453a86c6181SAlex Tomas */
1454a86c6181SAlex Tomas depth = ext_depth(inode);
1455a86c6181SAlex Tomas if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1456d0d856e8SRandy Dunlap /* now we need to split */
1457a86c6181SAlex Tomas goto repeat;
1458a86c6181SAlex Tomas }
1459a86c6181SAlex Tomas }
1460a86c6181SAlex Tomas
1461a86c6181SAlex Tomas out:
1462a86c6181SAlex Tomas return err;
1463a86c6181SAlex Tomas }
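
/*
 * Editorial call-flow sketch (not in the original source): inserting
 * into a tree whose root and leaf are both full takes two passes
 * through the "repeat" loop above:
 *
 *	1. no level has a free index entry, so ext4_ext_grow_indepth()
 *	   adds a new root level and the path is re-read with
 *	   ext4_find_extent();
 *	2. the grown root now has free index slots but the leaf is
 *	   still full, so the goto repeat pass reaches ext4_ext_split(),
 *	   which finally makes room for the new extent.
 */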
1464a86c6181SAlex Tomas
1465a86c6181SAlex Tomas /*
14661988b51eSAlex Tomas * Search for the closest allocated block to the left of *logical
14671988b51eSAlex Tomas * and return it at @logical, with its physical address at @phys.
14681988b51eSAlex Tomas * If *logical is the smallest allocated block, the function
14691988b51eSAlex Tomas * returns 0 at @phys.
14701988b51eSAlex Tomas * The return value is 0 (success) or an error code.
14711988b51eSAlex Tomas */
14721f109d5aSTheodore Ts'o static int ext4_ext_search_left(struct inode *inode,
14731f109d5aSTheodore Ts'o struct ext4_ext_path *path,
14741988b51eSAlex Tomas ext4_lblk_t *logical, ext4_fsblk_t *phys)
14751988b51eSAlex Tomas {
14761988b51eSAlex Tomas struct ext4_extent_idx *ix;
14771988b51eSAlex Tomas struct ext4_extent *ex;
1478b939e376SAneesh Kumar K.V int depth, ee_len;
14791988b51eSAlex Tomas
1480273df556SFrank Mayhar if (unlikely(path == NULL)) {
1481273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
14826a797d27SDarrick J. Wong return -EFSCORRUPTED;
1483273df556SFrank Mayhar }
14841988b51eSAlex Tomas depth = path->p_depth;
14851988b51eSAlex Tomas *phys = 0;
14861988b51eSAlex Tomas
14871988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL)
14881988b51eSAlex Tomas return 0;
14891988b51eSAlex Tomas
14901988b51eSAlex Tomas /* usually the extent in the path covers blocks smaller
14911988b51eSAlex Tomas * than *logical, but it can be that the extent is the
14921988b51eSAlex Tomas * first one in the file */
14931988b51eSAlex Tomas
14941988b51eSAlex Tomas ex = path[depth].p_ext;
1495b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex);
14961988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) {
1497273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1498273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1499273df556SFrank Mayhar "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1500273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block));
15016a797d27SDarrick J. Wong return -EFSCORRUPTED;
1502273df556SFrank Mayhar }
15031988b51eSAlex Tomas while (--depth >= 0) {
15041988b51eSAlex Tomas ix = path[depth].p_idx;
1505273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1506273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1507273df556SFrank Mayhar "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
15086ee3b212STao Ma ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1509037e7c52SAdam Borowski le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
1510273df556SFrank Mayhar depth);
15116a797d27SDarrick J. Wong return -EFSCORRUPTED;
1512273df556SFrank Mayhar }
15131988b51eSAlex Tomas }
15141988b51eSAlex Tomas return 0;
15151988b51eSAlex Tomas }
15161988b51eSAlex Tomas
1517273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1518273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1519273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!",
1520273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len);
15216a797d27SDarrick J. Wong return -EFSCORRUPTED;
1522273df556SFrank Mayhar }
15231988b51eSAlex Tomas
1524b939e376SAneesh Kumar K.V *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1525bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex) + ee_len - 1;
15261988b51eSAlex Tomas return 0;
15271988b51eSAlex Tomas }
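
/*
 * Usage sketch for ext4_ext_search_left() (editorial, with made-up
 * caller variables; not in the original source):
 *
 *	ext4_lblk_t lblk = map_lblk;	// made-up caller variable
 *	ext4_fsblk_t pblk;
 *	int err = ext4_ext_search_left(inode, path, &lblk, &pblk);
 *	if (err)
 *		return err;		// e.g. -EFSCORRUPTED
 *
 * On success with pblk != 0, lblk/pblk name the last block of the
 * extent immediately to the left, handy as an allocation hint;
 * pblk == 0 means *logical was already the smallest allocated block.
 */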
15281988b51eSAlex Tomas
15291988b51eSAlex Tomas /*
1530d7dce9e0Syangerkun * Search for the closest allocated block to the right of *logical
1531d7dce9e0Syangerkun * and return it at @logical, with its physical address at @phys.
1532d7dce9e0Syangerkun * If no such block exists, return 0 with @phys set to 0. Return 1
1533d7dce9e0Syangerkun * when an allocated block was found, in which case *ret_ex is valid.
1534d7dce9e0Syangerkun * Otherwise return a negative (< 0) error code.
15351988b51eSAlex Tomas */
15361f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode,
15371f109d5aSTheodore Ts'o struct ext4_ext_path *path,
15384d33b1efSTheodore Ts'o ext4_lblk_t *logical, ext4_fsblk_t *phys,
1539d7dce9e0Syangerkun struct ext4_extent *ret_ex)
15401988b51eSAlex Tomas {
15411988b51eSAlex Tomas struct buffer_head *bh = NULL;
15421988b51eSAlex Tomas struct ext4_extent_header *eh;
15431988b51eSAlex Tomas struct ext4_extent_idx *ix;
15441988b51eSAlex Tomas struct ext4_extent *ex;
1545395a87bfSEric Sandeen int depth; /* Note, NOT eh_depth; depth from top of tree */
1546395a87bfSEric Sandeen int ee_len;
15471988b51eSAlex Tomas
1548273df556SFrank Mayhar if (unlikely(path == NULL)) {
1549273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
15506a797d27SDarrick J. Wong return -EFSCORRUPTED;
1551273df556SFrank Mayhar }
15521988b51eSAlex Tomas depth = path->p_depth;
15531988b51eSAlex Tomas *phys = 0;
15541988b51eSAlex Tomas
15551988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL)
15561988b51eSAlex Tomas return 0;
15571988b51eSAlex Tomas
15581988b51eSAlex Tomas /* usually the extent in the path covers blocks smaller
15591988b51eSAlex Tomas * than *logical, but it can be that the extent is the
15601988b51eSAlex Tomas * first one in the file */
15611988b51eSAlex Tomas
15621988b51eSAlex Tomas ex = path[depth].p_ext;
1563b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex);
15641988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) {
1565273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1566273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1567273df556SFrank Mayhar "first_extent(path[%d].p_hdr) != ex",
1568273df556SFrank Mayhar depth);
15696a797d27SDarrick J. Wong return -EFSCORRUPTED;
1570273df556SFrank Mayhar }
15711988b51eSAlex Tomas while (--depth >= 0) {
15721988b51eSAlex Tomas ix = path[depth].p_idx;
1573273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1574273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1575273df556SFrank Mayhar "ix != EXT_FIRST_INDEX *logical %d!",
1576273df556SFrank Mayhar *logical);
15776a797d27SDarrick J. Wong return -EFSCORRUPTED;
1578273df556SFrank Mayhar }
15791988b51eSAlex Tomas }
15804d33b1efSTheodore Ts'o goto found_extent;
15811988b51eSAlex Tomas }
15821988b51eSAlex Tomas
1583273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1584273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1585273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!",
1586273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len);
15876a797d27SDarrick J. Wong return -EFSCORRUPTED;
1588273df556SFrank Mayhar }
15891988b51eSAlex Tomas
15901988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
15911988b51eSAlex Tomas /* next allocated block in this leaf */
15921988b51eSAlex Tomas ex++;
15934d33b1efSTheodore Ts'o goto found_extent;
15941988b51eSAlex Tomas }
15951988b51eSAlex Tomas
15961988b51eSAlex Tomas /* go up and search for index to the right */
15971988b51eSAlex Tomas while (--depth >= 0) {
15981988b51eSAlex Tomas ix = path[depth].p_idx;
15991988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
160025f1ee3aSWu Fengguang goto got_index;
16011988b51eSAlex Tomas }
16021988b51eSAlex Tomas
160325f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */
16041988b51eSAlex Tomas return 0;
16051988b51eSAlex Tomas
160625f1ee3aSWu Fengguang got_index:
16071988b51eSAlex Tomas /* we've found index to the right, let's
16081988b51eSAlex Tomas * follow it and find the closest allocated
16091988b51eSAlex Tomas * block to the right */
16101988b51eSAlex Tomas ix++;
16111988b51eSAlex Tomas while (++depth < path->p_depth) {
1612395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */
16139c6e0719SZhang Yi bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
16147d7ea89eSTheodore Ts'o if (IS_ERR(bh))
16157d7ea89eSTheodore Ts'o return PTR_ERR(bh);
16167d7ea89eSTheodore Ts'o eh = ext_block_hdr(bh);
16171988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh);
16181988b51eSAlex Tomas put_bh(bh);
16191988b51eSAlex Tomas }
16201988b51eSAlex Tomas
16219c6e0719SZhang Yi bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
16227d7ea89eSTheodore Ts'o if (IS_ERR(bh))
16237d7ea89eSTheodore Ts'o return PTR_ERR(bh);
16241988b51eSAlex Tomas eh = ext_block_hdr(bh);
16251988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh);
16264d33b1efSTheodore Ts'o found_extent:
16271988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block);
1628bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex);
1629d7dce9e0Syangerkun if (ret_ex)
1630d7dce9e0Syangerkun *ret_ex = *ex;
16314d33b1efSTheodore Ts'o if (bh)
16321988b51eSAlex Tomas put_bh(bh);
1633d7dce9e0Syangerkun return 1;
16341988b51eSAlex Tomas }
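
/*
 * Return-value summary for ext4_ext_search_right() (editorial sketch,
 * not in the original source, with made-up caller variables):
 *
 *	struct ext4_extent ex;
 *	ext4_lblk_t lblk = start;
 *	ext4_fsblk_t pblk;
 *	int ret = ext4_ext_search_right(inode, path, &lblk, &pblk, &ex);
 *
 *	ret < 0  : error, e.g. -EFSCORRUPTED;
 *	ret == 0 : nothing allocated to the right, pblk set to 0;
 *	ret == 1 : lblk/pblk/ex describe the closest extent on the right.
 */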
16351988b51eSAlex Tomas
16361988b51eSAlex Tomas /*
1637d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block:
1638f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1639d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as
1640d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent
1641d0d856e8SRandy Dunlap * with leaves.
1642a86c6181SAlex Tomas */
1643fcf6b1b7SDmitry Monakhov ext4_lblk_t
1644a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1645a86c6181SAlex Tomas {
1646a86c6181SAlex Tomas int depth;
1647a86c6181SAlex Tomas
1648a86c6181SAlex Tomas BUG_ON(path == NULL);
1649a86c6181SAlex Tomas depth = path->p_depth;
1650a86c6181SAlex Tomas
1651a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL)
1652f17722f9SLukas Czerner return EXT_MAX_BLOCKS;
1653a86c6181SAlex Tomas
1654a86c6181SAlex Tomas while (depth >= 0) {
16556e89bbb7SEric Biggers struct ext4_ext_path *p = &path[depth];
16566e89bbb7SEric Biggers
1657a86c6181SAlex Tomas if (depth == path->p_depth) {
1658a86c6181SAlex Tomas /* leaf */
16596e89bbb7SEric Biggers if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr))
16606e89bbb7SEric Biggers return le32_to_cpu(p->p_ext[1].ee_block);
1661a86c6181SAlex Tomas } else {
1662a86c6181SAlex Tomas /* index */
16636e89bbb7SEric Biggers if (p->p_idx != EXT_LAST_INDEX(p->p_hdr))
16646e89bbb7SEric Biggers return le32_to_cpu(p->p_idx[1].ei_block);
1665a86c6181SAlex Tomas }
1666a86c6181SAlex Tomas depth--;
1667a86c6181SAlex Tomas }
1668a86c6181SAlex Tomas
1669f17722f9SLukas Czerner return EXT_MAX_BLOCKS;
1670a86c6181SAlex Tomas }
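
/*
 * Example (editorial, not in the original source): with a leaf holding
 * extents [0..9] and [50..59] and the path ending at the first of them,
 * ext4_ext_next_allocated_block() returns 50.  With the path at the
 * last extent of the last leaf it returns EXT_MAX_BLOCKS, meaning
 * there is no further allocation.
 */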
1671a86c6181SAlex Tomas
1672a86c6181SAlex Tomas /*
1673d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block:
1674f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1675a86c6181SAlex Tomas */
16765718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1677a86c6181SAlex Tomas {
1678a86c6181SAlex Tomas int depth;
1679a86c6181SAlex Tomas
1680a86c6181SAlex Tomas BUG_ON(path == NULL);
1681a86c6181SAlex Tomas depth = path->p_depth;
1682a86c6181SAlex Tomas
1683a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */
1684a86c6181SAlex Tomas if (depth == 0)
1685f17722f9SLukas Czerner return EXT_MAX_BLOCKS;
1686a86c6181SAlex Tomas
1687a86c6181SAlex Tomas /* go to index block */
1688a86c6181SAlex Tomas depth--;
1689a86c6181SAlex Tomas
1690a86c6181SAlex Tomas while (depth >= 0) {
1691a86c6181SAlex Tomas if (path[depth].p_idx !=
1692a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr))
1693725d26d3SAneesh Kumar K.V return (ext4_lblk_t)
1694725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block);
1695a86c6181SAlex Tomas depth--;
1696a86c6181SAlex Tomas }
1697a86c6181SAlex Tomas
1698f17722f9SLukas Czerner return EXT_MAX_BLOCKS;
1699a86c6181SAlex Tomas }
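
/*
 * Example (editorial, not in the original source): for a depth-1 tree
 * with two leaves whose second leaf starts at logical block 100,
 * ext4_ext_next_leaf_block() returns 100 no matter which extent inside
 * the first leaf the path points to, since only the index levels are
 * consulted.  A depth-0 tree always yields EXT_MAX_BLOCKS.
 */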
1700a86c6181SAlex Tomas
1701a86c6181SAlex Tomas /*
1702d0d856e8SRandy Dunlap * ext4_ext_correct_indexes:
1703d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf,
1704d0d856e8SRandy Dunlap * then we have to correct all indexes above.
1705a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases?
1706a86c6181SAlex Tomas */
17071d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1708a86c6181SAlex Tomas struct ext4_ext_path *path)
1709a86c6181SAlex Tomas {
1710a86c6181SAlex Tomas struct ext4_extent_header *eh;
1711a86c6181SAlex Tomas int depth = ext_depth(inode);
1712a86c6181SAlex Tomas struct ext4_extent *ex;
1713a86c6181SAlex Tomas __le32 border;
1714a86c6181SAlex Tomas int k, err = 0;
1715a86c6181SAlex Tomas
1716a86c6181SAlex Tomas eh = path[depth].p_hdr;
1717a86c6181SAlex Tomas ex = path[depth].p_ext;
1718273df556SFrank Mayhar
1719273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) {
1720273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1721273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh);
17226a797d27SDarrick J. Wong return -EFSCORRUPTED;
1723273df556SFrank Mayhar }
1724a86c6181SAlex Tomas
1725a86c6181SAlex Tomas if (depth == 0) {
1726a86c6181SAlex Tomas /* there is no tree at all */
1727a86c6181SAlex Tomas return 0;
1728a86c6181SAlex Tomas }
1729a86c6181SAlex Tomas
1730a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) {
1731a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */
1732a86c6181SAlex Tomas return 0;
1733a86c6181SAlex Tomas }
1734a86c6181SAlex Tomas
1735a86c6181SAlex Tomas /*
1736d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one
1737a86c6181SAlex Tomas */
1738a86c6181SAlex Tomas k = depth - 1;
1739a86c6181SAlex Tomas border = path[depth].p_ext->ee_block;
17407e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k);
17417e028976SAvantika Mathur if (err)
1742a86c6181SAlex Tomas return err;
1743a86c6181SAlex Tomas path[k].p_idx->ei_block = border;
17447e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k);
17457e028976SAvantika Mathur if (err)
1746a86c6181SAlex Tomas return err;
1747a86c6181SAlex Tomas
1748a86c6181SAlex Tomas while (k--) {
1749a86c6181SAlex Tomas /* change all left-side indexes */
1750a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1751a86c6181SAlex Tomas break;
17527e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k);
17537e028976SAvantika Mathur if (err)
1754a86c6181SAlex Tomas break;
1755a86c6181SAlex Tomas path[k].p_idx->ei_block = border;
17567e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k);
17577e028976SAvantika Mathur if (err)
1758a86c6181SAlex Tomas break;
1759a86c6181SAlex Tomas }
1760a86c6181SAlex Tomas
1761a86c6181SAlex Tomas return err;
1762a86c6181SAlex Tomas }
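
/*
 * Example (editorial, not in the original source): if the first extent
 * of a leaf used to start at logical block 40 and a merge moved its
 * start to 32, every index on the path that recorded 40 must be
 * rewritten to 32.  The loop above walks upward and stops at the first
 * level where the path no longer passes through the leftmost index,
 * because the indexes above that level are keyed by an earlier block.
 */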
1763a86c6181SAlex Tomas
176443f81677SEric Biggers static int ext4_can_extents_be_merged(struct inode *inode,
176543f81677SEric Biggers struct ext4_extent *ex1,
1766a86c6181SAlex Tomas struct ext4_extent *ex2)
1767a86c6181SAlex Tomas {
1768da0169b3SEric Sandeen unsigned short ext1_ee_len, ext2_ee_len;
1769a2df2a63SAmit Arora
1770556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1771a2df2a63SAmit Arora return 0;
1772a2df2a63SAmit Arora
1773a2df2a63SAmit Arora ext1_ee_len = ext4_ext_get_actual_len(ex1);
1774a2df2a63SAmit Arora ext2_ee_len = ext4_ext_get_actual_len(ex2);
1775a2df2a63SAmit Arora
1776a2df2a63SAmit Arora if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
177763f57933SAndrew Morton le32_to_cpu(ex2->ee_block))
1778a86c6181SAlex Tomas return 0;
1779a86c6181SAlex Tomas
1780da0169b3SEric Sandeen if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1781471d4011SSuparna Bhattacharya return 0;
1782378f32baSMatthew Bobrowski
1783556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex1) &&
1784378f32baSMatthew Bobrowski ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)
1785a9b82415SDarrick J. Wong return 0;
1786bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
1787b939e376SAneesh Kumar K.V if (ext1_ee_len >= 4)
1788a86c6181SAlex Tomas return 0;
1789a86c6181SAlex Tomas #endif
1790a86c6181SAlex Tomas
1791bf89d16fSTheodore Ts'o if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1792a86c6181SAlex Tomas return 1;
1793a86c6181SAlex Tomas return 0;
1794a86c6181SAlex Tomas }
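
/*
 * Worked example (editorial, not in the original source, ignoring
 * AGGRESSIVE_TEST): ex1 = [lblk 0, len 8, pblk 1000] and
 * ex2 = [lblk 8, len 4, pblk 1008] are mergeable: both written,
 * logically contiguous (0 + 8 == 8), physically contiguous
 * (1000 + 8 == 1008), and 8 + 4 <= EXT_INIT_MAX_LEN.  Moving ex2 to
 * pblk 2000 breaks physical contiguity and the function returns 0.
 */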
1795a86c6181SAlex Tomas
1796a86c6181SAlex Tomas /*
179756055d3aSAmit Arora * This function tries to merge the "ex" extent to the next extent in the tree.
179856055d3aSAmit Arora * It always tries to merge towards right. If you want to merge towards
179956055d3aSAmit Arora * left, pass "ex - 1" as argument instead of "ex".
180056055d3aSAmit Arora * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
180156055d3aSAmit Arora * 1 if they got merged.
180256055d3aSAmit Arora */
1803197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode,
180456055d3aSAmit Arora struct ext4_ext_path *path,
180556055d3aSAmit Arora struct ext4_extent *ex)
180656055d3aSAmit Arora {
180756055d3aSAmit Arora struct ext4_extent_header *eh;
180856055d3aSAmit Arora unsigned int depth, len;
1809556615dcSLukas Czerner int merge_done = 0, unwritten;
181056055d3aSAmit Arora
181156055d3aSAmit Arora depth = ext_depth(inode);
181256055d3aSAmit Arora BUG_ON(path[depth].p_hdr == NULL);
181356055d3aSAmit Arora eh = path[depth].p_hdr;
181456055d3aSAmit Arora
181556055d3aSAmit Arora while (ex < EXT_LAST_EXTENT(eh)) {
181656055d3aSAmit Arora if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
181756055d3aSAmit Arora break;
181856055d3aSAmit Arora /* merge with next extent! */
1819556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex);
182056055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
182156055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1));
1822556615dcSLukas Czerner if (unwritten)
1823556615dcSLukas Czerner ext4_ext_mark_unwritten(ex);
182456055d3aSAmit Arora
182556055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) {
182656055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1)
182756055d3aSAmit Arora * sizeof(struct ext4_extent);
182856055d3aSAmit Arora memmove(ex + 1, ex + 2, len);
182956055d3aSAmit Arora }
1830e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1);
183156055d3aSAmit Arora merge_done = 1;
183256055d3aSAmit Arora WARN_ON(eh->eh_entries == 0);
183356055d3aSAmit Arora if (!eh->eh_entries)
183424676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
183556055d3aSAmit Arora }
183656055d3aSAmit Arora
183756055d3aSAmit Arora return merge_done;
183856055d3aSAmit Arora }
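
/*
 * Usage sketch (editorial, not in the original source): callers that
 * just updated extent "ex" in place typically try the left neighbour
 * first and, only if nothing merged, the right one:
 *
 *	merged = 0;
 *	if (ex > EXT_FIRST_EXTENT(eh))
 *		merged = ext4_ext_try_to_merge_right(inode, path, ex - 1);
 *	if (!merged)
 *		ext4_ext_try_to_merge_right(inode, path, ex);
 *
 * which is exactly what ext4_ext_try_to_merge() below wraps up.
 */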
183956055d3aSAmit Arora
184056055d3aSAmit Arora /*
1841ecb94f5fSTheodore Ts'o * This function does a very simple check to see if we can collapse
1842ecb94f5fSTheodore Ts'o * an extent tree with a single extent tree leaf block into the inode.
1843ecb94f5fSTheodore Ts'o */
1844ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle,
1845ecb94f5fSTheodore Ts'o struct inode *inode,
1846ecb94f5fSTheodore Ts'o struct ext4_ext_path *path)
1847ecb94f5fSTheodore Ts'o {
1848ecb94f5fSTheodore Ts'o size_t s;
1849ecb94f5fSTheodore Ts'o unsigned max_root = ext4_ext_space_root(inode, 0);
1850ecb94f5fSTheodore Ts'o ext4_fsblk_t blk;
1851ecb94f5fSTheodore Ts'o
1852ecb94f5fSTheodore Ts'o if ((path[0].p_depth != 1) ||
1853ecb94f5fSTheodore Ts'o (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1854ecb94f5fSTheodore Ts'o (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1855ecb94f5fSTheodore Ts'o return;
1856ecb94f5fSTheodore Ts'o
1857ecb94f5fSTheodore Ts'o /*
1858ecb94f5fSTheodore Ts'o * We need to modify the block allocation bitmap and the block
1859ecb94f5fSTheodore Ts'o * group descriptor to release the extent tree block. If we
1860ecb94f5fSTheodore Ts'o * can't get the journal credits, give up.
1861ecb94f5fSTheodore Ts'o */
186283448bdfSJan Kara if (ext4_journal_extend(handle, 2,
186383448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, 1)))
1864ecb94f5fSTheodore Ts'o return;
1865ecb94f5fSTheodore Ts'o
1866ecb94f5fSTheodore Ts'o /*
1867ecb94f5fSTheodore Ts'o * Copy the extent data up to the inode
1868ecb94f5fSTheodore Ts'o */
1869ecb94f5fSTheodore Ts'o blk = ext4_idx_pblock(path[0].p_idx);
1870ecb94f5fSTheodore Ts'o s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1871ecb94f5fSTheodore Ts'o sizeof(struct ext4_extent_idx);
1872ecb94f5fSTheodore Ts'o s += sizeof(struct ext4_extent_header);
1873ecb94f5fSTheodore Ts'o
187410809df8STheodore Ts'o path[1].p_maxdepth = path[0].p_maxdepth;
1875ecb94f5fSTheodore Ts'o memcpy(path[0].p_hdr, path[1].p_hdr, s);
1876ecb94f5fSTheodore Ts'o path[0].p_depth = 0;
1877ecb94f5fSTheodore Ts'o path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1878ecb94f5fSTheodore Ts'o (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1879ecb94f5fSTheodore Ts'o path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1880ecb94f5fSTheodore Ts'o
1881ecb94f5fSTheodore Ts'o brelse(path[1].p_bh);
188268a69cf6SBaokun Li path[1].p_bh = NULL;
1883ecb94f5fSTheodore Ts'o ext4_free_blocks(handle, inode, NULL, blk, 1,
188471d4f7d0STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1885ecb94f5fSTheodore Ts'o }
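
/*
 * Example (editorial, not in the original source): a depth-1 tree whose
 * root holds a single index pointing at one leaf with at most max_root
 * extents (four on a standard root, since the 60-byte i_data fits a
 * header plus four struct ext4_extent entries) collapses to depth 0:
 * the leaf is copied over the root and its block is freed with the
 * METADATA|FORGET flags so the journal stays consistent.
 */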
1886ecb94f5fSTheodore Ts'o
1887ecb94f5fSTheodore Ts'o /*
1888adde81cfSEric Biggers * This function tries to merge the @ex extent to neighbours in the tree, then
1889adde81cfSEric Biggers * tries to collapse the extent tree into the inode.
1890197217a5SYongqiang Yang */
1891ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle,
1892ecb94f5fSTheodore Ts'o struct inode *inode,
1893197217a5SYongqiang Yang struct ext4_ext_path *path,
1894adde81cfSEric Biggers struct ext4_extent *ex)
1895adde81cfSEric Biggers {
1896197217a5SYongqiang Yang struct ext4_extent_header *eh;
1897197217a5SYongqiang Yang unsigned int depth;
1898197217a5SYongqiang Yang int merge_done = 0;
1899197217a5SYongqiang Yang
1900197217a5SYongqiang Yang depth = ext_depth(inode);
1901197217a5SYongqiang Yang BUG_ON(path[depth].p_hdr == NULL);
1902197217a5SYongqiang Yang eh = path[depth].p_hdr;
1903197217a5SYongqiang Yang
1904197217a5SYongqiang Yang if (ex > EXT_FIRST_EXTENT(eh))
1905197217a5SYongqiang Yang merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1906197217a5SYongqiang Yang
1907197217a5SYongqiang Yang if (!merge_done)
1908ecb94f5fSTheodore Ts'o (void) ext4_ext_try_to_merge_right(inode, path, ex);
1909197217a5SYongqiang Yang
1910ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge_up(handle, inode, path);
1911197217a5SYongqiang Yang }
1912197217a5SYongqiang Yang
1913197217a5SYongqiang Yang /*
191425d14f98SAmit Arora * check if a portion of the "newext" extent overlaps with an
191525d14f98SAmit Arora * existing extent.
191625d14f98SAmit Arora *
191725d14f98SAmit Arora * If there is an overlap discovered, it updates the length of the newext
191825d14f98SAmit Arora * such that there will be no overlap, and then returns 1.
191925d14f98SAmit Arora * If there is no overlap found, it returns 0.
192025d14f98SAmit Arora */
19214d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
19224d33b1efSTheodore Ts'o struct inode *inode,
192325d14f98SAmit Arora struct ext4_extent *newext,
192425d14f98SAmit Arora struct ext4_ext_path *path)
192525d14f98SAmit Arora {
1926725d26d3SAneesh Kumar K.V ext4_lblk_t b1, b2;
192725d14f98SAmit Arora unsigned int depth, len1;
192825d14f98SAmit Arora unsigned int ret = 0;
192925d14f98SAmit Arora
193025d14f98SAmit Arora b1 = le32_to_cpu(newext->ee_block);
1931a2df2a63SAmit Arora len1 = ext4_ext_get_actual_len(newext);
193225d14f98SAmit Arora depth = ext_depth(inode);
193325d14f98SAmit Arora if (!path[depth].p_ext)
193425d14f98SAmit Arora goto out;
1935f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
193625d14f98SAmit Arora
193725d14f98SAmit Arora /*
193825d14f98SAmit Arora * get the next allocated block if the extent in the path
193925d14f98SAmit Arora * is before the requested block(s)
194025d14f98SAmit Arora */
194125d14f98SAmit Arora if (b2 < b1) {
194225d14f98SAmit Arora b2 = ext4_ext_next_allocated_block(path);
1943f17722f9SLukas Czerner if (b2 == EXT_MAX_BLOCKS)
194425d14f98SAmit Arora goto out;
1945f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, b2);
194625d14f98SAmit Arora }
194725d14f98SAmit Arora
1948725d26d3SAneesh Kumar K.V /* check for wrap through zero on extent logical start block */
194925d14f98SAmit Arora if (b1 + len1 < b1) {
1950f17722f9SLukas Czerner len1 = EXT_MAX_BLOCKS - b1;
195125d14f98SAmit Arora newext->ee_len = cpu_to_le16(len1);
195225d14f98SAmit Arora ret = 1;
195325d14f98SAmit Arora }
195425d14f98SAmit Arora
195525d14f98SAmit Arora /* check for overlap */
195625d14f98SAmit Arora if (b1 + len1 > b2) {
195725d14f98SAmit Arora newext->ee_len = cpu_to_le16(b2 - b1);
195825d14f98SAmit Arora ret = 1;
195925d14f98SAmit Arora }
196025d14f98SAmit Arora out:
196125d14f98SAmit Arora return ret;
196225d14f98SAmit Arora }
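
/*
 * Worked example (editorial, not in the original source, ignoring
 * bigalloc cluster rounding): newext = [b1 = 90, len1 = 20] while the
 * extent found in the path starts at b2 = 100.  Since b1 + len1 = 110
 * overlaps b2, the extent is trimmed to len1 = b2 - b1 = 10 and 1 is
 * returned, so the caller only maps the non-overlapping head [90..99].
 */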
196325d14f98SAmit Arora
196425d14f98SAmit Arora /*
1965d0d856e8SRandy Dunlap * ext4_ext_insert_extent:
1966e4d7f2d3SKeyur Patel * tries to merge requested extent into the existing extent or
1967d0d856e8SRandy Dunlap * inserts requested extent as new one into the tree,
1968d0d856e8SRandy Dunlap * creating new leaf in the no-space case.
1969a86c6181SAlex Tomas */
1970a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1971dfe50809STheodore Ts'o struct ext4_ext_path **ppath,
1972107a7bd3STheodore Ts'o struct ext4_extent *newext, int gb_flags)
1973a86c6181SAlex Tomas {
1974dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
1975a86c6181SAlex Tomas struct ext4_extent_header *eh;
1976a86c6181SAlex Tomas struct ext4_extent *ex, *fex;
1977a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */
1978a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL;
1979725d26d3SAneesh Kumar K.V int depth, len, err;
1980725d26d3SAneesh Kumar K.V ext4_lblk_t next;
1981556615dcSLukas Czerner int mb_flags = 0, unwritten;
1982a86c6181SAlex Tomas
1983e3cf5d5dSTheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1984e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1985273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1986273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
19876a797d27SDarrick J. Wong return -EFSCORRUPTED;
1988273df556SFrank Mayhar }
1989a86c6181SAlex Tomas depth = ext_depth(inode);
1990a86c6181SAlex Tomas ex = path[depth].p_ext;
1991be8981beSLukas Czerner eh = path[depth].p_hdr;
1992273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) {
1993273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
19946a797d27SDarrick J. Wong return -EFSCORRUPTED;
1995273df556SFrank Mayhar }
1996a86c6181SAlex Tomas
1997a86c6181SAlex Tomas /* try to insert block into found extent and return */
1998107a7bd3STheodore Ts'o if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1999be8981beSLukas Czerner
2000be8981beSLukas Czerner /*
2001be8981beSLukas Czerner * Try to see whether we should rather test the extent to the
2002be8981beSLukas Czerner * right of ex, or the one to the left of ex. This is because
2003ed8a1a76STheodore Ts'o * ext4_find_extent() can return either the extent on the
2004be8981beSLukas Czerner * left or the one on the right of the searched position. This
2005be8981beSLukas Czerner * will make merging more effective.
2006be8981beSLukas Czerner */
2007be8981beSLukas Czerner if (ex < EXT_LAST_EXTENT(eh) &&
2008be8981beSLukas Czerner (le32_to_cpu(ex->ee_block) +
2009be8981beSLukas Czerner ext4_ext_get_actual_len(ex) <
2010be8981beSLukas Czerner le32_to_cpu(newext->ee_block))) {
2011be8981beSLukas Czerner ex += 1;
2012be8981beSLukas Czerner goto prepend;
2013be8981beSLukas Czerner } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
2014be8981beSLukas Czerner (le32_to_cpu(newext->ee_block) +
2015be8981beSLukas Czerner ext4_ext_get_actual_len(newext) <
2016be8981beSLukas Czerner le32_to_cpu(ex->ee_block)))
2017be8981beSLukas Czerner ex -= 1;
2018be8981beSLukas Czerner
2019be8981beSLukas Czerner /* Try to append newex to the ex */
2020be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, ex, newext)) {
202170aa1554SRitesh Harjani ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
2022be8981beSLukas Czerner "(from %llu)\n",
2023556615dcSLukas Czerner ext4_ext_is_unwritten(newext),
2024a2df2a63SAmit Arora ext4_ext_get_actual_len(newext),
2025a86c6181SAlex Tomas le32_to_cpu(ex->ee_block),
2026556615dcSLukas Czerner ext4_ext_is_unwritten(ex),
2027bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex),
2028bf89d16fSTheodore Ts'o ext4_ext_pblock(ex));
2029be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode,
2030be8981beSLukas Czerner path + depth);
20317e028976SAvantika Mathur if (err)
2032a86c6181SAlex Tomas return err;
2033556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex);
2034a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2035a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext));
2036556615dcSLukas Czerner if (unwritten)
2037556615dcSLukas Czerner ext4_ext_mark_unwritten(ex);
2038a86c6181SAlex Tomas nearex = ex;
2039a86c6181SAlex Tomas goto merge;
2040a86c6181SAlex Tomas }
2041a86c6181SAlex Tomas
2042be8981beSLukas Czerner prepend:
2043be8981beSLukas Czerner /* Try to prepend newex to the ex */
2044be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, newext, ex)) {
204570aa1554SRitesh Harjani ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
2046be8981beSLukas Czerner "(from %llu)\n",
2047be8981beSLukas Czerner le32_to_cpu(newext->ee_block),
2048556615dcSLukas Czerner ext4_ext_is_unwritten(newext),
2049be8981beSLukas Czerner ext4_ext_get_actual_len(newext),
2050be8981beSLukas Czerner le32_to_cpu(ex->ee_block),
2051556615dcSLukas Czerner ext4_ext_is_unwritten(ex),
2052be8981beSLukas Czerner ext4_ext_get_actual_len(ex),
2053be8981beSLukas Czerner ext4_ext_pblock(ex));
2054be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode,
2055be8981beSLukas Czerner path + depth);
2056be8981beSLukas Czerner if (err)
2057be8981beSLukas Czerner return err;
2058be8981beSLukas Czerner
2059556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex);
2060be8981beSLukas Czerner ex->ee_block = newext->ee_block;
2061be8981beSLukas Czerner ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
2062be8981beSLukas Czerner ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
2063be8981beSLukas Czerner + ext4_ext_get_actual_len(newext));
2064556615dcSLukas Czerner if (unwritten)
2065556615dcSLukas Czerner ext4_ext_mark_unwritten(ex);
2066be8981beSLukas Czerner nearex = ex;
2067be8981beSLukas Czerner goto merge;
2068be8981beSLukas Czerner }
2069be8981beSLukas Czerner }
2070be8981beSLukas Czerner
2071a86c6181SAlex Tomas depth = ext_depth(inode);
2072a86c6181SAlex Tomas eh = path[depth].p_hdr;
2073a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
2074a86c6181SAlex Tomas goto has_space;
2075a86c6181SAlex Tomas
2076a86c6181SAlex Tomas /* probably next leaf has space for us? */
2077a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh);
2078598dbdf2SRobin Dong next = EXT_MAX_BLOCKS;
2079598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
20805718789dSRobin Dong next = ext4_ext_next_leaf_block(path);
2081598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) {
208270aa1554SRitesh Harjani ext_debug(inode, "next leaf block - %u\n", next);
2083a86c6181SAlex Tomas BUG_ON(npath != NULL);
208473c384c0STheodore Ts'o npath = ext4_find_extent(inode, next, NULL, gb_flags);
2085a86c6181SAlex Tomas if (IS_ERR(npath))
2086a86c6181SAlex Tomas return PTR_ERR(npath);
2087a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth);
2088a86c6181SAlex Tomas eh = npath[depth].p_hdr;
2089a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
209070aa1554SRitesh Harjani ext_debug(inode, "next leaf isn't full(%d)\n",
2091a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries));
2092a86c6181SAlex Tomas path = npath;
2093ffb505ffSRobin Dong goto has_space;
2094a86c6181SAlex Tomas }
209570aa1554SRitesh Harjani ext_debug(inode, "next leaf has no free space(%d,%d)\n",
2096a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2097a86c6181SAlex Tomas }
2098a86c6181SAlex Tomas
2099a86c6181SAlex Tomas /*
2100d0d856e8SRandy Dunlap * There is no free space in the found leaf.
2101d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree.
2102a86c6181SAlex Tomas */
2103107a7bd3STheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2104e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_USE_RESERVED;
2105107a7bd3STheodore Ts'o err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2106dfe50809STheodore Ts'o ppath, newext);
2107a86c6181SAlex Tomas if (err)
2108a86c6181SAlex Tomas goto cleanup;
21098162ee5dSBaokun Li path = *ppath;
2110a86c6181SAlex Tomas depth = ext_depth(inode);
2111a86c6181SAlex Tomas eh = path[depth].p_hdr;
2112a86c6181SAlex Tomas
2113a86c6181SAlex Tomas has_space:
2114a86c6181SAlex Tomas nearex = path[depth].p_ext;
2115a86c6181SAlex Tomas
21167e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth);
21177e028976SAvantika Mathur if (err)
2118a86c6181SAlex Tomas goto cleanup;
2119a86c6181SAlex Tomas
2120a86c6181SAlex Tomas if (!nearex) {
2121a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */
212270aa1554SRitesh Harjani ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
2123a86c6181SAlex Tomas le32_to_cpu(newext->ee_block),
2124bf89d16fSTheodore Ts'o ext4_ext_pblock(newext),
2125556615dcSLukas Czerner ext4_ext_is_unwritten(newext),
2126a2df2a63SAmit Arora ext4_ext_get_actual_len(newext));
212780e675f9SEric Gouriou nearex = EXT_FIRST_EXTENT(eh);
2128a86c6181SAlex Tomas } else {
212980e675f9SEric Gouriou if (le32_to_cpu(newext->ee_block)
213080e675f9SEric Gouriou > le32_to_cpu(nearex->ee_block)) {
213180e675f9SEric Gouriou /* Insert after */
213270aa1554SRitesh Harjani ext_debug(inode, "insert %u:%llu:[%d]%d after: "
213332de6756SYongqiang Yang "nearest %p\n",
2134a86c6181SAlex Tomas le32_to_cpu(newext->ee_block),
2135bf89d16fSTheodore Ts'o ext4_ext_pblock(newext),
2136556615dcSLukas Czerner ext4_ext_is_unwritten(newext),
2137a2df2a63SAmit Arora ext4_ext_get_actual_len(newext),
213880e675f9SEric Gouriou nearex);
213980e675f9SEric Gouriou nearex++;
214080e675f9SEric Gouriou } else {
214180e675f9SEric Gouriou /* Insert before */
214280e675f9SEric Gouriou BUG_ON(newext->ee_block == nearex->ee_block);
214370aa1554SRitesh Harjani ext_debug(inode, "insert %u:%llu:[%d]%d before: "
214432de6756SYongqiang Yang "nearest %p\n",
214580e675f9SEric Gouriou le32_to_cpu(newext->ee_block),
214680e675f9SEric Gouriou ext4_ext_pblock(newext),
2147556615dcSLukas Czerner ext4_ext_is_unwritten(newext),
214880e675f9SEric Gouriou ext4_ext_get_actual_len(newext),
214980e675f9SEric Gouriou nearex);
215080e675f9SEric Gouriou }
215180e675f9SEric Gouriou len = EXT_LAST_EXTENT(eh) - nearex + 1;
215280e675f9SEric Gouriou if (len > 0) {
215370aa1554SRitesh Harjani ext_debug(inode, "insert %u:%llu:[%d]%d: "
215480e675f9SEric Gouriou "move %d extents from 0x%p to 0x%p\n",
215580e675f9SEric Gouriou le32_to_cpu(newext->ee_block),
215680e675f9SEric Gouriou ext4_ext_pblock(newext),
2157556615dcSLukas Czerner ext4_ext_is_unwritten(newext),
215880e675f9SEric Gouriou ext4_ext_get_actual_len(newext),
215980e675f9SEric Gouriou len, nearex, nearex + 1);
216080e675f9SEric Gouriou memmove(nearex + 1, nearex,
216180e675f9SEric Gouriou len * sizeof(struct ext4_extent));
216280e675f9SEric Gouriou }
2163a86c6181SAlex Tomas }
2164a86c6181SAlex Tomas
2165e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1);
216680e675f9SEric Gouriou path[depth].p_ext = nearex;
2167a86c6181SAlex Tomas nearex->ee_block = newext->ee_block;
2168bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2169a86c6181SAlex Tomas nearex->ee_len = newext->ee_len;
2170a86c6181SAlex Tomas
2171a86c6181SAlex Tomas merge:
2172e7bcf823SHaiboLiu /* try to merge extents */
2173107a7bd3STheodore Ts'o if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2174ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, nearex);
2175a86c6181SAlex Tomas
2176a86c6181SAlex Tomas
2177a86c6181SAlex Tomas /* time to correct all indexes above */
2178a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path);
2179a86c6181SAlex Tomas if (err)
2180a86c6181SAlex Tomas goto cleanup;
2181a86c6181SAlex Tomas
2182ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2183a86c6181SAlex Tomas
2184a86c6181SAlex Tomas cleanup:
21857ff5fddaSYe Bin ext4_free_ext_path(npath);
2186a86c6181SAlex Tomas return err;
2187a86c6181SAlex Tomas }
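
/*
 * Editorial summary of ext4_ext_insert_extent() (not in the original
 * source):
 *
 *	1. try to append or prepend newext to the extent found in *ppath
 *	   (unless this is a pre-IO split that must not merge);
 *	2. otherwise, if the current leaf has a free slot, insert in
 *	   place, memmove()ing later extents to the right;
 *	3. otherwise probe the next leaf and, as a last resort, let
 *	   ext4_ext_create_new_leaf() split or grow the tree;
 *	4. finally merge neighbours and correct the indexes above.
 */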
2188a86c6181SAlex Tomas
2189bb5835edSTheodore Ts'o static int ext4_fill_es_cache_info(struct inode *inode,
2190bb5835edSTheodore Ts'o ext4_lblk_t block, ext4_lblk_t num,
2191bb5835edSTheodore Ts'o struct fiemap_extent_info *fieinfo)
2192bb5835edSTheodore Ts'o {
2193bb5835edSTheodore Ts'o ext4_lblk_t next, end = block + num - 1;
2194bb5835edSTheodore Ts'o struct extent_status es;
2195bb5835edSTheodore Ts'o unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2196bb5835edSTheodore Ts'o unsigned int flags;
2197bb5835edSTheodore Ts'o int err;
2198bb5835edSTheodore Ts'o
2199bb5835edSTheodore Ts'o while (block <= end) {
2200bb5835edSTheodore Ts'o next = 0;
2201bb5835edSTheodore Ts'o flags = 0;
2202bb5835edSTheodore Ts'o if (!ext4_es_lookup_extent(inode, block, &next, &es))
2203bb5835edSTheodore Ts'o break;
2204bb5835edSTheodore Ts'o if (ext4_es_is_unwritten(&es))
2205bb5835edSTheodore Ts'o flags |= FIEMAP_EXTENT_UNWRITTEN;
2206bb5835edSTheodore Ts'o if (ext4_es_is_delayed(&es))
2207bb5835edSTheodore Ts'o flags |= (FIEMAP_EXTENT_DELALLOC |
2208bb5835edSTheodore Ts'o FIEMAP_EXTENT_UNKNOWN);
2209bb5835edSTheodore Ts'o if (ext4_es_is_hole(&es))
2210bb5835edSTheodore Ts'o flags |= EXT4_FIEMAP_EXTENT_HOLE;
2211bb5835edSTheodore Ts'o if (next == 0)
2212bb5835edSTheodore Ts'o flags |= FIEMAP_EXTENT_LAST;
2213bb5835edSTheodore Ts'o if (flags & (FIEMAP_EXTENT_DELALLOC|
2214bb5835edSTheodore Ts'o EXT4_FIEMAP_EXTENT_HOLE))
2215bb5835edSTheodore Ts'o es.es_pblk = 0;
2216bb5835edSTheodore Ts'o else
2217bb5835edSTheodore Ts'o es.es_pblk = ext4_es_pblock(&es);
2218bb5835edSTheodore Ts'o err = fiemap_fill_next_extent(fieinfo,
2219bb5835edSTheodore Ts'o (__u64)es.es_lblk << blksize_bits,
2220bb5835edSTheodore Ts'o (__u64)es.es_pblk << blksize_bits,
2221bb5835edSTheodore Ts'o (__u64)es.es_len << blksize_bits,
2222bb5835edSTheodore Ts'o flags);
2223bb5835edSTheodore Ts'o if (next == 0)
2224bb5835edSTheodore Ts'o break;
2225bb5835edSTheodore Ts'o block = next;
2226bb5835edSTheodore Ts'o if (err < 0)
2227bb5835edSTheodore Ts'o return err;
2228bb5835edSTheodore Ts'o if (err == 1)
2229bb5835edSTheodore Ts'o return 0;
2230bb5835edSTheodore Ts'o }
2231bb5835edSTheodore Ts'o return 0;
2232bb5835edSTheodore Ts'o }
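/*
 * For example (following the flag mapping above): a delayed-allocation
 * range in the extent status cache is reported with
 * FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN and a physical block
 * of 0, since no blocks have been assigned yet; the last cached range
 * (next == 0) additionally carries FIEMAP_EXTENT_LAST.
 */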
2233bb5835edSTheodore Ts'o
2234bb5835edSTheodore Ts'o
2235a86c6181SAlex Tomas /*
2236f5411b76SZhang Yi * ext4_ext_find_hole - find hole around given block according to the given path
2237140a5250SJan Kara * @inode: inode we lookup in
2238140a5250SJan Kara * @path: path in extent tree to @lblk
2239140a5250SJan Kara * @lblk: pointer to logical block around which we want to determine hole
2240140a5250SJan Kara *
2241140a5250SJan Kara * Determine hole length (and start if easily possible) around given logical
2242140a5250SJan Kara * block. We don't try too hard to find the beginning of the hole, but since
2243140a5250SJan Kara * @path points to the extent before @lblk, we can provide it.
2244140a5250SJan Kara *
2245140a5250SJan Kara * The function returns the length of a hole starting at @lblk. We update @lblk
2246140a5250SJan Kara * to the beginning of the hole if we managed to find it.
2247140a5250SJan Kara */
2248f5411b76SZhang Yi static ext4_lblk_t ext4_ext_find_hole(struct inode *inode,
2249140a5250SJan Kara struct ext4_ext_path *path,
2250140a5250SJan Kara ext4_lblk_t *lblk)
2251140a5250SJan Kara {
2252140a5250SJan Kara int depth = ext_depth(inode);
2253140a5250SJan Kara struct ext4_extent *ex;
2254140a5250SJan Kara ext4_lblk_t len;
2255140a5250SJan Kara
2256140a5250SJan Kara ex = path[depth].p_ext;
2257140a5250SJan Kara if (ex == NULL) {
2258140a5250SJan Kara /* there is no extent yet, so gap is [0;-] */
2259140a5250SJan Kara *lblk = 0;
2260140a5250SJan Kara len = EXT_MAX_BLOCKS;
2261140a5250SJan Kara } else if (*lblk < le32_to_cpu(ex->ee_block)) {
2262140a5250SJan Kara len = le32_to_cpu(ex->ee_block) - *lblk;
2263140a5250SJan Kara } else if (*lblk >= le32_to_cpu(ex->ee_block)
2264140a5250SJan Kara + ext4_ext_get_actual_len(ex)) {
2265140a5250SJan Kara ext4_lblk_t next;
2266140a5250SJan Kara
2267140a5250SJan Kara *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
2268140a5250SJan Kara next = ext4_ext_next_allocated_block(path);
2269140a5250SJan Kara BUG_ON(next == *lblk);
2270140a5250SJan Kara len = next - *lblk;
2271140a5250SJan Kara } else {
2272140a5250SJan Kara BUG();
2273140a5250SJan Kara }
2274140a5250SJan Kara return len;
2275140a5250SJan Kara }
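/*
 * Worked example (hypothetical numbers): with an empty tree, *lblk
 * becomes 0 and the hole spans EXT_MAX_BLOCKS. With a single extent
 * covering blocks 100..149, a lookup at *lblk == 40 leaves *lblk at 40
 * and returns 60 (the hole 40..99); a lookup at *lblk == 160 moves
 * *lblk to 150 and returns the distance to the next allocated block.
 */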
2276140a5250SJan Kara
2277140a5250SJan Kara /*
2278d0d856e8SRandy Dunlap * ext4_ext_rm_idx:
2279d0d856e8SRandy Dunlap * removes index from the index block.
2280a86c6181SAlex Tomas */
22811d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2282c36575e6SForrest Liu struct ext4_ext_path *path, int depth)
2283a86c6181SAlex Tomas {
2284a86c6181SAlex Tomas int err;
2285f65e6fbaSAlex Tomas ext4_fsblk_t leaf;
2286a86c6181SAlex Tomas
2287a86c6181SAlex Tomas /* free index block */
2288c36575e6SForrest Liu depth--;
2289c36575e6SForrest Liu path = path + depth;
2290bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx);
2291273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) {
2292273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
22936a797d27SDarrick J. Wong return -EFSCORRUPTED;
2294273df556SFrank Mayhar }
22957e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path);
22967e028976SAvantika Mathur if (err)
2297a86c6181SAlex Tomas return err;
22980e1147b0SRobin Dong
22990e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
23000e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
23010e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx);
23020e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len);
23030e1147b0SRobin Dong }
23040e1147b0SRobin Dong
2305e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1);
23067e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path);
23077e028976SAvantika Mathur if (err)
2308a86c6181SAlex Tomas return err;
230970aa1554SRitesh Harjani ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
2310d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf);
2311d8990240SAditya Kali
23127dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1,
2313e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2314c36575e6SForrest Liu
2315c36575e6SForrest Liu while (--depth >= 0) {
2316c36575e6SForrest Liu if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2317c36575e6SForrest Liu break;
2318c36575e6SForrest Liu path--;
2319c36575e6SForrest Liu err = ext4_ext_get_access(handle, inode, path);
2320c36575e6SForrest Liu if (err)
2321c36575e6SForrest Liu break;
2322c36575e6SForrest Liu path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2323c36575e6SForrest Liu err = ext4_ext_dirty(handle, inode, path);
2324c36575e6SForrest Liu if (err)
2325c36575e6SForrest Liu break;
2326c36575e6SForrest Liu }
2327a86c6181SAlex Tomas return err;
2328a86c6181SAlex Tomas }
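/*
 * Illustration of the propagation loop above (hypothetical tree): when
 * the index just removed sat at the front of its block, a new first
 * entry shifts into place and the starting-block key stored in the
 * parent becomes stale, so ei_block is copied up one level at a time
 * until an index that is not the first entry of its block is reached.
 */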
2329a86c6181SAlex Tomas
2330a86c6181SAlex Tomas /*
2331ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent:
2332ee12b630SMingming Cao * This routine returns the max. credits needed to insert an extent
2333ee12b630SMingming Cao * to the extent tree.
2334ee12b630SMingming Cao * When passing the actual path, the caller should calculate the credits
2335ee12b630SMingming Cao * under i_data_sem.
2336a86c6181SAlex Tomas */
2337525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2338a86c6181SAlex Tomas struct ext4_ext_path *path)
2339a86c6181SAlex Tomas {
2340a86c6181SAlex Tomas if (path) {
2341ee12b630SMingming Cao int depth = ext_depth(inode);
2342f3bd1f3fSMingming Cao int ret = 0;
2343ee12b630SMingming Cao
2344a86c6181SAlex Tomas /* is there space in the leaf? */
2345a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2346ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2347ee12b630SMingming Cao
2348ee12b630SMingming Cao /*
2349ee12b630SMingming Cao * There is some space in the leaf, so there is no
2350ee12b630SMingming Cao * need to account for the leaf block credit.
2351ee12b630SMingming Cao *
2352ee12b630SMingming Cao * Bitmaps, block group descriptor blocks and
2353df3ab170STao Ma * other metadata blocks still need to be
2354ee12b630SMingming Cao * accounted for.
2355ee12b630SMingming Cao */
2356525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */
2357ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
23585887e98bSAneesh Kumar K.V return ret;
2359ee12b630SMingming Cao }
2360ee12b630SMingming Cao }
2361ee12b630SMingming Cao
2362525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks);
2363a86c6181SAlex Tomas }
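/*
 * Illustrative sketch only (not part of the kernel): one way a caller
 * could consume the count returned above. ext4_journal_start() and
 * EXT4_HT_MAP_BLOCKS are real ext4 interfaces, but this helper and its
 * name are hypothetical.
 */
static inline handle_t *example_start_insert_handle(struct inode *inode,
						    struct ext4_ext_path *path)
{
	/* worst-case credits for inserting a single extent along @path */
	int credits = ext4_ext_calc_credits_for_single_extent(inode, 1, path);

	return ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
}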
2364a86c6181SAlex Tomas
2365a86c6181SAlex Tomas /*
2366fffb2739SJan Kara * How many index/leaf blocks need to be changed/allocated to add @extents extents?
2367ee12b630SMingming Cao *
2368fffb2739SJan Kara * If we add a single extent, then in the worst case, each tree level
2369fffb2739SJan Kara * index/leaf needs to be changed in case of the tree split.
2370ee12b630SMingming Cao *
2371fffb2739SJan Kara * If more extents are inserted, they could cause the whole tree to split more
2372fffb2739SJan Kara * than once, but this is really rare.
2373a86c6181SAlex Tomas */
2374fffb2739SJan Kara int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2375ee12b630SMingming Cao {
2376ee12b630SMingming Cao int index;
2377f19d5870STao Ma int depth;
2378f19d5870STao Ma
2379f19d5870STao Ma /* If we are converting the inline data, only one is needed here. */
2380f19d5870STao Ma if (ext4_has_inline_data(inode))
2381f19d5870STao Ma return 1;
2382f19d5870STao Ma
2383f19d5870STao Ma depth = ext_depth(inode);
2384a86c6181SAlex Tomas
2385fffb2739SJan Kara if (extents <= 1)
2386ee12b630SMingming Cao index = depth * 2;
2387ee12b630SMingming Cao else
2388ee12b630SMingming Cao index = depth * 3;
2389a86c6181SAlex Tomas
2390ee12b630SMingming Cao return index;
2391a86c6181SAlex Tomas }
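/*
 * Worked example of the accounting above (illustrative): for a tree of
 * depth 2, a single-extent insert reserves depth * 2 == 4 index/leaf
 * blocks, while a multi-extent insert reserves depth * 3 == 6; an
 * inline-data inode short-circuits to 1 regardless of depth.
 */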
2392a86c6181SAlex Tomas
2393981250caSTheodore Ts'o static inline int get_default_free_blocks_flags(struct inode *inode)
2394981250caSTheodore Ts'o {
2395ddfa17e4STahsin Erdogan if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
2396ddfa17e4STahsin Erdogan ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
2397981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2398981250caSTheodore Ts'o else if (ext4_should_journal_data(inode))
2399981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_FORGET;
2400981250caSTheodore Ts'o return 0;
2401981250caSTheodore Ts'o }
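/*
 * Summarizing the cases above: directories, symlinks and EA inodes
 * free blocks with EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET,
 * regular files in data=journal mode use EXT4_FREE_BLOCKS_FORGET only,
 * and all other inodes pass no extra flags.
 */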
2402981250caSTheodore Ts'o
24039fe67149SEric Whitney /*
24049fe67149SEric Whitney * ext4_rereserve_cluster - increment the reserved cluster count when
24059fe67149SEric Whitney * freeing a cluster with a pending reservation
24069fe67149SEric Whitney *
24079fe67149SEric Whitney * @inode - file containing the cluster
24089fe67149SEric Whitney * @lblk - logical block in cluster to be reserved
24099fe67149SEric Whitney *
24109fe67149SEric Whitney * Increments the reserved cluster count and adjusts quota in a bigalloc
24119fe67149SEric Whitney * file system when freeing a partial cluster containing at least one
24129fe67149SEric Whitney * delayed and unwritten block. A partial cluster meeting that
24139fe67149SEric Whitney * requirement will have a pending reservation. If so, the
24149fe67149SEric Whitney * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to
24159fe67149SEric Whitney * defer reserved and allocated space accounting to a subsequent call
24169fe67149SEric Whitney * to this function.
24179fe67149SEric Whitney */
24189fe67149SEric Whitney static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk)
24199fe67149SEric Whitney {
24209fe67149SEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
24219fe67149SEric Whitney struct ext4_inode_info *ei = EXT4_I(inode);
24229fe67149SEric Whitney
24239fe67149SEric Whitney dquot_reclaim_block(inode, EXT4_C2B(sbi, 1));
24249fe67149SEric Whitney
24259fe67149SEric Whitney spin_lock(&ei->i_block_reservation_lock);
24269fe67149SEric Whitney ei->i_reserved_data_blocks++;
24279fe67149SEric Whitney percpu_counter_add(&sbi->s_dirtyclusters_counter, 1);
24289fe67149SEric Whitney spin_unlock(&ei->i_block_reservation_lock);
24299fe67149SEric Whitney
24309fe67149SEric Whitney percpu_counter_add(&sbi->s_freeclusters_counter, 1);
24319fe67149SEric Whitney ext4_remove_pending(inode, lblk);
24329fe67149SEric Whitney }
24339fe67149SEric Whitney
2434a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2435a86c6181SAlex Tomas struct ext4_extent *ex,
24369fe67149SEric Whitney struct partial_cluster *partial,
2437725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to)
2438a86c6181SAlex Tomas {
24390aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2440a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex);
24419fe67149SEric Whitney ext4_fsblk_t last_pblk, pblk;
24429fe67149SEric Whitney ext4_lblk_t num;
24439fe67149SEric Whitney int flags;
244418888cf0SAndrey Sidorov
24459fe67149SEric Whitney /* only extent tail removal is allowed */
24469fe67149SEric Whitney if (from < le32_to_cpu(ex->ee_block) ||
24479fe67149SEric Whitney to != le32_to_cpu(ex->ee_block) + ee_len - 1) {
24489fe67149SEric Whitney ext4_error(sbi->s_sb,
24499fe67149SEric Whitney "strange request: removal(2) %u-%u from %u:%u",
24509fe67149SEric Whitney from, to, le32_to_cpu(ex->ee_block), ee_len);
24519fe67149SEric Whitney return 0;
24520aa06000STheodore Ts'o }
24530aa06000STheodore Ts'o
2454a86c6181SAlex Tomas #ifdef EXTENTS_STATS
2455a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock);
2456a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len;
2457a86c6181SAlex Tomas sbi->s_ext_extents++;
2458a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min)
2459a86c6181SAlex Tomas sbi->s_ext_min = ee_len;
2460a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max)
2461a86c6181SAlex Tomas sbi->s_ext_max = ee_len;
2462a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max)
2463a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode);
2464a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock);
2465a86c6181SAlex Tomas #endif
24669fe67149SEric Whitney
24679fe67149SEric Whitney trace_ext4_remove_blocks(inode, ex, from, to, partial);
24689fe67149SEric Whitney
24699fe67149SEric Whitney /*
24709fe67149SEric Whitney * if we have a partial cluster, and it's different from the
24719fe67149SEric Whitney * cluster of the last block in the extent, we free it
24729fe67149SEric Whitney */
24739fe67149SEric Whitney last_pblk = ext4_ext_pblock(ex) + ee_len - 1;
24749fe67149SEric Whitney
24759fe67149SEric Whitney if (partial->state != initial &&
24769fe67149SEric Whitney partial->pclu != EXT4_B2C(sbi, last_pblk)) {
24779fe67149SEric Whitney if (partial->state == tofree) {
24789fe67149SEric Whitney flags = get_default_free_blocks_flags(inode);
24799fe67149SEric Whitney if (ext4_is_pending(inode, partial->lblk))
24809fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
24819fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL,
24829fe67149SEric Whitney EXT4_C2B(sbi, partial->pclu),
24839fe67149SEric Whitney sbi->s_cluster_ratio, flags);
24849fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
24859fe67149SEric Whitney ext4_rereserve_cluster(inode, partial->lblk);
24869fe67149SEric Whitney }
24879fe67149SEric Whitney partial->state = initial;
24889fe67149SEric Whitney }
2489725d26d3SAneesh Kumar K.V
2490a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from;
24910aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num;
24929fe67149SEric Whitney
2493d23142c6SLukas Czerner /*
24949fe67149SEric Whitney * We free the partial cluster at the end of the extent (if any),
24959fe67149SEric Whitney * unless the cluster is used by another extent (partial_cluster
24969fe67149SEric Whitney * state is nofree). If a partial cluster exists here, it must be
24979fe67149SEric Whitney * shared with the last block in the extent.
2498d23142c6SLukas Czerner */
24999fe67149SEric Whitney flags = get_default_free_blocks_flags(inode);
25009fe67149SEric Whitney
25019fe67149SEric Whitney /* partial, left end cluster aligned, right end unaligned */
25029fe67149SEric Whitney if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) &&
25039fe67149SEric Whitney (EXT4_LBLK_CMASK(sbi, to) >= from) &&
25049fe67149SEric Whitney (partial->state != nofree)) {
25059fe67149SEric Whitney if (ext4_is_pending(inode, to))
25069fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
25079fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL,
25089fe67149SEric Whitney EXT4_PBLK_CMASK(sbi, last_pblk),
25099fe67149SEric Whitney sbi->s_cluster_ratio, flags);
25109fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
25119fe67149SEric Whitney ext4_rereserve_cluster(inode, to);
25129fe67149SEric Whitney partial->state = initial;
25139fe67149SEric Whitney flags = get_default_free_blocks_flags(inode);
25149fe67149SEric Whitney }
25159fe67149SEric Whitney
2516d23142c6SLukas Czerner flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2517d23142c6SLukas Czerner
25180aa06000STheodore Ts'o /*
25199fe67149SEric Whitney * For bigalloc file systems, we never free a partial cluster
25209fe67149SEric Whitney * at the beginning of the extent. Instead, we check to see if we
25219fe67149SEric Whitney * need to free it on a subsequent call to ext4_remove_blocks,
25229fe67149SEric Whitney * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
25230aa06000STheodore Ts'o */
25249fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
25259fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
25269fe67149SEric Whitney
25279fe67149SEric Whitney /* reset the partial cluster if we've freed past it */
25289fe67149SEric Whitney if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk))
25299fe67149SEric Whitney partial->state = initial;
25309fe67149SEric Whitney
25319fe67149SEric Whitney /*
25329fe67149SEric Whitney * If we've freed the entire extent but the beginning is not left
25339fe67149SEric Whitney * cluster aligned and is not marked as ineligible for freeing, we
25349fe67149SEric Whitney * record the partial cluster at the beginning of the extent. It
25359fe67149SEric Whitney * wasn't freed by the preceding ext4_free_blocks() call, and we
25369fe67149SEric Whitney * need to look farther to the left to determine if it's to be freed
25379fe67149SEric Whitney * (not shared with another extent). Else, reset the partial
25389fe67149SEric Whitney * cluster - we're either done freeing or the beginning of the
25399fe67149SEric Whitney * extent is left cluster aligned.
25409fe67149SEric Whitney */
25419fe67149SEric Whitney if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) {
25429fe67149SEric Whitney if (partial->state == initial) {
25439fe67149SEric Whitney partial->pclu = EXT4_B2C(sbi, pblk);
25449fe67149SEric Whitney partial->lblk = from;
25459fe67149SEric Whitney partial->state = tofree;
2546345ee947SEric Whitney }
25479fe67149SEric Whitney } else {
25489fe67149SEric Whitney partial->state = initial;
2549a86c6181SAlex Tomas }
2550a86c6181SAlex Tomas
25519fe67149SEric Whitney return 0;
25529fe67149SEric Whitney }
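/*
 * Hypothetical bigalloc example (s_cluster_ratio == 4): removing an
 * entire extent covering logical blocks 5..11 frees cluster 2 (blocks
 * 8..11) outright, but block 5 is not cluster aligned
 * (EXT4_LBLK_COFF(sbi, 5) == 1), so the extent's first cluster might
 * still back block 4 of a neighbouring extent. That cluster is skipped
 * via NOFREE_FIRST_CLUSTER and recorded in @partial with state tofree,
 * to be freed (or not) once the left neighbour has been examined.
 */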
2553d583fb87SAllison Henderson
2554d583fb87SAllison Henderson /*
2555d583fb87SAllison Henderson * ext4_ext_rm_leaf() Removes the extents associated with the
25565bf43760SEric Whitney * blocks appearing between "start" and "end". Both "start"
25575bf43760SEric Whitney * and "end" must appear in the same extent or EIO is returned.
2558d583fb87SAllison Henderson *
2559d583fb87SAllison Henderson * @handle: The journal handle
2560d583fb87SAllison Henderson * @inode: The file's inode
2561d583fb87SAllison Henderson * @path: The path to the leaf
2562d23142c6SLukas Czerner * @partial_cluster: The cluster which we'll have to free if all extents
25635bf43760SEric Whitney * have been released from it. However, if this value is
25645bf43760SEric Whitney * negative, it's a cluster just to the right of the
25655bf43760SEric Whitney * punched region and it must not be freed.
2566d583fb87SAllison Henderson * @start: The first block to remove
2567d583fb87SAllison Henderson * @end: The last block to remove
2568d583fb87SAllison Henderson */
2569a86c6181SAlex Tomas static int
2570a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2571d23142c6SLukas Czerner struct ext4_ext_path *path,
25729fe67149SEric Whitney struct partial_cluster *partial,
25730aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end)
2574a86c6181SAlex Tomas {
25750aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2576a86c6181SAlex Tomas int err = 0, correct_index = 0;
257783448bdfSJan Kara int depth = ext_depth(inode), credits, revoke_credits;
2578a86c6181SAlex Tomas struct ext4_extent_header *eh;
2579750c9c47SDmitry Monakhov ext4_lblk_t a, b;
2580725d26d3SAneesh Kumar K.V unsigned num;
2581725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block;
2582a86c6181SAlex Tomas unsigned short ex_ee_len;
2583556615dcSLukas Czerner unsigned unwritten = 0;
2584a86c6181SAlex Tomas struct ext4_extent *ex;
2585d23142c6SLukas Czerner ext4_fsblk_t pblk;
2586a86c6181SAlex Tomas
2587c29c0ae7SAlex Tomas /* the header must be checked already in ext4_ext_remove_space() */
258870aa1554SRitesh Harjani ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
2589a86c6181SAlex Tomas if (!path[depth].p_hdr)
2590a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2591a86c6181SAlex Tomas eh = path[depth].p_hdr;
2592273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) {
2593273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
25946a797d27SDarrick J. Wong return -EFSCORRUPTED;
2595273df556SFrank Mayhar }
2596a86c6181SAlex Tomas /* find where to start removing */
25976ae06ff5SAshish Sangwan ex = path[depth].p_ext;
25986ae06ff5SAshish Sangwan if (!ex)
2599a86c6181SAlex Tomas ex = EXT_LAST_EXTENT(eh);
2600a86c6181SAlex Tomas
2601a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block);
2602a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex);
2603a86c6181SAlex Tomas
26049fe67149SEric Whitney trace_ext4_ext_rm_leaf(inode, start, ex, partial);
2605d8990240SAditya Kali
2606a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) &&
2607a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) {
2608a41f2071SAneesh Kumar K.V
2609556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex))
2610556615dcSLukas Czerner unwritten = 1;
2611a41f2071SAneesh Kumar K.V else
2612556615dcSLukas Czerner unwritten = 0;
2613a41f2071SAneesh Kumar K.V
261470aa1554SRitesh Harjani ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
2615556615dcSLukas Czerner unwritten, ex_ee_len);
2616a86c6181SAlex Tomas path[depth].p_ext = ex;
2617a86c6181SAlex Tomas
261866267814SJiangshan Yi a = max(ex_ee_block, start);
261966267814SJiangshan Yi b = min(ex_ee_block + ex_ee_len - 1, end);
2620a86c6181SAlex Tomas
262170aa1554SRitesh Harjani ext_debug(inode, " border %u:%u\n", a, b);
2622a86c6181SAlex Tomas
2623d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */
26245f95d21fSLukas Czerner if (end < ex_ee_block) {
2625d23142c6SLukas Czerner /*
2626d23142c6SLukas Czerner * We're going to skip this extent and move to another,
2627f4226d9eSEric Whitney * so note that its first cluster is in use to avoid
2628f4226d9eSEric Whitney * freeing it when removing blocks. Eventually, the
2629f4226d9eSEric Whitney * right edge of the truncated/punched region will
2630f4226d9eSEric Whitney * be just to the left.
2631d23142c6SLukas Czerner */
2632f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) {
2633d23142c6SLukas Czerner pblk = ext4_ext_pblock(ex);
26349fe67149SEric Whitney partial->pclu = EXT4_B2C(sbi, pblk);
26359fe67149SEric Whitney partial->state = nofree;
2636f4226d9eSEric Whitney }
2637d583fb87SAllison Henderson ex--;
2638d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block);
2639d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex);
2640d583fb87SAllison Henderson continue;
2641750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) {
2642dc1841d6SLukas Czerner EXT4_ERROR_INODE(inode,
2643dc1841d6SLukas Czerner "can not handle truncate %u:%u "
2644dc1841d6SLukas Czerner "on extent %u:%u",
2645dc1841d6SLukas Czerner start, end, ex_ee_block,
2646dc1841d6SLukas Czerner ex_ee_block + ex_ee_len - 1);
26476a797d27SDarrick J. Wong err = -EFSCORRUPTED;
2648d583fb87SAllison Henderson goto out;
2649a86c6181SAlex Tomas } else if (a != ex_ee_block) {
2650a86c6181SAlex Tomas /* remove tail of the extent */
2651750c9c47SDmitry Monakhov num = a - ex_ee_block;
2652a86c6181SAlex Tomas } else {
2653a86c6181SAlex Tomas /* remove whole extent: excellent! */
2654a86c6181SAlex Tomas num = 0;
2655d583fb87SAllison Henderson }
265634071da7STheodore Ts'o /*
265734071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group
265834071da7STheodore Ts'o * descriptor) for each block group; assume two block
265934071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for
266034071da7STheodore Ts'o * the worst case
266134071da7STheodore Ts'o */
266234071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2663a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) {
2664a86c6181SAlex Tomas correct_index = 1;
2665a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1;
2666a86c6181SAlex Tomas }
26675aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
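		/*
		 * Worked example (illustrative numbers): with 32768 blocks
		 * per group and ex_ee_len == 70000, the computation above
		 * gives credits = 7 + 2 * (70000 / 32768) = 11, plus
		 * ext_depth(inode) + 1 when the first extent of the leaf
		 * is being removed, plus the quota credits.
		 */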
266883448bdfSJan Kara /*
266983448bdfSJan Kara * We may end up freeing some index blocks and data from the
267083448bdfSJan Kara * punched range. Note that partial clusters are accounted for
267183448bdfSJan Kara * by ext4_free_data_revoke_credits().
267283448bdfSJan Kara */
267383448bdfSJan Kara revoke_credits =
267483448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb,
267583448bdfSJan Kara ext_depth(inode)) +
267683448bdfSJan Kara ext4_free_data_revoke_credits(inode, b - a + 1);
2677a86c6181SAlex Tomas
2678a4130367SJan Kara err = ext4_datasem_ensure_credits(handle, inode, credits,
267983448bdfSJan Kara credits, revoke_credits);
2680a4130367SJan Kara if (err) {
2681a4130367SJan Kara if (err > 0)
2682a4130367SJan Kara err = -EAGAIN;
2683a86c6181SAlex Tomas goto out;
2684a4130367SJan Kara }
2685a86c6181SAlex Tomas
2686a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth);
2687a86c6181SAlex Tomas if (err)
2688a86c6181SAlex Tomas goto out;
2689a86c6181SAlex Tomas
26909fe67149SEric Whitney err = ext4_remove_blocks(handle, inode, ex, partial, a, b);
2691a86c6181SAlex Tomas if (err)
2692a86c6181SAlex Tomas goto out;
2693a86c6181SAlex Tomas
2694750c9c47SDmitry Monakhov if (num == 0)
2695d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */
2696f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0);
2697a86c6181SAlex Tomas
2698a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num);
2699749269faSAmit Arora /*
2700556615dcSLukas Czerner * Do not mark unwritten if all the blocks in the
2701749269faSAmit Arora * extent have been removed.
2702749269faSAmit Arora */
2703556615dcSLukas Czerner if (unwritten && num)
2704556615dcSLukas Czerner ext4_ext_mark_unwritten(ex);
2705d583fb87SAllison Henderson /*
2706d583fb87SAllison Henderson * If the extent was completely released,
2707d583fb87SAllison Henderson * we need to remove it from the leaf
2708d583fb87SAllison Henderson */
2709d583fb87SAllison Henderson if (num == 0) {
2710f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) {
2711d583fb87SAllison Henderson /*
2712d583fb87SAllison Henderson * For hole punching, we need to scoot all the
2713d583fb87SAllison Henderson * extents up when an extent is removed so that
2714d583fb87SAllison Henderson * we don't have blank extents in the middle
2715d583fb87SAllison Henderson */
2716d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2717d583fb87SAllison Henderson sizeof(struct ext4_extent));
2718d583fb87SAllison Henderson
2719d583fb87SAllison Henderson /* Now get rid of the one at the end */
2720d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0,
2721d583fb87SAllison Henderson sizeof(struct ext4_extent));
2722d583fb87SAllison Henderson }
2723d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1);
27245bf43760SEric Whitney }
2725d583fb87SAllison Henderson
2726750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth);
2727750c9c47SDmitry Monakhov if (err)
2728750c9c47SDmitry Monakhov goto out;
2729750c9c47SDmitry Monakhov
273070aa1554SRitesh Harjani ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
2731bf89d16fSTheodore Ts'o ext4_ext_pblock(ex));
2732a86c6181SAlex Tomas ex--;
2733a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block);
2734a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex);
2735a86c6181SAlex Tomas }
2736a86c6181SAlex Tomas
2737a86c6181SAlex Tomas if (correct_index && eh->eh_entries)
2738a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path);
2739a86c6181SAlex Tomas
27400aa06000STheodore Ts'o /*
2741ad6599abSEric Whitney * If there's a partial cluster and at least one extent remains in
2742ad6599abSEric Whitney * the leaf, free the partial cluster if it isn't shared with the
27435bf43760SEric Whitney * current extent. If it is shared with the current extent
27449fe67149SEric Whitney * we reset the partial cluster because we've reached the start of the
27455bf43760SEric Whitney * truncated/punched region and we're done removing blocks.
27460aa06000STheodore Ts'o */
27479fe67149SEric Whitney if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) {
27485bf43760SEric Whitney pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
27499fe67149SEric Whitney if (partial->pclu != EXT4_B2C(sbi, pblk)) {
27509fe67149SEric Whitney int flags = get_default_free_blocks_flags(inode);
27519fe67149SEric Whitney
27529fe67149SEric Whitney if (ext4_is_pending(inode, partial->lblk))
27539fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
27540aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL,
27559fe67149SEric Whitney EXT4_C2B(sbi, partial->pclu),
27569fe67149SEric Whitney sbi->s_cluster_ratio, flags);
27579fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
27589fe67149SEric Whitney ext4_rereserve_cluster(inode, partial->lblk);
27595bf43760SEric Whitney }
27609fe67149SEric Whitney partial->state = initial;
27610aa06000STheodore Ts'o }
27620aa06000STheodore Ts'o
2763a86c6181SAlex Tomas /* if this leaf is free, then we should
2764a86c6181SAlex Tomas * remove it from index block above */
2765a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2766c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, depth);
2767a86c6181SAlex Tomas
2768a86c6181SAlex Tomas out:
2769a86c6181SAlex Tomas return err;
2770a86c6181SAlex Tomas }
2771a86c6181SAlex Tomas
2772a86c6181SAlex Tomas /*
2773d0d856e8SRandy Dunlap * ext4_ext_more_to_rm:
2774d0d856e8SRandy Dunlap * returns 1 if the current index has to be freed (even partially)
2775a86c6181SAlex Tomas */
277609b88252SAvantika Mathur static int
2777a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path)
2778a86c6181SAlex Tomas {
2779a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL);
2780a86c6181SAlex Tomas
2781a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2782a86c6181SAlex Tomas return 0;
2783a86c6181SAlex Tomas
2784a86c6181SAlex Tomas /*
2785d0d856e8SRandy Dunlap * if a truncate at a deeper level happened, it wasn't partial,
2786a86c6181SAlex Tomas * so we have to consider the current index for truncation
2787a86c6181SAlex Tomas */
2788a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2789a86c6181SAlex Tomas return 0;
2790a86c6181SAlex Tomas return 1;
2791a86c6181SAlex Tomas }
2792a86c6181SAlex Tomas
279326a4c0c6STheodore Ts'o int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
27945f95d21fSLukas Czerner ext4_lblk_t end)
2795a86c6181SAlex Tomas {
2796f4226d9eSEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2797a86c6181SAlex Tomas int depth = ext_depth(inode);
2798968dee77SAshish Sangwan struct ext4_ext_path *path = NULL;
27999fe67149SEric Whitney struct partial_cluster partial;
2800a86c6181SAlex Tomas handle_t *handle;
28016f2080e6SDmitry Monakhov int i = 0, err = 0;
2802a86c6181SAlex Tomas
28039fe67149SEric Whitney partial.pclu = 0;
28049fe67149SEric Whitney partial.lblk = 0;
28059fe67149SEric Whitney partial.state = initial;
28069fe67149SEric Whitney
280770aa1554SRitesh Harjani ext_debug(inode, "truncate since %u to %u\n", start, end);
2808a86c6181SAlex Tomas
2809a86c6181SAlex Tomas /* probably first extent we're gonna free will be last in block */
281083448bdfSJan Kara handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
281183448bdfSJan Kara depth + 1,
281283448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, depth));
2813a86c6181SAlex Tomas if (IS_ERR(handle))
2814a86c6181SAlex Tomas return PTR_ERR(handle);
2815a86c6181SAlex Tomas
28160617b83fSDmitry Monakhov again:
281761801325SLukas Czerner trace_ext4_ext_remove_space(inode, start, end, depth);
2818d8990240SAditya Kali
2819a86c6181SAlex Tomas /*
28205f95d21fSLukas Czerner * Check if we are removing extents inside the extent tree. If that
28215f95d21fSLukas Czerner * is the case, we are going to punch a hole inside the extent tree
28225f95d21fSLukas Czerner * so we have to check whether we need to split the extent covering
28235f95d21fSLukas Czerner * the last block to remove so we can easily remove the part of it
28245f95d21fSLukas Czerner * in ext4_ext_rm_leaf().
28255f95d21fSLukas Czerner */
28265f95d21fSLukas Czerner if (end < EXT_MAX_BLOCKS - 1) {
28275f95d21fSLukas Czerner struct ext4_extent *ex;
2828f4226d9eSEric Whitney ext4_lblk_t ee_block, ex_end, lblk;
2829f4226d9eSEric Whitney ext4_fsblk_t pblk;
28305f95d21fSLukas Czerner
2831f4226d9eSEric Whitney /* find extent for or closest extent to this block */
283273c384c0STheodore Ts'o path = ext4_find_extent(inode, end, NULL,
283373c384c0STheodore Ts'o EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
28345f95d21fSLukas Czerner if (IS_ERR(path)) {
28355f95d21fSLukas Czerner ext4_journal_stop(handle);
28365f95d21fSLukas Czerner return PTR_ERR(path);
28375f95d21fSLukas Czerner }
28385f95d21fSLukas Czerner depth = ext_depth(inode);
28396f2080e6SDmitry Monakhov /* A leaf may be missing only if the inode has no blocks at all */
28405f95d21fSLukas Czerner ex = path[depth].p_ext;
2841968dee77SAshish Sangwan if (!ex) {
28426f2080e6SDmitry Monakhov if (depth) {
28436f2080e6SDmitry Monakhov EXT4_ERROR_INODE(inode,
28446f2080e6SDmitry Monakhov "path[%d].p_hdr == NULL",
28456f2080e6SDmitry Monakhov depth);
28466a797d27SDarrick J. Wong err = -EFSCORRUPTED;
28476f2080e6SDmitry Monakhov }
28486f2080e6SDmitry Monakhov goto out;
2849968dee77SAshish Sangwan }
28505f95d21fSLukas Czerner
28515f95d21fSLukas Czerner ee_block = le32_to_cpu(ex->ee_block);
2852f4226d9eSEric Whitney ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
28535f95d21fSLukas Czerner
28545f95d21fSLukas Czerner /*
28555f95d21fSLukas Czerner * See if the last block is inside the extent, if so split
28565f95d21fSLukas Czerner * the extent at 'end' block so we can easily remove the
28575f95d21fSLukas Czerner * tail of the first part of the split extent in
28585f95d21fSLukas Czerner * ext4_ext_rm_leaf().
28595f95d21fSLukas Czerner */
2860f4226d9eSEric Whitney if (end >= ee_block && end < ex_end) {
2861f4226d9eSEric Whitney
2862f4226d9eSEric Whitney /*
2863f4226d9eSEric Whitney * If we're going to split the extent, note that
2864f4226d9eSEric Whitney * the cluster containing the block after 'end' is
2865f4226d9eSEric Whitney * in use to avoid freeing it when removing blocks.
2866f4226d9eSEric Whitney */
2867f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) {
2868cfb3c85aSJeffle Xu pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
28699fe67149SEric Whitney partial.pclu = EXT4_B2C(sbi, pblk);
28709fe67149SEric Whitney partial.state = nofree;
2871f4226d9eSEric Whitney }
2872f4226d9eSEric Whitney
28735f95d21fSLukas Czerner /*
28745f95d21fSLukas Czerner * Split the extent in two so that 'end' is the last
287527dd4385SLukas Czerner * block in the first new extent. Also we should not
287627dd4385SLukas Czerner * fail removing space due to ENOSPC so try to use
287727dd4385SLukas Czerner * reserved block if that happens.
28785f95d21fSLukas Czerner */
2879dfe50809STheodore Ts'o err = ext4_force_split_extent_at(handle, inode, &path,
2880fcf6b1b7SDmitry Monakhov end + 1, 1);
28815f95d21fSLukas Czerner if (err < 0)
28825f95d21fSLukas Czerner goto out;
2883f4226d9eSEric Whitney
28847bd75230SEric Whitney } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
28857bd75230SEric Whitney partial.state == initial) {
2886f4226d9eSEric Whitney /*
28877bd75230SEric Whitney * If we're punching, there's an extent to the right.
28887bd75230SEric Whitney * If the partial cluster hasn't been set, set it to
28897bd75230SEric Whitney * that extent's first cluster and its state to nofree
28907bd75230SEric Whitney * so it won't be freed should it contain blocks to be
28917bd75230SEric Whitney * removed. If it's already set (tofree/nofree), we're
28927bd75230SEric Whitney * retrying and keep the original partial cluster info
28937bd75230SEric Whitney * so a cluster marked tofree as a result of earlier
28947bd75230SEric Whitney * extent removal is not lost.
2895f4226d9eSEric Whitney */
2896f4226d9eSEric Whitney lblk = ex_end + 1;
2897f4226d9eSEric Whitney err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2898d7dce9e0Syangerkun NULL);
2899d7dce9e0Syangerkun if (err < 0)
2900f4226d9eSEric Whitney goto out;
29019fe67149SEric Whitney if (pblk) {
29029fe67149SEric Whitney partial.pclu = EXT4_B2C(sbi, pblk);
29039fe67149SEric Whitney partial.state = nofree;
29049fe67149SEric Whitney }
29055f95d21fSLukas Czerner }
29065f95d21fSLukas Czerner }
29075f95d21fSLukas Czerner /*
2908d0d856e8SRandy Dunlap * We start scanning from the right side, freeing all the blocks
2909d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise.
2910a86c6181SAlex Tomas */
29110617b83fSDmitry Monakhov depth = ext_depth(inode);
2912968dee77SAshish Sangwan if (path) {
2913968dee77SAshish Sangwan int k = i = depth;
2914968dee77SAshish Sangwan while (--k > 0)
2915968dee77SAshish Sangwan path[k].p_block =
2916968dee77SAshish Sangwan le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2917968dee77SAshish Sangwan } else {
29186396bb22SKees Cook path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
291973c384c0STheodore Ts'o GFP_NOFS | __GFP_NOFAIL);
2920a86c6181SAlex Tomas if (path == NULL) {
2921a86c6181SAlex Tomas ext4_journal_stop(handle);
2922a86c6181SAlex Tomas return -ENOMEM;
2923a86c6181SAlex Tomas }
292410809df8STheodore Ts'o path[0].p_maxdepth = path[0].p_depth = depth;
2925a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode);
292689a4e48fSTheodore Ts'o i = 0;
29275f95d21fSLukas Czerner
2928c349179bSTheodore Ts'o if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
29296a797d27SDarrick J. Wong err = -EFSCORRUPTED;
2930a86c6181SAlex Tomas goto out;
2931a86c6181SAlex Tomas }
2932968dee77SAshish Sangwan }
2933968dee77SAshish Sangwan err = 0;
2934a86c6181SAlex Tomas
2935a86c6181SAlex Tomas while (i >= 0 && err == 0) {
2936a86c6181SAlex Tomas if (i == depth) {
2937a86c6181SAlex Tomas /* this is leaf block */
2938d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path,
29399fe67149SEric Whitney &partial, start, end);
2940d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */
2941a86c6181SAlex Tomas brelse(path[i].p_bh);
2942a86c6181SAlex Tomas path[i].p_bh = NULL;
2943a86c6181SAlex Tomas i--;
2944a86c6181SAlex Tomas continue;
2945a86c6181SAlex Tomas }
2946a86c6181SAlex Tomas
2947a86c6181SAlex Tomas /* this is index block */
2948a86c6181SAlex Tomas if (!path[i].p_hdr) {
294970aa1554SRitesh Harjani ext_debug(inode, "initialize header\n");
2950a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2951a86c6181SAlex Tomas }
2952a86c6181SAlex Tomas
2953a86c6181SAlex Tomas if (!path[i].p_idx) {
2954d0d856e8SRandy Dunlap /* this level hasn't been touched yet */
2955a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2956a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
295770aa1554SRitesh Harjani ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
2958a86c6181SAlex Tomas path[i].p_hdr,
2959a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries));
2960a86c6181SAlex Tomas } else {
2961d0d856e8SRandy Dunlap /* we were already here, move to the next index */
2962a86c6181SAlex Tomas path[i].p_idx--;
2963a86c6181SAlex Tomas }
2964a86c6181SAlex Tomas
296570aa1554SRitesh Harjani ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
2966a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr),
2967a86c6181SAlex Tomas path[i].p_idx);
2968a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) {
2969c29c0ae7SAlex Tomas struct buffer_head *bh;
2970a86c6181SAlex Tomas /* go to the next level */
297170aa1554SRitesh Harjani ext_debug(inode, "move to level %d (block %llu)\n",
2972bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx));
2973a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path));
29749c6e0719SZhang Yi bh = read_extent_tree_block(inode, path[i].p_idx,
29759c6e0719SZhang Yi depth - i - 1,
2976107a7bd3STheodore Ts'o EXT4_EX_NOCACHE);
29777d7ea89eSTheodore Ts'o if (IS_ERR(bh)) {
2978a86c6181SAlex Tomas /* should we reset i_size? */
29797d7ea89eSTheodore Ts'o err = PTR_ERR(bh);
2980a86c6181SAlex Tomas break;
2981a86c6181SAlex Tomas }
298276828c88STheodore Ts'o /* Yield here to deal with large extent trees.
298376828c88STheodore Ts'o * Should be a no-op if we did IO above. */
298476828c88STheodore Ts'o cond_resched();
2985c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) {
29866a797d27SDarrick J. Wong err = -EFSCORRUPTED;
2987c29c0ae7SAlex Tomas break;
2988c29c0ae7SAlex Tomas }
2989c29c0ae7SAlex Tomas path[i + 1].p_bh = bh;
2990a86c6181SAlex Tomas
2991d0d856e8SRandy Dunlap /* save actual number of indexes since this
2992d0d856e8SRandy Dunlap * number is changed at the next iteration */
2993a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2994a86c6181SAlex Tomas i++;
2995a86c6181SAlex Tomas } else {
2996d0d856e8SRandy Dunlap /* we finished processing this index, go up */
2997a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2998d0d856e8SRandy Dunlap /* index is empty, remove it;
2999a86c6181SAlex Tomas * the handle must already have been prepared
3000a86c6181SAlex Tomas * by the leaf truncation code */
3001c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, i);
3002a86c6181SAlex Tomas }
3003d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */
3004a86c6181SAlex Tomas brelse(path[i].p_bh);
3005a86c6181SAlex Tomas path[i].p_bh = NULL;
3006a86c6181SAlex Tomas i--;
300770aa1554SRitesh Harjani ext_debug(inode, "return to level %d\n", i);
3008a86c6181SAlex Tomas }
3009a86c6181SAlex Tomas }
3010a86c6181SAlex Tomas
30119fe67149SEric Whitney trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial,
30129fe67149SEric Whitney path->p_hdr->eh_entries);
3013d8990240SAditya Kali
30140756b908SEric Whitney /*
30159fe67149SEric Whitney * if there's a partial cluster and we have removed the first extent
30169fe67149SEric Whitney * in the file, then we also free that partial cluster
30170756b908SEric Whitney */
30189fe67149SEric Whitney if (partial.state == tofree && err == 0) {
30199fe67149SEric Whitney int flags = get_default_free_blocks_flags(inode);
30209fe67149SEric Whitney
30219fe67149SEric Whitney if (ext4_is_pending(inode, partial.lblk))
30229fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER;
30237b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL,
30249fe67149SEric Whitney EXT4_C2B(sbi, partial.pclu),
30259fe67149SEric Whitney sbi->s_cluster_ratio, flags);
30269fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)
30279fe67149SEric Whitney ext4_rereserve_cluster(inode, partial.lblk);
30289fe67149SEric Whitney partial.state = initial;
30297b415bf6SAditya Kali }
30307b415bf6SAditya Kali
3031a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */
3032a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) {
3033a86c6181SAlex Tomas /*
3034d0d856e8SRandy Dunlap * truncate to zero freed the whole tree,
3035d0d856e8SRandy Dunlap * so we need to correct eh_depth
3036a86c6181SAlex Tomas */
3037a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path);
3038a86c6181SAlex Tomas if (err == 0) {
3039a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0;
3040a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max =
304155ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0));
3042a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path);
3043a86c6181SAlex Tomas }
3044a86c6181SAlex Tomas }
3045a86c6181SAlex Tomas out:
30467ff5fddaSYe Bin ext4_free_ext_path(path);
3047968dee77SAshish Sangwan path = NULL;
3048dfe50809STheodore Ts'o if (err == -EAGAIN)
3049dfe50809STheodore Ts'o goto again;
3050a86c6181SAlex Tomas ext4_journal_stop(handle);
3051a86c6181SAlex Tomas
3052a86c6181SAlex Tomas return err;
3053a86c6181SAlex Tomas }
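/*
 * Shape of the walk above, for a hypothetical depth-1 tree: i starts
 * at the root (i == 0), ext4_ext_more_to_rm() sends the walk down one
 * level at a time until i == depth, where ext4_ext_rm_leaf() strips
 * extents right to left; index blocks emptied on the way back up are
 * removed by ext4_ext_rm_idx(), and a tree that ends up completely
 * empty is collapsed back to depth 0 just above.
 */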
3054a86c6181SAlex Tomas
3055a86c6181SAlex Tomas /*
3056a86c6181SAlex Tomas * called at mount time
3057a86c6181SAlex Tomas */
3058a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb)
3059a86c6181SAlex Tomas {
3060a86c6181SAlex Tomas /*
3061a86c6181SAlex Tomas * possible initialization would be here
3062a86c6181SAlex Tomas */
3063a86c6181SAlex Tomas
3064e2b911c5SDarrick J. Wong if (ext4_has_feature_extents(sb)) {
306590576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
306692b97816STheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled"
3067bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
306892b97816STheodore Ts'o ", aggressive tests"
3069a86c6181SAlex Tomas #endif
3070a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH
307192b97816STheodore Ts'o ", check binsearch"
3072a86c6181SAlex Tomas #endif
3073a86c6181SAlex Tomas #ifdef EXTENTS_STATS
307492b97816STheodore Ts'o ", stats"
3075a86c6181SAlex Tomas #endif
307692b97816STheodore Ts'o "\n");
307790576c0bSTheodore Ts'o #endif
3078a86c6181SAlex Tomas #ifdef EXTENTS_STATS
3079a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3080a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30;
3081a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0;
3082a86c6181SAlex Tomas #endif
3083a86c6181SAlex Tomas }
3084a86c6181SAlex Tomas }
3085a86c6181SAlex Tomas
3086a86c6181SAlex Tomas /*
3087a86c6181SAlex Tomas * called at umount time
3088a86c6181SAlex Tomas */
3089a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb)
3090a86c6181SAlex Tomas {
3091e2b911c5SDarrick J. Wong if (!ext4_has_feature_extents(sb))
3092a86c6181SAlex Tomas return;
3093a86c6181SAlex Tomas
3094a86c6181SAlex Tomas #ifdef EXTENTS_STATS
3095a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3096a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb);
3097a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3098a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents,
3099a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents);
3100a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3101a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3102a86c6181SAlex Tomas }
3103a86c6181SAlex Tomas #endif
3104a86c6181SAlex Tomas }
3105a86c6181SAlex Tomas
3106ab8627e1SBaokun Li static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3107d7b2a00cSZheng Liu {
3108d7b2a00cSZheng Liu ext4_lblk_t ee_block;
3109d7b2a00cSZheng Liu ext4_fsblk_t ee_pblock;
3110d7b2a00cSZheng Liu unsigned int ee_len;
3111d7b2a00cSZheng Liu
3112d7b2a00cSZheng Liu ee_block = le32_to_cpu(ex->ee_block);
3113d7b2a00cSZheng Liu ee_len = ext4_ext_get_actual_len(ex);
3114d7b2a00cSZheng Liu ee_pblock = ext4_ext_pblock(ex);
3115d7b2a00cSZheng Liu
3116d7b2a00cSZheng Liu if (ee_len == 0)
3117ab8627e1SBaokun Li return;
3118d7b2a00cSZheng Liu
31196c120399SBaokun Li ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3120d7b2a00cSZheng Liu EXTENT_STATUS_WRITTEN);
3121d7b2a00cSZheng Liu }
3122d7b2a00cSZheng Liu
3123093a088bSAneesh Kumar K.V /* FIXME!! we need to try to merge to left or right after zero-out */
3124093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3125093a088bSAneesh Kumar K.V {
31262407518dSLukas Czerner ext4_fsblk_t ee_pblock;
31272407518dSLukas Czerner unsigned int ee_len;
3128093a088bSAneesh Kumar K.V
3129093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex);
3130bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex);
313153085facSJan Kara return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
313253085facSJan Kara ee_len);
3133093a088bSAneesh Kumar K.V }
3134093a088bSAneesh Kumar K.V
313547ea3bb5SYongqiang Yang /*
313647ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at a given block.
313747ea3bb5SYongqiang Yang *
313847ea3bb5SYongqiang Yang * @handle: the journal handle
313947ea3bb5SYongqiang Yang * @inode: the file inode
314047ea3bb5SYongqiang Yang * @path: the path to the extent
314147ea3bb5SYongqiang Yang * @split: the logical block where the extent is split.
314247ea3bb5SYongqiang Yang * @split_flags: indicates if the extent could be zeroed out if the split
3143556615dcSLukas Czerner * fails, and the states (initialized or unwritten) of the new extents.
314447ea3bb5SYongqiang Yang * @flags: flags used to insert the new extent into the extent tree.
314547ea3bb5SYongqiang Yang *
314647ea3bb5SYongqiang Yang *
314747ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
3148e4d7f2d3SKeyur Patel * of which are determined by split_flag.
314947ea3bb5SYongqiang Yang *
315047ea3bb5SYongqiang Yang * There are two cases:
315147ea3bb5SYongqiang Yang * a> the extent is split into two extents.
315247ea3bb5SYongqiang Yang * b> no split is needed, and the extent is just marked.
315347ea3bb5SYongqiang Yang *
315447ea3bb5SYongqiang Yang * return 0 on success.
315547ea3bb5SYongqiang Yang */
315647ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle,
315747ea3bb5SYongqiang Yang struct inode *inode,
3158dfe50809STheodore Ts'o struct ext4_ext_path **ppath,
315947ea3bb5SYongqiang Yang ext4_lblk_t split,
316047ea3bb5SYongqiang Yang int split_flag,
316147ea3bb5SYongqiang Yang int flags)
316247ea3bb5SYongqiang Yang {
3163dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
316447ea3bb5SYongqiang Yang ext4_fsblk_t newblock;
316547ea3bb5SYongqiang Yang ext4_lblk_t ee_block;
3166adb23551SZheng Liu struct ext4_extent *ex, newex, orig_ex, zero_ex;
316747ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL;
316847ea3bb5SYongqiang Yang unsigned int ee_len, depth;
316947ea3bb5SYongqiang Yang int err = 0;
317047ea3bb5SYongqiang Yang
3171dee1f973SDmitry Monakhov BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3172dee1f973SDmitry Monakhov (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3173dee1f973SDmitry Monakhov
317470aa1554SRitesh Harjani ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
317547ea3bb5SYongqiang Yang
317647ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path);
317747ea3bb5SYongqiang Yang
317847ea3bb5SYongqiang Yang depth = ext_depth(inode);
317947ea3bb5SYongqiang Yang ex = path[depth].p_ext;
318047ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block);
318147ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex);
318247ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex);
318347ea3bb5SYongqiang Yang
318447ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3185556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex) &&
3186357b66fdSDmitry Monakhov split_flag & (EXT4_EXT_MAY_ZEROOUT |
3187556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT1 |
3188556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2));
318947ea3bb5SYongqiang Yang
319047ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth);
319147ea3bb5SYongqiang Yang if (err)
319247ea3bb5SYongqiang Yang goto out;
319347ea3bb5SYongqiang Yang
319447ea3bb5SYongqiang Yang if (split == ee_block) {
319547ea3bb5SYongqiang Yang /*
319647ea3bb5SYongqiang Yang * case b: if block @split is the block that the extent begins with,
319747ea3bb5SYongqiang Yang * then we just change the state of the extent, and splitting
319847ea3bb5SYongqiang Yang * is not needed.
319947ea3bb5SYongqiang Yang */
3200556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3201556615dcSLukas Czerner ext4_ext_mark_unwritten(ex);
320247ea3bb5SYongqiang Yang else
320347ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex);
320447ea3bb5SYongqiang Yang
320547ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3206ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex);
320747ea3bb5SYongqiang Yang
3208ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth);
320947ea3bb5SYongqiang Yang goto out;
321047ea3bb5SYongqiang Yang }
321147ea3bb5SYongqiang Yang
321247ea3bb5SYongqiang Yang /* case a */
321347ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex));
321447ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block);
3215556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT1)
3216556615dcSLukas Czerner ext4_ext_mark_unwritten(ex);
321747ea3bb5SYongqiang Yang
321847ea3bb5SYongqiang Yang /*
321947ea3bb5SYongqiang Yang * the path may lead to a new leaf, not to the original leaf
322047ea3bb5SYongqiang Yang * any more after ext4_ext_insert_extent() returns.
322147ea3bb5SYongqiang Yang */
322247ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth);
322347ea3bb5SYongqiang Yang if (err)
322447ea3bb5SYongqiang Yang goto fix_extent_len;
322547ea3bb5SYongqiang Yang
322647ea3bb5SYongqiang Yang ex2 = &newex;
322747ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split);
322847ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
322947ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock);
3230556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3231556615dcSLukas Czerner ext4_ext_mark_unwritten(ex2);
323247ea3bb5SYongqiang Yang
3233dfe50809STheodore Ts'o err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
32343f542479Szhanchengbin if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM)
3235082cd4ecSYe Bin goto out;
3236082cd4ecSYe Bin
32378fe11779SBaokun Li /*
32388fe11779SBaokun Li * Updating the path is required because the previous ext4_ext_insert_extent()
32398fe11779SBaokun Li * may have freed or reallocated the path. Using EXT4_EX_NOFAIL
32408fe11779SBaokun Li * guarantees that ext4_find_extent() will not return -ENOMEM,
32418fe11779SBaokun Li * otherwise -ENOMEM will cause a retry in do_writepages(), and a
32428fe11779SBaokun Li * WARN_ON may be triggered in ext4_da_update_reserve_space() due to
32438fe11779SBaokun Li * an incorrect ee_len causing the i_reserved_data_blocks exception.
32448fe11779SBaokun Li */
32458fe11779SBaokun Li path = ext4_find_extent(inode, ee_block, ppath,
32468fe11779SBaokun Li flags | EXT4_EX_NOFAIL);
32478fe11779SBaokun Li if (IS_ERR(path)) {
32488fe11779SBaokun Li EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld",
32498fe11779SBaokun Li split, PTR_ERR(path));
32508fe11779SBaokun Li return PTR_ERR(path);
32518fe11779SBaokun Li }
32528fe11779SBaokun Li depth = ext_depth(inode);
32538fe11779SBaokun Li ex = path[depth].p_ext;
32548fe11779SBaokun Li
3255082cd4ecSYe Bin if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
3256dee1f973SDmitry Monakhov if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3257adb23551SZheng Liu if (split_flag & EXT4_EXT_DATA_VALID1) {
3258dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex2);
3259adb23551SZheng Liu zero_ex.ee_block = ex2->ee_block;
32608cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16(
32618cde7ad1SZheng Liu ext4_ext_get_actual_len(ex2));
3262adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex,
3263adb23551SZheng Liu ext4_ext_pblock(ex2));
3264adb23551SZheng Liu } else {
3265dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex);
3266adb23551SZheng Liu zero_ex.ee_block = ex->ee_block;
32678cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16(
32688cde7ad1SZheng Liu ext4_ext_get_actual_len(ex));
3269adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex,
3270adb23551SZheng Liu ext4_ext_pblock(ex));
3271adb23551SZheng Liu }
3272adb23551SZheng Liu } else {
327347ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex);
3274adb23551SZheng Liu zero_ex.ee_block = orig_ex.ee_block;
32758cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16(
32768cde7ad1SZheng Liu ext4_ext_get_actual_len(&orig_ex));
3277adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex,
3278adb23551SZheng Liu ext4_ext_pblock(&orig_ex));
3279adb23551SZheng Liu }
3280dee1f973SDmitry Monakhov
3281082cd4ecSYe Bin if (!err) {
328247ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */
3283af1584f5SAl Viro ex->ee_len = cpu_to_le16(ee_len);
3284ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex);
3285ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3286082cd4ecSYe Bin if (!err)
3287adb23551SZheng Liu /* update extent status tree */
3288ab8627e1SBaokun Li ext4_zeroout_es(inode, &zero_ex);
3289082cd4ecSYe Bin		/* If we failed at this point, we don't know exactly what
3290082cd4ecSYe Bin		 * state the extent tree is in, so don't try to fix the
3291082cd4ecSYe Bin		 * length of the original extent, as that may do even more
3292082cd4ecSYe Bin		 * damage.
3293082cd4ecSYe Bin */
329447ea3bb5SYongqiang Yang goto out;
3295082cd4ecSYe Bin }
3296082cd4ecSYe Bin }
329747ea3bb5SYongqiang Yang
329847ea3bb5SYongqiang Yang fix_extent_len:
329947ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len;
3300b60ca334SHarshad Shirwadkar /*
3301b60ca334SHarshad Shirwadkar * Ignore ext4_ext_dirty return value since we are already in error path
3302b60ca334SHarshad Shirwadkar * and err is a non-zero error code.
3303b60ca334SHarshad Shirwadkar */
330429faed16SDmitry Monakhov ext4_ext_dirty(handle, inode, path + path->p_depth);
330547ea3bb5SYongqiang Yang return err;
3306082cd4ecSYe Bin out:
33078fe11779SBaokun Li ext4_ext_show_leaf(inode, *ppath);
3308082cd4ecSYe Bin return err;
330947ea3bb5SYongqiang Yang }
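
/*
 * Illustrative sketch, not part of the kernel build: the length
 * arithmetic used by ext4_split_extent_at() above when it cuts an
 * extent [ee_block, ee_block + ee_len) at a logical block 'split'
 * that lies strictly inside it (case a).  The struct and helper are
 * standalone stand-ins, not kernel API.
 */
struct demo_extent {
	unsigned int lblk;	/* first logical block, cf. ee_block */
	unsigned int len;	/* length in blocks, cf. ee_len */
};

static void demo_split_at(const struct demo_extent *ex, unsigned int split,
			  struct demo_extent *left, struct demo_extent *right)
{
	left->lblk = ex->lblk;
	left->len = split - ex->lblk;		/* ex->ee_len above */
	right->lblk = split;			/* ex2->ee_block above */
	right->len = ex->len - left->len;	/* ex2->ee_len above */
}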
331047ea3bb5SYongqiang Yang
331147ea3bb5SYongqiang Yang /*
331234b20963SBaokun Li	 * ext4_split_extent() splits an extent and marks the extent covered
331347ea3bb5SYongqiang Yang	 * by @map as @split_flag indicates
331447ea3bb5SYongqiang Yang *
331547ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (up to three)
331647ea3bb5SYongqiang Yang * There are three possibilities:
331747ea3bb5SYongqiang Yang * a> There is no split required
331847ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the extent
331947ea3bb5SYongqiang Yang	 * c> Splits in three extents: Someone is splitting in the middle of the extent
332047ea3bb5SYongqiang Yang *
332147ea3bb5SYongqiang Yang */
332247ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle,
332347ea3bb5SYongqiang Yang struct inode *inode,
3324dfe50809STheodore Ts'o struct ext4_ext_path **ppath,
332547ea3bb5SYongqiang Yang struct ext4_map_blocks *map,
332647ea3bb5SYongqiang Yang int split_flag,
332747ea3bb5SYongqiang Yang int flags)
332847ea3bb5SYongqiang Yang {
3329dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
333047ea3bb5SYongqiang Yang ext4_lblk_t ee_block;
333147ea3bb5SYongqiang Yang struct ext4_extent *ex;
333247ea3bb5SYongqiang Yang unsigned int ee_len, depth;
333347ea3bb5SYongqiang Yang int err = 0;
3334556615dcSLukas Czerner int unwritten;
333547ea3bb5SYongqiang Yang int split_flag1, flags1;
33363a225670SZheng Liu int allocated = map->m_len;
333747ea3bb5SYongqiang Yang
333847ea3bb5SYongqiang Yang depth = ext_depth(inode);
333947ea3bb5SYongqiang Yang ex = path[depth].p_ext;
334047ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block);
334147ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex);
3342556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex);
334347ea3bb5SYongqiang Yang
334447ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) {
3345dee1f973SDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
334647ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3347556615dcSLukas Czerner if (unwritten)
3348556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
3349556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2;
3350dee1f973SDmitry Monakhov if (split_flag & EXT4_EXT_DATA_VALID2)
3351dee1f973SDmitry Monakhov split_flag1 |= EXT4_EXT_DATA_VALID1;
3352dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath,
335347ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1);
335493917411SYongqiang Yang if (err)
335593917411SYongqiang Yang goto out;
33563a225670SZheng Liu } else {
33573a225670SZheng Liu allocated = ee_len - (map->m_lblk - ee_block);
335847ea3bb5SYongqiang Yang }
3359357b66fdSDmitry Monakhov /*
3360357b66fdSDmitry Monakhov	 * Updating the path is required because the previous ext4_split_extent_at()
3361357b66fdSDmitry Monakhov	 * may have split the original leaf or zeroed out the extent.
3362357b66fdSDmitry Monakhov */
336373c384c0STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
336447ea3bb5SYongqiang Yang if (IS_ERR(path))
336547ea3bb5SYongqiang Yang return PTR_ERR(path);
3366357b66fdSDmitry Monakhov depth = ext_depth(inode);
3367357b66fdSDmitry Monakhov ex = path[depth].p_ext;
3368a18ed359SDmitry Monakhov if (!ex) {
3369a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3370a18ed359SDmitry Monakhov (unsigned long) map->m_lblk);
33716a797d27SDarrick J. Wong return -EFSCORRUPTED;
3372a18ed359SDmitry Monakhov }
3373556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex);
337447ea3bb5SYongqiang Yang
337547ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) {
3376357b66fdSDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3377556615dcSLukas Czerner if (unwritten) {
3378556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
3379357b66fdSDmitry Monakhov split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3380556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2);
3381357b66fdSDmitry Monakhov }
3382dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath,
338347ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags);
338447ea3bb5SYongqiang Yang if (err)
338547ea3bb5SYongqiang Yang goto out;
338647ea3bb5SYongqiang Yang }
338747ea3bb5SYongqiang Yang
338834b20963SBaokun Li ext4_ext_show_leaf(inode, *ppath);
338947ea3bb5SYongqiang Yang out:
33903a225670SZheng Liu return err ? err : allocated;
339147ea3bb5SYongqiang Yang }
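
/*
 * Sketch (assumption, standalone): how the two ext4_split_extent_at()
 * calls above can turn one extent into up to three pieces for a range
 * [m_lblk, m_lblk + m_len) inside [ee_block, ee_block + ee_len): the
 * first call cuts the tail off, the second cuts the head off.
 */
static unsigned int demo_split_pieces(unsigned int ee_block, unsigned int ee_len,
				      unsigned int m_lblk, unsigned int m_len)
{
	unsigned int cuts = 0;

	if (m_lblk + m_len < ee_block + ee_len)
		cuts++;			/* first call: tail split */
	if (m_lblk > ee_block)
		cuts++;			/* second call: head split */
	return cuts + 1;		/* resulting number of extents */
}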
339247ea3bb5SYongqiang Yang
339356055d3aSAmit Arora /*
3394e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write
3395556615dcSLukas Czerner * to an unwritten extent. It may result in splitting the unwritten
339656055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two
3397556615dcSLukas Czerner * unwritten).
339856055d3aSAmit Arora * There are three possibilities:
339956055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized
340056055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent
340156055d3aSAmit Arora	 * c> Splits in three extents: Someone is writing in the middle of the extent
34026f91bc5fSEric Gouriou *
34036f91bc5fSEric Gouriou * Pre-conditions:
3404556615dcSLukas Czerner * - The extent pointed to by 'path' is unwritten.
34056f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset
34066f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
34076f91bc5fSEric Gouriou *
34086f91bc5fSEric Gouriou * Post-conditions on success:
34096f91bc5fSEric Gouriou	 *  - the returned value is the number of blocks beyond map->m_lblk
34106f91bc5fSEric Gouriou * that are allocated and initialized.
34116f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len.
341256055d3aSAmit Arora */
3413725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle,
3414725d26d3SAneesh Kumar K.V struct inode *inode,
3415e35fd660STheodore Ts'o struct ext4_map_blocks *map,
3416dfe50809STheodore Ts'o struct ext4_ext_path **ppath,
341727dd4385SLukas Czerner int flags)
341856055d3aSAmit Arora {
3419dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
342067a5da56SZheng Liu struct ext4_sb_info *sbi;
34216f91bc5fSEric Gouriou struct ext4_extent_header *eh;
3422667eff35SYongqiang Yang struct ext4_map_blocks split_map;
34234f8caa60SJan Kara struct ext4_extent zero_ex1, zero_ex2;
3424bc2d9db4SLukas Czerner struct ext4_extent *ex, *abut_ex;
342521ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block;
3426bc2d9db4SLukas Czerner unsigned int ee_len, depth, map_len = map->m_len;
342756055d3aSAmit Arora int err = 0;
34284f8caa60SJan Kara int split_flag = EXT4_EXT_DATA_VALID2;
3429e9c0aa6cSBaokun Li int allocated = 0;
3430e9c0aa6cSBaokun Li unsigned int max_zeroout = 0;
343121ca087aSDmitry Monakhov
343270aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u\n",
3433bc2d9db4SLukas Czerner (unsigned long long)map->m_lblk, map_len);
343421ca087aSDmitry Monakhov
343567a5da56SZheng Liu sbi = EXT4_SB(inode->i_sb);
3436801674f3SJan Kara eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3437801674f3SJan Kara >> inode->i_sb->s_blocksize_bits;
3438bc2d9db4SLukas Czerner if (eof_block < map->m_lblk + map_len)
3439bc2d9db4SLukas Czerner eof_block = map->m_lblk + map_len;
344056055d3aSAmit Arora
344156055d3aSAmit Arora depth = ext_depth(inode);
34426f91bc5fSEric Gouriou eh = path[depth].p_hdr;
344356055d3aSAmit Arora ex = path[depth].p_ext;
344456055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block);
344556055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex);
34464f8caa60SJan Kara zero_ex1.ee_len = 0;
34474f8caa60SJan Kara zero_ex2.ee_len = 0;
344821ca087aSDmitry Monakhov
34496f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
34506f91bc5fSEric Gouriou
34516f91bc5fSEric Gouriou /* Pre-conditions */
3452556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex));
34536f91bc5fSEric Gouriou BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
34546f91bc5fSEric Gouriou
34556f91bc5fSEric Gouriou /*
34566f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently
3457556615dcSLukas Czerner * unwritten extent to its neighbor. This is much cheaper
34586f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly
3459bc2d9db4SLukas Czerner * memmove() calls. Transferring to the left is the common case in
3460bc2d9db4SLukas Czerner * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3461bc2d9db4SLukas Czerner * followed by append writes.
34626f91bc5fSEric Gouriou *
34636f91bc5fSEric Gouriou * Limitations of the current logic:
3464bc2d9db4SLukas Czerner * - L1: we do not deal with writes covering the whole extent.
34656f91bc5fSEric Gouriou * This would require removing the extent if the transfer
34666f91bc5fSEric Gouriou * is possible.
3467bc2d9db4SLukas Czerner * - L2: we only attempt to merge with an extent stored in the
34686f91bc5fSEric Gouriou * same extent tree node.
34696f91bc5fSEric Gouriou */
3470bc2d9db4SLukas Czerner if ((map->m_lblk == ee_block) &&
3471bc2d9db4SLukas Czerner /* See if we can merge left */
3472bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/
3473bc2d9db4SLukas Czerner (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/
34746f91bc5fSEric Gouriou ext4_lblk_t prev_lblk;
34756f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk;
3476bc2d9db4SLukas Czerner unsigned int prev_len;
34776f91bc5fSEric Gouriou
3478bc2d9db4SLukas Czerner abut_ex = ex - 1;
3479bc2d9db4SLukas Czerner prev_lblk = le32_to_cpu(abut_ex->ee_block);
3480bc2d9db4SLukas Czerner prev_len = ext4_ext_get_actual_len(abut_ex);
3481bc2d9db4SLukas Czerner prev_pblk = ext4_ext_pblock(abut_ex);
34826f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex);
34836f91bc5fSEric Gouriou
34846f91bc5fSEric Gouriou /*
3485bc2d9db4SLukas Czerner * A transfer of blocks from 'ex' to 'abut_ex' is allowed
34866f91bc5fSEric Gouriou * upon those conditions:
3487bc2d9db4SLukas Czerner * - C1: abut_ex is initialized,
3488bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex,
3489bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex,
3490bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without
34916f91bc5fSEric Gouriou * overflowing the (initialized) length limit.
34926f91bc5fSEric Gouriou */
3493556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
34946f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/
34956f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/
3496bc2d9db4SLukas Czerner (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
34976f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth);
34986f91bc5fSEric Gouriou if (err)
34996f91bc5fSEric Gouriou goto out;
35006f91bc5fSEric Gouriou
35016f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode,
3502bc2d9db4SLukas Czerner map, ex, abut_ex);
35036f91bc5fSEric Gouriou
3504bc2d9db4SLukas Czerner /* Shift the start of ex by 'map_len' blocks */
3505bc2d9db4SLukas Czerner ex->ee_block = cpu_to_le32(ee_block + map_len);
3506bc2d9db4SLukas Czerner ext4_ext_store_pblock(ex, ee_pblk + map_len);
3507bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len);
3508556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */
35096f91bc5fSEric Gouriou
3510bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */
3511bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
35126f91bc5fSEric Gouriou
3513bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */
3514bc2d9db4SLukas Czerner allocated = map_len;
3515bc2d9db4SLukas Czerner }
3516bc2d9db4SLukas Czerner } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3517bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/
3518bc2d9db4SLukas Czerner ex < EXT_LAST_EXTENT(eh)) { /*L2*/
3519bc2d9db4SLukas Czerner /* See if we can merge right */
3520bc2d9db4SLukas Czerner ext4_lblk_t next_lblk;
3521bc2d9db4SLukas Czerner ext4_fsblk_t next_pblk, ee_pblk;
3522bc2d9db4SLukas Czerner unsigned int next_len;
3523bc2d9db4SLukas Czerner
3524bc2d9db4SLukas Czerner abut_ex = ex + 1;
3525bc2d9db4SLukas Czerner next_lblk = le32_to_cpu(abut_ex->ee_block);
3526bc2d9db4SLukas Czerner next_len = ext4_ext_get_actual_len(abut_ex);
3527bc2d9db4SLukas Czerner next_pblk = ext4_ext_pblock(abut_ex);
3528bc2d9db4SLukas Czerner ee_pblk = ext4_ext_pblock(ex);
3529bc2d9db4SLukas Czerner
3530bc2d9db4SLukas Czerner /*
3531bc2d9db4SLukas Czerner * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3532bc2d9db4SLukas Czerner * upon those conditions:
3533bc2d9db4SLukas Czerner * - C1: abut_ex is initialized,
3534bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex,
3535bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex,
3536bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without
3537bc2d9db4SLukas Czerner * overflowing the (initialized) length limit.
3538bc2d9db4SLukas Czerner */
3539556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
3540bc2d9db4SLukas Czerner ((map->m_lblk + map_len) == next_lblk) && /*C2*/
3541bc2d9db4SLukas Czerner ((ee_pblk + ee_len) == next_pblk) && /*C3*/
3542bc2d9db4SLukas Czerner (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
3543bc2d9db4SLukas Czerner err = ext4_ext_get_access(handle, inode, path + depth);
3544bc2d9db4SLukas Czerner if (err)
3545bc2d9db4SLukas Czerner goto out;
3546bc2d9db4SLukas Czerner
3547bc2d9db4SLukas Czerner trace_ext4_ext_convert_to_initialized_fastpath(inode,
3548bc2d9db4SLukas Czerner map, ex, abut_ex);
3549bc2d9db4SLukas Czerner
3550bc2d9db4SLukas Czerner /* Shift the start of abut_ex by 'map_len' blocks */
3551bc2d9db4SLukas Czerner abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3552bc2d9db4SLukas Czerner ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3553bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len);
3554556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */
3555bc2d9db4SLukas Czerner
3556bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */
3557bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3558bc2d9db4SLukas Czerner
3559bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */
3560bc2d9db4SLukas Czerner allocated = map_len;
3561bc2d9db4SLukas Czerner }
3562bc2d9db4SLukas Czerner }
3563bc2d9db4SLukas Czerner if (allocated) {
35646f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */
3565b60ca334SHarshad Shirwadkar err = ext4_ext_dirty(handle, inode, path + depth);
35666f91bc5fSEric Gouriou
35676f91bc5fSEric Gouriou /* Update path to point to the right extent */
3568bc2d9db4SLukas Czerner path[depth].p_ext = abut_ex;
35696f91bc5fSEric Gouriou goto out;
3570bc2d9db4SLukas Czerner } else
3571bc2d9db4SLukas Czerner allocated = ee_len - (map->m_lblk - ee_block);
35726f91bc5fSEric Gouriou
3573667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block);
357421ca087aSDmitry Monakhov /*
357521ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit
35769e740568SYongqiang Yang * zeroout only if extent is fully inside i_size or new_size.
357721ca087aSDmitry Monakhov */
3578667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
357921ca087aSDmitry Monakhov
358067a5da56SZheng Liu if (EXT4_EXT_MAY_ZEROOUT & split_flag)
358167a5da56SZheng Liu max_zeroout = sbi->s_extent_max_zeroout_kb >>
35824f42f80aSLukas Czerner (inode->i_sb->s_blocksize_bits - 10);
358367a5da56SZheng Liu
3584667eff35SYongqiang Yang /*
35854f8caa60SJan Kara * five cases:
3586667eff35SYongqiang Yang * 1. split the extent into three extents.
35874f8caa60SJan Kara * 2. split the extent into two extents, zeroout the head of the first
35884f8caa60SJan Kara * extent.
35894f8caa60SJan Kara * 3. split the extent into two extents, zeroout the tail of the second
35904f8caa60SJan Kara * extent.
3591667eff35SYongqiang Yang	 * 4. split the extent into two extents without zeroout.
35924f8caa60SJan Kara	 * 5. no splitting needed, just possibly zeroout the head and/or the
35934f8caa60SJan Kara * tail of the extent.
3594667eff35SYongqiang Yang */
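	/*
	 * Worked example (numbers are illustrative assumptions):
	 * ee_block = 0, ee_len = 100, map = [30, 40), max_zeroout = 128.
	 * allocated = 70, so case 3 zeroes out blocks [40, 100) and
	 * case 2 then zeroes out blocks [0, 30); split_map grows to the
	 * whole extent [0, 100), so no split is needed (case 5) and the
	 * extent is simply marked initialized.
	 */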
3595667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk;
3596667eff35SYongqiang Yang split_map.m_len = map->m_len;
3597667eff35SYongqiang Yang
35984f8caa60SJan Kara if (max_zeroout && (allocated > split_map.m_len)) {
359967a5da56SZheng Liu if (allocated <= max_zeroout) {
36004f8caa60SJan Kara /* case 3 or 5 */
36014f8caa60SJan Kara zero_ex1.ee_block =
36024f8caa60SJan Kara cpu_to_le32(split_map.m_lblk +
36034f8caa60SJan Kara split_map.m_len);
36044f8caa60SJan Kara zero_ex1.ee_len =
36054f8caa60SJan Kara cpu_to_le16(allocated - split_map.m_len);
36064f8caa60SJan Kara ext4_ext_store_pblock(&zero_ex1,
36074f8caa60SJan Kara ext4_ext_pblock(ex) + split_map.m_lblk +
36084f8caa60SJan Kara split_map.m_len - ee_block);
36094f8caa60SJan Kara err = ext4_ext_zeroout(inode, &zero_ex1);
3610667eff35SYongqiang Yang if (err)
3611308c57ccSTheodore Ts'o goto fallback;
3612667eff35SYongqiang Yang split_map.m_len = allocated;
36134f8caa60SJan Kara }
36144f8caa60SJan Kara if (split_map.m_lblk - ee_block + split_map.m_len <
36154f8caa60SJan Kara max_zeroout) {
36164f8caa60SJan Kara /* case 2 or 5 */
36174f8caa60SJan Kara if (split_map.m_lblk != ee_block) {
36184f8caa60SJan Kara zero_ex2.ee_block = ex->ee_block;
36194f8caa60SJan Kara zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3620667eff35SYongqiang Yang ee_block);
36214f8caa60SJan Kara ext4_ext_store_pblock(&zero_ex2,
3622667eff35SYongqiang Yang ext4_ext_pblock(ex));
36234f8caa60SJan Kara err = ext4_ext_zeroout(inode, &zero_ex2);
3624667eff35SYongqiang Yang if (err)
3625308c57ccSTheodore Ts'o goto fallback;
3626667eff35SYongqiang Yang }
3627667eff35SYongqiang Yang
36284f8caa60SJan Kara split_map.m_len += split_map.m_lblk - ee_block;
3629667eff35SYongqiang Yang split_map.m_lblk = ee_block;
36309b940f8eSAllison Henderson allocated = map->m_len;
3631667eff35SYongqiang Yang }
3632667eff35SYongqiang Yang }
3633667eff35SYongqiang Yang
3634308c57ccSTheodore Ts'o fallback:
3635ae9e9c6aSJan Kara err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3636ae9e9c6aSJan Kara flags);
3637ae9e9c6aSJan Kara if (err > 0)
3638ae9e9c6aSJan Kara err = 0;
3639667eff35SYongqiang Yang out:
3640adb23551SZheng Liu /* If we have gotten a failure, don't zero out status tree */
36414f8caa60SJan Kara if (!err) {
3642ab8627e1SBaokun Li ext4_zeroout_es(inode, &zero_ex1);
3643ab8627e1SBaokun Li ext4_zeroout_es(inode, &zero_ex2);
36444f8caa60SJan Kara }
3645667eff35SYongqiang Yang return err ? err : allocated;
364656055d3aSAmit Arora }
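
/*
 * Sketch (assumption, standalone): the C1-C4 test above that decides
 * whether the freshly written head of an unwritten extent may be
 * transferred to the initialized extent on its left.  The struct and
 * the constant are demo stand-ins (the constant mirrors
 * EXT_INIT_MAX_LEN).
 */
#define DEMO_INIT_MAX_LEN	32768

struct demo_ext {
	unsigned int lblk;		/* logical start */
	unsigned int len;		/* length in blocks */
	unsigned long long pblk;	/* physical start */
	int unwritten;			/* unwritten flag */
};

static int demo_can_merge_left(const struct demo_ext *prev,
			       const struct demo_ext *ex, unsigned int map_len)
{
	return !prev->unwritten &&				/* C1 */
	       prev->lblk + prev->len == ex->lblk &&		/* C2 */
	       prev->pblk + prev->len == ex->pblk &&		/* C3 */
	       prev->len < DEMO_INIT_MAX_LEN - map_len;		/* C4 */
}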
364756055d3aSAmit Arora
3648c278bfecSAneesh Kumar K.V /*
3649e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() from
36500031462bSMingming Cao	 * ext4_get_blocks_dio_write() when DIO is used to write
3651556615dcSLukas Czerner * to an unwritten extent.
36520031462bSMingming Cao *
3653556615dcSLukas Czerner * Writing to an unwritten extent may result in splitting the unwritten
3654556615dcSLukas Czerner * extent into multiple initialized/unwritten extents (up to three)
36550031462bSMingming Cao * There are three possibilities:
3656556615dcSLukas Czerner * a> There is no split required: Entire extent should be unwritten
36570031462bSMingming Cao * b> Splits in two extents: Write is happening at either end of the extent
36580031462bSMingming Cao	 * c> Splits in three extents: Someone is writing in the middle of the extent
36590031462bSMingming Cao *
3660b8a86845SLukas Czerner * This works the same way in the case of initialized -> unwritten conversion.
3661b8a86845SLukas Czerner *
36620031462bSMingming Cao	 * One or more index blocks may be needed if the extent tree grows after
3663556615dcSLukas Czerner	 * the unwritten extent split. To prevent ENOSPC from occurring at IO
3664556615dcSLukas Czerner	 * completion, we need to split the unwritten extent before the DIO is
3665556615dcSLukas Czerner	 * submitted. The unwritten extent passed in will be split into at most
3666556615dcSLukas Czerner	 * three unwritten extents. After the IO completes, the part being
36670031462bSMingming Cao	 * filled will be converted to initialized by the end_io callback
36680031462bSMingming Cao	 * via ext4_convert_unwritten_extents().
3669ba230c3fSMingming *
3670556615dcSLukas Czerner	 * Returns the size of the unwritten extent to be written on success.
36710031462bSMingming Cao */
3672b8a86845SLukas Czerner static int ext4_split_convert_extents(handle_t *handle,
36730031462bSMingming Cao struct inode *inode,
3674e35fd660STheodore Ts'o struct ext4_map_blocks *map,
3675dfe50809STheodore Ts'o struct ext4_ext_path **ppath,
36760031462bSMingming Cao int flags)
36770031462bSMingming Cao {
3678dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
3679667eff35SYongqiang Yang ext4_lblk_t eof_block;
3680667eff35SYongqiang Yang ext4_lblk_t ee_block;
3681667eff35SYongqiang Yang struct ext4_extent *ex;
3682667eff35SYongqiang Yang unsigned int ee_len;
3683667eff35SYongqiang Yang int split_flag = 0, depth;
36840031462bSMingming Cao
368570aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u\n",
3686e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len);
368721ca087aSDmitry Monakhov
3688801674f3SJan Kara eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
3689801674f3SJan Kara >> inode->i_sb->s_blocksize_bits;
3690e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len)
3691e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len;
36920031462bSMingming Cao /*
369321ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit
3694e4d7f2d3SKeyur Patel * zeroout only if extent is fully inside i_size or new_size.
369521ca087aSDmitry Monakhov */
3696667eff35SYongqiang Yang depth = ext_depth(inode);
36970031462bSMingming Cao ex = path[depth].p_ext;
3698667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block);
3699667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex);
37000031462bSMingming Cao
3701b8a86845SLukas Czerner /* Convert to unwritten */
3702b8a86845SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3703b8a86845SLukas Czerner split_flag |= EXT4_EXT_DATA_VALID1;
3704b8a86845SLukas Czerner /* Convert to initialized */
3705b8a86845SLukas Czerner } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3706b8a86845SLukas Czerner split_flag |= ee_block + ee_len <= eof_block ?
3707b8a86845SLukas Czerner EXT4_EXT_MAY_ZEROOUT : 0;
3708556615dcSLukas Czerner split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3709b8a86845SLukas Czerner }
3710667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO;
3711dfe50809STheodore Ts'o return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
37120031462bSMingming Cao }
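
/*
 * Sketch (assumption, standalone): the split_flag selection done in
 * ext4_split_convert_extents() above.  The flag values are demo
 * placeholders mirroring the EXT4_EXT_* split flags.
 */
#define DEMO_DATA_VALID1	0x1
#define DEMO_MAY_ZEROOUT	0x2
#define DEMO_MARK_UNWRIT2	0x4
#define DEMO_DATA_VALID2	0x8

static int demo_convert_split_flags(int to_unwritten, int inside_eof)
{
	if (to_unwritten)	/* initialized -> unwritten conversion */
		return DEMO_DATA_VALID1;
	/* unwritten -> initialized conversion */
	return (inside_eof ? DEMO_MAY_ZEROOUT : 0) |
	       DEMO_MARK_UNWRIT2 | DEMO_DATA_VALID2;
}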
3713197217a5SYongqiang Yang
3714c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle,
37150031462bSMingming Cao struct inode *inode,
3716dee1f973SDmitry Monakhov struct ext4_map_blocks *map,
3717dfe50809STheodore Ts'o struct ext4_ext_path **ppath)
37180031462bSMingming Cao {
3719dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
37200031462bSMingming Cao struct ext4_extent *ex;
3721dee1f973SDmitry Monakhov ext4_lblk_t ee_block;
3722dee1f973SDmitry Monakhov unsigned int ee_len;
37230031462bSMingming Cao int depth;
37240031462bSMingming Cao int err = 0;
37250031462bSMingming Cao
37260031462bSMingming Cao depth = ext_depth(inode);
37270031462bSMingming Cao ex = path[depth].p_ext;
3728dee1f973SDmitry Monakhov ee_block = le32_to_cpu(ex->ee_block);
3729dee1f973SDmitry Monakhov ee_len = ext4_ext_get_actual_len(ex);
37300031462bSMingming Cao
373170aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u\n",
3732dee1f973SDmitry Monakhov (unsigned long long)ee_block, ee_len);
3733dee1f973SDmitry Monakhov
3734ff95ec22SDmitry Monakhov	/* If the extent is larger than requested, it is a clear sign that we
3735ff95ec22SDmitry Monakhov	 * still have some extent state machine issues left, so an extent
3736ff95ec22SDmitry Monakhov	 * split is still required.
3737ff95ec22SDmitry Monakhov	 * TODO: once all related issues are fixed, this situation should be
3738ff95ec22SDmitry Monakhov	 * treated as illegal.
3739ff95ec22SDmitry Monakhov */
3740dee1f973SDmitry Monakhov if (ee_block != map->m_lblk || ee_len > map->m_len) {
3741e3d550c2SRakesh Pandit #ifdef CONFIG_EXT4_DEBUG
3742e3d550c2SRakesh Pandit ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
37438d2ae1cbSJakub Wilk " len %u; IO logical block %llu, len %u",
3744ff95ec22SDmitry Monakhov inode->i_ino, (unsigned long long)ee_block, ee_len,
3745ff95ec22SDmitry Monakhov (unsigned long long)map->m_lblk, map->m_len);
3746ff95ec22SDmitry Monakhov #endif
3747dfe50809STheodore Ts'o err = ext4_split_convert_extents(handle, inode, map, ppath,
3748dee1f973SDmitry Monakhov EXT4_GET_BLOCKS_CONVERT);
3749dee1f973SDmitry Monakhov if (err < 0)
3750dfe50809STheodore Ts'o return err;
3751ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3752dfe50809STheodore Ts'o if (IS_ERR(path))
3753dfe50809STheodore Ts'o return PTR_ERR(path);
3754dee1f973SDmitry Monakhov depth = ext_depth(inode);
3755dee1f973SDmitry Monakhov ex = path[depth].p_ext;
3756dee1f973SDmitry Monakhov }
3757197217a5SYongqiang Yang
37580031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth);
37590031462bSMingming Cao if (err)
37600031462bSMingming Cao goto out;
37610031462bSMingming Cao /* first mark the extent as initialized */
37620031462bSMingming Cao ext4_ext_mark_initialized(ex);
37630031462bSMingming Cao
3764197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because
3765197217a5SYongqiang Yang * borders are not changed
37660031462bSMingming Cao */
3767ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex);
3768197217a5SYongqiang Yang
37690031462bSMingming Cao /* Mark modified extent as dirty */
3770ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth);
37710031462bSMingming Cao out:
37720031462bSMingming Cao ext4_ext_show_leaf(inode, path);
37730031462bSMingming Cao return err;
37740031462bSMingming Cao }
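
/*
 * Sketch (assumption, standalone): the check above that forces an
 * extra split before an end_io conversion - the found extent must
 * start exactly at the IO range and must not extend past it.
 */
static int demo_needs_presplit(unsigned int ee_block, unsigned int ee_len,
			       unsigned int m_lblk, unsigned int m_len)
{
	return ee_block != m_lblk || ee_len > m_len;
}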
37750031462bSMingming Cao
37760031462bSMingming Cao static int
3777e8b83d93STheodore Ts'o convert_initialized_extent(handle_t *handle, struct inode *inode,
3778b8a86845SLukas Czerner struct ext4_map_blocks *map,
377929c6eaffSEric Whitney struct ext4_ext_path **ppath,
3780f064a9d6SEric Whitney unsigned int *allocated)
3781b8a86845SLukas Czerner {
37824f224b8bSTheodore Ts'o struct ext4_ext_path *path = *ppath;
3783e8b83d93STheodore Ts'o struct ext4_extent *ex;
3784e8b83d93STheodore Ts'o ext4_lblk_t ee_block;
3785e8b83d93STheodore Ts'o unsigned int ee_len;
3786e8b83d93STheodore Ts'o int depth;
3787b8a86845SLukas Czerner int err = 0;
3788b8a86845SLukas Czerner
3789b8a86845SLukas Czerner /*
3790b8a86845SLukas Czerner * Make sure that the extent is no bigger than we support with
3791556615dcSLukas Czerner	 * unwritten extents
3792b8a86845SLukas Czerner */
3793556615dcSLukas Czerner if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3794556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3795b8a86845SLukas Czerner
3796e8b83d93STheodore Ts'o depth = ext_depth(inode);
3797e8b83d93STheodore Ts'o ex = path[depth].p_ext;
3798e8b83d93STheodore Ts'o ee_block = le32_to_cpu(ex->ee_block);
3799e8b83d93STheodore Ts'o ee_len = ext4_ext_get_actual_len(ex);
3800e8b83d93STheodore Ts'o
380170aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u\n",
3802e8b83d93STheodore Ts'o (unsigned long long)ee_block, ee_len);
3803e8b83d93STheodore Ts'o
3804e8b83d93STheodore Ts'o if (ee_block != map->m_lblk || ee_len > map->m_len) {
3805dfe50809STheodore Ts'o err = ext4_split_convert_extents(handle, inode, map, ppath,
3806e8b83d93STheodore Ts'o EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3807e8b83d93STheodore Ts'o if (err < 0)
3808e8b83d93STheodore Ts'o return err;
3809ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3810e8b83d93STheodore Ts'o if (IS_ERR(path))
3811e8b83d93STheodore Ts'o return PTR_ERR(path);
3812e8b83d93STheodore Ts'o depth = ext_depth(inode);
3813e8b83d93STheodore Ts'o ex = path[depth].p_ext;
3814e8b83d93STheodore Ts'o if (!ex) {
3815e8b83d93STheodore Ts'o EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3816e8b83d93STheodore Ts'o (unsigned long) map->m_lblk);
38176a797d27SDarrick J. Wong return -EFSCORRUPTED;
3818e8b83d93STheodore Ts'o }
3819e8b83d93STheodore Ts'o }
3820e8b83d93STheodore Ts'o
3821e8b83d93STheodore Ts'o err = ext4_ext_get_access(handle, inode, path + depth);
3822e8b83d93STheodore Ts'o if (err)
3823e8b83d93STheodore Ts'o return err;
3824e8b83d93STheodore Ts'o /* first mark the extent as unwritten */
3825e8b83d93STheodore Ts'o ext4_ext_mark_unwritten(ex);
3826e8b83d93STheodore Ts'o
3827e8b83d93STheodore Ts'o /* note: ext4_ext_correct_indexes() isn't needed here because
3828e8b83d93STheodore Ts'o * borders are not changed
3829e8b83d93STheodore Ts'o */
3830e8b83d93STheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex);
3831e8b83d93STheodore Ts'o
3832e8b83d93STheodore Ts'o /* Mark modified extent as dirty */
3833e8b83d93STheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3834e8b83d93STheodore Ts'o if (err)
3835e8b83d93STheodore Ts'o return err;
3836e8b83d93STheodore Ts'o ext4_ext_show_leaf(inode, path);
3837e8b83d93STheodore Ts'o
3838b8a86845SLukas Czerner ext4_update_inode_fsync_trans(handle, inode, 1);
38394337ecd1SEric Whitney
3840b8a86845SLukas Czerner map->m_flags |= EXT4_MAP_UNWRITTEN;
3841f064a9d6SEric Whitney if (*allocated > map->m_len)
3842f064a9d6SEric Whitney *allocated = map->m_len;
3843f064a9d6SEric Whitney map->m_len = *allocated;
3844f064a9d6SEric Whitney return 0;
3845b8a86845SLukas Czerner }
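
/*
 * Sketch (assumption, standalone): the length cap applied at the top
 * of convert_initialized_extent() above.  The constant stands in for
 * EXT_UNWRITTEN_MAX_LEN.
 */
#define DEMO_UNWRIT_MAX_LEN	32767

static unsigned int demo_cap_convert_len(unsigned int m_len)
{
	return m_len > DEMO_UNWRIT_MAX_LEN ? DEMO_UNWRIT_MAX_LEN / 2 : m_len;
}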
3846b8a86845SLukas Czerner
3847b8a86845SLukas Czerner static int
3848556615dcSLukas Czerner ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3849e35fd660STheodore Ts'o struct ext4_map_blocks *map,
3850dfe50809STheodore Ts'o struct ext4_ext_path **ppath, int flags,
3851e35fd660STheodore Ts'o unsigned int allocated, ext4_fsblk_t newblock)
38520031462bSMingming Cao {
38530031462bSMingming Cao int ret = 0;
38540031462bSMingming Cao int err = 0;
38550031462bSMingming Cao
385670aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
385770aa1554SRitesh Harjani (unsigned long long)map->m_lblk, map->m_len, flags,
385870aa1554SRitesh Harjani allocated);
385934b20963SBaokun Li ext4_ext_show_leaf(inode, *ppath);
38600031462bSMingming Cao
386127dd4385SLukas Czerner /*
3862556615dcSLukas Czerner * When writing into unwritten space, we should not fail to
386327dd4385SLukas Czerner * allocate metadata blocks for the new extent block if needed.
386427dd4385SLukas Czerner */
386527dd4385SLukas Czerner flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
386627dd4385SLukas Czerner
3867556615dcSLukas Czerner trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
3868b5645534SZheng Liu allocated, newblock);
3869d8990240SAditya Kali
3870779e2651SEric Whitney /* get_block() before submitting IO, split the extent */
3871c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_PRE_IO) {
3872dfe50809STheodore Ts'o ret = ext4_split_convert_extents(handle, inode, map, ppath,
3873dfe50809STheodore Ts'o flags | EXT4_GET_BLOCKS_CONVERT);
3874779e2651SEric Whitney if (ret < 0) {
3875779e2651SEric Whitney err = ret;
3876779e2651SEric Whitney goto out2;
3877779e2651SEric Whitney }
3878779e2651SEric Whitney /*
3879779e2651SEric Whitney * shouldn't get a 0 return when splitting an extent unless
3880779e2651SEric Whitney * m_len is 0 (bug) or extent has been corrupted
3881779e2651SEric Whitney */
3882779e2651SEric Whitney if (unlikely(ret == 0)) {
3883779e2651SEric Whitney EXT4_ERROR_INODE(inode,
3884779e2651SEric Whitney "unexpected ret == 0, m_len = %u",
3885779e2651SEric Whitney map->m_len);
3886779e2651SEric Whitney err = -EFSCORRUPTED;
3887779e2651SEric Whitney goto out2;
3888779e2651SEric Whitney }
3889a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN;
38900031462bSMingming Cao goto out;
38910031462bSMingming Cao }
3892c7064ef1SJiaying Zhang /* IO end_io complete, convert the filled extent to written */
3893c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT) {
3894bee6cf00SEric Whitney err = ext4_convert_unwritten_extents_endio(handle, inode, map,
3895dfe50809STheodore Ts'o ppath);
3896bee6cf00SEric Whitney if (err < 0)
38970031462bSMingming Cao goto out2;
3898bee6cf00SEric Whitney ext4_update_inode_fsync_trans(handle, inode, 1);
3899bee6cf00SEric Whitney goto map_out;
39000031462bSMingming Cao }
3901bee6cf00SEric Whitney /* buffered IO cases */
39020031462bSMingming Cao /*
39030031462bSMingming Cao	 * repeated fallocate creation request;
39040031462bSMingming Cao * we already have an unwritten extent
39050031462bSMingming Cao */
3906556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
3907a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN;
39080031462bSMingming Cao goto map_out;
3909a25a4e1aSZheng Liu }
39100031462bSMingming Cao
39110031462bSMingming Cao /* buffered READ or buffered write_begin() lookup */
39120031462bSMingming Cao if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
39130031462bSMingming Cao /*
39140031462bSMingming Cao * We have blocks reserved already. We
39150031462bSMingming Cao * return allocated blocks so that delalloc
39160031462bSMingming Cao * won't do block reservation for us. But
39170031462bSMingming Cao * the buffer head will be unmapped so that
39180031462bSMingming Cao * a read from the block returns 0s.
39190031462bSMingming Cao */
3920e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNWRITTEN;
39210031462bSMingming Cao goto out1;
39220031462bSMingming Cao }
39230031462bSMingming Cao
3924be809e12SEric Whitney /*
3925be809e12SEric Whitney * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
3926be809e12SEric Whitney * For buffered writes, at writepage time, etc. Convert a
3927be809e12SEric Whitney * discovered unwritten extent to written.
3928be809e12SEric Whitney */
3929dfe50809STheodore Ts'o ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
3930be809e12SEric Whitney if (ret < 0) {
39310031462bSMingming Cao err = ret;
39320031462bSMingming Cao goto out2;
3933779e2651SEric Whitney }
3934be809e12SEric Whitney ext4_update_inode_fsync_trans(handle, inode, 1);
3935be809e12SEric Whitney /*
3936be809e12SEric Whitney * shouldn't get a 0 return when converting an unwritten extent
3937be809e12SEric Whitney * unless m_len is 0 (bug) or extent has been corrupted
3938be809e12SEric Whitney */
3939be809e12SEric Whitney if (unlikely(ret == 0)) {
3940be809e12SEric Whitney EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
3941be809e12SEric Whitney map->m_len);
3942be809e12SEric Whitney err = -EFSCORRUPTED;
3943be809e12SEric Whitney goto out2;
3944be809e12SEric Whitney }
3945be809e12SEric Whitney
3946779e2651SEric Whitney out:
39470031462bSMingming Cao allocated = ret;
3948e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW;
39490031462bSMingming Cao map_out:
3950e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED;
39510031462bSMingming Cao out1:
3952bee6cf00SEric Whitney map->m_pblk = newblock;
3953e35fd660STheodore Ts'o if (allocated > map->m_len)
3954e35fd660STheodore Ts'o allocated = map->m_len;
3955e35fd660STheodore Ts'o map->m_len = allocated;
395634b20963SBaokun Li ext4_ext_show_leaf(inode, *ppath);
39570031462bSMingming Cao out2:
39580031462bSMingming Cao return err ? err : allocated;
39590031462bSMingming Cao }
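
/*
 * Sketch (assumption, standalone): the flag dispatch implemented by
 * ext4_ext_handle_unwritten_extents() above, reduced to a decision
 * helper.  The enum names are demo stand-ins.
 */
enum demo_action {
	DEMO_SPLIT_PRE_IO,	/* split before DIO submission */
	DEMO_CONVERT_WRITTEN,	/* end_io: mark filled part written */
	DEMO_KEEP_UNWRIT,	/* repeated fallocate: nothing to do */
	DEMO_LOOKUP_ONLY,	/* buffered read / write_begin lookup */
	DEMO_CONVERT_INIT,	/* default: convert to initialized */
};

static enum demo_action demo_unwritten_action(int pre_io, int convert,
					      int unwrit_ext, int create)
{
	if (pre_io)
		return DEMO_SPLIT_PRE_IO;
	if (convert)
		return DEMO_CONVERT_WRITTEN;
	if (unwrit_ext)
		return DEMO_KEEP_UNWRIT;
	if (!create)
		return DEMO_LOOKUP_ONLY;
	return DEMO_CONVERT_INIT;
}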
396058590b06STheodore Ts'o
39610031462bSMingming Cao /*
39624d33b1efSTheodore Ts'o * get_implied_cluster_alloc - check to see if the requested
39634d33b1efSTheodore Ts'o * allocation (in the map structure) overlaps with a cluster already
39644d33b1efSTheodore Ts'o * allocated in an extent.
3965d8990240SAditya Kali * @sb The filesystem superblock structure
39664d33b1efSTheodore Ts'o * @map The requested lblk->pblk mapping
39674d33b1efSTheodore Ts'o * @ex The extent structure which might contain an implied
39684d33b1efSTheodore Ts'o * cluster allocation
39694d33b1efSTheodore Ts'o *
39704d33b1efSTheodore Ts'o * This function is called by ext4_ext_map_blocks() after we failed to
39714d33b1efSTheodore Ts'o * find blocks that were already in the inode's extent tree. Hence,
39724d33b1efSTheodore Ts'o * we know that the beginning of the requested region cannot overlap
39734d33b1efSTheodore Ts'o * the extent from the inode's extent tree. There are three cases we
39744d33b1efSTheodore Ts'o * want to catch. The first is this case:
39754d33b1efSTheodore Ts'o *
39764d33b1efSTheodore Ts'o * |--- cluster # N--|
39774d33b1efSTheodore Ts'o * |--- extent ---| |---- requested region ---|
39784d33b1efSTheodore Ts'o * |==========|
39794d33b1efSTheodore Ts'o *
39804d33b1efSTheodore Ts'o * The second case that we need to test for is this one:
39814d33b1efSTheodore Ts'o *
39824d33b1efSTheodore Ts'o * |--------- cluster # N ----------------|
39834d33b1efSTheodore Ts'o * |--- requested region --| |------- extent ----|
39844d33b1efSTheodore Ts'o * |=======================|
39854d33b1efSTheodore Ts'o *
39864d33b1efSTheodore Ts'o * The third case is when the requested region lies between two extents
39874d33b1efSTheodore Ts'o * within the same cluster:
39884d33b1efSTheodore Ts'o * |------------- cluster # N-------------|
39894d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----|
39904d33b1efSTheodore Ts'o * |------ requested region ------|
39914d33b1efSTheodore Ts'o * |================|
39924d33b1efSTheodore Ts'o *
39934d33b1efSTheodore Ts'o	 * In each of the above cases, we need to set map->m_pblk and
39944d33b1efSTheodore Ts'o	 * map->m_len so they correspond to the extent labelled as
39954d33b1efSTheodore Ts'o	 * "|====|" from cluster #N, since it is already in use for data in
39964d33b1efSTheodore Ts'o * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
39974d33b1efSTheodore Ts'o * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
39984d33b1efSTheodore Ts'o * as a new "allocated" block region. Otherwise, we will return 0 and
39994d33b1efSTheodore Ts'o * ext4_ext_map_blocks() will then allocate one or more new clusters
40004d33b1efSTheodore Ts'o * by calling ext4_mb_new_blocks().
40014d33b1efSTheodore Ts'o */
4002d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb,
40034d33b1efSTheodore Ts'o struct ext4_map_blocks *map,
40044d33b1efSTheodore Ts'o struct ext4_extent *ex,
40054d33b1efSTheodore Ts'o struct ext4_ext_path *path)
40064d33b1efSTheodore Ts'o {
4007d8990240SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(sb);
4008f5a44db5STheodore Ts'o ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
40094d33b1efSTheodore Ts'o ext4_lblk_t ex_cluster_start, ex_cluster_end;
401014d7f3efSCurt Wohlgemuth ext4_lblk_t rr_cluster_start;
40114d33b1efSTheodore Ts'o ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
40124d33b1efSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
40134d33b1efSTheodore Ts'o unsigned short ee_len = ext4_ext_get_actual_len(ex);
40144d33b1efSTheodore Ts'o
40154d33b1efSTheodore Ts'o /* The extent passed in that we are trying to match */
40164d33b1efSTheodore Ts'o ex_cluster_start = EXT4_B2C(sbi, ee_block);
40174d33b1efSTheodore Ts'o ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
40184d33b1efSTheodore Ts'o
40194d33b1efSTheodore Ts'o /* The requested region passed into ext4_map_blocks() */
40204d33b1efSTheodore Ts'o rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
40214d33b1efSTheodore Ts'o
40224d33b1efSTheodore Ts'o if ((rr_cluster_start == ex_cluster_end) ||
40234d33b1efSTheodore Ts'o (rr_cluster_start == ex_cluster_start)) {
40244d33b1efSTheodore Ts'o if (rr_cluster_start == ex_cluster_end)
40254d33b1efSTheodore Ts'o ee_start += ee_len - 1;
4026f5a44db5STheodore Ts'o map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
40274d33b1efSTheodore Ts'o map->m_len = min(map->m_len,
40284d33b1efSTheodore Ts'o (unsigned) sbi->s_cluster_ratio - c_offset);
40294d33b1efSTheodore Ts'o /*
40304d33b1efSTheodore Ts'o * Check for and handle this case:
40314d33b1efSTheodore Ts'o *
40324d33b1efSTheodore Ts'o * |--------- cluster # N-------------|
40334d33b1efSTheodore Ts'o * |------- extent ----|
40344d33b1efSTheodore Ts'o * |--- requested region ---|
40354d33b1efSTheodore Ts'o * |===========|
40364d33b1efSTheodore Ts'o */
40374d33b1efSTheodore Ts'o
40384d33b1efSTheodore Ts'o if (map->m_lblk < ee_block)
40394d33b1efSTheodore Ts'o map->m_len = min(map->m_len, ee_block - map->m_lblk);
40404d33b1efSTheodore Ts'o
40414d33b1efSTheodore Ts'o /*
40424d33b1efSTheodore Ts'o * Check for the case where there is already another allocated
40434d33b1efSTheodore Ts'o * block to the right of 'ex' but before the end of the cluster.
40444d33b1efSTheodore Ts'o *
40454d33b1efSTheodore Ts'o * |------------- cluster # N-------------|
40464d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----|
40474d33b1efSTheodore Ts'o * |------ requested region ------|
40484d33b1efSTheodore Ts'o * |================|
40494d33b1efSTheodore Ts'o */
40504d33b1efSTheodore Ts'o if (map->m_lblk > ee_block) {
40514d33b1efSTheodore Ts'o ext4_lblk_t next = ext4_ext_next_allocated_block(path);
40524d33b1efSTheodore Ts'o map->m_len = min(map->m_len, next - map->m_lblk);
40534d33b1efSTheodore Ts'o }
4054d8990240SAditya Kali
4055d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
40564d33b1efSTheodore Ts'o return 1;
40574d33b1efSTheodore Ts'o }
4058d8990240SAditya Kali
4059d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
40604d33b1efSTheodore Ts'o return 0;
40614d33b1efSTheodore Ts'o }
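
/*
 * Sketch (assumption, standalone): the bigalloc arithmetic used by
 * get_implied_cluster_alloc() above, with a demo ratio of 16 blocks
 * per cluster.  DEMO_B2C() mirrors EXT4_B2C() and DEMO_LBLK_COFF()
 * mirrors EXT4_LBLK_COFF() for a power-of-two cluster ratio.
 */
#define DEMO_CLUSTER_BITS	4
#define DEMO_B2C(b)		((b) >> DEMO_CLUSTER_BITS)
#define DEMO_LBLK_COFF(b)	((b) & ((1U << DEMO_CLUSTER_BITS) - 1))

static int demo_shares_cluster(unsigned int rr_lblk, unsigned int ee_block,
			       unsigned int ee_len)
{
	unsigned int rr_cluster = DEMO_B2C(rr_lblk);

	/* cases 1 and 2 above: the request shares a cluster with 'ex' */
	return rr_cluster == DEMO_B2C(ee_block) ||
	       rr_cluster == DEMO_B2C(ee_block + ee_len - 1);
}

static unsigned long long demo_implied_pblk(unsigned long long cluster_pblk,
					    unsigned int rr_lblk)
{
	/*
	 * cluster_pblk is the cluster-aligned physical base (what
	 * EXT4_PBLK_CMASK() computes above); keep the same offset
	 * within the cluster for the logical and the physical block.
	 */
	return cluster_pblk + DEMO_LBLK_COFF(rr_lblk);
}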
40624d33b1efSTheodore Ts'o
4063f5411b76SZhang Yi /*
4064f5411b76SZhang Yi	 * Determine the hole length around the given logical block: first try
4065f5411b76SZhang Yi	 * to locate and expand the hole from the given @path, then adjust it
4066f5411b76SZhang Yi	 * if it has been partially or completely converted to delayed extents,
4067f5411b76SZhang Yi	 * insert it into the extent status tree if it is indeed a hole, and
4068f5411b76SZhang Yi	 * finally return the length of the determined extent.
4069f5411b76SZhang Yi */
4070f5411b76SZhang Yi static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode,
4071f5411b76SZhang Yi struct ext4_ext_path *path,
4072f5411b76SZhang Yi ext4_lblk_t lblk)
4073f5411b76SZhang Yi {
4074f5411b76SZhang Yi ext4_lblk_t hole_start, len;
4075f5411b76SZhang Yi struct extent_status es;
4076f5411b76SZhang Yi
4077f5411b76SZhang Yi hole_start = lblk;
4078f5411b76SZhang Yi len = ext4_ext_find_hole(inode, path, &hole_start);
4079f5411b76SZhang Yi again:
4080f5411b76SZhang Yi ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start,
4081f5411b76SZhang Yi hole_start + len - 1, &es);
4082f5411b76SZhang Yi if (!es.es_len)
4083f5411b76SZhang Yi goto insert_hole;
4084f5411b76SZhang Yi
4085f5411b76SZhang Yi /*
4086f5411b76SZhang Yi	 * There's a delalloc extent in the hole; handle the cases where the
4087f5411b76SZhang Yi	 * delalloc extent is in front of, behind, or straddles the queried range.
4088f5411b76SZhang Yi */
4089f5411b76SZhang Yi if (lblk >= es.es_lblk + es.es_len) {
4090f5411b76SZhang Yi /*
4091f5411b76SZhang Yi * The delalloc extent is in front of the queried range,
4092f5411b76SZhang Yi * find again from the queried start block.
4093f5411b76SZhang Yi */
4094f5411b76SZhang Yi len -= lblk - hole_start;
4095f5411b76SZhang Yi hole_start = lblk;
4096f5411b76SZhang Yi goto again;
4097f5411b76SZhang Yi } else if (in_range(lblk, es.es_lblk, es.es_len)) {
4098f5411b76SZhang Yi /*
4099f5411b76SZhang Yi		 * The delalloc extent contains lblk; it must have been
4100f5411b76SZhang Yi		 * added after ext4_map_blocks() checked the extent status
4101f5411b76SZhang Yi		 * tree, so adjust the length to the part of the delalloc
4102f5411b76SZhang Yi		 * extent after lblk.
4103f5411b76SZhang Yi */
4104f5411b76SZhang Yi len = es.es_lblk + es.es_len - lblk;
4105f5411b76SZhang Yi return len;
4106f5411b76SZhang Yi } else {
4107f5411b76SZhang Yi /*
4108f5411b76SZhang Yi * The delalloc extent is partially or completely behind
4109f5411b76SZhang Yi * the queried range, update hole length until the
4110f5411b76SZhang Yi * beginning of the delalloc extent.
4111f5411b76SZhang Yi */
4112f5411b76SZhang Yi len = min(es.es_lblk - hole_start, len);
4113f5411b76SZhang Yi }
4114f5411b76SZhang Yi
4115f5411b76SZhang Yi insert_hole:
4116f5411b76SZhang Yi /* Put just found gap into cache to speed up subsequent requests */
4117f5411b76SZhang Yi ext_debug(inode, " -> %u:%u\n", hole_start, len);
4118f5411b76SZhang Yi ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE);
4119f5411b76SZhang Yi
4120f5411b76SZhang Yi /* Update hole_len to reflect hole size after lblk */
4121f5411b76SZhang Yi if (hole_start != lblk)
4122f5411b76SZhang Yi len -= lblk - hole_start;
4123f5411b76SZhang Yi
4124f5411b76SZhang Yi return len;
4125f5411b76SZhang Yi }
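
/*
 * Sketch (assumption, standalone): the delalloc adjustment above, for
 * a delalloc extent [es_lblk, es_lblk + es_len) found in the hole and
 * not ending at or before lblk (that case just retries the lookup
 * starting from lblk).
 */
static unsigned int demo_trim_hole(unsigned int lblk, unsigned int hole_start,
				   unsigned int len, unsigned int es_lblk,
				   unsigned int es_len)
{
	if (lblk >= es_lblk)			/* delalloc covers lblk */
		return es_lblk + es_len - lblk;
	/* delalloc behind lblk: the hole ends where the delalloc begins */
	return es_lblk - hole_start < len ? es_lblk - hole_start : len;
}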
41264d33b1efSTheodore Ts'o
41274d33b1efSTheodore Ts'o /*
4128f5ab0d1fSMingming Cao * Block allocation/map/preallocation routine for extents based files
4129f5ab0d1fSMingming Cao *
4130f5ab0d1fSMingming Cao *
4131c278bfecSAneesh Kumar K.V * Need to be called with
41320e855ac8SAneesh Kumar K.V * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
41330e855ac8SAneesh Kumar K.V * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4134f5ab0d1fSMingming Cao *
4135b483bb77SRandy Dunlap * return > 0, number of blocks already mapped/allocated
4136f5ab0d1fSMingming Cao * if create == 0 and these are pre-allocated blocks
4137f5ab0d1fSMingming Cao * buffer head is unmapped
4138f5ab0d1fSMingming Cao * otherwise blocks are mapped
4139f5ab0d1fSMingming Cao *
4140f5ab0d1fSMingming Cao	 * return = 0, if plain lookup failed (blocks have not been allocated)
4141f5ab0d1fSMingming Cao * buffer head is unmapped
4142f5ab0d1fSMingming Cao *
4143f5ab0d1fSMingming Cao * return < 0, error case.
4144c278bfecSAneesh Kumar K.V */
4145e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4146e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags)
4147a86c6181SAlex Tomas {
4148a86c6181SAlex Tomas struct ext4_ext_path *path = NULL;
4149d7dce9e0Syangerkun struct ext4_extent newex, *ex, ex2;
41504d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
41518ad8d710SEric Whitney ext4_fsblk_t newblock = 0, pblk;
415234990461SEric Whitney int err = 0, depth, ret;
41534d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0;
415481fdbb4aSYongqiang Yang unsigned int allocated_clusters = 0;
4155c9de560dSAlex Tomas struct ext4_allocation_request ar;
41564d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset;
4157a86c6181SAlex Tomas
415870aa1554SRitesh Harjani ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
41590562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4160a86c6181SAlex Tomas
4161a86c6181SAlex Tomas /* find extent for this block */
4162ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4163a86c6181SAlex Tomas if (IS_ERR(path)) {
4164a86c6181SAlex Tomas err = PTR_ERR(path);
4165a86c6181SAlex Tomas path = NULL;
41668ad8d710SEric Whitney goto out;
4167a86c6181SAlex Tomas }
4168a86c6181SAlex Tomas
4169a86c6181SAlex Tomas depth = ext_depth(inode);
4170a86c6181SAlex Tomas
4171a86c6181SAlex Tomas /*
4172d0d856e8SRandy Dunlap	 * a consistent leaf must not be empty;
4173d0d856e8SRandy Dunlap	 * this situation is possible, though, _during_ tree modification;
4174ed8a1a76STheodore Ts'o	 * this is why the assert can't be put in ext4_find_extent()
4175a86c6181SAlex Tomas */
4176273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4177273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address "
4178f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld",
4179f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth,
4180f70f362bSTheodore Ts'o path[depth].p_block);
41816a797d27SDarrick J. Wong err = -EFSCORRUPTED;
41828ad8d710SEric Whitney goto out;
4183034fb4c9SSurbhi Palande }
4184a86c6181SAlex Tomas
41857e028976SAvantika Mathur ex = path[depth].p_ext;
41867e028976SAvantika Mathur if (ex) {
4187725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4188bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4189a2df2a63SAmit Arora unsigned short ee_len;
4190471d4011SSuparna Bhattacharya
4191b8a86845SLukas Czerner
4192471d4011SSuparna Bhattacharya /*
4193556615dcSLukas Czerner * unwritten extents are treated as holes, except that
419456055d3aSAmit Arora * we split out initialized portions during a write.
4195471d4011SSuparna Bhattacharya */
4196a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex);
4197d8990240SAditya Kali
4198d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4199d8990240SAditya Kali
4200d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */
4201e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) {
4202e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start;
4203d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */
4204e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block);
420570aa1554SRitesh Harjani ext_debug(inode, "%u fit into %u:%d -> %llu\n",
420670aa1554SRitesh Harjani map->m_lblk, ee_block, ee_len, newblock);
420756055d3aSAmit Arora
4208b8a86845SLukas Czerner /*
4209b8a86845SLukas Czerner * If the extent is initialized check whether the
4210b8a86845SLukas Czerner * caller wants to convert it to unwritten.
4211b8a86845SLukas Czerner */
4212556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(ex)) &&
4213b8a86845SLukas Czerner (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4214f064a9d6SEric Whitney err = convert_initialized_extent(handle,
4215f064a9d6SEric Whitney inode, map, &path, &allocated);
42168ad8d710SEric Whitney goto out;
4217f064a9d6SEric Whitney } else if (!ext4_ext_is_unwritten(ex)) {
42188ad8d710SEric Whitney map->m_flags |= EXT4_MAP_MAPPED;
42198ad8d710SEric Whitney map->m_pblk = newblock;
42208ad8d710SEric Whitney if (allocated > map->m_len)
42218ad8d710SEric Whitney allocated = map->m_len;
42228ad8d710SEric Whitney map->m_len = allocated;
42238ad8d710SEric Whitney ext4_ext_show_leaf(inode, path);
4224a86c6181SAlex Tomas goto out;
4225f064a9d6SEric Whitney }
422669eb33dcSZheng Liu
4227556615dcSLukas Czerner ret = ext4_ext_handle_unwritten_extents(
4228dfe50809STheodore Ts'o handle, inode, map, &path, flags,
4229e861304bSAllison Henderson allocated, newblock);
4230ce37c429SEric Whitney if (ret < 0)
4231ce37c429SEric Whitney err = ret;
4232ce37c429SEric Whitney else
4233ce37c429SEric Whitney allocated = ret;
42348ad8d710SEric Whitney goto out;
423556055d3aSAmit Arora }
4236a86c6181SAlex Tomas }
4237a86c6181SAlex Tomas
4238a86c6181SAlex Tomas /*
4239d0d856e8SRandy Dunlap * requested block isn't allocated yet;
4240a86c6181SAlex Tomas	 * we must not try to create blocks if the create flag is zero
4241a86c6181SAlex Tomas */
4242c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4243f5411b76SZhang Yi ext4_lblk_t len;
4244140a5250SJan Kara
4245f5411b76SZhang Yi len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk);
4246facab4d9SJan Kara
4247facab4d9SJan Kara map->m_pblk = 0;
4248f5411b76SZhang Yi map->m_len = min_t(unsigned int, map->m_len, len);
42498ad8d710SEric Whitney goto out;
4250a86c6181SAlex Tomas }
42514d33b1efSTheodore Ts'o
4252a86c6181SAlex Tomas /*
4253c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation.
4254a86c6181SAlex Tomas */
42554d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk);
4256d0abafacSEric Whitney cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
42574d33b1efSTheodore Ts'o
42584d33b1efSTheodore Ts'o /*
42594d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned
4260ed8a1a76STheodore Ts'o * by ext4_find_extent() implies a cluster we can use.
42614d33b1efSTheodore Ts'o */
42624d33b1efSTheodore Ts'o if (cluster_offset && ex &&
4263d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
42644d33b1efSTheodore Ts'o ar.len = allocated = map->m_len;
42654d33b1efSTheodore Ts'o newblock = map->m_pblk;
42664d33b1efSTheodore Ts'o goto got_allocated_blocks;
42674d33b1efSTheodore Ts'o }
4268a86c6181SAlex Tomas
4269c9de560dSAlex Tomas /* find neighbour allocated blocks */
4270e35fd660STheodore Ts'o ar.lleft = map->m_lblk;
4271c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4272c9de560dSAlex Tomas if (err)
42738ad8d710SEric Whitney goto out;
4274e35fd660STheodore Ts'o ar.lright = map->m_lblk;
42754d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4276d7dce9e0Syangerkun if (err < 0)
42778ad8d710SEric Whitney goto out;
427825d14f98SAmit Arora
42794d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a
42804d33b1efSTheodore Ts'o * cluster we can use. */
4281d7dce9e0Syangerkun if ((sbi->s_cluster_ratio > 1) && err &&
4282d7dce9e0Syangerkun get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) {
42834d33b1efSTheodore Ts'o ar.len = allocated = map->m_len;
42844d33b1efSTheodore Ts'o newblock = map->m_pblk;
42854d33b1efSTheodore Ts'o goto got_allocated_blocks;
42864d33b1efSTheodore Ts'o }
42874d33b1efSTheodore Ts'o
4288749269faSAmit Arora /*
4289749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in
4290749269faSAmit Arora * a single extent. For an initialized extent this limit is
4291556615dcSLukas Czerner * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
4292556615dcSLukas Czerner * EXT_UNWRITTEN_MAX_LEN.
4293749269faSAmit Arora */
4294e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN &&
4295556615dcSLukas Czerner !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4296e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN;
4297556615dcSLukas Czerner else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
4298556615dcSLukas Czerner (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
4299556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN;
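	/*
	 * The two limits differ because ee_len is a 16-bit field in which
	 * values above 32768 encode unwritten extents (actual length =
	 * ee_len - 32768). An initialized extent can therefore span up to
	 * EXT_INIT_MAX_LEN (32768) blocks, while an unwritten one is capped
	 * at EXT_UNWRITTEN_MAX_LEN (32767).
	 */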
4300749269faSAmit Arora
4301e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4302e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len);
43034d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path);
430425d14f98SAmit Arora if (err)
4305b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex);
430625d14f98SAmit Arora else
4307e35fd660STheodore Ts'o allocated = map->m_len;
4308c9de560dSAlex Tomas
4309c9de560dSAlex Tomas /* allocate new block */
4310c9de560dSAlex Tomas ar.inode = inode;
4311e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4312e35fd660STheodore Ts'o ar.logical = map->m_lblk;
43134d33b1efSTheodore Ts'o /*
43144d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster
43154d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a
43164d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the
43174d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is
43184d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc()
43194d33b1efSTheodore Ts'o * work correctly.
43204d33b1efSTheodore Ts'o */
4321f5a44db5STheodore Ts'o offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
43224d33b1efSTheodore Ts'o ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
43234d33b1efSTheodore Ts'o ar.goal -= offset;
43244d33b1efSTheodore Ts'o ar.logical -= offset;
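	/*
	 * Worked example of the adjustment above (illustrative values),
	 * assuming a bigalloc cluster of 16 blocks: for map->m_lblk = 35 and
	 * allocated = 3, offset = 35 % 16 = 3, so ar.len becomes
	 * EXT4_NUM_B2C(3 + 3) = 1 cluster, and ar.goal/ar.logical are pulled
	 * back by 3 blocks to the cluster boundary at logical block 32.
	 */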
4325c9de560dSAlex Tomas if (S_ISREG(inode->i_mode))
4326c9de560dSAlex Tomas ar.flags = EXT4_MB_HINT_DATA;
4327c9de560dSAlex Tomas else
4328c9de560dSAlex Tomas /* disable in-core preallocation for non-regular files */
4329c9de560dSAlex Tomas ar.flags = 0;
4330556b27abSVivek Haldar if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4331556b27abSVivek Haldar ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4332e3cf5d5dSTheodore Ts'o if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4333e3cf5d5dSTheodore Ts'o ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4334c5e298aeSTheodore Ts'o if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
4335c5e298aeSTheodore Ts'o ar.flags |= EXT4_MB_USE_RESERVED;
4336c9de560dSAlex Tomas newblock = ext4_mb_new_blocks(handle, &ar, &err);
4337a86c6181SAlex Tomas if (!newblock)
43388ad8d710SEric Whitney goto out;
43397b415bf6SAditya Kali allocated_clusters = ar.len;
43404d33b1efSTheodore Ts'o ar.len = EXT4_C2B(sbi, ar.len) - offset;
434170aa1554SRitesh Harjani ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
4342ec8c60beSRitesh Harjani ar.goal, newblock, ar.len, allocated);
43434d33b1efSTheodore Ts'o if (ar.len > allocated)
43444d33b1efSTheodore Ts'o ar.len = allocated;
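	/*
	 * Continuing the example above: the allocated cluster converts back
	 * to EXT4_C2B(1) - 3 = 13 usable blocks, which is then clamped to
	 * the 3 blocks that were actually requested.
	 */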
4345a86c6181SAlex Tomas
43464d33b1efSTheodore Ts'o got_allocated_blocks:
4347a86c6181SAlex Tomas /* try to insert new extent into found leaf and return */
43488ad8d710SEric Whitney pblk = newblock + offset;
43498ad8d710SEric Whitney ext4_ext_store_pblock(&newex, pblk);
4350c9de560dSAlex Tomas newex.ee_len = cpu_to_le16(ar.len);
4351556615dcSLukas Czerner /* Mark unwritten */
4352556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4353556615dcSLukas Czerner ext4_ext_mark_unwritten(&newex);
4354a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN;
43558d5d02e6SMingming Cao }
4356c8d46e41SJiaying Zhang
43574337ecd1SEric Whitney err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
435834990461SEric Whitney if (err) {
435934990461SEric Whitney if (allocated_clusters) {
436034990461SEric Whitney int fb_flags = 0;
436182e54229SDmitry Monakhov
436234990461SEric Whitney /*
436334990461SEric Whitney * Free the data blocks we just allocated.
436434990461SEric Whitney * It is not a good idea to call discard here directly,
436534990461SEric Whitney * but otherwise we'd need to call it on every free().
436634990461SEric Whitney */
436727bc446eSbrookxu ext4_discard_preallocations(inode, 0);
436834990461SEric Whitney if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
436934990461SEric Whitney fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
4370c8e15130STheodore Ts'o ext4_free_blocks(handle, inode, NULL, newblock,
437134990461SEric Whitney EXT4_C2B(sbi, allocated_clusters),
437234990461SEric Whitney fb_flags);
437334990461SEric Whitney }
43748ad8d710SEric Whitney goto out;
4375315054f0SAlex Tomas }
4376a86c6181SAlex Tomas
4377b436b9beSJan Kara /*
4378b6bf9171SEric Whitney * Reduce the reserved cluster count to reflect successful deferred
4379b6bf9171SEric Whitney * allocation of delayed allocated clusters or direct allocation of
4380b6bf9171SEric Whitney * clusters discovered to be delayed allocated. Once allocated, a
4381b6bf9171SEric Whitney * cluster is not included in the reserved count.
43825f634d06SAneesh Kumar K.V */
43832971148dSEric Whitney if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
43847b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
43857b415bf6SAditya Kali /*
4386b6bf9171SEric Whitney * When allocating delayed allocated clusters, simply
4387b6bf9171SEric Whitney * reduce the reserved cluster count and claim quota
4388232ec872SLukas Czerner */
4389232ec872SLukas Czerner ext4_da_update_reserve_space(inode, allocated_clusters,
4390232ec872SLukas Czerner 1);
4391b6bf9171SEric Whitney } else {
4392b6bf9171SEric Whitney ext4_lblk_t lblk, len;
4393b6bf9171SEric Whitney unsigned int n;
4394b6bf9171SEric Whitney
4395b6bf9171SEric Whitney /*
4396b6bf9171SEric Whitney * When allocating non-delayed allocated clusters
4397b6bf9171SEric Whitney * (from fallocate, filemap, DIO, or clusters
4398b6bf9171SEric Whitney * allocated when delalloc has been disabled by
4399b6bf9171SEric Whitney * ext4_nonda_switch), reduce the reserved cluster
4400b6bf9171SEric Whitney * count by the number of allocated clusters that
4401b6bf9171SEric Whitney * have previously been delayed allocated. Quota
4402b6bf9171SEric Whitney * has been claimed by ext4_mb_new_blocks() above,
4403b6bf9171SEric Whitney * so release the quota reservations made for any
4404b6bf9171SEric Whitney * previously delayed allocated clusters.
4405b6bf9171SEric Whitney */
4406b6bf9171SEric Whitney lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk);
4407b6bf9171SEric Whitney len = allocated_clusters << sbi->s_cluster_bits;
4408b6bf9171SEric Whitney n = ext4_es_delayed_clu(inode, lblk, len);
4409b6bf9171SEric Whitney if (n > 0)
4410b6bf9171SEric Whitney ext4_da_update_reserve_space(inode, (int) n, 0);
44117b415bf6SAditya Kali }
44127b415bf6SAditya Kali }
44135f634d06SAneesh Kumar K.V
44145f634d06SAneesh Kumar K.V /*
4415b436b9beSJan Kara * Cache the extent and update transaction to commit on fdatasync only
4416556615dcSLukas Czerner * when it is _not_ an unwritten extent.
4417b436b9beSJan Kara */
4418556615dcSLukas Czerner if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
4419b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1);
442069eb33dcSZheng Liu else
4421b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0);
44228ad8d710SEric Whitney
44238ad8d710SEric Whitney map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
44248ad8d710SEric Whitney map->m_pblk = pblk;
44258ad8d710SEric Whitney map->m_len = ar.len;
4426e35fd660STheodore Ts'o allocated = map->m_len;
4427a86c6181SAlex Tomas ext4_ext_show_leaf(inode, path);
44288ad8d710SEric Whitney out:
44297ff5fddaSYe Bin ext4_free_ext_path(path);
4430e861304bSAllison Henderson
443163b99968STheodore Ts'o trace_ext4_ext_map_blocks_exit(inode, flags, map,
443263b99968STheodore Ts'o err ? err : allocated);
44337877191cSLukas Czerner return err ? err : allocated;
4434a86c6181SAlex Tomas }
4435a86c6181SAlex Tomas
4436d0abb36dSTheodore Ts'o int ext4_ext_truncate(handle_t *handle, struct inode *inode)
4437a86c6181SAlex Tomas {
4438a86c6181SAlex Tomas struct super_block *sb = inode->i_sb;
4439725d26d3SAneesh Kumar K.V ext4_lblk_t last_block;
4440a86c6181SAlex Tomas int err = 0;
4441a86c6181SAlex Tomas
4442a86c6181SAlex Tomas /*
4443d0d856e8SRandy Dunlap * TODO: optimization is possible here.
4444d0d856e8SRandy Dunlap * Probably we need not scan at all,
4445d0d856e8SRandy Dunlap * because page truncation is enough.
4446a86c6181SAlex Tomas */
4447a86c6181SAlex Tomas
4448a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */
4449a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size;
4450d0abb36dSTheodore Ts'o err = ext4_mark_inode_dirty(handle, inode);
4451d0abb36dSTheodore Ts'o if (err)
4452d0abb36dSTheodore Ts'o return err;
4453a86c6181SAlex Tomas
4454a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1)
4455a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb);
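	/*
	 * Example (illustrative), assuming a 4 KiB block size: for
	 * i_size = 10000 bytes, last_block = (10000 + 4095) >> 12 = 3, so
	 * everything from logical block 3 to the end of the file's logical
	 * range is dropped from the extent status tree and the extent tree
	 * below.
	 */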
4456ed5d285bSBaokun Li ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
4457ed5d285bSBaokun Li
445873c384c0STheodore Ts'o retry_remove_space:
445973c384c0STheodore Ts'o err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
446073c384c0STheodore Ts'o if (err == -ENOMEM) {
44614034247aSNeilBrown memalloc_retry_wait(GFP_ATOMIC);
446273c384c0STheodore Ts'o goto retry_remove_space;
446373c384c0STheodore Ts'o }
446473c384c0STheodore Ts'o return err;
4465a86c6181SAlex Tomas }
4466a86c6181SAlex Tomas
44670e8b6879SLukas Czerner static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4468c174e6d6SDmitry Monakhov ext4_lblk_t len, loff_t new_size,
446977a2e84dSTahsin Erdogan int flags)
4470a2df2a63SAmit Arora {
4471496ad9aaSAl Viro struct inode *inode = file_inode(file);
4472a2df2a63SAmit Arora handle_t *handle;
447364395d95STheodore Ts'o int ret = 0, ret2 = 0, ret3 = 0;
4474a2df2a63SAmit Arora int retries = 0;
44754134f5c8SLukas Czerner int depth = 0;
44762ed88685STheodore Ts'o struct ext4_map_blocks map;
44770e8b6879SLukas Czerner unsigned int credits;
4478*93011887SBrian Foster loff_t epos, old_size = i_size_read(inode);
4479a2df2a63SAmit Arora
4480c3fe493cSFabian Frederick BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
44810e8b6879SLukas Czerner map.m_lblk = offset;
4482c174e6d6SDmitry Monakhov map.m_len = len;
44833c6fe770SGreg Harm /*
44843c6fe770SGreg Harm * Don't normalize the request if it can fit in one extent so
44853c6fe770SGreg Harm * that it doesn't get unnecessarily split into multiple
44863c6fe770SGreg Harm * extents.
44873c6fe770SGreg Harm */
4488556615dcSLukas Czerner if (len <= EXT_UNWRITTEN_MAX_LEN)
44893c6fe770SGreg Harm flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
449060d4616fSDmitry Monakhov
44910e8b6879SLukas Czerner /*
44920e8b6879SLukas Czerner * credits to insert 1 extent into extent tree
44930e8b6879SLukas Czerner */
44940e8b6879SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len);
44954134f5c8SLukas Czerner depth = ext_depth(inode);
44960e8b6879SLukas Czerner
4497a2df2a63SAmit Arora retry:
44983258386aSEric Whitney while (len) {
44994134f5c8SLukas Czerner /*
45004134f5c8SLukas Czerner * Recalculate credits when extent tree depth changes.
45014134f5c8SLukas Czerner */
4502011c88e3SDan Carpenter if (depth != ext_depth(inode)) {
45034134f5c8SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len);
45044134f5c8SLukas Czerner depth = ext_depth(inode);
45054134f5c8SLukas Czerner }
45064134f5c8SLukas Czerner
45079924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
45089924a92aSTheodore Ts'o credits);
4509a2df2a63SAmit Arora if (IS_ERR(handle)) {
4510a2df2a63SAmit Arora ret = PTR_ERR(handle);
4511a2df2a63SAmit Arora break;
4512a2df2a63SAmit Arora }
4513a4e5d88bSDmitry Monakhov ret = ext4_map_blocks(handle, inode, &map, flags);
4514221879c9SAneesh Kumar K.V if (ret <= 0) {
4515f282ac19SLukas Czerner ext4_debug("inode #%lu: block %u: len %u: "
4516b06acd38SLukas Czerner "ext4_ext_map_blocks returned %d",
4517b06acd38SLukas Czerner inode->i_ino, map.m_lblk,
4518b06acd38SLukas Czerner map.m_len, ret);
4519a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode);
45203258386aSEric Whitney ext4_journal_stop(handle);
4521a2df2a63SAmit Arora break;
4522a2df2a63SAmit Arora }
45233258386aSEric Whitney /*
45243258386aSEric Whitney * allow a full retry cycle for any remaining allocations
45253258386aSEric Whitney */
45263258386aSEric Whitney retries = 0;
4527c174e6d6SDmitry Monakhov map.m_lblk += ret;
4528c174e6d6SDmitry Monakhov map.m_len = len = len - ret;
4529c174e6d6SDmitry Monakhov epos = (loff_t)map.m_lblk << inode->i_blkbits;
45301bc33893SJeff Layton inode_set_ctime_current(inode);
4531c174e6d6SDmitry Monakhov if (new_size) {
4532c174e6d6SDmitry Monakhov if (epos > new_size)
4533c174e6d6SDmitry Monakhov epos = new_size;
4534c174e6d6SDmitry Monakhov if (ext4_update_inode_size(inode, epos) & 0x1)
4535fa42d5f1SJeff Layton inode_set_mtime_to_ts(inode,
4536fa42d5f1SJeff Layton inode_get_ctime(inode));
4537*93011887SBrian Foster if (epos > old_size) {
4538*93011887SBrian Foster pagecache_isize_extended(inode, old_size, epos);
4539*93011887SBrian Foster ext4_zero_partial_blocks(handle, inode,
4540*93011887SBrian Foster old_size, epos - old_size);
4541*93011887SBrian Foster }
4542c174e6d6SDmitry Monakhov }
45434209ae12SHarshad Shirwadkar ret2 = ext4_mark_inode_dirty(handle, inode);
4544c894aa97SEryu Guan ext4_update_inode_fsync_trans(handle, inode, 1);
45454209ae12SHarshad Shirwadkar ret3 = ext4_journal_stop(handle);
45464209ae12SHarshad Shirwadkar ret2 = ret3 ? ret3 : ret2;
45474209ae12SHarshad Shirwadkar if (unlikely(ret2))
4548a2df2a63SAmit Arora break;
4549a2df2a63SAmit Arora }
45503258386aSEric Whitney if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4551a2df2a63SAmit Arora goto retry;
4552f282ac19SLukas Czerner
45530e8b6879SLukas Czerner return ret > 0 ? ret2 : ret;
45540e8b6879SLukas Czerner }
45550e8b6879SLukas Czerner
4556ad5cd4f4SDarrick J. Wong static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
455743f81677SEric Biggers
4558ad5cd4f4SDarrick J. Wong static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
455943f81677SEric Biggers
4560b8a86845SLukas Czerner static long ext4_zero_range(struct file *file, loff_t offset,
4561b8a86845SLukas Czerner loff_t len, int mode)
4562b8a86845SLukas Czerner {
4563b8a86845SLukas Czerner struct inode *inode = file_inode(file);
4564d4f5258eSJan Kara struct address_space *mapping = file->f_mapping;
4565b8a86845SLukas Czerner handle_t *handle = NULL;
4566b8a86845SLukas Czerner unsigned int max_blocks;
4567b8a86845SLukas Czerner loff_t new_size = 0;
4568b8a86845SLukas Czerner int ret = 0;
4569b8a86845SLukas Czerner int flags;
457069dc9536SDmitry Monakhov int credits;
4571c174e6d6SDmitry Monakhov int partial_begin, partial_end;
4572b8a86845SLukas Czerner loff_t start, end;
4573b8a86845SLukas Czerner ext4_lblk_t lblk;
4574b8a86845SLukas Czerner unsigned int blkbits = inode->i_blkbits;
4575b8a86845SLukas Czerner
4576b8a86845SLukas Czerner trace_ext4_zero_range(inode, offset, len, mode);
4577b8a86845SLukas Czerner
4578b8a86845SLukas Czerner /*
4579e4d7f2d3SKeyur Patel * Round up offset. This is not fallocate: we need to zero out
4580b8a86845SLukas Czerner * blocks, so convert the interior block-aligned part of the range to
4581b8a86845SLukas Czerner * unwritten extents and possibly manually zero out unaligned parts of the
4582d91ecb89SOjaswin Mujoo * range. Here, start and partial_begin are inclusive, end and
4583d91ecb89SOjaswin Mujoo * partial_end are exclusive.
4584b8a86845SLukas Czerner */
4585b8a86845SLukas Czerner start = round_up(offset, 1 << blkbits);
4586b8a86845SLukas Czerner end = round_down((offset + len), 1 << blkbits);
4587b8a86845SLukas Czerner
4588b8a86845SLukas Czerner if (start < offset || end > offset + len)
4589b8a86845SLukas Czerner return -EINVAL;
4590c174e6d6SDmitry Monakhov partial_begin = offset & ((1 << blkbits) - 1);
4591c174e6d6SDmitry Monakhov partial_end = (offset + len) & ((1 << blkbits) - 1);
4592b8a86845SLukas Czerner
4593b8a86845SLukas Czerner lblk = start >> blkbits;
4594b8a86845SLukas Czerner max_blocks = (end >> blkbits);
4595b8a86845SLukas Czerner if (max_blocks < lblk)
4596b8a86845SLukas Czerner max_blocks = 0;
4597b8a86845SLukas Czerner else
4598b8a86845SLukas Czerner max_blocks -= lblk;
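	/*
	 * Worked example (illustrative), assuming a 4 KiB block size: for
	 * offset = 1000 and len = 10000, start = 4096, end = 8192,
	 * partial_begin = 1000 and partial_end = 2808, giving lblk = 1 and
	 * max_blocks = 1. Block 1 is zeroed via the unwritten-extent path
	 * below, while the two partial edges are zeroed out manually at the
	 * end of this function.
	 */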
4599b8a86845SLukas Czerner
46005955102cSAl Viro inode_lock(inode);
4601b8a86845SLukas Czerner
4602b8a86845SLukas Czerner /*
460380dd4978SChristophe JAILLET * Indirect files do not support unwritten extents
4604b8a86845SLukas Czerner */
4605b8a86845SLukas Czerner if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4606b8a86845SLukas Czerner ret = -EOPNOTSUPP;
4607b8a86845SLukas Czerner goto out_mutex;
4608b8a86845SLukas Czerner }
4609b8a86845SLukas Czerner
4610b8a86845SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) &&
46119b02e498SEric Biggers (offset + len > inode->i_size ||
461251e3ae81STheodore Ts'o offset + len > EXT4_I(inode)->i_disksize)) {
4613b8a86845SLukas Czerner new_size = offset + len;
4614b8a86845SLukas Czerner ret = inode_newsize_ok(inode, new_size);
4615b8a86845SLukas Czerner if (ret)
4616b8a86845SLukas Czerner goto out_mutex;
4617b8a86845SLukas Czerner }
4618b8a86845SLukas Czerner
46190f2af21aSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
46200f2af21aSLukas Czerner
4621f340b3d9Shongnanli /* Wait for all existing dio workers; newcomers will block on i_rwsem */
462217048e8aSJan Kara inode_dio_wait(inode);
462317048e8aSJan Kara
4624ad5cd4f4SDarrick J. Wong ret = file_modified(file);
4625ad5cd4f4SDarrick J. Wong if (ret)
4626ad5cd4f4SDarrick J. Wong goto out_mutex;
4627ad5cd4f4SDarrick J. Wong
46280f2af21aSLukas Czerner /* Preallocate the range including the unaligned edges */
46290f2af21aSLukas Czerner if (partial_begin || partial_end) {
46300f2af21aSLukas Czerner ret = ext4_alloc_file_blocks(file,
46310f2af21aSLukas Czerner round_down(offset, 1 << blkbits) >> blkbits,
46320f2af21aSLukas Czerner (round_up((offset + len), 1 << blkbits) -
46330f2af21aSLukas Czerner round_down(offset, 1 << blkbits)) >> blkbits,
463477a2e84dSTahsin Erdogan new_size, flags);
46350f2af21aSLukas Czerner if (ret)
46361d39834fSNikolay Borisov goto out_mutex;
46370f2af21aSLukas Czerner
46380f2af21aSLukas Czerner }
46390f2af21aSLukas Czerner
46400f2af21aSLukas Czerner /* Zero range excluding the unaligned edges */
4641b8a86845SLukas Czerner if (max_blocks > 0) {
46420f2af21aSLukas Czerner flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
46430f2af21aSLukas Czerner EXT4_EX_NOCACHE);
4644b8a86845SLukas Czerner
4645ea3d7209SJan Kara /*
4646ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have
4647ea3d7209SJan Kara * released from page cache.
4648ea3d7209SJan Kara */
4649d4f5258eSJan Kara filemap_invalidate_lock(mapping);
4650430657b6SRoss Zwisler
4651430657b6SRoss Zwisler ret = ext4_break_layouts(inode);
4652430657b6SRoss Zwisler if (ret) {
4653d4f5258eSJan Kara filemap_invalidate_unlock(mapping);
4654430657b6SRoss Zwisler goto out_mutex;
4655430657b6SRoss Zwisler }
4656430657b6SRoss Zwisler
465701127848SJan Kara ret = ext4_update_disksize_before_punch(inode, offset, len);
465801127848SJan Kara if (ret) {
4659d4f5258eSJan Kara filemap_invalidate_unlock(mapping);
46601d39834fSNikolay Borisov goto out_mutex;
466101127848SJan Kara }
4662783ae448SJan Kara
4663783ae448SJan Kara /*
4664783ae448SJan Kara * For journalled data we need to write (and checkpoint) pages
4665783ae448SJan Kara * before discarding page cache to avoid inconsistent data on
4666783ae448SJan Kara * disk in case of a crash before the zeroing transaction is committed.
4667783ae448SJan Kara */
4668783ae448SJan Kara if (ext4_should_journal_data(inode)) {
4669d91ecb89SOjaswin Mujoo ret = filemap_write_and_wait_range(mapping, start,
4670d91ecb89SOjaswin Mujoo end - 1);
4671783ae448SJan Kara if (ret) {
4672783ae448SJan Kara filemap_invalidate_unlock(mapping);
4673783ae448SJan Kara goto out_mutex;
4674783ae448SJan Kara }
4675783ae448SJan Kara }
4676783ae448SJan Kara
4677ea3d7209SJan Kara /* Now release the pages and zero block aligned part of pages */
4678ea3d7209SJan Kara truncate_pagecache_range(inode, start, end - 1);
4679fa42d5f1SJeff Layton inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
4680ea3d7209SJan Kara
4681c174e6d6SDmitry Monakhov ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
468277a2e84dSTahsin Erdogan flags);
4683d4f5258eSJan Kara filemap_invalidate_unlock(mapping);
4684b8a86845SLukas Czerner if (ret)
46851d39834fSNikolay Borisov goto out_mutex;
4686b8a86845SLukas Czerner }
4687c174e6d6SDmitry Monakhov if (!partial_begin && !partial_end)
46881d39834fSNikolay Borisov goto out_mutex;
4689c174e6d6SDmitry Monakhov
469069dc9536SDmitry Monakhov /*
469169dc9536SDmitry Monakhov * In the worst case we have to write out two nonadjacent unwritten
469269dc9536SDmitry Monakhov * blocks and update the inode
469369dc9536SDmitry Monakhov */
469469dc9536SDmitry Monakhov credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
469569dc9536SDmitry Monakhov if (ext4_should_journal_data(inode))
469669dc9536SDmitry Monakhov credits += 2;
469769dc9536SDmitry Monakhov handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
4698b8a86845SLukas Czerner if (IS_ERR(handle)) {
4699b8a86845SLukas Czerner ret = PTR_ERR(handle);
4700b8a86845SLukas Czerner ext4_std_error(inode->i_sb, ret);
47011d39834fSNikolay Borisov goto out_mutex;
4702b8a86845SLukas Czerner }
4703b8a86845SLukas Czerner
4704fa42d5f1SJeff Layton inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
47054337ecd1SEric Whitney if (new_size)
47064631dbf6SDmitry Monakhov ext4_update_inode_size(inode, new_size);
47074209ae12SHarshad Shirwadkar ret = ext4_mark_inode_dirty(handle, inode);
47084209ae12SHarshad Shirwadkar if (unlikely(ret))
47094209ae12SHarshad Shirwadkar goto out_handle;
4710b8a86845SLukas Czerner /* Zero out partial block at the edges of the range */
4711b8a86845SLukas Czerner ret = ext4_zero_partial_blocks(handle, inode, offset, len);
471267a7d5f5SJan Kara if (ret >= 0)
471367a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1);
4714b8a86845SLukas Czerner
4715b8a86845SLukas Czerner if (file->f_flags & O_SYNC)
4716b8a86845SLukas Czerner ext4_handle_sync(handle);
4717b8a86845SLukas Czerner
47184209ae12SHarshad Shirwadkar out_handle:
4719b8a86845SLukas Czerner ext4_journal_stop(handle);
4720b8a86845SLukas Czerner out_mutex:
47215955102cSAl Viro inode_unlock(inode);
4722b8a86845SLukas Czerner return ret;
4723b8a86845SLukas Czerner }
4724b8a86845SLukas Czerner
47250e8b6879SLukas Czerner /*
47260e8b6879SLukas Czerner * Preallocate space for a file. This implements ext4's fallocate file
47270e8b6879SLukas Czerner * operation, which gets called from the sys_fallocate system call.
47280e8b6879SLukas Czerner * For block-mapped files, posix_fallocate should fall back to the method
47290e8b6879SLukas Czerner * of writing zeroes to the required new blocks (the same behavior that is
47300e8b6879SLukas Czerner * expected of file systems which do not support the fallocate() system call).
47310e8b6879SLukas Czerner */
47320e8b6879SLukas Czerner long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
47330e8b6879SLukas Czerner {
47340e8b6879SLukas Czerner struct inode *inode = file_inode(file);
47350e8b6879SLukas Czerner loff_t new_size = 0;
47360e8b6879SLukas Czerner unsigned int max_blocks;
47370e8b6879SLukas Czerner int ret = 0;
47380e8b6879SLukas Czerner int flags;
47390e8b6879SLukas Czerner ext4_lblk_t lblk;
47400e8b6879SLukas Czerner unsigned int blkbits = inode->i_blkbits;
47410e8b6879SLukas Czerner
47422058f83aSMichael Halcrow /*
47432058f83aSMichael Halcrow * Encrypted inodes can't handle collapse range or insert
47442058f83aSMichael Halcrow * range since we would need to re-encrypt blocks with a
47452058f83aSMichael Halcrow * different IV or XTS tweak (which are based on the logical
47462058f83aSMichael Halcrow * block number).
47472058f83aSMichael Halcrow */
4748592ddec7SChandan Rajendra if (IS_ENCRYPTED(inode) &&
4749457b1e35SEric Biggers (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
47502058f83aSMichael Halcrow return -EOPNOTSUPP;
47512058f83aSMichael Halcrow
47520e8b6879SLukas Czerner /* Return error if mode is not supported */
47530e8b6879SLukas Czerner if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4754331573feSNamjae Jeon FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
4755331573feSNamjae Jeon FALLOC_FL_INSERT_RANGE))
47560e8b6879SLukas Czerner return -EOPNOTSUPP;
47570e8b6879SLukas Czerner
4758f87c7a4bSBaokun Li inode_lock(inode);
4759f87c7a4bSBaokun Li ret = ext4_convert_inline_data(inode);
4760f87c7a4bSBaokun Li inode_unlock(inode);
4761f87c7a4bSBaokun Li if (ret)
4762f87c7a4bSBaokun Li goto exit;
4763f87c7a4bSBaokun Li
4764aa75f4d3SHarshad Shirwadkar if (mode & FALLOC_FL_PUNCH_HOLE) {
4765ad5cd4f4SDarrick J. Wong ret = ext4_punch_hole(file, offset, len);
4766aa75f4d3SHarshad Shirwadkar goto exit;
4767aa75f4d3SHarshad Shirwadkar }
47680e8b6879SLukas Czerner
4769aa75f4d3SHarshad Shirwadkar if (mode & FALLOC_FL_COLLAPSE_RANGE) {
4770ad5cd4f4SDarrick J. Wong ret = ext4_collapse_range(file, offset, len);
4771aa75f4d3SHarshad Shirwadkar goto exit;
4772aa75f4d3SHarshad Shirwadkar }
477340c406c7STheodore Ts'o
4774aa75f4d3SHarshad Shirwadkar if (mode & FALLOC_FL_INSERT_RANGE) {
4775ad5cd4f4SDarrick J. Wong ret = ext4_insert_range(file, offset, len);
4776aa75f4d3SHarshad Shirwadkar goto exit;
4777aa75f4d3SHarshad Shirwadkar }
4778331573feSNamjae Jeon
4779aa75f4d3SHarshad Shirwadkar if (mode & FALLOC_FL_ZERO_RANGE) {
4780aa75f4d3SHarshad Shirwadkar ret = ext4_zero_range(file, offset, len, mode);
4781aa75f4d3SHarshad Shirwadkar goto exit;
4782aa75f4d3SHarshad Shirwadkar }
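	/*
	 * Only plain preallocation (mode 0, possibly with
	 * FALLOC_FL_KEEP_SIZE) reaches this point; e.g. a userspace call
	 * such as fallocate(fd, 0, 0, 16 << 20) takes the allocation path
	 * below, while the other mode bits were dispatched above.
	 */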
47830e8b6879SLukas Czerner trace_ext4_fallocate_enter(inode, offset, len, mode);
47840e8b6879SLukas Czerner lblk = offset >> blkbits;
47850e8b6879SLukas Czerner
4786518eaa63SFabian Frederick max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
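	/*
	 * Example (illustrative), assuming a 4 KiB block size: offset = 1000
	 * and len = 10000 give lblk = 0 and max_blocks = 3, i.e. every block
	 * touched by the byte range [offset, offset + len) is preallocated.
	 */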
4787556615dcSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
47880e8b6879SLukas Czerner
47895955102cSAl Viro inode_lock(inode);
47900e8b6879SLukas Czerner
4791280227a7SDavide Italiano /*
4792280227a7SDavide Italiano * We only support preallocation for extent-based files
4793280227a7SDavide Italiano */
4794280227a7SDavide Italiano if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4795280227a7SDavide Italiano ret = -EOPNOTSUPP;
4796280227a7SDavide Italiano goto out;
4797280227a7SDavide Italiano }
4798280227a7SDavide Italiano
47990e8b6879SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) &&
48009b02e498SEric Biggers (offset + len > inode->i_size ||
480151e3ae81STheodore Ts'o offset + len > EXT4_I(inode)->i_disksize)) {
48020e8b6879SLukas Czerner new_size = offset + len;
48030e8b6879SLukas Czerner ret = inode_newsize_ok(inode, new_size);
48040e8b6879SLukas Czerner if (ret)
48050e8b6879SLukas Czerner goto out;
48060e8b6879SLukas Czerner }
48070e8b6879SLukas Czerner
4808f340b3d9Shongnanli /* Wait for all existing dio workers; newcomers will block on i_rwsem */
480917048e8aSJan Kara inode_dio_wait(inode);
481017048e8aSJan Kara
4811ad5cd4f4SDarrick J. Wong ret = file_modified(file);
4812ad5cd4f4SDarrick J. Wong if (ret)
4813ad5cd4f4SDarrick J. Wong goto out;
4814ad5cd4f4SDarrick J. Wong
481577a2e84dSTahsin Erdogan ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
48160e8b6879SLukas Czerner if (ret)
48170e8b6879SLukas Czerner goto out;
48180e8b6879SLukas Czerner
4819c174e6d6SDmitry Monakhov if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) {
4820aa75f4d3SHarshad Shirwadkar ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
4821c174e6d6SDmitry Monakhov EXT4_I(inode)->i_sync_tid);
4822f282ac19SLukas Czerner }
4823f282ac19SLukas Czerner out:
48245955102cSAl Viro inode_unlock(inode);
48250e8b6879SLukas Czerner trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4826aa75f4d3SHarshad Shirwadkar exit:
48270e8b6879SLukas Czerner return ret;
4828a2df2a63SAmit Arora }
48296873fa0dSEric Sandeen
48306873fa0dSEric Sandeen /*
48310031462bSMingming Cao * This function converts a range of blocks to written extents.
48320031462bSMingming Cao * The caller of this function will pass the start offset and the size;
48330031462bSMingming Cao * all unwritten extents within this range will be converted to
48340031462bSMingming Cao * written extents.
48350031462bSMingming Cao *
48360031462bSMingming Cao * This function is called from the direct I/O end io callback
48370031462bSMingming Cao * function, to convert the fallocated extents after I/O is completed.
4838109f5565SMingming * Returns 0 on success.
48390031462bSMingming Cao */
48406b523df4SJan Kara int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
48416b523df4SJan Kara loff_t offset, ssize_t len)
48420031462bSMingming Cao {
48430031462bSMingming Cao unsigned int max_blocks;
48444209ae12SHarshad Shirwadkar int ret = 0, ret2 = 0, ret3 = 0;
48452ed88685STheodore Ts'o struct ext4_map_blocks map;
4846a00713eaSRitesh Harjani unsigned int blkbits = inode->i_blkbits;
4847a00713eaSRitesh Harjani unsigned int credits = 0;
48480031462bSMingming Cao
48492ed88685STheodore Ts'o map.m_lblk = offset >> blkbits;
4850518eaa63SFabian Frederick max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
4851518eaa63SFabian Frederick
4852a00713eaSRitesh Harjani if (!handle) {
48536b523df4SJan Kara /*
48540031462bSMingming Cao * credits to insert 1 extent into extent tree
48550031462bSMingming Cao */
48560031462bSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks);
48576b523df4SJan Kara }
48580031462bSMingming Cao while (ret >= 0 && ret < max_blocks) {
48592ed88685STheodore Ts'o map.m_lblk += ret;
48602ed88685STheodore Ts'o map.m_len = (max_blocks -= ret);
48616b523df4SJan Kara if (credits) {
48626b523df4SJan Kara handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
48636b523df4SJan Kara credits);
48640031462bSMingming Cao if (IS_ERR(handle)) {
48650031462bSMingming Cao ret = PTR_ERR(handle);
48660031462bSMingming Cao break;
48670031462bSMingming Cao }
48686b523df4SJan Kara }
48692ed88685STheodore Ts'o ret = ext4_map_blocks(handle, inode, &map,
4870c7064ef1SJiaying Zhang EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4871b06acd38SLukas Czerner if (ret <= 0)
4872b06acd38SLukas Czerner ext4_warning(inode->i_sb,
4873b06acd38SLukas Czerner "inode #%lu: block %u: len %u: "
487492b97816STheodore Ts'o "ext4_ext_map_blocks returned %d",
4875b06acd38SLukas Czerner inode->i_ino, map.m_lblk,
487692b97816STheodore Ts'o map.m_len, ret);
48774209ae12SHarshad Shirwadkar ret2 = ext4_mark_inode_dirty(handle, inode);
48784209ae12SHarshad Shirwadkar if (credits) {
48794209ae12SHarshad Shirwadkar ret3 = ext4_journal_stop(handle);
48804209ae12SHarshad Shirwadkar if (unlikely(ret3))
48814209ae12SHarshad Shirwadkar ret2 = ret3;
48824209ae12SHarshad Shirwadkar }
48834209ae12SHarshad Shirwadkar
48840031462bSMingming Cao if (ret <= 0 || ret2)
48850031462bSMingming Cao break;
48860031462bSMingming Cao }
48870031462bSMingming Cao return ret > 0 ? ret2 : ret;
48880031462bSMingming Cao }
48896d9c85ebSYongqiang Yang
4890a00713eaSRitesh Harjani int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
4891a00713eaSRitesh Harjani {
4892d1e18b88SRitesh Harjani int ret = 0, err = 0;
4893c8cc8816SRitesh Harjani struct ext4_io_end_vec *io_end_vec;
4894a00713eaSRitesh Harjani
4895a00713eaSRitesh Harjani /*
4896a00713eaSRitesh Harjani * This is somewhat ugly but the idea is clear: when a transaction is
4897a00713eaSRitesh Harjani * reserved, everything goes into it. Otherwise we'd rather start several
4898a00713eaSRitesh Harjani * smaller transactions for the conversion of each extent separately.
4899a00713eaSRitesh Harjani */
4900a00713eaSRitesh Harjani if (handle) {
4901a00713eaSRitesh Harjani handle = ext4_journal_start_reserved(handle,
4902a00713eaSRitesh Harjani EXT4_HT_EXT_CONVERT);
4903a00713eaSRitesh Harjani if (IS_ERR(handle))
4904a00713eaSRitesh Harjani return PTR_ERR(handle);
4905a00713eaSRitesh Harjani }
4906a00713eaSRitesh Harjani
4907c8cc8816SRitesh Harjani list_for_each_entry(io_end_vec, &io_end->list_vec, list) {
4908a00713eaSRitesh Harjani ret = ext4_convert_unwritten_extents(handle, io_end->inode,
4909c8cc8816SRitesh Harjani io_end_vec->offset,
4910c8cc8816SRitesh Harjani io_end_vec->size);
4911c8cc8816SRitesh Harjani if (ret)
4912c8cc8816SRitesh Harjani break;
4913c8cc8816SRitesh Harjani }
4914c8cc8816SRitesh Harjani
4915a00713eaSRitesh Harjani if (handle)
4916a00713eaSRitesh Harjani err = ext4_journal_stop(handle);
4917a00713eaSRitesh Harjani
4918a00713eaSRitesh Harjani return ret < 0 ? ret : err;
4919a00713eaSRitesh Harjani }
4920a00713eaSRitesh Harjani
4921d3b6f23fSRitesh Harjani static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
49226873fa0dSEric Sandeen {
49236873fa0dSEric Sandeen __u64 physical = 0;
4924d3b6f23fSRitesh Harjani __u64 length = 0;
49256873fa0dSEric Sandeen int blockbits = inode->i_sb->s_blocksize_bits;
49266873fa0dSEric Sandeen int error = 0;
4927d3b6f23fSRitesh Harjani u16 iomap_type;
49286873fa0dSEric Sandeen
49296873fa0dSEric Sandeen /* in-inode? */
493019f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
49316873fa0dSEric Sandeen struct ext4_iloc iloc;
49326873fa0dSEric Sandeen int offset; /* offset of xattr in inode */
49336873fa0dSEric Sandeen
49346873fa0dSEric Sandeen error = ext4_get_inode_loc(inode, &iloc);
49356873fa0dSEric Sandeen if (error)
49366873fa0dSEric Sandeen return error;
4937a60697f4SJan Kara physical = (__u64)iloc.bh->b_blocknr << blockbits;
49386873fa0dSEric Sandeen offset = EXT4_GOOD_OLD_INODE_SIZE +
49396873fa0dSEric Sandeen EXT4_I(inode)->i_extra_isize;
49406873fa0dSEric Sandeen physical += offset;
49416873fa0dSEric Sandeen length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4942fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh);
4943d3b6f23fSRitesh Harjani iomap_type = IOMAP_INLINE;
4944d3b6f23fSRitesh Harjani } else if (EXT4_I(inode)->i_file_acl) { /* external block */
4945a60697f4SJan Kara physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
49466873fa0dSEric Sandeen length = inode->i_sb->s_blocksize;
4947d3b6f23fSRitesh Harjani iomap_type = IOMAP_MAPPED;
4948d3b6f23fSRitesh Harjani } else {
4949d3b6f23fSRitesh Harjani /* no in-inode or external block for xattr, so return -ENOENT */
4950d3b6f23fSRitesh Harjani error = -ENOENT;
4951d3b6f23fSRitesh Harjani goto out;
49526873fa0dSEric Sandeen }
49536873fa0dSEric Sandeen
4954d3b6f23fSRitesh Harjani iomap->addr = physical;
4955d3b6f23fSRitesh Harjani iomap->offset = 0;
4956d3b6f23fSRitesh Harjani iomap->length = length;
4957d3b6f23fSRitesh Harjani iomap->type = iomap_type;
4958d3b6f23fSRitesh Harjani iomap->flags = 0;
4959d3b6f23fSRitesh Harjani out:
4960d3b6f23fSRitesh Harjani return error;
49616873fa0dSEric Sandeen }
49626873fa0dSEric Sandeen
4963d3b6f23fSRitesh Harjani static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
4964d3b6f23fSRitesh Harjani loff_t length, unsigned flags,
4965d3b6f23fSRitesh Harjani struct iomap *iomap, struct iomap *srcmap)
4966d3b6f23fSRitesh Harjani {
4967d3b6f23fSRitesh Harjani int error;
4968d3b6f23fSRitesh Harjani
4969d3b6f23fSRitesh Harjani error = ext4_iomap_xattr_fiemap(inode, iomap);
4970d3b6f23fSRitesh Harjani if (error == 0 && (offset >= iomap->length))
4971d3b6f23fSRitesh Harjani error = -ENOENT;
4972d3b6f23fSRitesh Harjani return error;
4973d3b6f23fSRitesh Harjani }
4974d3b6f23fSRitesh Harjani
4975d3b6f23fSRitesh Harjani static const struct iomap_ops ext4_iomap_xattr_ops = {
4976d3b6f23fSRitesh Harjani .iomap_begin = ext4_iomap_xattr_begin,
4977d3b6f23fSRitesh Harjani };
4978d3b6f23fSRitesh Harjani
4979328e24aeSChristoph Hellwig static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
4980328e24aeSChristoph Hellwig {
4981328e24aeSChristoph Hellwig u64 maxbytes;
4982328e24aeSChristoph Hellwig
4983328e24aeSChristoph Hellwig if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4984328e24aeSChristoph Hellwig maxbytes = inode->i_sb->s_maxbytes;
4985328e24aeSChristoph Hellwig else
4986328e24aeSChristoph Hellwig maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
4987328e24aeSChristoph Hellwig
4988328e24aeSChristoph Hellwig if (*len == 0)
4989328e24aeSChristoph Hellwig return -EINVAL;
4990328e24aeSChristoph Hellwig if (start > maxbytes)
4991328e24aeSChristoph Hellwig return -EFBIG;
4992328e24aeSChristoph Hellwig
4993328e24aeSChristoph Hellwig /*
4994328e24aeSChristoph Hellwig * Shrink request scope to what the fs can actually handle.
4995328e24aeSChristoph Hellwig */
4996328e24aeSChristoph Hellwig if (*len > maxbytes || (maxbytes - *len) < start)
4997328e24aeSChristoph Hellwig *len = maxbytes - start;
4998328e24aeSChristoph Hellwig return 0;
4999328e24aeSChristoph Hellwig }
5000328e24aeSChristoph Hellwig
500103a5ed24SChristoph Hellwig int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
500203a5ed24SChristoph Hellwig u64 start, u64 len)
50036873fa0dSEric Sandeen {
50046873fa0dSEric Sandeen int error = 0;
50056873fa0dSEric Sandeen
50067869a4a6STheodore Ts'o if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
50077869a4a6STheodore Ts'o error = ext4_ext_precache(inode);
50087869a4a6STheodore Ts'o if (error)
50097869a4a6STheodore Ts'o return error;
5010bb5835edSTheodore Ts'o fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
50117869a4a6STheodore Ts'o }
50127869a4a6STheodore Ts'o
5013328e24aeSChristoph Hellwig /*
5014328e24aeSChristoph Hellwig * For bitmap files the maximum size limit could be smaller than
5015328e24aeSChristoph Hellwig * s_maxbytes, so check len here manually instead of just relying on the
5016328e24aeSChristoph Hellwig * generic check.
5017328e24aeSChristoph Hellwig */
5018328e24aeSChristoph Hellwig error = ext4_fiemap_check_ranges(inode, start, &len);
5019328e24aeSChristoph Hellwig if (error)
5020328e24aeSChristoph Hellwig return error;
5021328e24aeSChristoph Hellwig
50226873fa0dSEric Sandeen if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
5023d3b6f23fSRitesh Harjani fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
502403a5ed24SChristoph Hellwig return iomap_fiemap(inode, fieinfo, start, len,
5025d3b6f23fSRitesh Harjani &ext4_iomap_xattr_ops);
502603a5ed24SChristoph Hellwig }
502703a5ed24SChristoph Hellwig
502803a5ed24SChristoph Hellwig return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
502903a5ed24SChristoph Hellwig }
503003a5ed24SChristoph Hellwig
503103a5ed24SChristoph Hellwig int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
503203a5ed24SChristoph Hellwig __u64 start, __u64 len)
503303a5ed24SChristoph Hellwig {
503403a5ed24SChristoph Hellwig ext4_lblk_t start_blk, len_blks;
5035aca92ff6SLeonard Michlmayr __u64 last_blk;
503603a5ed24SChristoph Hellwig int error = 0;
503703a5ed24SChristoph Hellwig
503803a5ed24SChristoph Hellwig if (ext4_has_inline_data(inode)) {
503903a5ed24SChristoph Hellwig int has_inline;
504003a5ed24SChristoph Hellwig
504103a5ed24SChristoph Hellwig down_read(&EXT4_I(inode)->xattr_sem);
504203a5ed24SChristoph Hellwig has_inline = ext4_has_inline_data(inode);
504303a5ed24SChristoph Hellwig up_read(&EXT4_I(inode)->xattr_sem);
504403a5ed24SChristoph Hellwig if (has_inline)
504503a5ed24SChristoph Hellwig return 0;
504603a5ed24SChristoph Hellwig }
504703a5ed24SChristoph Hellwig
504803a5ed24SChristoph Hellwig if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
504903a5ed24SChristoph Hellwig error = ext4_ext_precache(inode);
505003a5ed24SChristoph Hellwig if (error)
505103a5ed24SChristoph Hellwig return error;
505203a5ed24SChristoph Hellwig fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
505303a5ed24SChristoph Hellwig }
505403a5ed24SChristoph Hellwig
505545dd052eSChristoph Hellwig error = fiemap_prep(inode, fieinfo, start, &len, 0);
5056cddf8a2cSChristoph Hellwig if (error)
5057cddf8a2cSChristoph Hellwig return error;
505803a5ed24SChristoph Hellwig
505903a5ed24SChristoph Hellwig error = ext4_fiemap_check_ranges(inode, start, &len);
506003a5ed24SChristoph Hellwig if (error)
506103a5ed24SChristoph Hellwig return error;
5062aca92ff6SLeonard Michlmayr
50636873fa0dSEric Sandeen start_blk = start >> inode->i_sb->s_blocksize_bits;
5064aca92ff6SLeonard Michlmayr last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5065f17722f9SLukas Czerner if (last_blk >= EXT_MAX_BLOCKS)
5066f17722f9SLukas Czerner last_blk = EXT_MAX_BLOCKS-1;
5067aca92ff6SLeonard Michlmayr len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
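	/*
	 * Example (illustrative), assuming a 4 KiB block size: start = 5000
	 * and len = 10000 cover bytes 5000..14999, so start_blk = 1,
	 * last_blk = 3 and len_blks = 3 (logical blocks 1, 2 and 3).
	 */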
50686873fa0dSEric Sandeen
50696873fa0dSEric Sandeen /*
507091dd8c11SLukas Czerner * Walk the extent tree gathering extent information
507191dd8c11SLukas Czerner * and pushing extents back to the user.
50726873fa0dSEric Sandeen */
507303a5ed24SChristoph Hellwig return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
50746873fa0dSEric Sandeen }
5075bb5835edSTheodore Ts'o
50769eb79482SNamjae Jeon /*
50779eb79482SNamjae Jeon * ext4_ext_shift_path_extents:
50789eb79482SNamjae Jeon * Shift the extents of a path structure lying between path[depth].p_ext
5079331573feSNamjae Jeon * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells
5080331573feSNamjae Jeon * whether it is a right or a left shift operation.
50819eb79482SNamjae Jeon */
50829eb79482SNamjae Jeon static int
50839eb79482SNamjae Jeon ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
50849eb79482SNamjae Jeon struct inode *inode, handle_t *handle,
5085331573feSNamjae Jeon enum SHIFT_DIRECTION SHIFT)
50869eb79482SNamjae Jeon {
50879eb79482SNamjae Jeon int depth, err = 0;
50889eb79482SNamjae Jeon struct ext4_extent *ex_start, *ex_last;
50894756ee18Szhengbin bool update = false;
50904268496eSyangerkun int credits, restart_credits;

50919eb79482SNamjae Jeon depth = path->p_depth;
50929eb79482SNamjae Jeon
50939eb79482SNamjae Jeon while (depth >= 0) {
50949eb79482SNamjae Jeon if (depth == path->p_depth) {
50959eb79482SNamjae Jeon ex_start = path[depth].p_ext;
50969eb79482SNamjae Jeon if (!ex_start)
50976a797d27SDarrick J. Wong return -EFSCORRUPTED;
50989eb79482SNamjae Jeon
50999eb79482SNamjae Jeon ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
51004268496eSyangerkun /* leaf + sb + inode */
51014268496eSyangerkun credits = 3;
51024268496eSyangerkun if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
51034268496eSyangerkun update = true;
51044268496eSyangerkun /* extent tree + sb + inode */
51054268496eSyangerkun credits = depth + 2;
51064268496eSyangerkun }
51079eb79482SNamjae Jeon
51084268496eSyangerkun restart_credits = ext4_writepage_trans_blocks(inode);
51094268496eSyangerkun err = ext4_datasem_ensure_credits(handle, inode, credits,
51104268496eSyangerkun restart_credits, 0);
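			/*
			 * A positive return value here indicates that the
			 * handle was restarted to obtain more credits (with
			 * i_data_sem dropped and retaken across the
			 * restart), so the whole scan must be redone; the
			 * caller restarts it when it sees -EAGAIN.
			 */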
51111811bc40Syangerkun if (err) {
51121811bc40Syangerkun if (err > 0)
51131811bc40Syangerkun err = -EAGAIN;
51149eb79482SNamjae Jeon goto out;
51151811bc40Syangerkun }
51169eb79482SNamjae Jeon
51174268496eSyangerkun err = ext4_ext_get_access(handle, inode, path + depth);
51184268496eSyangerkun if (err)
51194268496eSyangerkun goto out;
51209eb79482SNamjae Jeon
51219eb79482SNamjae Jeon while (ex_start <= ex_last) {
5122331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) {
5123331573feSNamjae Jeon le32_add_cpu(&ex_start->ee_block,
5124331573feSNamjae Jeon -shift);
51256dd834efSLukas Czerner /* Try to merge to the left. */
51266dd834efSLukas Czerner if ((ex_start >
5127331573feSNamjae Jeon EXT_FIRST_EXTENT(path[depth].p_hdr))
5128331573feSNamjae Jeon &&
51296dd834efSLukas Czerner ext4_ext_try_to_merge_right(inode,
51309eb79482SNamjae Jeon path, ex_start - 1))
51319eb79482SNamjae Jeon ex_last--;
51326dd834efSLukas Czerner else
51339eb79482SNamjae Jeon ex_start++;
5134331573feSNamjae Jeon } else {
5135331573feSNamjae Jeon le32_add_cpu(&ex_last->ee_block, shift);
5136331573feSNamjae Jeon ext4_ext_try_to_merge_right(inode, path,
5137331573feSNamjae Jeon ex_last);
5138331573feSNamjae Jeon ex_last--;
5139331573feSNamjae Jeon }
51409eb79482SNamjae Jeon }
51419eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth);
51429eb79482SNamjae Jeon if (err)
51439eb79482SNamjae Jeon goto out;
51449eb79482SNamjae Jeon
51459eb79482SNamjae Jeon if (--depth < 0 || !update)
51469eb79482SNamjae Jeon break;
51479eb79482SNamjae Jeon }
51489eb79482SNamjae Jeon
51499eb79482SNamjae Jeon /* Update index too */
51504268496eSyangerkun err = ext4_ext_get_access(handle, inode, path + depth);
51519eb79482SNamjae Jeon if (err)
51529eb79482SNamjae Jeon goto out;
51539eb79482SNamjae Jeon
5154331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT)
5155847c6c42SZheng Liu le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5156331573feSNamjae Jeon else
5157331573feSNamjae Jeon le32_add_cpu(&path[depth].p_idx->ei_block, shift);
51589eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth);
51599eb79482SNamjae Jeon if (err)
51609eb79482SNamjae Jeon goto out;
51619eb79482SNamjae Jeon
51629eb79482SNamjae Jeon /* we are done if the current index is not a starting index */
51639eb79482SNamjae Jeon if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
51649eb79482SNamjae Jeon break;
51659eb79482SNamjae Jeon
51669eb79482SNamjae Jeon depth--;
51679eb79482SNamjae Jeon }
51689eb79482SNamjae Jeon
51699eb79482SNamjae Jeon out:
51709eb79482SNamjae Jeon return err;
51719eb79482SNamjae Jeon }
51729eb79482SNamjae Jeon
51739eb79482SNamjae Jeon /*
51749eb79482SNamjae Jeon * ext4_ext_shift_extents:
5175331573feSNamjae Jeon * All the extents which lie in the range from @start to the last allocated
5176331573feSNamjae Jeon * block for the @inode are shifted either to the left or to the right
5177331573feSNamjae Jeon * (depending upon @SHIFT) by @shift blocks.
51789eb79482SNamjae Jeon * On success, 0 is returned, error otherwise.
51799eb79482SNamjae Jeon */
51809eb79482SNamjae Jeon static int
51819eb79482SNamjae Jeon ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5182331573feSNamjae Jeon ext4_lblk_t start, ext4_lblk_t shift,
5183331573feSNamjae Jeon enum SHIFT_DIRECTION SHIFT)
51849eb79482SNamjae Jeon {
51859eb79482SNamjae Jeon struct ext4_ext_path *path;
51869eb79482SNamjae Jeon int ret = 0, depth;
51879eb79482SNamjae Jeon struct ext4_extent *extent;
5188331573feSNamjae Jeon ext4_lblk_t stop, *iterator, ex_start, ex_end;
51891811bc40Syangerkun ext4_lblk_t tmp = EXT_MAX_BLOCKS;
51909eb79482SNamjae Jeon
51919eb79482SNamjae Jeon /* Let path point to the last extent */
519203e916faSRoman Pen path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
519303e916faSRoman Pen EXT4_EX_NOCACHE);
51949eb79482SNamjae Jeon if (IS_ERR(path))
51959eb79482SNamjae Jeon return PTR_ERR(path);
51969eb79482SNamjae Jeon
51979eb79482SNamjae Jeon depth = path->p_depth;
51989eb79482SNamjae Jeon extent = path[depth].p_ext;
5199ee4bd0d9STheodore Ts'o if (!extent)
5200ee4bd0d9STheodore Ts'o goto out;
52019eb79482SNamjae Jeon
52022a9b8cbaSRoman Pen stop = le32_to_cpu(extent->ee_block);
52039eb79482SNamjae Jeon
52049eb79482SNamjae Jeon /*
5205349fa7d6SEric Biggers * For left shifts, make sure the hole on the left is big enough to
5206349fa7d6SEric Biggers * accommodate the shift. For right shifts, make sure the last extent
5207349fa7d6SEric Biggers * won't be shifted beyond EXT_MAX_BLOCKS.
52089eb79482SNamjae Jeon */
5209331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) {
521003e916faSRoman Pen path = ext4_find_extent(inode, start - 1, &path,
521103e916faSRoman Pen EXT4_EX_NOCACHE);
52128dc79ec4SDmitry Monakhov if (IS_ERR(path))
52138dc79ec4SDmitry Monakhov return PTR_ERR(path);
52149eb79482SNamjae Jeon depth = path->p_depth;
52159eb79482SNamjae Jeon extent = path[depth].p_ext;
52168dc79ec4SDmitry Monakhov if (extent) {
5217847c6c42SZheng Liu ex_start = le32_to_cpu(extent->ee_block);
5218847c6c42SZheng Liu ex_end = le32_to_cpu(extent->ee_block) +
5219847c6c42SZheng Liu ext4_ext_get_actual_len(extent);
52208dc79ec4SDmitry Monakhov } else {
52218dc79ec4SDmitry Monakhov ex_start = 0;
52228dc79ec4SDmitry Monakhov ex_end = 0;
52238dc79ec4SDmitry Monakhov }
52249eb79482SNamjae Jeon
52259eb79482SNamjae Jeon if ((start == ex_start && shift > ex_start) ||
5226331573feSNamjae Jeon (shift > start - ex_end)) {
5227349fa7d6SEric Biggers ret = -EINVAL;
5228349fa7d6SEric Biggers goto out;
5229349fa7d6SEric Biggers }
5230349fa7d6SEric Biggers } else {
5231349fa7d6SEric Biggers if (shift > EXT_MAX_BLOCKS -
5232349fa7d6SEric Biggers (stop + ext4_ext_get_actual_len(extent))) {
5233349fa7d6SEric Biggers ret = -EINVAL;
5234349fa7d6SEric Biggers goto out;
5235331573feSNamjae Jeon }
5236331573feSNamjae Jeon }
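	/*
	 * Example of the left-shift check above (illustrative): if the
	 * extent to the left of @start covers blocks up to 90 (so ex_end =
	 * 91) and start = 100, the hole holds only 9 blocks, and any shift
	 * greater than 9 is rejected with -EINVAL.
	 */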
5237331573feSNamjae Jeon
5238331573feSNamjae Jeon /*
5239331573feSNamjae Jeon * In case of a left shift, iterator points to start and is increased
5240331573feSNamjae Jeon * until we reach stop. In case of a right shift, iterator points to stop
5241331573feSNamjae Jeon * and is decreased until we reach start.
5242331573feSNamjae Jeon */
52431811bc40Syangerkun again:
5244f6b1a1cfSBaokun Li ret = 0;
5245331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT)
5246331573feSNamjae Jeon iterator = &start;
5247331573feSNamjae Jeon else
5248331573feSNamjae Jeon iterator = &stop;
52499eb79482SNamjae Jeon
52501811bc40Syangerkun if (tmp != EXT_MAX_BLOCKS)
52511811bc40Syangerkun *iterator = tmp;
52521811bc40Syangerkun
52532a9b8cbaSRoman Pen /*
52542a9b8cbaSRoman Pen * It's safe to start updating extents. Start and stop are unsigned, so
52552a9b8cbaSRoman Pen * in the case of a right shift, if an extent with block 0 is reached,
52562a9b8cbaSRoman Pen * iterator becomes NULL to indicate the end of the loop.
52572a9b8cbaSRoman Pen */
52582a9b8cbaSRoman Pen while (iterator && start <= stop) {
525903e916faSRoman Pen path = ext4_find_extent(inode, *iterator, &path,
526003e916faSRoman Pen EXT4_EX_NOCACHE);
52619eb79482SNamjae Jeon if (IS_ERR(path))
52629eb79482SNamjae Jeon return PTR_ERR(path);
52639eb79482SNamjae Jeon depth = path->p_depth;
52649eb79482SNamjae Jeon extent = path[depth].p_ext;
5265a18ed359SDmitry Monakhov if (!extent) {
5266a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
5267331573feSNamjae Jeon (unsigned long) *iterator);
52686a797d27SDarrick J. Wong return -EFSCORRUPTED;
5269a18ed359SDmitry Monakhov }
5270331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT && *iterator >
5271331573feSNamjae Jeon le32_to_cpu(extent->ee_block)) {
52729eb79482SNamjae Jeon /* Hole, move to the next extent */
5273f8fb4f41SDmitry Monakhov if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5274f8fb4f41SDmitry Monakhov path[depth].p_ext++;
5275f8fb4f41SDmitry Monakhov } else {
5276331573feSNamjae Jeon *iterator = ext4_ext_next_allocated_block(path);
5277f8fb4f41SDmitry Monakhov continue;
52789eb79482SNamjae Jeon }
52799eb79482SNamjae Jeon }
5280331573feSNamjae Jeon
52811811bc40Syangerkun tmp = *iterator;
5282331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) {
5283331573feSNamjae Jeon extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5284331573feSNamjae Jeon *iterator = le32_to_cpu(extent->ee_block) +
5285331573feSNamjae Jeon ext4_ext_get_actual_len(extent);
5286331573feSNamjae Jeon } else {
5287331573feSNamjae Jeon extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
5288f6b1a1cfSBaokun Li if (le32_to_cpu(extent->ee_block) > start)
52892a9b8cbaSRoman Pen *iterator = le32_to_cpu(extent->ee_block) - 1;
5290f6b1a1cfSBaokun Li else if (le32_to_cpu(extent->ee_block) == start)
52912a9b8cbaSRoman Pen iterator = NULL;
5292f6b1a1cfSBaokun Li else {
5293f6b1a1cfSBaokun Li extent = EXT_LAST_EXTENT(path[depth].p_hdr);
5294f6b1a1cfSBaokun Li while (le32_to_cpu(extent->ee_block) >= start)
5295f6b1a1cfSBaokun Li extent--;
5296f6b1a1cfSBaokun Li
5297f6b1a1cfSBaokun Li if (extent == EXT_LAST_EXTENT(path[depth].p_hdr))
5298f6b1a1cfSBaokun Li break;
5299f6b1a1cfSBaokun Li
5300331573feSNamjae Jeon extent++;
5301f6b1a1cfSBaokun Li iterator = NULL;
5302f6b1a1cfSBaokun Li }
5303331573feSNamjae Jeon path[depth].p_ext = extent;
5304331573feSNamjae Jeon }
53059eb79482SNamjae Jeon ret = ext4_ext_shift_path_extents(path, shift, inode,
5306331573feSNamjae Jeon handle, SHIFT);
53071811bc40Syangerkun /* iterator can be NULL which means we should break */
53081811bc40Syangerkun if (ret == -EAGAIN)
53091811bc40Syangerkun goto again;
53109eb79482SNamjae Jeon if (ret)
53119eb79482SNamjae Jeon break;
53129eb79482SNamjae Jeon }
5313ee4bd0d9STheodore Ts'o out:
53147ff5fddaSYe Bin ext4_free_ext_path(path);
53159eb79482SNamjae Jeon return ret;
53169eb79482SNamjae Jeon }
53179eb79482SNamjae Jeon
53189eb79482SNamjae Jeon /*
53199eb79482SNamjae Jeon * ext4_collapse_range:
53209eb79482SNamjae Jeon * This implements fallocate's collapse range functionality for ext4.
53219eb79482SNamjae Jeon * Returns: 0 on success, non-zero on error.
53229eb79482SNamjae Jeon */
5323ad5cd4f4SDarrick J. Wong static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
53249eb79482SNamjae Jeon {
5325ad5cd4f4SDarrick J. Wong struct inode *inode = file_inode(file);
53269eb79482SNamjae Jeon struct super_block *sb = inode->i_sb;
5327d4f5258eSJan Kara struct address_space *mapping = inode->i_mapping;
53289eb79482SNamjae Jeon ext4_lblk_t punch_start, punch_stop;
53299eb79482SNamjae Jeon handle_t *handle;
53309eb79482SNamjae Jeon unsigned int credits;
5331a8680e0dSNamjae Jeon loff_t new_size, ioffset;
53329eb79482SNamjae Jeon int ret;
53339eb79482SNamjae Jeon
5334b9576fc3STheodore Ts'o /*
5335b9576fc3STheodore Ts'o * We need to test this early because xfstests assumes that a
5336b9576fc3STheodore Ts'o * collapse range of (0, 1) will return EOPNOTSUPP if the file
5337b9576fc3STheodore Ts'o * system does not support collapse range.
5338b9576fc3STheodore Ts'o */
5339b9576fc3STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5340b9576fc3STheodore Ts'o return -EOPNOTSUPP;
5341b9576fc3STheodore Ts'o
53429b02e498SEric Biggers /* Collapse range works only on fs cluster size aligned regions. */
53439b02e498SEric Biggers if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
53449eb79482SNamjae Jeon return -EINVAL;
53459eb79482SNamjae Jeon
53469eb79482SNamjae Jeon trace_ext4_collapse_range(inode, offset, len);
53479eb79482SNamjae Jeon
53489eb79482SNamjae Jeon punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
53499eb79482SNamjae Jeon punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
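	/*
	 * Example (illustrative), assuming 4 KiB blocks and clusters:
	 * collapsing offset = 8192, len = 4096 gives punch_start = 2 and
	 * punch_stop = 3, so logical block 2 is removed and every extent
	 * from block 3 onwards is shifted left by one block.
	 */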
53509eb79482SNamjae Jeon
53515955102cSAl Viro inode_lock(inode);
535223fffa92SLukas Czerner /*
535323fffa92SLukas Czerner * There is no need for the collapse range to overlap EOF; in that case
535423fffa92SLukas Czerner * it would effectively be a truncate operation
535523fffa92SLukas Czerner */
53569b02e498SEric Biggers if (offset + len >= inode->i_size) {
535723fffa92SLukas Czerner ret = -EINVAL;
535823fffa92SLukas Czerner goto out_mutex;
535923fffa92SLukas Czerner }
536023fffa92SLukas Czerner
53619eb79482SNamjae Jeon /* Currently just for extent based files */
53629eb79482SNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
53639eb79482SNamjae Jeon ret = -EOPNOTSUPP;
53649eb79482SNamjae Jeon goto out_mutex;
53659eb79482SNamjae Jeon }
53669eb79482SNamjae Jeon
53679eb79482SNamjae Jeon /* Wait for existing dio to complete */
53689eb79482SNamjae Jeon inode_dio_wait(inode);
53699eb79482SNamjae Jeon
5370ad5cd4f4SDarrick J. Wong ret = file_modified(file);
5371ad5cd4f4SDarrick J. Wong if (ret)
5372ad5cd4f4SDarrick J. Wong goto out_mutex;
5373ad5cd4f4SDarrick J. Wong
5374ea3d7209SJan Kara /*
5375ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have released from
5376ea3d7209SJan Kara * page cache.
5377ea3d7209SJan Kara */
5378d4f5258eSJan Kara filemap_invalidate_lock(mapping);
5379430657b6SRoss Zwisler
5380430657b6SRoss Zwisler ret = ext4_break_layouts(inode);
5381430657b6SRoss Zwisler if (ret)
5382430657b6SRoss Zwisler goto out_mmap;
5383430657b6SRoss Zwisler
538432ebffd3SJan Kara /*
538532ebffd3SJan Kara * Need to round down offset to be aligned with page size boundary
538632ebffd3SJan Kara * for page size > block size.
538732ebffd3SJan Kara */
538832ebffd3SJan Kara ioffset = round_down(offset, PAGE_SIZE);
538932ebffd3SJan Kara /*
539032ebffd3SJan Kara * Write tail of the last page before removed range since it will get
539132ebffd3SJan Kara * removed from the page cache below.
539232ebffd3SJan Kara */
5393d4f5258eSJan Kara ret = filemap_write_and_wait_range(mapping, ioffset, offset);
539432ebffd3SJan Kara if (ret)
539532ebffd3SJan Kara goto out_mmap;
539632ebffd3SJan Kara /*
539732ebffd3SJan Kara * Write out data that will be shifted, to preserve it when discarding
539832ebffd3SJan Kara * the page cache below. We are also protected from pages becoming dirty
5399d4f5258eSJan Kara * by i_rwsem and invalidate_lock.
540032ebffd3SJan Kara */
5401d4f5258eSJan Kara ret = filemap_write_and_wait_range(mapping, offset + len,
540232ebffd3SJan Kara LLONG_MAX);
540332ebffd3SJan Kara if (ret)
540432ebffd3SJan Kara goto out_mmap;
5405ea3d7209SJan Kara truncate_pagecache(inode, ioffset);
5406ea3d7209SJan Kara
54079eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode);
54089eb79482SNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
54099eb79482SNamjae Jeon if (IS_ERR(handle)) {
54109eb79482SNamjae Jeon ret = PTR_ERR(handle);
5411ea3d7209SJan Kara goto out_mmap;
54129eb79482SNamjae Jeon }
5413e85c81baSXin Yin ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
54149eb79482SNamjae Jeon
54159eb79482SNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem);
541627bc446eSbrookxu ext4_discard_preallocations(inode, 0);
5417ed5d285bSBaokun Li ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start);
54189eb79482SNamjae Jeon
54199eb79482SNamjae Jeon ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
54209eb79482SNamjae Jeon if (ret) {
54219eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem);
54229eb79482SNamjae Jeon goto out_stop;
54239eb79482SNamjae Jeon }
542427bc446eSbrookxu ext4_discard_preallocations(inode, 0);
54259eb79482SNamjae Jeon
54269eb79482SNamjae Jeon ret = ext4_ext_shift_extents(inode, handle, punch_stop,
5427331573feSNamjae Jeon punch_stop - punch_start, SHIFT_LEFT);
54289eb79482SNamjae Jeon if (ret) {
54299eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem);
54309eb79482SNamjae Jeon goto out_stop;
54319eb79482SNamjae Jeon }
54329eb79482SNamjae Jeon
54339b02e498SEric Biggers new_size = inode->i_size - len;
54349337d5d3SLukas Czerner i_size_write(inode, new_size);
54359eb79482SNamjae Jeon EXT4_I(inode)->i_disksize = new_size;
54369eb79482SNamjae Jeon
54379eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem);
54389eb79482SNamjae Jeon if (IS_SYNC(inode))
54399eb79482SNamjae Jeon ext4_handle_sync(handle);
5440fa42d5f1SJeff Layton inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
54414209ae12SHarshad Shirwadkar ret = ext4_mark_inode_dirty(handle, inode);
544267a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1);
54439eb79482SNamjae Jeon
54449eb79482SNamjae Jeon out_stop:
54459eb79482SNamjae Jeon ext4_journal_stop(handle);
5446ea3d7209SJan Kara out_mmap:
5447d4f5258eSJan Kara filemap_invalidate_unlock(mapping);
54489eb79482SNamjae Jeon out_mutex:
54495955102cSAl Viro inode_unlock(inode);
54509eb79482SNamjae Jeon return ret;
54519eb79482SNamjae Jeon }
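/*
 * An illustrative sketch, not part of this file's build: how the collapse
 * path above is reached from userspace via fallocate(2). The fd and the
 * 4096-byte offset/len are assumptions for a 4k-block filesystem without
 * bigalloc; both values must be cluster-size aligned or the -EINVAL
 * check above fires, and the range must end before EOF.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	static int collapse_second_block(int fd)
 *	{
 *		// drop bytes [4096, 8192) and shift the rest of the file left
 *		return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 4096);
 *	}
 */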
5452fcf6b1b7SDmitry Monakhov
5453331573feSNamjae Jeon /*
5454331573feSNamjae Jeon * ext4_insert_range:
5455331573feSNamjae Jeon * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate.
5456331573feSNamjae Jeon * The data blocks from @offset to EOF are shifted @len bytes to the
5457331573feSNamjae Jeon * right to create a hole in the @inode. The inode size is increased
5458331573feSNamjae Jeon * by @len bytes.
5459331573feSNamjae Jeon * Returns 0 on success, error otherwise.
5460331573feSNamjae Jeon */
5461ad5cd4f4SDarrick J. Wong static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
5462331573feSNamjae Jeon {
5463ad5cd4f4SDarrick J. Wong struct inode *inode = file_inode(file);
5464331573feSNamjae Jeon struct super_block *sb = inode->i_sb;
5465d4f5258eSJan Kara struct address_space *mapping = inode->i_mapping;
5466331573feSNamjae Jeon handle_t *handle;
5467331573feSNamjae Jeon struct ext4_ext_path *path;
5468331573feSNamjae Jeon struct ext4_extent *extent;
5469331573feSNamjae Jeon ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0;
5470331573feSNamjae Jeon unsigned int credits, ee_len;
5471331573feSNamjae Jeon int ret = 0, depth, split_flag = 0;
5472331573feSNamjae Jeon loff_t ioffset;
5473331573feSNamjae Jeon
5474331573feSNamjae Jeon /*
5475331573feSNamjae Jeon * We need to test this early because xfstests assumes that an
5476331573feSNamjae Jeon * insert range of (0, 1) will return EOPNOTSUPP if the file
5477331573feSNamjae Jeon * system does not support insert range.
5478331573feSNamjae Jeon */
5479331573feSNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
5480331573feSNamjae Jeon return -EOPNOTSUPP;
5481331573feSNamjae Jeon
54829b02e498SEric Biggers /* Insert range works only on fs cluster size aligned regions. */
54839b02e498SEric Biggers if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb)))
5484331573feSNamjae Jeon return -EINVAL;
5485331573feSNamjae Jeon
5486331573feSNamjae Jeon trace_ext4_insert_range(inode, offset, len);
5487331573feSNamjae Jeon
5488331573feSNamjae Jeon offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5489331573feSNamjae Jeon len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb);
5490331573feSNamjae Jeon
54915955102cSAl Viro inode_lock(inode);
5492331573feSNamjae Jeon /* Currently just for extent based files */
5493331573feSNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5494331573feSNamjae Jeon ret = -EOPNOTSUPP;
5495331573feSNamjae Jeon goto out_mutex;
5496331573feSNamjae Jeon }
5497331573feSNamjae Jeon
54989b02e498SEric Biggers /* Check whether the maximum file size would be exceeded */
54999b02e498SEric Biggers if (len > inode->i_sb->s_maxbytes - inode->i_size) {
5500331573feSNamjae Jeon ret = -EFBIG;
5501331573feSNamjae Jeon goto out_mutex;
5502331573feSNamjae Jeon }
5503331573feSNamjae Jeon
55049b02e498SEric Biggers /* Offset must be less than i_size */
55059b02e498SEric Biggers if (offset >= inode->i_size) {
5506331573feSNamjae Jeon ret = -EINVAL;
5507331573feSNamjae Jeon goto out_mutex;
5508331573feSNamjae Jeon }
5509331573feSNamjae Jeon
5510331573feSNamjae Jeon /* Wait for existing dio to complete */
5511331573feSNamjae Jeon inode_dio_wait(inode);
5512331573feSNamjae Jeon
5513ad5cd4f4SDarrick J. Wong ret = file_modified(file);
5514ad5cd4f4SDarrick J. Wong if (ret)
5515ad5cd4f4SDarrick J. Wong goto out_mutex;
5516ad5cd4f4SDarrick J. Wong
5517ea3d7209SJan Kara /*
5518ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have released from
5519ea3d7209SJan Kara * page cache.
5520ea3d7209SJan Kara */
5521d4f5258eSJan Kara filemap_invalidate_lock(mapping);
5522430657b6SRoss Zwisler
5523430657b6SRoss Zwisler ret = ext4_break_layouts(inode);
5524430657b6SRoss Zwisler if (ret)
5525430657b6SRoss Zwisler goto out_mmap;
5526430657b6SRoss Zwisler
552732ebffd3SJan Kara /*
552832ebffd3SJan Kara * Need to round down to align start offset to page size boundary
552932ebffd3SJan Kara * for page size > block size.
553032ebffd3SJan Kara */
553132ebffd3SJan Kara ioffset = round_down(offset, PAGE_SIZE);
553232ebffd3SJan Kara /* Write out all dirty pages */
553332ebffd3SJan Kara ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
553432ebffd3SJan Kara LLONG_MAX);
553532ebffd3SJan Kara if (ret)
553632ebffd3SJan Kara goto out_mmap;
5537ea3d7209SJan Kara truncate_pagecache(inode, ioffset);
5538ea3d7209SJan Kara
5539331573feSNamjae Jeon credits = ext4_writepage_trans_blocks(inode);
5540331573feSNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5541331573feSNamjae Jeon if (IS_ERR(handle)) {
5542331573feSNamjae Jeon ret = PTR_ERR(handle);
5543ea3d7209SJan Kara goto out_mmap;
5544331573feSNamjae Jeon }
5545e85c81baSXin Yin ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
5546331573feSNamjae Jeon
5547331573feSNamjae Jeon /* Expand the file to avoid data loss if there is an error while shifting */
5548331573feSNamjae Jeon inode->i_size += len;
5549331573feSNamjae Jeon EXT4_I(inode)->i_disksize += len;
5550fa42d5f1SJeff Layton inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
5551331573feSNamjae Jeon ret = ext4_mark_inode_dirty(handle, inode);
5552331573feSNamjae Jeon if (ret)
5553331573feSNamjae Jeon goto out_stop;
5554331573feSNamjae Jeon
5555331573feSNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem);
555627bc446eSbrookxu ext4_discard_preallocations(inode, 0);
5557331573feSNamjae Jeon
5558331573feSNamjae Jeon path = ext4_find_extent(inode, offset_lblk, NULL, 0);
5559331573feSNamjae Jeon if (IS_ERR(path)) {
5560331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem);
5561f4308d8eSBaokun Li ret = PTR_ERR(path);
5562331573feSNamjae Jeon goto out_stop;
5563331573feSNamjae Jeon }
5564331573feSNamjae Jeon
5565331573feSNamjae Jeon depth = ext_depth(inode);
5566331573feSNamjae Jeon extent = path[depth].p_ext;
5567331573feSNamjae Jeon if (extent) {
5568331573feSNamjae Jeon ee_start_lblk = le32_to_cpu(extent->ee_block);
5569331573feSNamjae Jeon ee_len = ext4_ext_get_actual_len(extent);
5570331573feSNamjae Jeon
5571331573feSNamjae Jeon /*
5572331573feSNamjae Jeon * If offset_lblk is not the starting block of the extent, split
5573331573feSNamjae Jeon * the extent at offset_lblk
5574331573feSNamjae Jeon */
5575331573feSNamjae Jeon if ((offset_lblk > ee_start_lblk) &&
5576331573feSNamjae Jeon (offset_lblk < (ee_start_lblk + ee_len))) {
5577331573feSNamjae Jeon if (ext4_ext_is_unwritten(extent))
5578331573feSNamjae Jeon split_flag = EXT4_EXT_MARK_UNWRIT1 |
5579331573feSNamjae Jeon EXT4_EXT_MARK_UNWRIT2;
5580331573feSNamjae Jeon ret = ext4_split_extent_at(handle, inode, &path,
5581331573feSNamjae Jeon offset_lblk, split_flag,
5582331573feSNamjae Jeon EXT4_EX_NOCACHE |
5583331573feSNamjae Jeon EXT4_GET_BLOCKS_PRE_IO |
5584331573feSNamjae Jeon EXT4_GET_BLOCKS_METADATA_NOFAIL);
5585331573feSNamjae Jeon }
5586331573feSNamjae Jeon
55877ff5fddaSYe Bin ext4_free_ext_path(path);
5588331573feSNamjae Jeon if (ret < 0) {
5589331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem);
5590331573feSNamjae Jeon goto out_stop;
5591331573feSNamjae Jeon }
5592edf15aa1SFabian Frederick } else {
55937ff5fddaSYe Bin ext4_free_ext_path(path);
5594331573feSNamjae Jeon }
5595331573feSNamjae Jeon
5596ed5d285bSBaokun Li ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk);
5597331573feSNamjae Jeon
5598331573feSNamjae Jeon /*
5599331573feSNamjae Jeon * If offset_lblk lies in a hole at the start of the file, use
5600331573feSNamjae Jeon * ee_start_lblk to shift extents
5601331573feSNamjae Jeon */
5602331573feSNamjae Jeon ret = ext4_ext_shift_extents(inode, handle,
560366267814SJiangshan Yi max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT);
5604331573feSNamjae Jeon
5605331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem);
5606331573feSNamjae Jeon if (IS_SYNC(inode))
5607331573feSNamjae Jeon ext4_handle_sync(handle);
560867a7d5f5SJan Kara if (ret >= 0)
560967a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1);
5610331573feSNamjae Jeon
5611331573feSNamjae Jeon out_stop:
5612331573feSNamjae Jeon ext4_journal_stop(handle);
5613ea3d7209SJan Kara out_mmap:
5614d4f5258eSJan Kara filemap_invalidate_unlock(mapping);
5615331573feSNamjae Jeon out_mutex:
56165955102cSAl Viro inode_unlock(inode);
5617331573feSNamjae Jeon return ret;
5618331573feSNamjae Jeon }
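/*
 * An illustrative sketch, not part of this file's build: the matching
 * userspace trigger for the insert path above. As with collapse, the
 * 4096-byte offset/len are assumptions for a 4k-block filesystem and
 * must be cluster-size aligned; offset must also be below i_size.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	static int insert_hole_at_4k(int fd)
 *	{
 *		// shift bytes from offset 4096 onwards right by 4096,
 *		// leaving a hole and growing i_size by 4096
 *		return fallocate(fd, FALLOC_FL_INSERT_RANGE, 4096, 4096);
 *	}
 */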
5619331573feSNamjae Jeon
5620fcf6b1b7SDmitry Monakhov /**
5621c60990b3STheodore Ts'o * ext4_swap_extents() - Swap extents between two inodes
5622c60990b3STheodore Ts'o * @handle: handle for this transaction
5623fcf6b1b7SDmitry Monakhov * @inode1: First inode
5624fcf6b1b7SDmitry Monakhov * @inode2: Second inode
5625fcf6b1b7SDmitry Monakhov * @lblk1: Start block for first inode
5626fcf6b1b7SDmitry Monakhov * @lblk2: Start block for second inode
5627fcf6b1b7SDmitry Monakhov * @count: Number of blocks to swap
5628dcae058aSzhenwei.pi * @unwritten: Mark second inode's extents as unwritten after swap
5629fcf6b1b7SDmitry Monakhov * @erp: Pointer to save error value
5630fcf6b1b7SDmitry Monakhov *
5631fcf6b1b7SDmitry Monakhov * This helper routine does exactly what it promises: swap extents. All other
5632fcf6b1b7SDmitry Monakhov * concerns such as page-cache locking consistency, bh mapping consistency or
5633fcf6b1b7SDmitry Monakhov * copying of extent data must be handled by the caller.
5634fcf6b1b7SDmitry Monakhov * Locking:
5635f340b3d9Shongnanli * i_rwsem is held for both inodes
5636fcf6b1b7SDmitry Monakhov * i_data_sem is locked for write for both inodes
5637fcf6b1b7SDmitry Monakhov * Assumptions:
5638fcf6b1b7SDmitry Monakhov * All pages from requested range are locked for both inodes
5639fcf6b1b7SDmitry Monakhov */
5640fcf6b1b7SDmitry Monakhov int
5641fcf6b1b7SDmitry Monakhov ext4_swap_extents(handle_t *handle, struct inode *inode1,
5642fcf6b1b7SDmitry Monakhov struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5643fcf6b1b7SDmitry Monakhov ext4_lblk_t count, int unwritten, int *erp)
5644fcf6b1b7SDmitry Monakhov {
5645fcf6b1b7SDmitry Monakhov struct ext4_ext_path *path1 = NULL;
5646fcf6b1b7SDmitry Monakhov struct ext4_ext_path *path2 = NULL;
5647fcf6b1b7SDmitry Monakhov int replaced_count = 0;
5648fcf6b1b7SDmitry Monakhov
5649fcf6b1b7SDmitry Monakhov BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5650fcf6b1b7SDmitry Monakhov BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
56515955102cSAl Viro BUG_ON(!inode_is_locked(inode1));
56525955102cSAl Viro BUG_ON(!inode_is_locked(inode2));
5653fcf6b1b7SDmitry Monakhov
5654ed5d285bSBaokun Li ext4_es_remove_extent(inode1, lblk1, count);
5655ed5d285bSBaokun Li ext4_es_remove_extent(inode2, lblk2, count);
5656fcf6b1b7SDmitry Monakhov
5657fcf6b1b7SDmitry Monakhov while (count) {
5658fcf6b1b7SDmitry Monakhov struct ext4_extent *ex1, *ex2, tmp_ex;
5659fcf6b1b7SDmitry Monakhov ext4_lblk_t e1_blk, e2_blk;
5660fcf6b1b7SDmitry Monakhov int e1_len, e2_len, len;
5661fcf6b1b7SDmitry Monakhov int split = 0;
5662fcf6b1b7SDmitry Monakhov
5663ed8a1a76STheodore Ts'o path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5664a1c83681SViresh Kumar if (IS_ERR(path1)) {
5665fcf6b1b7SDmitry Monakhov *erp = PTR_ERR(path1);
566619008f6dSTheodore Ts'o path1 = NULL;
566719008f6dSTheodore Ts'o finish:
566819008f6dSTheodore Ts'o count = 0;
566919008f6dSTheodore Ts'o goto repeat;
5670fcf6b1b7SDmitry Monakhov }
5671ed8a1a76STheodore Ts'o path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5672a1c83681SViresh Kumar if (IS_ERR(path2)) {
5673fcf6b1b7SDmitry Monakhov *erp = PTR_ERR(path2);
567419008f6dSTheodore Ts'o path2 = NULL;
567519008f6dSTheodore Ts'o goto finish;
5676fcf6b1b7SDmitry Monakhov }
5677fcf6b1b7SDmitry Monakhov ex1 = path1[path1->p_depth].p_ext;
5678fcf6b1b7SDmitry Monakhov ex2 = path2[path2->p_depth].p_ext;
5679e4d7f2d3SKeyur Patel /* Do we have something to swap? */
5680fcf6b1b7SDmitry Monakhov if (unlikely(!ex2 || !ex1))
568119008f6dSTheodore Ts'o goto finish;
5682fcf6b1b7SDmitry Monakhov
5683fcf6b1b7SDmitry Monakhov e1_blk = le32_to_cpu(ex1->ee_block);
5684fcf6b1b7SDmitry Monakhov e2_blk = le32_to_cpu(ex2->ee_block);
5685fcf6b1b7SDmitry Monakhov e1_len = ext4_ext_get_actual_len(ex1);
5686fcf6b1b7SDmitry Monakhov e2_len = ext4_ext_get_actual_len(ex2);
5687fcf6b1b7SDmitry Monakhov
5688fcf6b1b7SDmitry Monakhov /* Hole handling */
5689fcf6b1b7SDmitry Monakhov if (!in_range(lblk1, e1_blk, e1_len) ||
5690fcf6b1b7SDmitry Monakhov !in_range(lblk2, e2_blk, e2_len)) {
5691fcf6b1b7SDmitry Monakhov ext4_lblk_t next1, next2;
5692fcf6b1b7SDmitry Monakhov
5693fcf6b1b7SDmitry Monakhov /* if hole after extent, then go to next extent */
5694fcf6b1b7SDmitry Monakhov next1 = ext4_ext_next_allocated_block(path1);
5695fcf6b1b7SDmitry Monakhov next2 = ext4_ext_next_allocated_block(path2);
5696fcf6b1b7SDmitry Monakhov /* If hole before extent, then shift to that extent */
5697fcf6b1b7SDmitry Monakhov if (e1_blk > lblk1)
5698fcf6b1b7SDmitry Monakhov next1 = e1_blk;
5699fcf6b1b7SDmitry Monakhov if (e2_blk > lblk2)
57004e562013SManinder Singh next2 = e2_blk;
5701fcf6b1b7SDmitry Monakhov /* Do we have something to swap? */
5702fcf6b1b7SDmitry Monakhov if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
570319008f6dSTheodore Ts'o goto finish;
5704fcf6b1b7SDmitry Monakhov /* Move to the rightmost boundary */
5705fcf6b1b7SDmitry Monakhov len = next1 - lblk1;
5706fcf6b1b7SDmitry Monakhov if (len < next2 - lblk2)
5707fcf6b1b7SDmitry Monakhov len = next2 - lblk2;
5708fcf6b1b7SDmitry Monakhov if (len > count)
5709fcf6b1b7SDmitry Monakhov len = count;
5710fcf6b1b7SDmitry Monakhov lblk1 += len;
5711fcf6b1b7SDmitry Monakhov lblk2 += len;
5712fcf6b1b7SDmitry Monakhov count -= len;
5713fcf6b1b7SDmitry Monakhov goto repeat;
5714fcf6b1b7SDmitry Monakhov }
5715fcf6b1b7SDmitry Monakhov
5716fcf6b1b7SDmitry Monakhov /* Prepare left boundary */
5717fcf6b1b7SDmitry Monakhov if (e1_blk < lblk1) {
5718fcf6b1b7SDmitry Monakhov split = 1;
5719fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode1,
5720dfe50809STheodore Ts'o &path1, lblk1, 0);
572119008f6dSTheodore Ts'o if (unlikely(*erp))
572219008f6dSTheodore Ts'o goto finish;
5723fcf6b1b7SDmitry Monakhov }
5724fcf6b1b7SDmitry Monakhov if (e2_blk < lblk2) {
5725fcf6b1b7SDmitry Monakhov split = 1;
5726fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode2,
5727dfe50809STheodore Ts'o &path2, lblk2, 0);
572819008f6dSTheodore Ts'o if (unlikely(*erp))
572919008f6dSTheodore Ts'o goto finish;
5730fcf6b1b7SDmitry Monakhov }
5731dfe50809STheodore Ts'o /* ext4_split_extent_at() may result in a leaf extent split,
5732fcf6b1b7SDmitry Monakhov * so the path must be revalidated. */
5733fcf6b1b7SDmitry Monakhov if (split)
5734fcf6b1b7SDmitry Monakhov goto repeat;
5735fcf6b1b7SDmitry Monakhov
5736fcf6b1b7SDmitry Monakhov /* Prepare right boundary */
5737fcf6b1b7SDmitry Monakhov len = count;
5738fcf6b1b7SDmitry Monakhov if (len > e1_blk + e1_len - lblk1)
5739fcf6b1b7SDmitry Monakhov len = e1_blk + e1_len - lblk1;
5740fcf6b1b7SDmitry Monakhov if (len > e2_blk + e2_len - lblk2)
5741fcf6b1b7SDmitry Monakhov len = e2_blk + e2_len - lblk2;
5742fcf6b1b7SDmitry Monakhov
5743fcf6b1b7SDmitry Monakhov if (len != e1_len) {
5744fcf6b1b7SDmitry Monakhov split = 1;
5745fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode1,
5746dfe50809STheodore Ts'o &path1, lblk1 + len, 0);
574719008f6dSTheodore Ts'o if (unlikely(*erp))
574819008f6dSTheodore Ts'o goto finish;
5749fcf6b1b7SDmitry Monakhov }
5750fcf6b1b7SDmitry Monakhov if (len != e2_len) {
5751fcf6b1b7SDmitry Monakhov split = 1;
5752fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode2,
5753dfe50809STheodore Ts'o &path2, lblk2 + len, 0);
5754fcf6b1b7SDmitry Monakhov if (*erp)
575519008f6dSTheodore Ts'o goto finish;
5756fcf6b1b7SDmitry Monakhov }
5757dfe50809STheodore Ts'o /* ext4_split_extent_at() may result in a leaf extent split,
5758fcf6b1b7SDmitry Monakhov * so the path must be revalidated. */
5759fcf6b1b7SDmitry Monakhov if (split)
5760fcf6b1b7SDmitry Monakhov goto repeat;
5761fcf6b1b7SDmitry Monakhov
5762fcf6b1b7SDmitry Monakhov BUG_ON(e2_len != e1_len);
5763fcf6b1b7SDmitry Monakhov *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
576419008f6dSTheodore Ts'o if (unlikely(*erp))
576519008f6dSTheodore Ts'o goto finish;
5766fcf6b1b7SDmitry Monakhov *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
576719008f6dSTheodore Ts'o if (unlikely(*erp))
576819008f6dSTheodore Ts'o goto finish;
5769fcf6b1b7SDmitry Monakhov
5770fcf6b1b7SDmitry Monakhov /* Both extents are fully inside boundaries. Swap them now */
5771fcf6b1b7SDmitry Monakhov tmp_ex = *ex1;
5772fcf6b1b7SDmitry Monakhov ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5773fcf6b1b7SDmitry Monakhov ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5774fcf6b1b7SDmitry Monakhov ex1->ee_len = cpu_to_le16(e2_len);
5775fcf6b1b7SDmitry Monakhov ex2->ee_len = cpu_to_le16(e1_len);
5776fcf6b1b7SDmitry Monakhov if (unwritten)
5777fcf6b1b7SDmitry Monakhov ext4_ext_mark_unwritten(ex2);
5778fcf6b1b7SDmitry Monakhov if (ext4_ext_is_unwritten(&tmp_ex))
5779fcf6b1b7SDmitry Monakhov ext4_ext_mark_unwritten(ex1);
5780fcf6b1b7SDmitry Monakhov
5781fcf6b1b7SDmitry Monakhov ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5782fcf6b1b7SDmitry Monakhov ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5783fcf6b1b7SDmitry Monakhov *erp = ext4_ext_dirty(handle, inode2, path2 +
5784fcf6b1b7SDmitry Monakhov path2->p_depth);
578519008f6dSTheodore Ts'o if (unlikely(*erp))
578619008f6dSTheodore Ts'o goto finish;
5787fcf6b1b7SDmitry Monakhov *erp = ext4_ext_dirty(handle, inode1, path1 +
5788fcf6b1b7SDmitry Monakhov path1->p_depth);
5789fcf6b1b7SDmitry Monakhov /*
5790fcf6b1b7SDmitry Monakhov * Looks scary, huh? The second inode already points to the new
5791fcf6b1b7SDmitry Monakhov * blocks, and it was successfully dirtied. But luckily an error
5792fcf6b1b7SDmitry Monakhov * here can only be caused by a journal error, so the full
5793fcf6b1b7SDmitry Monakhov * transaction will be aborted anyway.
5794fcf6b1b7SDmitry Monakhov */
579519008f6dSTheodore Ts'o if (unlikely(*erp))
579619008f6dSTheodore Ts'o goto finish;
5797fcf6b1b7SDmitry Monakhov lblk1 += len;
5798fcf6b1b7SDmitry Monakhov lblk2 += len;
5799fcf6b1b7SDmitry Monakhov replaced_count += len;
5800fcf6b1b7SDmitry Monakhov count -= len;
5801fcf6b1b7SDmitry Monakhov
5802fcf6b1b7SDmitry Monakhov repeat:
58037ff5fddaSYe Bin ext4_free_ext_path(path1);
58047ff5fddaSYe Bin ext4_free_ext_path(path2);
5805b7ea89adSTheodore Ts'o path1 = path2 = NULL;
5806fcf6b1b7SDmitry Monakhov }
5807fcf6b1b7SDmitry Monakhov return replaced_count;
5808fcf6b1b7SDmitry Monakhov }
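/*
 * A hedged sketch of the caller contract documented above; the real
 * caller is the online-defrag path (EXT4_IOC_MOVE_EXT -> ext4_move_extents()),
 * and the local names here are illustrative only:
 *
 *	lock_two_nondirectories(orig, donor);		// i_rwsem on both
 *	down_write(&EXT4_I(orig)->i_data_sem);
 *	down_write_nested(&EXT4_I(donor)->i_data_sem, I_DATA_SEM_OTHER);
 *	replaced = ext4_swap_extents(handle, orig, donor, lblk1, lblk2,
 *				     count, unwritten, &err);
 *	up_write(&EXT4_I(donor)->i_data_sem);
 *	up_write(&EXT4_I(orig)->i_data_sem);
 *	unlock_two_nondirectories(orig, donor);
 *
 * Page-cache consistency for the swapped range (locking the pages and
 * copying data where needed) remains the caller's job, as noted above.
 */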
58090b02f4c0SEric Whitney
58100b02f4c0SEric Whitney /*
58110b02f4c0SEric Whitney * ext4_clu_mapped - determine whether any block in a logical cluster has
58120b02f4c0SEric Whitney * been mapped to a physical cluster
58130b02f4c0SEric Whitney *
58140b02f4c0SEric Whitney * @inode - file containing the logical cluster
58150b02f4c0SEric Whitney * @lclu - logical cluster of interest
58160b02f4c0SEric Whitney *
58170b02f4c0SEric Whitney * Returns 1 if any block in the logical cluster is mapped, signifying
58180b02f4c0SEric Whitney * that a physical cluster has been allocated for it. Otherwise,
58190b02f4c0SEric Whitney * returns 0. Can also return negative error codes. Derived from
58200b02f4c0SEric Whitney * ext4_ext_map_blocks().
58210b02f4c0SEric Whitney */
58220b02f4c0SEric Whitney int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
58230b02f4c0SEric Whitney {
58240b02f4c0SEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
58250b02f4c0SEric Whitney struct ext4_ext_path *path;
58260b02f4c0SEric Whitney int depth, mapped = 0, err = 0;
58270b02f4c0SEric Whitney struct ext4_extent *extent;
58280b02f4c0SEric Whitney ext4_lblk_t first_lblk, first_lclu, last_lclu;
58290b02f4c0SEric Whitney
5830131294c3SEric Whitney /*
5831131294c3SEric Whitney * if data can be stored inline, the logical cluster isn't
5832131294c3SEric Whitney * mapped - no physical clusters have been allocated, and the
5833131294c3SEric Whitney * file has no extents
5834131294c3SEric Whitney */
583583565959SYe Bin if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
583683565959SYe Bin ext4_has_inline_data(inode))
5837131294c3SEric Whitney return 0;
5838131294c3SEric Whitney
58390b02f4c0SEric Whitney /* search for the extent closest to the first block in the cluster */
58400b02f4c0SEric Whitney path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
58410b02f4c0SEric Whitney if (IS_ERR(path)) {
58420b02f4c0SEric Whitney err = PTR_ERR(path);
58430b02f4c0SEric Whitney path = NULL;
58440b02f4c0SEric Whitney goto out;
58450b02f4c0SEric Whitney }
58460b02f4c0SEric Whitney
58470b02f4c0SEric Whitney depth = ext_depth(inode);
58480b02f4c0SEric Whitney
58490b02f4c0SEric Whitney /*
58500b02f4c0SEric Whitney * A consistent leaf must not be empty. This situation is possible,
58510b02f4c0SEric Whitney * though, _during_ tree modification, and it's why an assert can't
58520b02f4c0SEric Whitney * be put in ext4_find_extent().
58530b02f4c0SEric Whitney */
58540b02f4c0SEric Whitney if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
58550b02f4c0SEric Whitney EXT4_ERROR_INODE(inode,
58560b02f4c0SEric Whitney "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
58570b02f4c0SEric Whitney (unsigned long) EXT4_C2B(sbi, lclu),
58580b02f4c0SEric Whitney depth, path[depth].p_block);
58590b02f4c0SEric Whitney err = -EFSCORRUPTED;
58600b02f4c0SEric Whitney goto out;
58610b02f4c0SEric Whitney }
58620b02f4c0SEric Whitney
58630b02f4c0SEric Whitney extent = path[depth].p_ext;
58640b02f4c0SEric Whitney
58650b02f4c0SEric Whitney /* can't be mapped if the extent tree is empty */
58660b02f4c0SEric Whitney if (extent == NULL)
58670b02f4c0SEric Whitney goto out;
58680b02f4c0SEric Whitney
58690b02f4c0SEric Whitney first_lblk = le32_to_cpu(extent->ee_block);
58700b02f4c0SEric Whitney first_lclu = EXT4_B2C(sbi, first_lblk);
58710b02f4c0SEric Whitney
58720b02f4c0SEric Whitney /*
58730b02f4c0SEric Whitney * Three possible outcomes at this point - found extent spanning
58740b02f4c0SEric Whitney * the target cluster, to the left of the target cluster, or to the
58750b02f4c0SEric Whitney * right of the target cluster. The first two cases are handled here.
58760b02f4c0SEric Whitney * The last case indicates the target cluster is not mapped.
58770b02f4c0SEric Whitney */
58780b02f4c0SEric Whitney if (lclu >= first_lclu) {
58790b02f4c0SEric Whitney last_lclu = EXT4_B2C(sbi, first_lblk +
58800b02f4c0SEric Whitney ext4_ext_get_actual_len(extent) - 1);
58810b02f4c0SEric Whitney if (lclu <= last_lclu) {
58820b02f4c0SEric Whitney mapped = 1;
58830b02f4c0SEric Whitney } else {
58840b02f4c0SEric Whitney first_lblk = ext4_ext_next_allocated_block(path);
58850b02f4c0SEric Whitney first_lclu = EXT4_B2C(sbi, first_lblk);
58860b02f4c0SEric Whitney if (lclu == first_lclu)
58870b02f4c0SEric Whitney mapped = 1;
58880b02f4c0SEric Whitney }
58890b02f4c0SEric Whitney }
58900b02f4c0SEric Whitney
58910b02f4c0SEric Whitney out:
58927ff5fddaSYe Bin ext4_free_ext_path(path);
58930b02f4c0SEric Whitney
58940b02f4c0SEric Whitney return err ? err : mapped;
58950b02f4c0SEric Whitney }
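/*
 * Worked example (illustrative numbers, assuming a bigalloc fs with 16
 * blocks per cluster): an extent with ee_block 40 and length 10 maps
 * logical blocks 40..49, i.e. clusters EXT4_B2C(40) = 2 through
 * EXT4_B2C(49) = 3. ext4_clu_mapped() then returns 1 for lclu 2 or 3,
 * and 0 for lclu 4 unless the next allocated block happens to fall in
 * that cluster.
 */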
58968016e29fSHarshad Shirwadkar
58978016e29fSHarshad Shirwadkar /*
58988016e29fSHarshad Shirwadkar * Updates the physical block address and unwritten status of the extent
58998016e29fSHarshad Shirwadkar * starting at logical block @start with length @len. If no such extent exists,
59008016e29fSHarshad Shirwadkar * this function splits the extent tree appropriately to create an
59018016e29fSHarshad Shirwadkar * extent like this. This function is called in the fast commit
59028016e29fSHarshad Shirwadkar * replay path. Returns 0 on success and error on failure.
59038016e29fSHarshad Shirwadkar */
59048016e29fSHarshad Shirwadkar int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
59058016e29fSHarshad Shirwadkar int len, int unwritten, ext4_fsblk_t pblk)
59068016e29fSHarshad Shirwadkar {
59071b558006SBaokun Li struct ext4_ext_path *path;
59088016e29fSHarshad Shirwadkar struct ext4_extent *ex;
59098016e29fSHarshad Shirwadkar int ret;
59108016e29fSHarshad Shirwadkar
59118016e29fSHarshad Shirwadkar path = ext4_find_extent(inode, start, NULL, 0);
5912bc18546bSDan Carpenter if (IS_ERR(path))
5913bc18546bSDan Carpenter return PTR_ERR(path);
59148016e29fSHarshad Shirwadkar ex = path[path->p_depth].p_ext;
59158016e29fSHarshad Shirwadkar if (!ex) {
59168016e29fSHarshad Shirwadkar ret = -EFSCORRUPTED;
59178016e29fSHarshad Shirwadkar goto out;
59188016e29fSHarshad Shirwadkar }
59198016e29fSHarshad Shirwadkar
59208016e29fSHarshad Shirwadkar if (le32_to_cpu(ex->ee_block) != start ||
59218016e29fSHarshad Shirwadkar ext4_ext_get_actual_len(ex) != len) {
59228016e29fSHarshad Shirwadkar /* We need to split this extent to match our extent first */
59238016e29fSHarshad Shirwadkar down_write(&EXT4_I(inode)->i_data_sem);
59241b558006SBaokun Li ret = ext4_force_split_extent_at(NULL, inode, &path, start, 1);
59258016e29fSHarshad Shirwadkar up_write(&EXT4_I(inode)->i_data_sem);
59268016e29fSHarshad Shirwadkar if (ret)
59278016e29fSHarshad Shirwadkar goto out;
59281b558006SBaokun Li
59291b558006SBaokun Li path = ext4_find_extent(inode, start, &path, 0);
59308016e29fSHarshad Shirwadkar if (IS_ERR(path))
59311b558006SBaokun Li return PTR_ERR(path);
59328016e29fSHarshad Shirwadkar ex = path[path->p_depth].p_ext;
59338016e29fSHarshad Shirwadkar WARN_ON(le32_to_cpu(ex->ee_block) != start);
59341b558006SBaokun Li
59358016e29fSHarshad Shirwadkar if (ext4_ext_get_actual_len(ex) != len) {
59368016e29fSHarshad Shirwadkar down_write(&EXT4_I(inode)->i_data_sem);
59371b558006SBaokun Li ret = ext4_force_split_extent_at(NULL, inode, &path,
59388016e29fSHarshad Shirwadkar start + len, 1);
59398016e29fSHarshad Shirwadkar up_write(&EXT4_I(inode)->i_data_sem);
59408016e29fSHarshad Shirwadkar if (ret)
59418016e29fSHarshad Shirwadkar goto out;
59421b558006SBaokun Li
59431b558006SBaokun Li path = ext4_find_extent(inode, start, &path, 0);
59448016e29fSHarshad Shirwadkar if (IS_ERR(path))
59451b558006SBaokun Li return PTR_ERR(path);
59468016e29fSHarshad Shirwadkar ex = path[path->p_depth].p_ext;
59478016e29fSHarshad Shirwadkar }
59488016e29fSHarshad Shirwadkar }
59498016e29fSHarshad Shirwadkar if (unwritten)
59508016e29fSHarshad Shirwadkar ext4_ext_mark_unwritten(ex);
59518016e29fSHarshad Shirwadkar else
59528016e29fSHarshad Shirwadkar ext4_ext_mark_initialized(ex);
59538016e29fSHarshad Shirwadkar ext4_ext_store_pblock(ex, pblk);
59548016e29fSHarshad Shirwadkar down_write(&EXT4_I(inode)->i_data_sem);
59558016e29fSHarshad Shirwadkar ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
59568016e29fSHarshad Shirwadkar up_write(&EXT4_I(inode)->i_data_sem);
59578016e29fSHarshad Shirwadkar out:
59587ff5fddaSYe Bin ext4_free_ext_path(path);
59598016e29fSHarshad Shirwadkar ext4_mark_inode_dirty(NULL, inode);
59608016e29fSHarshad Shirwadkar return ret;
59618016e29fSHarshad Shirwadkar }
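/*
 * Worked example (illustrative values): suppose the on-disk tree has a
 * single extent [start 0, len 100] and a fast-commit tag says blocks
 * 10..19 now live at pblk P, unwritten. The first split above cuts the
 * extent at block 10, the second at block 20; the resulting middle
 * extent [10, len 10] is then marked unwritten and repointed to P.
 */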
59628016e29fSHarshad Shirwadkar
59638016e29fSHarshad Shirwadkar /* Try to shrink the extent tree */
59648016e29fSHarshad Shirwadkar void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
59658016e29fSHarshad Shirwadkar {
59668016e29fSHarshad Shirwadkar struct ext4_ext_path *path = NULL;
59678016e29fSHarshad Shirwadkar struct ext4_extent *ex;
59688016e29fSHarshad Shirwadkar ext4_lblk_t old_cur, cur = 0;
59698016e29fSHarshad Shirwadkar
59708016e29fSHarshad Shirwadkar while (cur < end) {
59718016e29fSHarshad Shirwadkar path = ext4_find_extent(inode, cur, NULL, 0);
59728016e29fSHarshad Shirwadkar if (IS_ERR(path))
59738016e29fSHarshad Shirwadkar return;
59748016e29fSHarshad Shirwadkar ex = path[path->p_depth].p_ext;
59758016e29fSHarshad Shirwadkar if (!ex) {
59767ff5fddaSYe Bin ext4_free_ext_path(path);
59778016e29fSHarshad Shirwadkar ext4_mark_inode_dirty(NULL, inode);
59788016e29fSHarshad Shirwadkar return;
59798016e29fSHarshad Shirwadkar }
59808016e29fSHarshad Shirwadkar old_cur = cur;
59818016e29fSHarshad Shirwadkar cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
59828016e29fSHarshad Shirwadkar if (cur <= old_cur)
59838016e29fSHarshad Shirwadkar cur = old_cur + 1;
59848016e29fSHarshad Shirwadkar ext4_ext_try_to_merge(NULL, inode, path, ex);
59858016e29fSHarshad Shirwadkar down_write(&EXT4_I(inode)->i_data_sem);
59868016e29fSHarshad Shirwadkar ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
59878016e29fSHarshad Shirwadkar up_write(&EXT4_I(inode)->i_data_sem);
59888016e29fSHarshad Shirwadkar ext4_mark_inode_dirty(NULL, inode);
59897ff5fddaSYe Bin ext4_free_ext_path(path);
59908016e29fSHarshad Shirwadkar }
59918016e29fSHarshad Shirwadkar }
59928016e29fSHarshad Shirwadkar
59938016e29fSHarshad Shirwadkar /* Check if *cur is a hole and if it is, skip it */
59941fd95c05STheodore Ts'o static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
59958016e29fSHarshad Shirwadkar {
59968016e29fSHarshad Shirwadkar int ret;
59978016e29fSHarshad Shirwadkar struct ext4_map_blocks map;
59988016e29fSHarshad Shirwadkar
59998016e29fSHarshad Shirwadkar map.m_lblk = *cur;
60008016e29fSHarshad Shirwadkar map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
60018016e29fSHarshad Shirwadkar
60028016e29fSHarshad Shirwadkar ret = ext4_map_blocks(NULL, inode, &map, 0);
60031fd95c05STheodore Ts'o if (ret < 0)
60041fd95c05STheodore Ts'o return ret;
60058016e29fSHarshad Shirwadkar if (ret != 0)
60061fd95c05STheodore Ts'o return 0;
60078016e29fSHarshad Shirwadkar *cur = *cur + map.m_len;
60081fd95c05STheodore Ts'o return 0;
60098016e29fSHarshad Shirwadkar }
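/*
 * Example of the contract above (illustrative numbers): with *cur = 100
 * and blocks 100..115 unmapped, the ext4_map_blocks() lookup returns 0
 * with map.m_len set to the hole length, so *cur advances to 116. If
 * block 100 is already mapped, the lookup returns > 0 and *cur is left
 * untouched.
 */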
60108016e29fSHarshad Shirwadkar
60118016e29fSHarshad Shirwadkar /* Count number of blocks used by this inode and update i_blocks */
60128016e29fSHarshad Shirwadkar int ext4_ext_replay_set_iblocks(struct inode *inode)
60138016e29fSHarshad Shirwadkar {
60148016e29fSHarshad Shirwadkar struct ext4_ext_path *path = NULL, *path2 = NULL;
60158016e29fSHarshad Shirwadkar struct ext4_extent *ex;
60168016e29fSHarshad Shirwadkar ext4_lblk_t cur = 0, end;
60178016e29fSHarshad Shirwadkar int numblks = 0, i, ret = 0;
60188016e29fSHarshad Shirwadkar ext4_fsblk_t cmp1, cmp2;
60198016e29fSHarshad Shirwadkar struct ext4_map_blocks map;
60208016e29fSHarshad Shirwadkar
60218016e29fSHarshad Shirwadkar /* Determine the size of the file first */
60228016e29fSHarshad Shirwadkar path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
60238016e29fSHarshad Shirwadkar EXT4_EX_NOCACHE);
60248016e29fSHarshad Shirwadkar if (IS_ERR(path))
60258016e29fSHarshad Shirwadkar return PTR_ERR(path);
60268016e29fSHarshad Shirwadkar ex = path[path->p_depth].p_ext;
60278016e29fSHarshad Shirwadkar if (!ex) {
60287ff5fddaSYe Bin ext4_free_ext_path(path);
60298016e29fSHarshad Shirwadkar goto out;
60308016e29fSHarshad Shirwadkar }
60318016e29fSHarshad Shirwadkar end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
60327ff5fddaSYe Bin ext4_free_ext_path(path);
60338016e29fSHarshad Shirwadkar
60348016e29fSHarshad Shirwadkar /* Count the number of data blocks */
60358016e29fSHarshad Shirwadkar cur = 0;
60368016e29fSHarshad Shirwadkar while (cur < end) {
60378016e29fSHarshad Shirwadkar map.m_lblk = cur;
60388016e29fSHarshad Shirwadkar map.m_len = end - cur;
60398016e29fSHarshad Shirwadkar ret = ext4_map_blocks(NULL, inode, &map, 0);
60408016e29fSHarshad Shirwadkar if (ret < 0)
60418016e29fSHarshad Shirwadkar break;
60428016e29fSHarshad Shirwadkar if (ret > 0)
60438016e29fSHarshad Shirwadkar numblks += ret;
60448016e29fSHarshad Shirwadkar cur = cur + map.m_len;
60458016e29fSHarshad Shirwadkar }
60468016e29fSHarshad Shirwadkar
60478016e29fSHarshad Shirwadkar /*
60488016e29fSHarshad Shirwadkar * Count the number of extent tree blocks. We do it by looking up
60498016e29fSHarshad Shirwadkar * two successive extents and determining the difference between
60508016e29fSHarshad Shirwadkar * their paths. When the paths of two successive extents differ,
60518016e29fSHarshad Shirwadkar * we compare the blocks in the path at each level and increment
60528016e29fSHarshad Shirwadkar * iblocks by the total number of differences found.
60538016e29fSHarshad Shirwadkar */
60548016e29fSHarshad Shirwadkar cur = 0;
60551fd95c05STheodore Ts'o ret = skip_hole(inode, &cur);
60561fd95c05STheodore Ts'o if (ret < 0)
60571fd95c05STheodore Ts'o goto out;
60588016e29fSHarshad Shirwadkar path = ext4_find_extent(inode, cur, NULL, 0);
60598016e29fSHarshad Shirwadkar if (IS_ERR(path))
60608016e29fSHarshad Shirwadkar goto out;
60618016e29fSHarshad Shirwadkar numblks += path->p_depth;
60627ff5fddaSYe Bin ext4_free_ext_path(path);
60638016e29fSHarshad Shirwadkar while (cur < end) {
60648016e29fSHarshad Shirwadkar path = ext4_find_extent(inode, cur, NULL, 0);
60658016e29fSHarshad Shirwadkar if (IS_ERR(path))
60668016e29fSHarshad Shirwadkar break;
60678016e29fSHarshad Shirwadkar ex = path[path->p_depth].p_ext;
60688016e29fSHarshad Shirwadkar if (!ex) {
60697ff5fddaSYe Bin ext4_free_ext_path(path);
60708016e29fSHarshad Shirwadkar return 0;
60718016e29fSHarshad Shirwadkar }
60728016e29fSHarshad Shirwadkar cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
60738016e29fSHarshad Shirwadkar ext4_ext_get_actual_len(ex));
60741fd95c05STheodore Ts'o ret = skip_hole(inode, &cur);
60751fd95c05STheodore Ts'o if (ret < 0) {
60767ff5fddaSYe Bin ext4_free_ext_path(path);
60771fd95c05STheodore Ts'o break;
60781fd95c05STheodore Ts'o }
60798016e29fSHarshad Shirwadkar path2 = ext4_find_extent(inode, cur, NULL, 0);
60808016e29fSHarshad Shirwadkar if (IS_ERR(path2)) {
60817ff5fddaSYe Bin ext4_free_ext_path(path);
60828016e29fSHarshad Shirwadkar break;
60838016e29fSHarshad Shirwadkar }
60848016e29fSHarshad Shirwadkar for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
60858016e29fSHarshad Shirwadkar cmp1 = cmp2 = 0;
60868016e29fSHarshad Shirwadkar if (i <= path->p_depth)
60878016e29fSHarshad Shirwadkar cmp1 = path[i].p_bh ?
60888016e29fSHarshad Shirwadkar path[i].p_bh->b_blocknr : 0;
60898016e29fSHarshad Shirwadkar if (i <= path2->p_depth)
60908016e29fSHarshad Shirwadkar cmp2 = path2[i].p_bh ?
60918016e29fSHarshad Shirwadkar path2[i].p_bh->b_blocknr : 0;
60928016e29fSHarshad Shirwadkar if (cmp1 != cmp2 && cmp2 != 0)
60938016e29fSHarshad Shirwadkar numblks++;
60948016e29fSHarshad Shirwadkar }
60957ff5fddaSYe Bin ext4_free_ext_path(path);
60967ff5fddaSYe Bin ext4_free_ext_path(path2);
60978016e29fSHarshad Shirwadkar }
60988016e29fSHarshad Shirwadkar
60998016e29fSHarshad Shirwadkar out:
61008016e29fSHarshad Shirwadkar inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
61018016e29fSHarshad Shirwadkar ext4_mark_inode_dirty(NULL, inode);
61028016e29fSHarshad Shirwadkar return 0;
61038016e29fSHarshad Shirwadkar }
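/*
 * Worked example for the tree-block counting above (illustrative): on a
 * depth-1 tree, two successive extents that sit in different leaves
 * yield paths whose p_bh at level 0 is NULL (the root lives in the
 * inode, so both cmp values stay 0) and whose leaf buffers at level 1
 * differ, so numblks is bumped once for the newly encountered leaf.
 */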
61048016e29fSHarshad Shirwadkar
61058016e29fSHarshad Shirwadkar int ext4_ext_clear_bb(struct inode *inode)
61068016e29fSHarshad Shirwadkar {
61078016e29fSHarshad Shirwadkar struct ext4_ext_path *path = NULL;
61088016e29fSHarshad Shirwadkar struct ext4_extent *ex;
61098016e29fSHarshad Shirwadkar ext4_lblk_t cur = 0, end;
61108016e29fSHarshad Shirwadkar int j, ret = 0;
61118016e29fSHarshad Shirwadkar struct ext4_map_blocks map;
61128016e29fSHarshad Shirwadkar
61131ebf2178SHarshad Shirwadkar if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
61141ebf2178SHarshad Shirwadkar return 0;
61151ebf2178SHarshad Shirwadkar
61168016e29fSHarshad Shirwadkar /* Determine the size of the file first */
61178016e29fSHarshad Shirwadkar path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
61188016e29fSHarshad Shirwadkar EXT4_EX_NOCACHE);
61198016e29fSHarshad Shirwadkar if (IS_ERR(path))
61208016e29fSHarshad Shirwadkar return PTR_ERR(path);
61218016e29fSHarshad Shirwadkar ex = path[path->p_depth].p_ext;
61228016e29fSHarshad Shirwadkar if (!ex) {
61237ff5fddaSYe Bin ext4_free_ext_path(path);
61248016e29fSHarshad Shirwadkar return 0;
61258016e29fSHarshad Shirwadkar }
61268016e29fSHarshad Shirwadkar end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
61277ff5fddaSYe Bin ext4_free_ext_path(path);
61288016e29fSHarshad Shirwadkar
61298016e29fSHarshad Shirwadkar cur = 0;
61308016e29fSHarshad Shirwadkar while (cur < end) {
61318016e29fSHarshad Shirwadkar map.m_lblk = cur;
61328016e29fSHarshad Shirwadkar map.m_len = end - cur;
61338016e29fSHarshad Shirwadkar ret = ext4_map_blocks(NULL, inode, &map, 0);
61348016e29fSHarshad Shirwadkar if (ret < 0)
61358016e29fSHarshad Shirwadkar break;
61368016e29fSHarshad Shirwadkar if (ret > 0) {
61378016e29fSHarshad Shirwadkar path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
61388016e29fSHarshad Shirwadkar if (!IS_ERR_OR_NULL(path)) {
61398016e29fSHarshad Shirwadkar for (j = 0; j < path->p_depth; j++) {
61408016e29fSHarshad Shirwadkar
61418016e29fSHarshad Shirwadkar ext4_mb_mark_bb(inode->i_sb,
61428016e29fSHarshad Shirwadkar path[j].p_block, 1, 0);
6143599ea31dSXin Yin ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6144599ea31dSXin Yin 0, path[j].p_block, 1, 1);
61458016e29fSHarshad Shirwadkar }
61467ff5fddaSYe Bin ext4_free_ext_path(path);
61478016e29fSHarshad Shirwadkar }
61488016e29fSHarshad Shirwadkar ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
6149599ea31dSXin Yin ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6150599ea31dSXin Yin map.m_lblk, map.m_pblk, map.m_len, 1);
61518016e29fSHarshad Shirwadkar }
61528016e29fSHarshad Shirwadkar cur = cur + map.m_len;
61538016e29fSHarshad Shirwadkar }
61548016e29fSHarshad Shirwadkar
61558016e29fSHarshad Shirwadkar return 0;
61568016e29fSHarshad Shirwadkar }