// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}
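/*
 * Layout assumed by the checksum helpers (illustrative note): an on-disk
 * extent tree block is
 *
 *	struct ext4_extent_header | entries[] | struct ext4_extent_tail
 *
 * and EXT4_EXTENT_TAIL_OFFSET(eh) points at the tail, so the checksum
 * covers everything from the header up to, but not including, the
 * ext4_extent_tail that stores it.
 */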
static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_rwsem. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode, 0);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		brelse(path->p_bh);
		path->p_bh = NULL;
	}
}

void ext4_free_ext_path(struct ext4_ext_path *path)
{
	ext4_ext_drop_refs(path);
	kfree(path);
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
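/*
 * Minimal usage sketch (illustrative, not a verbatim caller): code that
 * walks the tree under i_data_sem calls this before each step that dirties
 * metadata.  Since a return of 1 means i_data_sem was dropped and
 * re-taken, anything cached from the extent path may be stale and must be
 * revalidated:
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, cred, cred, cred);
 *	if (err < 0)
 *		goto out;	// fatal error
 *	if (err > 0)
 *		goto again;	// transaction restarted, re-read the path
 */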
/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err = 0;

	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode->i_sb,
						    path->p_bh, EXT4_JTR_NONE);
		/*
		 * The extent buffer's verified bit will be set again in
		 * __ext4_ext_dirty(). We could leave an inconsistent
		 * buffer if the extents updating procedure breaks off due
		 * to some error, so force it to be checked again.
		 */
		if (!err)
			clear_buffer_verified(path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
		/* Extents updating done, re-set verified flag */
		if (!err)
			set_buffer_verified(path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}
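/*
 * Worked example for the prediction above (numbers are illustrative): if
 * the path ends at an extent mapping logical block 100 to physical block
 * 5000 and a goal is requested for logical block 164, the code returns
 * 5000 + (164 - 100) = 5064, i.e. the physical block the new data would
 * occupy if the file were laid out contiguously.
 */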
/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
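/*
 * For illustration, assuming a 4KiB block size and the usual 12-byte
 * header and 12-byte entries: (4096 - 12) / 12 = 340 extents or indexes
 * per full tree block; 340 entries plus the header occupy 4092 bytes,
 * which leaves exactly 4 bytes for the checksum tail when metadata
 * checksums are enabled.  The in-inode root (ext4_ext_space_root*) works
 * on the 60-byte i_data area instead, giving (60 - 12) / 12 = 4 entries.
 */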
static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
	int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;

	if (nofail)
		flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			flags);
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_inode_block_valid(inode, block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_inode_block_valid(inode, block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
				     int depth)
{
	unsigned short entries;
	ext4_lblk_t lblock = 0;
	ext4_lblk_t cur = 0;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);

		/*
		 * The logical block in the first entry should equal the
		 * number in the index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext->ee_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			if (lblock < cur) {
				*pblk = ext4_ext_pblock(ext);
				return 0;
			}
			cur = lblock + ext4_ext_get_actual_len(ext);
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		/*
		 * The logical block in the first entry should equal the
		 * number in the parent index block.
		 */
		if (depth != ext_depth(inode) &&
		    lblk != le32_to_cpu(ext_idx->ei_block))
			return 0;
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;

			/* Check for overlapping index extents */
			lblock = le32_to_cpu(ext_idx->ei_block);
			if (lblock < cur) {
				*pblk = ext4_idx_pblock(ext_idx);
				return 0;
			}
			ext_idx++;
			entries--;
			cur = lblock + 1;
		}
	}
	return 1;
}
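/*
 * Example of the overlap check above (illustrative): for leaf entries
 * (lblock=0, len=10) followed by (lblock=8, len=4), the second iteration
 * sees cur = 0 + 10 = 10 > 8, so the entries overlap and the block is
 * rejected.  For index nodes only the start keys are ordered, hence the
 * weaker cur = lblock + 1 advance.
 */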
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (unlikely((eh->eh_entries == 0) && (depth > 0))) {
		error_msg = "eh_entries is 0 but eh_depth is > 0";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode_err(inode, function, line, 0, -err,
			     "pblk %llu bad header/extent: %s - magic %x, "
			     "entries %u, max %u(%u), depth %u(%u)",
			     (unsigned long long) pblk, error_msg,
			     le16_to_cpu(eh->eh_magic),
			     le16_to_cpu(eh->eh_entries),
			     le16_to_cpu(eh->eh_max),
			     max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}
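/*
 * Example of what ext4_cache_extents() produces (illustrative): a leaf
 * holding extents {lblk 0, len 3} and {lblk 10, len 2} is cached as
 * WRITTEN [0,3), an explicit HOLE [3,10) with pblk ~0, and WRITTEN
 * [10,12); gaps between on-disk extents become explicit hole entries in
 * the extent status tree.
 */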
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, struct ext4_extent_idx *idx,
			 int depth, int flags)
{
	struct buffer_head *bh;
	int err;
	gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
	ext4_fsblk_t pblk;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	pblk = ext4_idx_pblock(idx);
	bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = ext4_read_bh(bh, 0, NULL);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
			       depth, pblk, le32_to_cpu(idx->ei_block));
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, idx, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (idx),	\
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode, path[i].p_idx++,
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_free_ext_path(path);
	return ret;
}
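/*
 * The loop above is an iterative depth-first walk: path[i].p_idx is the
 * cursor into level i, reading a child bumps the cursor and descends
 * (i++), and exhausting a node or hitting a leaf pops back up (i--).
 * Every block is read with EXT4_EX_FORCE_CACHE so leaf entries land in
 * the extent status tree even when the buffer was already verified.
 */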
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug(inode, "path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(inode, "  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(inode, "  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug(inode, "  []");
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug(inode, "Displaying leaf extents\n");

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug(inode, "\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug(inode, "binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));

		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_idx = l - 1;
	ext_debug(inode, "  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug(inode, "binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
			  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));

		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
	}

	path->p_ext = l - 1;
	ext_debug(inode, "  -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_unwritten(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
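/*
 * Worked example for the two searches above (illustrative): with leaf
 * start keys {0, 8, 24, 40} and a target of 25, the loop converges with
 * l pointing at the 40 entry, so path->p_ext = l - 1 selects the extent
 * starting at 24, i.e. the rightmost entry whose key is <= the target.
 * Starting the search at EXT_FIRST_EXTENT(eh) + 1 is what makes the
 * l - 1 fallback to the first entry safe when the target is below every
 * other key.
 */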
void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	eh->eh_generation = 0;
	ext4_mark_inode_dirty(handle, inode);
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;
	gfp_t gfp_flags = GFP_NOFS;

	if (flags & EXT4_EX_NOFAIL)
		gfp_flags |= __GFP_NOFAIL;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
			       gfp_flags);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug(inode, "depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries),
			  le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_idx, --i,
					    flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_free_ext_path(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}
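/*
 * Shape of the result (illustrative): for a depth-2 tree,
 * ext4_find_extent() returns
 *
 *	path[0]  root in i_data,  p_idx = index chosen at the root
 *	path[1]  index block,     p_idx = index chosen at level 1
 *	path[2]  leaf block,      p_ext = extent found by the binsearch
 *
 * with p_depth descending from 2 to 0 and each p_bh holding a buffer
 * reference that ext4_free_ext_path() eventually drops.
 */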
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug(inode, "insert new index %d after: %llu\n",
			  logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug(inode, "insert new index %d before: %llu\n",
			  logical, ptr);
		ix = curp->p_idx;
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug(inode, "insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
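/*
 * Illustration of the shift above: inserting key 30 into an index node
 * holding {10, 20, 40, 50} picks ix just after the 20 entry, then
 * memmove() shifts {40, 50} one slot right before the new entry is
 * written, giving {10, 20, 30, 40, 50}.  len counts the entries sitting
 * at or after ix.
 */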
106973c384c0STheodore Ts'o if (flags & EXT4_EX_NOFAIL) 107073c384c0STheodore Ts'o gfp_flags |= __GFP_NOFAIL; 107173c384c0STheodore Ts'o 1072a86c6181SAlex Tomas /* make decision: where to split? */ 1073d0d856e8SRandy Dunlap /* FIXME: now decision is simplest: at current extent */ 1074a86c6181SAlex Tomas 1075d0d856e8SRandy Dunlap /* if current leaf will be split, then we should use 1076a86c6181SAlex Tomas * border from split point */ 1077273df556SFrank Mayhar if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { 1078273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); 10796a797d27SDarrick J. Wong return -EFSCORRUPTED; 1080273df556SFrank Mayhar } 1081a86c6181SAlex Tomas if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { 1082a86c6181SAlex Tomas border = path[depth].p_ext[1].ee_block; 108370aa1554SRitesh Harjani ext_debug(inode, "leaf will be split." 1084a86c6181SAlex Tomas " next leaf starts at %d\n", 1085a86c6181SAlex Tomas le32_to_cpu(border)); 1086a86c6181SAlex Tomas } else { 1087a86c6181SAlex Tomas border = newext->ee_block; 108870aa1554SRitesh Harjani ext_debug(inode, "leaf will be added." 1089a86c6181SAlex Tomas " next leaf starts at %d\n", 1090a86c6181SAlex Tomas le32_to_cpu(border)); 1091a86c6181SAlex Tomas } 1092a86c6181SAlex Tomas 1093a86c6181SAlex Tomas /* 1094d0d856e8SRandy Dunlap * If error occurs, then we break processing 1095d0d856e8SRandy Dunlap * and mark filesystem read-only. index won't 1096a86c6181SAlex Tomas * be inserted and tree will be in consistent 1097d0d856e8SRandy Dunlap * state. Next mount will repair buffers too. 1098a86c6181SAlex Tomas */ 1099a86c6181SAlex Tomas 1100a86c6181SAlex Tomas /* 1101d0d856e8SRandy Dunlap * Get array to track all allocated blocks. 1102d0d856e8SRandy Dunlap * We need this to handle errors and free blocks 1103d0d856e8SRandy Dunlap * upon them. 1104a86c6181SAlex Tomas */ 110573c384c0STheodore Ts'o ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags); 1106a86c6181SAlex Tomas if (!ablocks) 1107a86c6181SAlex Tomas return -ENOMEM; 1108a86c6181SAlex Tomas 1109a86c6181SAlex Tomas /* allocate all needed blocks */ 111070aa1554SRitesh Harjani ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at); 1111a86c6181SAlex Tomas for (a = 0; a < depth - at; a++) { 1112654b4908SAneesh Kumar K.V newblock = ext4_ext_new_meta_block(handle, inode, path, 111355f020dbSAllison Henderson newext, &err, flags); 1114a86c6181SAlex Tomas if (newblock == 0) 1115a86c6181SAlex Tomas goto cleanup; 1116a86c6181SAlex Tomas ablocks[a] = newblock; 1117a86c6181SAlex Tomas } 1118a86c6181SAlex Tomas 1119a86c6181SAlex Tomas /* initialize new leaf */ 1120a86c6181SAlex Tomas newblock = ablocks[--a]; 1121273df556SFrank Mayhar if (unlikely(newblock == 0)) { 1122273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "newblock == 0!"); 11236a797d27SDarrick J. 
Wong err = -EFSCORRUPTED; 1124273df556SFrank Mayhar goto cleanup; 1125273df556SFrank Mayhar } 1126c45653c3SNikolay Borisov bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); 1127aebf0243SWang Shilong if (unlikely(!bh)) { 1128860d21e2STheodore Ts'o err = -ENOMEM; 1129a86c6181SAlex Tomas goto cleanup; 1130a86c6181SAlex Tomas } 1131a86c6181SAlex Tomas lock_buffer(bh); 1132a86c6181SAlex Tomas 1133188c299eSJan Kara err = ext4_journal_get_create_access(handle, inode->i_sb, bh, 1134188c299eSJan Kara EXT4_JTR_NONE); 11357e028976SAvantika Mathur if (err) 1136a86c6181SAlex Tomas goto cleanup; 1137a86c6181SAlex Tomas 1138a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1139a86c6181SAlex Tomas neh->eh_entries = 0; 114055ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1141a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 1142a86c6181SAlex Tomas neh->eh_depth = 0; 1143ce3aba43SAnirudh Rayabharam neh->eh_generation = 0; 1144a86c6181SAlex Tomas 1145d0d856e8SRandy Dunlap /* move remainder of path[depth] to the new leaf */ 1146273df556SFrank Mayhar if (unlikely(path[depth].p_hdr->eh_entries != 1147273df556SFrank Mayhar path[depth].p_hdr->eh_max)) { 1148273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", 1149273df556SFrank Mayhar path[depth].p_hdr->eh_entries, 1150273df556SFrank Mayhar path[depth].p_hdr->eh_max); 11516a797d27SDarrick J. Wong err = -EFSCORRUPTED; 1152273df556SFrank Mayhar goto cleanup; 1153273df556SFrank Mayhar } 1154a86c6181SAlex Tomas /* start copy from next extent */ 11551b16da77SYongqiang Yang m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; 11561b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, depth); 1157a86c6181SAlex Tomas if (m) { 11581b16da77SYongqiang Yang struct ext4_extent *ex; 11591b16da77SYongqiang Yang ex = EXT_FIRST_EXTENT(neh); 11601b16da77SYongqiang Yang memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); 1161e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 1162a86c6181SAlex Tomas } 1163a86c6181SAlex Tomas 1164592acbf1SSriram Rajagopalan /* zero out unused area in the extent block */ 1165592acbf1SSriram Rajagopalan ext_size = sizeof(struct ext4_extent_header) + 1166592acbf1SSriram Rajagopalan sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries); 1167592acbf1SSriram Rajagopalan memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size); 11687ac5990dSDarrick J. 
Wong ext4_extent_block_csum_set(inode, neh); 1169a86c6181SAlex Tomas set_buffer_uptodate(bh); 1170a86c6181SAlex Tomas unlock_buffer(bh); 1171a86c6181SAlex Tomas 11720390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 11737e028976SAvantika Mathur if (err) 1174a86c6181SAlex Tomas goto cleanup; 1175a86c6181SAlex Tomas brelse(bh); 1176a86c6181SAlex Tomas bh = NULL; 1177a86c6181SAlex Tomas 1178a86c6181SAlex Tomas /* correct old leaf */ 1179a86c6181SAlex Tomas if (m) { 11807e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 11817e028976SAvantika Mathur if (err) 1182a86c6181SAlex Tomas goto cleanup; 1183e8546d06SMarcin Slusarz le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); 11847e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + depth); 11857e028976SAvantika Mathur if (err) 1186a86c6181SAlex Tomas goto cleanup; 1187a86c6181SAlex Tomas 1188a86c6181SAlex Tomas } 1189a86c6181SAlex Tomas 1190a86c6181SAlex Tomas /* create intermediate indexes */ 1191a86c6181SAlex Tomas k = depth - at - 1; 1192273df556SFrank Mayhar if (unlikely(k < 0)) { 1193273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "k %d < 0!", k); 11946a797d27SDarrick J. Wong err = -EFSCORRUPTED; 1195273df556SFrank Mayhar goto cleanup; 1196273df556SFrank Mayhar } 1197a86c6181SAlex Tomas if (k) 119870aa1554SRitesh Harjani ext_debug(inode, "create %d intermediate indices\n", k); 1199a86c6181SAlex Tomas /* insert new index into current index block */ 1200a86c6181SAlex Tomas /* current depth stored in i var */ 1201a86c6181SAlex Tomas i = depth - 1; 1202a86c6181SAlex Tomas while (k--) { 1203a86c6181SAlex Tomas oldblock = newblock; 1204a86c6181SAlex Tomas newblock = ablocks[--a]; 1205bba90743SEric Sandeen bh = sb_getblk(inode->i_sb, newblock); 1206aebf0243SWang Shilong if (unlikely(!bh)) { 1207860d21e2STheodore Ts'o err = -ENOMEM; 1208a86c6181SAlex Tomas goto cleanup; 1209a86c6181SAlex Tomas } 1210a86c6181SAlex Tomas lock_buffer(bh); 1211a86c6181SAlex Tomas 1212188c299eSJan Kara err = ext4_journal_get_create_access(handle, inode->i_sb, bh, 1213188c299eSJan Kara EXT4_JTR_NONE); 12147e028976SAvantika Mathur if (err) 1215a86c6181SAlex Tomas goto cleanup; 1216a86c6181SAlex Tomas 1217a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1218a86c6181SAlex Tomas neh->eh_entries = cpu_to_le16(1); 1219a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 122055ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1221a86c6181SAlex Tomas neh->eh_depth = cpu_to_le16(depth - i); 1222ce3aba43SAnirudh Rayabharam neh->eh_generation = 0; 1223a86c6181SAlex Tomas fidx = EXT_FIRST_INDEX(neh); 1224a86c6181SAlex Tomas fidx->ei_block = border; 1225f65e6fbaSAlex Tomas ext4_idx_store_pblock(fidx, oldblock); 1226a86c6181SAlex Tomas 122770aa1554SRitesh Harjani ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n", 1228bba90743SEric Sandeen i, newblock, le32_to_cpu(border), oldblock); 1229a86c6181SAlex Tomas 12301b16da77SYongqiang Yang /* move remainder of path[i] to the new index block */ 1231273df556SFrank Mayhar if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != 1232273df556SFrank Mayhar EXT_LAST_INDEX(path[i].p_hdr))) { 1233273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1234273df556SFrank Mayhar "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", 1235273df556SFrank Mayhar le32_to_cpu(path[i].p_ext->ee_block)); 12366a797d27SDarrick J. 
Wong err = -EFSCORRUPTED; 1237273df556SFrank Mayhar goto cleanup; 1238273df556SFrank Mayhar } 12391b16da77SYongqiang Yang /* start copy indexes */ 12401b16da77SYongqiang Yang m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; 124170aa1554SRitesh Harjani ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx, 12421b16da77SYongqiang Yang EXT_MAX_INDEX(path[i].p_hdr)); 12431b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, i); 1244a86c6181SAlex Tomas if (m) { 12451b16da77SYongqiang Yang memmove(++fidx, path[i].p_idx, 1246a86c6181SAlex Tomas sizeof(struct ext4_extent_idx) * m); 1247e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 1248a86c6181SAlex Tomas } 1249592acbf1SSriram Rajagopalan /* zero out unused area in the extent block */ 1250592acbf1SSriram Rajagopalan ext_size = sizeof(struct ext4_extent_header) + 1251592acbf1SSriram Rajagopalan (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries)); 1252592acbf1SSriram Rajagopalan memset(bh->b_data + ext_size, 0, 1253592acbf1SSriram Rajagopalan inode->i_sb->s_blocksize - ext_size); 12547ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, neh); 1255a86c6181SAlex Tomas set_buffer_uptodate(bh); 1256a86c6181SAlex Tomas unlock_buffer(bh); 1257a86c6181SAlex Tomas 12580390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 12597e028976SAvantika Mathur if (err) 1260a86c6181SAlex Tomas goto cleanup; 1261a86c6181SAlex Tomas brelse(bh); 1262a86c6181SAlex Tomas bh = NULL; 1263a86c6181SAlex Tomas 1264a86c6181SAlex Tomas /* correct old index */ 1265a86c6181SAlex Tomas if (m) { 1266a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + i); 1267a86c6181SAlex Tomas if (err) 1268a86c6181SAlex Tomas goto cleanup; 1269e8546d06SMarcin Slusarz le16_add_cpu(&path[i].p_hdr->eh_entries, -m); 1270a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path + i); 1271a86c6181SAlex Tomas if (err) 1272a86c6181SAlex Tomas goto cleanup; 1273a86c6181SAlex Tomas } 1274a86c6181SAlex Tomas 1275a86c6181SAlex Tomas i--; 1276a86c6181SAlex Tomas } 1277a86c6181SAlex Tomas 1278a86c6181SAlex Tomas /* insert new index */ 1279a86c6181SAlex Tomas err = ext4_ext_insert_index(handle, inode, path + at, 1280a86c6181SAlex Tomas le32_to_cpu(border), newblock); 1281a86c6181SAlex Tomas 1282a86c6181SAlex Tomas cleanup: 1283a86c6181SAlex Tomas if (bh) { 1284a86c6181SAlex Tomas if (buffer_locked(bh)) 1285a86c6181SAlex Tomas unlock_buffer(bh); 1286a86c6181SAlex Tomas brelse(bh); 1287a86c6181SAlex Tomas } 1288a86c6181SAlex Tomas 1289a86c6181SAlex Tomas if (err) { 1290a86c6181SAlex Tomas /* free all allocated blocks in error case */ 1291a86c6181SAlex Tomas for (i = 0; i < depth; i++) { 1292a86c6181SAlex Tomas if (!ablocks[i]) 1293a86c6181SAlex Tomas continue; 12947dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, 1295e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA); 1296a86c6181SAlex Tomas } 1297a86c6181SAlex Tomas } 1298a86c6181SAlex Tomas kfree(ablocks); 1299a86c6181SAlex Tomas 1300a86c6181SAlex Tomas return err; 1301a86c6181SAlex Tomas } 1302a86c6181SAlex Tomas 1303a86c6181SAlex Tomas /* 1304d0d856e8SRandy Dunlap * ext4_ext_grow_indepth: 1305d0d856e8SRandy Dunlap * implements tree growing procedure: 1306a86c6181SAlex Tomas * - allocates new block 1307a86c6181SAlex Tomas * - moves top-level data (index block or leaf) into the new block 1308d0d856e8SRandy Dunlap * - initializes new top-level, creating index that points to the 1309a86c6181SAlex Tomas * just created block 1310a86c6181SAlex Tomas */ 
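/*
 * For example (an illustrative sketch, not taken from this file): with a
 * full depth-0 root holding extents E0..E3 in the inode's i_data, growing
 * copies E0..E3 into a freshly allocated block B and rewrites the root as
 * a single index entry pointing at B:
 *
 *	root[E0 E1 E2 E3]   =>   root[idx -> B],  B[E0 E1 E2 E3]
 *
 * eh_depth then increases by one, which is what the function below does.
 */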
1311a86c6181SAlex Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1312be5cd90dSDmitry Monakhov unsigned int flags)
1313a86c6181SAlex Tomas {
1314a86c6181SAlex Tomas struct ext4_extent_header *neh;
1315a86c6181SAlex Tomas struct buffer_head *bh;
1316be5cd90dSDmitry Monakhov ext4_fsblk_t newblock, goal = 0;
1317be5cd90dSDmitry Monakhov struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
1318a86c6181SAlex Tomas int err = 0;
1319592acbf1SSriram Rajagopalan size_t ext_size = 0;
1320a86c6181SAlex Tomas 
1321be5cd90dSDmitry Monakhov /* Try to prepend new index to old one */
1322be5cd90dSDmitry Monakhov if (ext_depth(inode))
1323be5cd90dSDmitry Monakhov goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
1324be5cd90dSDmitry Monakhov if (goal > le32_to_cpu(es->s_first_data_block)) {
1325be5cd90dSDmitry Monakhov flags |= EXT4_MB_HINT_TRY_GOAL;
1326be5cd90dSDmitry Monakhov goal--;
1327be5cd90dSDmitry Monakhov } else
1328be5cd90dSDmitry Monakhov goal = ext4_inode_to_goal_block(inode);
1329be5cd90dSDmitry Monakhov newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
1330be5cd90dSDmitry Monakhov NULL, &err);
1331a86c6181SAlex Tomas if (newblock == 0)
1332a86c6181SAlex Tomas return err;
1333a86c6181SAlex Tomas 
1334c45653c3SNikolay Borisov bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1335aebf0243SWang Shilong if (unlikely(!bh))
1336860d21e2STheodore Ts'o return -ENOMEM;
1337a86c6181SAlex Tomas lock_buffer(bh);
1338a86c6181SAlex Tomas 
1339188c299eSJan Kara err = ext4_journal_get_create_access(handle, inode->i_sb, bh,
1340188c299eSJan Kara EXT4_JTR_NONE);
13417e028976SAvantika Mathur if (err) {
1342a86c6181SAlex Tomas unlock_buffer(bh);
1343a86c6181SAlex Tomas goto out;
1344a86c6181SAlex Tomas }
1345a86c6181SAlex Tomas 
1346592acbf1SSriram Rajagopalan ext_size = sizeof(EXT4_I(inode)->i_data);
1347a86c6181SAlex Tomas /* move top-level index/leaf into new block */
1348592acbf1SSriram Rajagopalan memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
1349592acbf1SSriram Rajagopalan /* zero out unused area in the extent block */
1350592acbf1SSriram Rajagopalan memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
1351a86c6181SAlex Tomas 
1352a86c6181SAlex Tomas /* set size of new block */
1353a86c6181SAlex Tomas neh = ext_block_hdr(bh);
1354a86c6181SAlex Tomas /* old root could have indexes or leaves
1355a86c6181SAlex Tomas * so calculate eh_max the right way */
1356a86c6181SAlex Tomas if (ext_depth(inode))
135755ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1358a86c6181SAlex Tomas else
135955ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1360a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC;
13617ac5990dSDarrick J.
Wong ext4_extent_block_csum_set(inode, neh); 1362a86c6181SAlex Tomas set_buffer_uptodate(bh); 13630caaefbaSyangerkun set_buffer_verified(bh); 1364a86c6181SAlex Tomas unlock_buffer(bh); 1365a86c6181SAlex Tomas 13660390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 13677e028976SAvantika Mathur if (err) 1368a86c6181SAlex Tomas goto out; 1369a86c6181SAlex Tomas 13701939dd84SDmitry Monakhov /* Update top-level index: num,max,pointer */ 1371a86c6181SAlex Tomas neh = ext_inode_hdr(inode); 13721939dd84SDmitry Monakhov neh->eh_entries = cpu_to_le16(1); 13731939dd84SDmitry Monakhov ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); 13741939dd84SDmitry Monakhov if (neh->eh_depth == 0) { 13751939dd84SDmitry Monakhov /* Root extent block becomes index block */ 13761939dd84SDmitry Monakhov neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); 13771939dd84SDmitry Monakhov EXT_FIRST_INDEX(neh)->ei_block = 13781939dd84SDmitry Monakhov EXT_FIRST_EXTENT(neh)->ee_block; 13791939dd84SDmitry Monakhov } 138070aa1554SRitesh Harjani ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n", 1381a86c6181SAlex Tomas le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 13825a0790c2SAndi Kleen le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1383bf89d16fSTheodore Ts'o ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1384a86c6181SAlex Tomas 1385ba39ebb6SWei Yongjun le16_add_cpu(&neh->eh_depth, 1); 13864209ae12SHarshad Shirwadkar err = ext4_mark_inode_dirty(handle, inode); 1387a86c6181SAlex Tomas out: 1388a86c6181SAlex Tomas brelse(bh); 1389a86c6181SAlex Tomas 1390a86c6181SAlex Tomas return err; 1391a86c6181SAlex Tomas } 1392a86c6181SAlex Tomas 1393a86c6181SAlex Tomas /* 1394d0d856e8SRandy Dunlap * ext4_ext_create_new_leaf: 1395d0d856e8SRandy Dunlap * finds empty index and adds new leaf. 1396d0d856e8SRandy Dunlap * if no free index is found, then it requests in-depth growing. 
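 *
 * For example (hypothetical shape): with a depth-2 tree whose leaf and
 * mid-level index block are both full but whose root still has a free
 * slot, the walk-up loop below stops at the root and ext4_ext_split()
 * builds a new subtree under it; only when every level is full does the
 * tree grow one level via ext4_ext_grow_indepth(), after which the path
 * is looked up again.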
1397a86c6181SAlex Tomas */
1398a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1399107a7bd3STheodore Ts'o unsigned int mb_flags,
1400107a7bd3STheodore Ts'o unsigned int gb_flags,
1401dfe50809STheodore Ts'o struct ext4_ext_path **ppath,
1402a86c6181SAlex Tomas struct ext4_extent *newext)
1403a86c6181SAlex Tomas {
1404dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
1405a86c6181SAlex Tomas struct ext4_ext_path *curp;
1406a86c6181SAlex Tomas int depth, i, err = 0;
1407a86c6181SAlex Tomas 
1408a86c6181SAlex Tomas repeat:
1409a86c6181SAlex Tomas i = depth = ext_depth(inode);
1410a86c6181SAlex Tomas 
1411a86c6181SAlex Tomas /* walk up the tree and look for a free index entry */
1412a86c6181SAlex Tomas curp = path + depth;
1413a86c6181SAlex Tomas while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1414a86c6181SAlex Tomas i--;
1415a86c6181SAlex Tomas curp--;
1416a86c6181SAlex Tomas }
1417a86c6181SAlex Tomas 
1418d0d856e8SRandy Dunlap /* we use already allocated block for index block,
1419d0d856e8SRandy Dunlap * so subsequent data blocks should be contiguous */
1420a86c6181SAlex Tomas if (EXT_HAS_FREE_INDEX(curp)) {
1421a86c6181SAlex Tomas /* if we found index with free entry, then use that
1422a86c6181SAlex Tomas * entry: create all needed subtree and add new leaf */
1423107a7bd3STheodore Ts'o err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1424787e0981SShen Feng if (err)
1425787e0981SShen Feng goto out;
1426a86c6181SAlex Tomas 
1427a86c6181SAlex Tomas /* refill path */
1428ed8a1a76STheodore Ts'o path = ext4_find_extent(inode,
1429725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1430dfe50809STheodore Ts'o ppath, gb_flags);
1431a86c6181SAlex Tomas if (IS_ERR(path))
1432a86c6181SAlex Tomas err = PTR_ERR(path);
1433a86c6181SAlex Tomas } else {
1434a86c6181SAlex Tomas /* tree is full, time to grow in depth */
1435be5cd90dSDmitry Monakhov err = ext4_ext_grow_indepth(handle, inode, mb_flags);
1436a86c6181SAlex Tomas if (err)
1437a86c6181SAlex Tomas goto out;
1438a86c6181SAlex Tomas 
1439a86c6181SAlex Tomas /* refill path */
1440ed8a1a76STheodore Ts'o path = ext4_find_extent(inode,
1441725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1442dfe50809STheodore Ts'o ppath, gb_flags);
1443a86c6181SAlex Tomas if (IS_ERR(path)) {
1444a86c6181SAlex Tomas err = PTR_ERR(path);
1445a86c6181SAlex Tomas goto out;
1446a86c6181SAlex Tomas }
1447a86c6181SAlex Tomas 
1448a86c6181SAlex Tomas /*
1449d0d856e8SRandy Dunlap * only first (depth 0 -> 1) produces free space;
1450d0d856e8SRandy Dunlap * in all other cases we have to split the grown tree
1451a86c6181SAlex Tomas */
1452a86c6181SAlex Tomas depth = ext_depth(inode);
1453a86c6181SAlex Tomas if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1454d0d856e8SRandy Dunlap /* now we need to split */
1455a86c6181SAlex Tomas goto repeat;
1456a86c6181SAlex Tomas }
1457a86c6181SAlex Tomas }
1458a86c6181SAlex Tomas 
1459a86c6181SAlex Tomas out:
1460a86c6181SAlex Tomas return err;
1461a86c6181SAlex Tomas }
1462a86c6181SAlex Tomas 
1463a86c6181SAlex Tomas /*
14641988b51eSAlex Tomas * search the closest allocated block to the left of *logical
14651988b51eSAlex Tomas * and return it at @logical + its physical address at @phys
14661988b51eSAlex Tomas * if *logical is the smallest allocated block, the function
14671988b51eSAlex Tomas * returns 0 at @phys
14681988b51eSAlex Tomas * return value contains 0 (success) or error code
14691988b51eSAlex Tomas */
14701f109d5aSTheodore Ts'o static int ext4_ext_search_left(struct inode *inode,
14711f109d5aSTheodore Ts'o struct ext4_ext_path *path,
14721988b51eSAlex Tomas ext4_lblk_t *logical, ext4_fsblk_t *phys)
14731988b51eSAlex Tomas {
14741988b51eSAlex Tomas struct ext4_extent_idx *ix;
14751988b51eSAlex Tomas struct ext4_extent *ex;
1476b939e376SAneesh Kumar K.V int depth, ee_len;
14771988b51eSAlex Tomas 
1478273df556SFrank Mayhar if (unlikely(path == NULL)) {
1479273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
14806a797d27SDarrick J. Wong return -EFSCORRUPTED;
1481273df556SFrank Mayhar }
14821988b51eSAlex Tomas depth = path->p_depth;
14831988b51eSAlex Tomas *phys = 0;
14841988b51eSAlex Tomas 
14851988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL)
14861988b51eSAlex Tomas return 0;
14871988b51eSAlex Tomas 
14881988b51eSAlex Tomas /* usually extent in the path covers blocks smaller
14891988b51eSAlex Tomas * than *logical, but it can be that extent is the
14901988b51eSAlex Tomas * first one in the file */
14911988b51eSAlex Tomas 
14921988b51eSAlex Tomas ex = path[depth].p_ext;
1493b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex);
14941988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) {
1495273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1496273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1497273df556SFrank Mayhar "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1498273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block));
14996a797d27SDarrick J. Wong return -EFSCORRUPTED;
1500273df556SFrank Mayhar }
15011988b51eSAlex Tomas while (--depth >= 0) {
15021988b51eSAlex Tomas ix = path[depth].p_idx;
1503273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1504273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1505273df556SFrank Mayhar "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
15066ee3b212STao Ma ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1507037e7c52SAdam Borowski le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block),
1508273df556SFrank Mayhar depth);
15096a797d27SDarrick J. Wong return -EFSCORRUPTED;
1510273df556SFrank Mayhar }
15111988b51eSAlex Tomas }
15121988b51eSAlex Tomas return 0;
15131988b51eSAlex Tomas }
15141988b51eSAlex Tomas 
1515273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1516273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1517273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!",
1518273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len);
15196a797d27SDarrick J. Wong return -EFSCORRUPTED;
1520273df556SFrank Mayhar }
15211988b51eSAlex Tomas 
1522b939e376SAneesh Kumar K.V *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1523bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex) + ee_len - 1;
15241988b51eSAlex Tomas return 0;
15251988b51eSAlex Tomas }
15261988b51eSAlex Tomas 
15271988b51eSAlex Tomas /*
1528d7dce9e0Syangerkun * Search the closest allocated block to the right of *logical
1529d7dce9e0Syangerkun * and return it at @logical + its physical address at @phys.
1530d7dce9e0Syangerkun * If none exists, return 0 and set @phys to 0. Return 1 when an
1531d7dce9e0Syangerkun * allocated block was found, in which case @ret_ex is valid.
1532d7dce9e0Syangerkun * Otherwise return a (< 0) error code.
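 *
 * For example (hypothetical layout): with extents [0..9] -> P and
 * [20..29] -> Q in the tree, a call with *logical == 12 returns 1 and
 * sets *logical = 20 and *phys = Q, the start of the second extent.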
15331988b51eSAlex Tomas */
15341f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode,
15351f109d5aSTheodore Ts'o struct ext4_ext_path *path,
15364d33b1efSTheodore Ts'o ext4_lblk_t *logical, ext4_fsblk_t *phys,
1537d7dce9e0Syangerkun struct ext4_extent *ret_ex)
15381988b51eSAlex Tomas {
15391988b51eSAlex Tomas struct buffer_head *bh = NULL;
15401988b51eSAlex Tomas struct ext4_extent_header *eh;
15411988b51eSAlex Tomas struct ext4_extent_idx *ix;
15421988b51eSAlex Tomas struct ext4_extent *ex;
1543395a87bfSEric Sandeen int depth; /* Note, NOT eh_depth; depth from top of tree */
1544395a87bfSEric Sandeen int ee_len;
15451988b51eSAlex Tomas 
1546273df556SFrank Mayhar if (unlikely(path == NULL)) {
1547273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
15486a797d27SDarrick J. Wong return -EFSCORRUPTED;
1549273df556SFrank Mayhar }
15501988b51eSAlex Tomas depth = path->p_depth;
15511988b51eSAlex Tomas *phys = 0;
15521988b51eSAlex Tomas 
15531988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL)
15541988b51eSAlex Tomas return 0;
15551988b51eSAlex Tomas 
15561988b51eSAlex Tomas /* usually extent in the path covers blocks smaller
15571988b51eSAlex Tomas * than *logical, but it can be that extent is the
15581988b51eSAlex Tomas * first one in the file */
15591988b51eSAlex Tomas 
15601988b51eSAlex Tomas ex = path[depth].p_ext;
1561b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex);
15621988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) {
1563273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1564273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1565273df556SFrank Mayhar "first_extent(path[%d].p_hdr) != ex",
1566273df556SFrank Mayhar depth);
15676a797d27SDarrick J. Wong return -EFSCORRUPTED;
1568273df556SFrank Mayhar }
15691988b51eSAlex Tomas while (--depth >= 0) {
15701988b51eSAlex Tomas ix = path[depth].p_idx;
1571273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1572273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1573273df556SFrank Mayhar "ix != EXT_FIRST_INDEX *logical %d!",
1574273df556SFrank Mayhar *logical);
15756a797d27SDarrick J. Wong return -EFSCORRUPTED;
1576273df556SFrank Mayhar }
15771988b51eSAlex Tomas }
15784d33b1efSTheodore Ts'o goto found_extent;
15791988b51eSAlex Tomas }
15801988b51eSAlex Tomas 
1581273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1582273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1583273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!",
1584273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len);
15856a797d27SDarrick J.
Wong return -EFSCORRUPTED; 1586273df556SFrank Mayhar } 15871988b51eSAlex Tomas 15881988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 15891988b51eSAlex Tomas /* next allocated block in this leaf */ 15901988b51eSAlex Tomas ex++; 15914d33b1efSTheodore Ts'o goto found_extent; 15921988b51eSAlex Tomas } 15931988b51eSAlex Tomas 15941988b51eSAlex Tomas /* go up and search for index to the right */ 15951988b51eSAlex Tomas while (--depth >= 0) { 15961988b51eSAlex Tomas ix = path[depth].p_idx; 15971988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 159825f1ee3aSWu Fengguang goto got_index; 15991988b51eSAlex Tomas } 16001988b51eSAlex Tomas 160125f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */ 16021988b51eSAlex Tomas return 0; 16031988b51eSAlex Tomas 160425f1ee3aSWu Fengguang got_index: 16051988b51eSAlex Tomas /* we've found index to the right, let's 16061988b51eSAlex Tomas * follow it and find the closest allocated 16071988b51eSAlex Tomas * block to the right */ 16081988b51eSAlex Tomas ix++; 16091988b51eSAlex Tomas while (++depth < path->p_depth) { 1610395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */ 16119c6e0719SZhang Yi bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0); 16127d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 16137d7ea89eSTheodore Ts'o return PTR_ERR(bh); 16147d7ea89eSTheodore Ts'o eh = ext_block_hdr(bh); 16151988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh); 16161988b51eSAlex Tomas put_bh(bh); 16171988b51eSAlex Tomas } 16181988b51eSAlex Tomas 16199c6e0719SZhang Yi bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0); 16207d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 16217d7ea89eSTheodore Ts'o return PTR_ERR(bh); 16221988b51eSAlex Tomas eh = ext_block_hdr(bh); 16231988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh); 16244d33b1efSTheodore Ts'o found_extent: 16251988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block); 1626bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex); 1627d7dce9e0Syangerkun if (ret_ex) 1628d7dce9e0Syangerkun *ret_ex = *ex; 16294d33b1efSTheodore Ts'o if (bh) 16301988b51eSAlex Tomas put_bh(bh); 1631d7dce9e0Syangerkun return 1; 16321988b51eSAlex Tomas } 16331988b51eSAlex Tomas 16341988b51eSAlex Tomas /* 1635d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block: 1636f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1637d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as 1638d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent 1639d0d856e8SRandy Dunlap * with leaves. 
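 *
 * For example (hypothetical): if the leaf's last extent ends at logical
 * block 99 and the parent index holds a following entry whose ei_block
 * is 150, the function reports 150 without reading that sibling leaf,
 * which is why index entries must stay consistent with the leaves.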
1640a86c6181SAlex Tomas */ 1641fcf6b1b7SDmitry Monakhov ext4_lblk_t 1642a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1643a86c6181SAlex Tomas { 1644a86c6181SAlex Tomas int depth; 1645a86c6181SAlex Tomas 1646a86c6181SAlex Tomas BUG_ON(path == NULL); 1647a86c6181SAlex Tomas depth = path->p_depth; 1648a86c6181SAlex Tomas 1649a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL) 1650f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1651a86c6181SAlex Tomas 1652a86c6181SAlex Tomas while (depth >= 0) { 16536e89bbb7SEric Biggers struct ext4_ext_path *p = &path[depth]; 16546e89bbb7SEric Biggers 1655a86c6181SAlex Tomas if (depth == path->p_depth) { 1656a86c6181SAlex Tomas /* leaf */ 16576e89bbb7SEric Biggers if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr)) 16586e89bbb7SEric Biggers return le32_to_cpu(p->p_ext[1].ee_block); 1659a86c6181SAlex Tomas } else { 1660a86c6181SAlex Tomas /* index */ 16616e89bbb7SEric Biggers if (p->p_idx != EXT_LAST_INDEX(p->p_hdr)) 16626e89bbb7SEric Biggers return le32_to_cpu(p->p_idx[1].ei_block); 1663a86c6181SAlex Tomas } 1664a86c6181SAlex Tomas depth--; 1665a86c6181SAlex Tomas } 1666a86c6181SAlex Tomas 1667f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1668a86c6181SAlex Tomas } 1669a86c6181SAlex Tomas 1670a86c6181SAlex Tomas /* 1671d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block: 1672f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1673a86c6181SAlex Tomas */ 16745718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1675a86c6181SAlex Tomas { 1676a86c6181SAlex Tomas int depth; 1677a86c6181SAlex Tomas 1678a86c6181SAlex Tomas BUG_ON(path == NULL); 1679a86c6181SAlex Tomas depth = path->p_depth; 1680a86c6181SAlex Tomas 1681a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */ 1682a86c6181SAlex Tomas if (depth == 0) 1683f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1684a86c6181SAlex Tomas 1685a86c6181SAlex Tomas /* go to index block */ 1686a86c6181SAlex Tomas depth--; 1687a86c6181SAlex Tomas 1688a86c6181SAlex Tomas while (depth >= 0) { 1689a86c6181SAlex Tomas if (path[depth].p_idx != 1690a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1691725d26d3SAneesh Kumar K.V return (ext4_lblk_t) 1692725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block); 1693a86c6181SAlex Tomas depth--; 1694a86c6181SAlex Tomas } 1695a86c6181SAlex Tomas 1696f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1697a86c6181SAlex Tomas } 1698a86c6181SAlex Tomas 1699a86c6181SAlex Tomas /* 1700d0d856e8SRandy Dunlap * ext4_ext_correct_indexes: 1701d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf, 1702d0d856e8SRandy Dunlap * then we have to correct all indexes above. 1703a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases? 
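 *
 * For example (hypothetical): if the first extent of a leaf now starts
 * at logical block 96 instead of 100, each index entry on the path that
 * still carries 100 and is itself first in its node is rewritten to 96;
 * that is what the while (k--) loop below walks upward to do.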
1704a86c6181SAlex Tomas */ 17051d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1706a86c6181SAlex Tomas struct ext4_ext_path *path) 1707a86c6181SAlex Tomas { 1708a86c6181SAlex Tomas struct ext4_extent_header *eh; 1709a86c6181SAlex Tomas int depth = ext_depth(inode); 1710a86c6181SAlex Tomas struct ext4_extent *ex; 1711a86c6181SAlex Tomas __le32 border; 1712a86c6181SAlex Tomas int k, err = 0; 1713a86c6181SAlex Tomas 1714a86c6181SAlex Tomas eh = path[depth].p_hdr; 1715a86c6181SAlex Tomas ex = path[depth].p_ext; 1716273df556SFrank Mayhar 1717273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) { 1718273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1719273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh); 17206a797d27SDarrick J. Wong return -EFSCORRUPTED; 1721273df556SFrank Mayhar } 1722a86c6181SAlex Tomas 1723a86c6181SAlex Tomas if (depth == 0) { 1724a86c6181SAlex Tomas /* there is no tree at all */ 1725a86c6181SAlex Tomas return 0; 1726a86c6181SAlex Tomas } 1727a86c6181SAlex Tomas 1728a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) { 1729a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */ 1730a86c6181SAlex Tomas return 0; 1731a86c6181SAlex Tomas } 1732a86c6181SAlex Tomas 1733a86c6181SAlex Tomas /* 1734d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one 1735a86c6181SAlex Tomas */ 1736a86c6181SAlex Tomas k = depth - 1; 1737a86c6181SAlex Tomas border = path[depth].p_ext->ee_block; 17387e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 17397e028976SAvantika Mathur if (err) 1740a86c6181SAlex Tomas return err; 1741a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 17427e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 17437e028976SAvantika Mathur if (err) 1744a86c6181SAlex Tomas return err; 1745a86c6181SAlex Tomas 1746a86c6181SAlex Tomas while (k--) { 1747a86c6181SAlex Tomas /* change all left-side indexes */ 1748a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1749a86c6181SAlex Tomas break; 17507e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 17517e028976SAvantika Mathur if (err) 1752a86c6181SAlex Tomas break; 1753a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 17547e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 17557e028976SAvantika Mathur if (err) 1756a86c6181SAlex Tomas break; 1757a86c6181SAlex Tomas } 1758a86c6181SAlex Tomas 1759a86c6181SAlex Tomas return err; 1760a86c6181SAlex Tomas } 1761a86c6181SAlex Tomas 176243f81677SEric Biggers static int ext4_can_extents_be_merged(struct inode *inode, 176343f81677SEric Biggers struct ext4_extent *ex1, 1764a86c6181SAlex Tomas struct ext4_extent *ex2) 1765a86c6181SAlex Tomas { 1766da0169b3SEric Sandeen unsigned short ext1_ee_len, ext2_ee_len; 1767a2df2a63SAmit Arora 1768556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) 1769a2df2a63SAmit Arora return 0; 1770a2df2a63SAmit Arora 1771a2df2a63SAmit Arora ext1_ee_len = ext4_ext_get_actual_len(ex1); 1772a2df2a63SAmit Arora ext2_ee_len = ext4_ext_get_actual_len(ex2); 1773a2df2a63SAmit Arora 1774a2df2a63SAmit Arora if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 177563f57933SAndrew Morton le32_to_cpu(ex2->ee_block)) 1776a86c6181SAlex Tomas return 0; 1777a86c6181SAlex Tomas 1778da0169b3SEric Sandeen if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) 1779471d4011SSuparna Bhattacharya return 0; 1780378f32baSMatthew 
Bobrowski 1781556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex1) && 1782378f32baSMatthew Bobrowski ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN) 1783a9b82415SDarrick J. Wong return 0; 1784bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 1785b939e376SAneesh Kumar K.V if (ext1_ee_len >= 4) 1786a86c6181SAlex Tomas return 0; 1787a86c6181SAlex Tomas #endif 1788a86c6181SAlex Tomas 1789bf89d16fSTheodore Ts'o if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1790a86c6181SAlex Tomas return 1; 1791a86c6181SAlex Tomas return 0; 1792a86c6181SAlex Tomas } 1793a86c6181SAlex Tomas 1794a86c6181SAlex Tomas /* 179556055d3aSAmit Arora * This function tries to merge the "ex" extent to the next extent in the tree. 179656055d3aSAmit Arora * It always tries to merge towards right. If you want to merge towards 179756055d3aSAmit Arora * left, pass "ex - 1" as argument instead of "ex". 179856055d3aSAmit Arora * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 179956055d3aSAmit Arora * 1 if they got merged. 180056055d3aSAmit Arora */ 1801197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode, 180256055d3aSAmit Arora struct ext4_ext_path *path, 180356055d3aSAmit Arora struct ext4_extent *ex) 180456055d3aSAmit Arora { 180556055d3aSAmit Arora struct ext4_extent_header *eh; 180656055d3aSAmit Arora unsigned int depth, len; 1807556615dcSLukas Czerner int merge_done = 0, unwritten; 180856055d3aSAmit Arora 180956055d3aSAmit Arora depth = ext_depth(inode); 181056055d3aSAmit Arora BUG_ON(path[depth].p_hdr == NULL); 181156055d3aSAmit Arora eh = path[depth].p_hdr; 181256055d3aSAmit Arora 181356055d3aSAmit Arora while (ex < EXT_LAST_EXTENT(eh)) { 181456055d3aSAmit Arora if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 181556055d3aSAmit Arora break; 181656055d3aSAmit Arora /* merge with next extent! */ 1817556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 181856055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 181956055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1)); 1820556615dcSLukas Czerner if (unwritten) 1821556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 182256055d3aSAmit Arora 182356055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) { 182456055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1) 182556055d3aSAmit Arora * sizeof(struct ext4_extent); 182656055d3aSAmit Arora memmove(ex + 1, ex + 2, len); 182756055d3aSAmit Arora } 1828e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1); 182956055d3aSAmit Arora merge_done = 1; 183056055d3aSAmit Arora WARN_ON(eh->eh_entries == 0); 183156055d3aSAmit Arora if (!eh->eh_entries) 183224676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 183356055d3aSAmit Arora } 183456055d3aSAmit Arora 183556055d3aSAmit Arora return merge_done; 183656055d3aSAmit Arora } 183756055d3aSAmit Arora 183856055d3aSAmit Arora /* 1839ecb94f5fSTheodore Ts'o * This function does a very simple check to see if we can collapse 1840ecb94f5fSTheodore Ts'o * an extent tree with a single extent tree leaf block into the inode. 
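 *
 * Roughly (a summary of the checks below, not an extra rule): collapse
 * only happens when the tree has depth 1, the root carries exactly one
 * index entry, the lone leaf's entries fit in the in-inode root, and
 * the journal can be extended for the bitmap and group descriptor
 * updates; the leaf is then copied over i_data and its block is freed.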
1841ecb94f5fSTheodore Ts'o */ 1842ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle, 1843ecb94f5fSTheodore Ts'o struct inode *inode, 1844ecb94f5fSTheodore Ts'o struct ext4_ext_path *path) 1845ecb94f5fSTheodore Ts'o { 1846ecb94f5fSTheodore Ts'o size_t s; 1847ecb94f5fSTheodore Ts'o unsigned max_root = ext4_ext_space_root(inode, 0); 1848ecb94f5fSTheodore Ts'o ext4_fsblk_t blk; 1849ecb94f5fSTheodore Ts'o 1850ecb94f5fSTheodore Ts'o if ((path[0].p_depth != 1) || 1851ecb94f5fSTheodore Ts'o (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || 1852ecb94f5fSTheodore Ts'o (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) 1853ecb94f5fSTheodore Ts'o return; 1854ecb94f5fSTheodore Ts'o 1855ecb94f5fSTheodore Ts'o /* 1856ecb94f5fSTheodore Ts'o * We need to modify the block allocation bitmap and the block 1857ecb94f5fSTheodore Ts'o * group descriptor to release the extent tree block. If we 1858ecb94f5fSTheodore Ts'o * can't get the journal credits, give up. 1859ecb94f5fSTheodore Ts'o */ 186083448bdfSJan Kara if (ext4_journal_extend(handle, 2, 186183448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, 1))) 1862ecb94f5fSTheodore Ts'o return; 1863ecb94f5fSTheodore Ts'o 1864ecb94f5fSTheodore Ts'o /* 1865ecb94f5fSTheodore Ts'o * Copy the extent data up to the inode 1866ecb94f5fSTheodore Ts'o */ 1867ecb94f5fSTheodore Ts'o blk = ext4_idx_pblock(path[0].p_idx); 1868ecb94f5fSTheodore Ts'o s = le16_to_cpu(path[1].p_hdr->eh_entries) * 1869ecb94f5fSTheodore Ts'o sizeof(struct ext4_extent_idx); 1870ecb94f5fSTheodore Ts'o s += sizeof(struct ext4_extent_header); 1871ecb94f5fSTheodore Ts'o 187210809df8STheodore Ts'o path[1].p_maxdepth = path[0].p_maxdepth; 1873ecb94f5fSTheodore Ts'o memcpy(path[0].p_hdr, path[1].p_hdr, s); 1874ecb94f5fSTheodore Ts'o path[0].p_depth = 0; 1875ecb94f5fSTheodore Ts'o path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + 1876ecb94f5fSTheodore Ts'o (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); 1877ecb94f5fSTheodore Ts'o path[0].p_hdr->eh_max = cpu_to_le16(max_root); 1878ecb94f5fSTheodore Ts'o 1879ecb94f5fSTheodore Ts'o brelse(path[1].p_bh); 1880ecb94f5fSTheodore Ts'o ext4_free_blocks(handle, inode, NULL, blk, 1, 188171d4f7d0STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 1882ecb94f5fSTheodore Ts'o } 1883ecb94f5fSTheodore Ts'o 1884ecb94f5fSTheodore Ts'o /* 1885adde81cfSEric Biggers * This function tries to merge the @ex extent to neighbours in the tree, then 1886adde81cfSEric Biggers * tries to collapse the extent tree into the inode. 
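 *
 * The left neighbour (ex - 1 with ex) is tried first, then the right
 * one (ex with ex + 1), and finally ext4_ext_try_to_merge_up() may
 * collapse a single-leaf tree back into the inode.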
1887197217a5SYongqiang Yang */ 1888ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle, 1889ecb94f5fSTheodore Ts'o struct inode *inode, 1890197217a5SYongqiang Yang struct ext4_ext_path *path, 1891adde81cfSEric Biggers struct ext4_extent *ex) 1892adde81cfSEric Biggers { 1893197217a5SYongqiang Yang struct ext4_extent_header *eh; 1894197217a5SYongqiang Yang unsigned int depth; 1895197217a5SYongqiang Yang int merge_done = 0; 1896197217a5SYongqiang Yang 1897197217a5SYongqiang Yang depth = ext_depth(inode); 1898197217a5SYongqiang Yang BUG_ON(path[depth].p_hdr == NULL); 1899197217a5SYongqiang Yang eh = path[depth].p_hdr; 1900197217a5SYongqiang Yang 1901197217a5SYongqiang Yang if (ex > EXT_FIRST_EXTENT(eh)) 1902197217a5SYongqiang Yang merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1903197217a5SYongqiang Yang 1904197217a5SYongqiang Yang if (!merge_done) 1905ecb94f5fSTheodore Ts'o (void) ext4_ext_try_to_merge_right(inode, path, ex); 1906197217a5SYongqiang Yang 1907ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge_up(handle, inode, path); 1908197217a5SYongqiang Yang } 1909197217a5SYongqiang Yang 1910197217a5SYongqiang Yang /* 191125d14f98SAmit Arora * check if a portion of the "newext" extent overlaps with an 191225d14f98SAmit Arora * existing extent. 191325d14f98SAmit Arora * 191425d14f98SAmit Arora * If there is an overlap discovered, it updates the length of the newext 191525d14f98SAmit Arora * such that there will be no overlap, and then returns 1. 191625d14f98SAmit Arora * If there is no overlap found, it returns 0. 191725d14f98SAmit Arora */ 19184d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 19194d33b1efSTheodore Ts'o struct inode *inode, 192025d14f98SAmit Arora struct ext4_extent *newext, 192125d14f98SAmit Arora struct ext4_ext_path *path) 192225d14f98SAmit Arora { 1923725d26d3SAneesh Kumar K.V ext4_lblk_t b1, b2; 192425d14f98SAmit Arora unsigned int depth, len1; 192525d14f98SAmit Arora unsigned int ret = 0; 192625d14f98SAmit Arora 192725d14f98SAmit Arora b1 = le32_to_cpu(newext->ee_block); 1928a2df2a63SAmit Arora len1 = ext4_ext_get_actual_len(newext); 192925d14f98SAmit Arora depth = ext_depth(inode); 193025d14f98SAmit Arora if (!path[depth].p_ext) 193125d14f98SAmit Arora goto out; 1932f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); 193325d14f98SAmit Arora 193425d14f98SAmit Arora /* 193525d14f98SAmit Arora * get the next allocated block if the extent in the path 193625d14f98SAmit Arora * is before the requested block(s) 193725d14f98SAmit Arora */ 193825d14f98SAmit Arora if (b2 < b1) { 193925d14f98SAmit Arora b2 = ext4_ext_next_allocated_block(path); 1940f17722f9SLukas Czerner if (b2 == EXT_MAX_BLOCKS) 194125d14f98SAmit Arora goto out; 1942f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, b2); 194325d14f98SAmit Arora } 194425d14f98SAmit Arora 1945725d26d3SAneesh Kumar K.V /* check for wrap through zero on extent logical start block*/ 194625d14f98SAmit Arora if (b1 + len1 < b1) { 1947f17722f9SLukas Czerner len1 = EXT_MAX_BLOCKS - b1; 194825d14f98SAmit Arora newext->ee_len = cpu_to_le16(len1); 194925d14f98SAmit Arora ret = 1; 195025d14f98SAmit Arora } 195125d14f98SAmit Arora 195225d14f98SAmit Arora /* check for overlap */ 195325d14f98SAmit Arora if (b1 + len1 > b2) { 195425d14f98SAmit Arora newext->ee_len = cpu_to_le16(b2 - b1); 195525d14f98SAmit Arora ret = 1; 195625d14f98SAmit Arora } 195725d14f98SAmit Arora out: 195825d14f98SAmit Arora return ret; 195925d14f98SAmit Arora } 
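/*
 * A worked example for ext4_ext_check_overlap() below (hypothetical
 * numbers, non-bigalloc so EXT4_LBLK_CMASK() changes nothing): inserting
 * newext at b1 = 100 with len1 = 50 while an existing extent starts at
 * b2 = 120 gives b1 + len1 = 150 > b2, so newext->ee_len is trimmed to
 * 120 - 100 = 20 and 1 is returned; only the non-overlapping head
 * [100..119] is then inserted by the caller.
 */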
196025d14f98SAmit Arora 196125d14f98SAmit Arora /* 1962d0d856e8SRandy Dunlap * ext4_ext_insert_extent: 1963e4d7f2d3SKeyur Patel * tries to merge requested extent into the existing extent or 1964d0d856e8SRandy Dunlap * inserts requested extent as new one into the tree, 1965d0d856e8SRandy Dunlap * creating new leaf in the no-space case. 1966a86c6181SAlex Tomas */ 1967a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1968dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 1969107a7bd3STheodore Ts'o struct ext4_extent *newext, int gb_flags) 1970a86c6181SAlex Tomas { 1971dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 1972a86c6181SAlex Tomas struct ext4_extent_header *eh; 1973a86c6181SAlex Tomas struct ext4_extent *ex, *fex; 1974a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */ 1975a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL; 1976725d26d3SAneesh Kumar K.V int depth, len, err; 1977725d26d3SAneesh Kumar K.V ext4_lblk_t next; 1978556615dcSLukas Czerner int mb_flags = 0, unwritten; 1979a86c6181SAlex Tomas 1980e3cf5d5dSTheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 1981e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_DELALLOC_RESERVED; 1982273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1983273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 19846a797d27SDarrick J. Wong return -EFSCORRUPTED; 1985273df556SFrank Mayhar } 1986a86c6181SAlex Tomas depth = ext_depth(inode); 1987a86c6181SAlex Tomas ex = path[depth].p_ext; 1988be8981beSLukas Czerner eh = path[depth].p_hdr; 1989273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1990273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 19916a797d27SDarrick J. Wong return -EFSCORRUPTED; 1992273df556SFrank Mayhar } 1993a86c6181SAlex Tomas 1994a86c6181SAlex Tomas /* try to insert block into found extent and return */ 1995107a7bd3STheodore Ts'o if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { 1996be8981beSLukas Czerner 1997be8981beSLukas Czerner /* 1998be8981beSLukas Czerner * Try to see whether we should rather test the extent on 1999be8981beSLukas Czerner * right from ex, or from the left of ex. This is because 2000ed8a1a76STheodore Ts'o * ext4_find_extent() can return either extent on the 2001be8981beSLukas Czerner * left, or on the right from the searched position. This 2002be8981beSLukas Czerner * will make merging more effective. 
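 *
 * For example (hypothetical): if the lookup returned [100..199] and the
 * new extent starts at 205, stepping to the following extent lets 205
 * be glued onto a neighbour starting at 206, provided the physical
 * blocks line up as well.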
2003be8981beSLukas Czerner */ 2004be8981beSLukas Czerner if (ex < EXT_LAST_EXTENT(eh) && 2005be8981beSLukas Czerner (le32_to_cpu(ex->ee_block) + 2006be8981beSLukas Czerner ext4_ext_get_actual_len(ex) < 2007be8981beSLukas Czerner le32_to_cpu(newext->ee_block))) { 2008be8981beSLukas Czerner ex += 1; 2009be8981beSLukas Czerner goto prepend; 2010be8981beSLukas Czerner } else if ((ex > EXT_FIRST_EXTENT(eh)) && 2011be8981beSLukas Czerner (le32_to_cpu(newext->ee_block) + 2012be8981beSLukas Czerner ext4_ext_get_actual_len(newext) < 2013be8981beSLukas Czerner le32_to_cpu(ex->ee_block))) 2014be8981beSLukas Czerner ex -= 1; 2015be8981beSLukas Czerner 2016be8981beSLukas Czerner /* Try to append newex to the ex */ 2017be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, ex, newext)) { 201870aa1554SRitesh Harjani ext_debug(inode, "append [%d]%d block to %u:[%d]%d" 2019be8981beSLukas Czerner "(from %llu)\n", 2020556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2021a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 2022a86c6181SAlex Tomas le32_to_cpu(ex->ee_block), 2023556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 2024bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), 2025bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 2026be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 2027be8981beSLukas Czerner path + depth); 20287e028976SAvantika Mathur if (err) 2029a86c6181SAlex Tomas return err; 2030556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 2031a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2032a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext)); 2033556615dcSLukas Czerner if (unwritten) 2034556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 2035a86c6181SAlex Tomas nearex = ex; 2036a86c6181SAlex Tomas goto merge; 2037a86c6181SAlex Tomas } 2038a86c6181SAlex Tomas 2039be8981beSLukas Czerner prepend: 2040be8981beSLukas Czerner /* Try to prepend newex to the ex */ 2041be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, newext, ex)) { 204270aa1554SRitesh Harjani ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d" 2043be8981beSLukas Czerner "(from %llu)\n", 2044be8981beSLukas Czerner le32_to_cpu(newext->ee_block), 2045556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2046be8981beSLukas Czerner ext4_ext_get_actual_len(newext), 2047be8981beSLukas Czerner le32_to_cpu(ex->ee_block), 2048556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 2049be8981beSLukas Czerner ext4_ext_get_actual_len(ex), 2050be8981beSLukas Czerner ext4_ext_pblock(ex)); 2051be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 2052be8981beSLukas Czerner path + depth); 2053be8981beSLukas Czerner if (err) 2054be8981beSLukas Czerner return err; 2055be8981beSLukas Czerner 2056556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 2057be8981beSLukas Czerner ex->ee_block = newext->ee_block; 2058be8981beSLukas Czerner ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 2059be8981beSLukas Czerner ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2060be8981beSLukas Czerner + ext4_ext_get_actual_len(newext)); 2061556615dcSLukas Czerner if (unwritten) 2062556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 2063be8981beSLukas Czerner nearex = ex; 2064be8981beSLukas Czerner goto merge; 2065be8981beSLukas Czerner } 2066be8981beSLukas Czerner } 2067be8981beSLukas Czerner 2068a86c6181SAlex Tomas depth = ext_depth(inode); 2069a86c6181SAlex Tomas eh = path[depth].p_hdr; 2070a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 
2071a86c6181SAlex Tomas goto has_space; 2072a86c6181SAlex Tomas 2073a86c6181SAlex Tomas /* probably next leaf has space for us? */ 2074a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh); 2075598dbdf2SRobin Dong next = EXT_MAX_BLOCKS; 2076598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 20775718789dSRobin Dong next = ext4_ext_next_leaf_block(path); 2078598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) { 207970aa1554SRitesh Harjani ext_debug(inode, "next leaf block - %u\n", next); 2080a86c6181SAlex Tomas BUG_ON(npath != NULL); 208173c384c0STheodore Ts'o npath = ext4_find_extent(inode, next, NULL, gb_flags); 2082a86c6181SAlex Tomas if (IS_ERR(npath)) 2083a86c6181SAlex Tomas return PTR_ERR(npath); 2084a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth); 2085a86c6181SAlex Tomas eh = npath[depth].p_hdr; 2086a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 208770aa1554SRitesh Harjani ext_debug(inode, "next leaf isn't full(%d)\n", 2088a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries)); 2089a86c6181SAlex Tomas path = npath; 2090ffb505ffSRobin Dong goto has_space; 2091a86c6181SAlex Tomas } 209270aa1554SRitesh Harjani ext_debug(inode, "next leaf has no free space(%d,%d)\n", 2093a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2094a86c6181SAlex Tomas } 2095a86c6181SAlex Tomas 2096a86c6181SAlex Tomas /* 2097d0d856e8SRandy Dunlap * There is no free space in the found leaf. 2098d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree. 2099a86c6181SAlex Tomas */ 2100107a7bd3STheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2101e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_USE_RESERVED; 2102107a7bd3STheodore Ts'o err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2103dfe50809STheodore Ts'o ppath, newext); 2104a86c6181SAlex Tomas if (err) 2105a86c6181SAlex Tomas goto cleanup; 2106*8162ee5dSBaokun Li path = *ppath; 2107a86c6181SAlex Tomas depth = ext_depth(inode); 2108a86c6181SAlex Tomas eh = path[depth].p_hdr; 2109a86c6181SAlex Tomas 2110a86c6181SAlex Tomas has_space: 2111a86c6181SAlex Tomas nearex = path[depth].p_ext; 2112a86c6181SAlex Tomas 21137e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 21147e028976SAvantika Mathur if (err) 2115a86c6181SAlex Tomas goto cleanup; 2116a86c6181SAlex Tomas 2117a86c6181SAlex Tomas if (!nearex) { 2118a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */ 211970aa1554SRitesh Harjani ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n", 2120a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2121bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2122556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2123a2df2a63SAmit Arora ext4_ext_get_actual_len(newext)); 212480e675f9SEric Gouriou nearex = EXT_FIRST_EXTENT(eh); 2125a86c6181SAlex Tomas } else { 212680e675f9SEric Gouriou if (le32_to_cpu(newext->ee_block) 212780e675f9SEric Gouriou > le32_to_cpu(nearex->ee_block)) { 212880e675f9SEric Gouriou /* Insert after */ 212970aa1554SRitesh Harjani ext_debug(inode, "insert %u:%llu:[%d]%d before: " 213032de6756SYongqiang Yang "nearest %p\n", 2131a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2132bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2133556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2134a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 213580e675f9SEric Gouriou nearex); 213680e675f9SEric Gouriou nearex++; 213780e675f9SEric Gouriou } else { 213880e675f9SEric Gouriou /* Insert before */ 
213980e675f9SEric Gouriou BUG_ON(newext->ee_block == nearex->ee_block); 214070aa1554SRitesh Harjani ext_debug(inode, "insert %u:%llu:[%d]%d after: " 214132de6756SYongqiang Yang "nearest %p\n", 214280e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 214380e675f9SEric Gouriou ext4_ext_pblock(newext), 2144556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 214580e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 214680e675f9SEric Gouriou nearex); 214780e675f9SEric Gouriou } 214880e675f9SEric Gouriou len = EXT_LAST_EXTENT(eh) - nearex + 1; 214980e675f9SEric Gouriou if (len > 0) { 215070aa1554SRitesh Harjani ext_debug(inode, "insert %u:%llu:[%d]%d: " 215180e675f9SEric Gouriou "move %d extents from 0x%p to 0x%p\n", 215280e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 215380e675f9SEric Gouriou ext4_ext_pblock(newext), 2154556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 215580e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 215680e675f9SEric Gouriou len, nearex, nearex + 1); 215780e675f9SEric Gouriou memmove(nearex + 1, nearex, 215880e675f9SEric Gouriou len * sizeof(struct ext4_extent)); 215980e675f9SEric Gouriou } 2160a86c6181SAlex Tomas } 2161a86c6181SAlex Tomas 2162e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1); 216380e675f9SEric Gouriou path[depth].p_ext = nearex; 2164a86c6181SAlex Tomas nearex->ee_block = newext->ee_block; 2165bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2166a86c6181SAlex Tomas nearex->ee_len = newext->ee_len; 2167a86c6181SAlex Tomas 2168a86c6181SAlex Tomas merge: 2169e7bcf823SHaiboLiu /* try to merge extents */ 2170107a7bd3STheodore Ts'o if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) 2171ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, nearex); 2172a86c6181SAlex Tomas 2173a86c6181SAlex Tomas 2174a86c6181SAlex Tomas /* time to correct all indexes above */ 2175a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2176a86c6181SAlex Tomas if (err) 2177a86c6181SAlex Tomas goto cleanup; 2178a86c6181SAlex Tomas 2179ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2180a86c6181SAlex Tomas 2181a86c6181SAlex Tomas cleanup: 21827ff5fddaSYe Bin ext4_free_ext_path(npath); 2183a86c6181SAlex Tomas return err; 2184a86c6181SAlex Tomas } 2185a86c6181SAlex Tomas 2186bb5835edSTheodore Ts'o static int ext4_fill_es_cache_info(struct inode *inode, 2187bb5835edSTheodore Ts'o ext4_lblk_t block, ext4_lblk_t num, 2188bb5835edSTheodore Ts'o struct fiemap_extent_info *fieinfo) 2189bb5835edSTheodore Ts'o { 2190bb5835edSTheodore Ts'o ext4_lblk_t next, end = block + num - 1; 2191bb5835edSTheodore Ts'o struct extent_status es; 2192bb5835edSTheodore Ts'o unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 2193bb5835edSTheodore Ts'o unsigned int flags; 2194bb5835edSTheodore Ts'o int err; 2195bb5835edSTheodore Ts'o 2196bb5835edSTheodore Ts'o while (block <= end) { 2197bb5835edSTheodore Ts'o next = 0; 2198bb5835edSTheodore Ts'o flags = 0; 2199bb5835edSTheodore Ts'o if (!ext4_es_lookup_extent(inode, block, &next, &es)) 2200bb5835edSTheodore Ts'o break; 2201bb5835edSTheodore Ts'o if (ext4_es_is_unwritten(&es)) 2202bb5835edSTheodore Ts'o flags |= FIEMAP_EXTENT_UNWRITTEN; 2203bb5835edSTheodore Ts'o if (ext4_es_is_delayed(&es)) 2204bb5835edSTheodore Ts'o flags |= (FIEMAP_EXTENT_DELALLOC | 2205bb5835edSTheodore Ts'o FIEMAP_EXTENT_UNKNOWN); 2206bb5835edSTheodore Ts'o if (ext4_es_is_hole(&es)) 2207bb5835edSTheodore Ts'o flags |= EXT4_FIEMAP_EXTENT_HOLE; 2208bb5835edSTheodore Ts'o if 
(next == 0) 2209bb5835edSTheodore Ts'o flags |= FIEMAP_EXTENT_LAST; 2210bb5835edSTheodore Ts'o if (flags & (FIEMAP_EXTENT_DELALLOC| 2211bb5835edSTheodore Ts'o EXT4_FIEMAP_EXTENT_HOLE)) 2212bb5835edSTheodore Ts'o es.es_pblk = 0; 2213bb5835edSTheodore Ts'o else 2214bb5835edSTheodore Ts'o es.es_pblk = ext4_es_pblock(&es); 2215bb5835edSTheodore Ts'o err = fiemap_fill_next_extent(fieinfo, 2216bb5835edSTheodore Ts'o (__u64)es.es_lblk << blksize_bits, 2217bb5835edSTheodore Ts'o (__u64)es.es_pblk << blksize_bits, 2218bb5835edSTheodore Ts'o (__u64)es.es_len << blksize_bits, 2219bb5835edSTheodore Ts'o flags); 2220bb5835edSTheodore Ts'o if (next == 0) 2221bb5835edSTheodore Ts'o break; 2222bb5835edSTheodore Ts'o block = next; 2223bb5835edSTheodore Ts'o if (err < 0) 2224bb5835edSTheodore Ts'o return err; 2225bb5835edSTheodore Ts'o if (err == 1) 2226bb5835edSTheodore Ts'o return 0; 2227bb5835edSTheodore Ts'o } 2228bb5835edSTheodore Ts'o return 0; 2229bb5835edSTheodore Ts'o } 2230bb5835edSTheodore Ts'o 2231bb5835edSTheodore Ts'o 2232a86c6181SAlex Tomas /* 2233f5411b76SZhang Yi * ext4_ext_find_hole - find hole around given block according to the given path 2234140a5250SJan Kara * @inode: inode we lookup in 2235140a5250SJan Kara * @path: path in extent tree to @lblk 2236140a5250SJan Kara * @lblk: pointer to logical block around which we want to determine hole 2237140a5250SJan Kara * 2238140a5250SJan Kara * Determine hole length (and start if easily possible) around given logical 2239140a5250SJan Kara * block. We don't try too hard to find the beginning of the hole but @path 2240140a5250SJan Kara * actually points to extent before @lblk, we provide it. 2241140a5250SJan Kara * 2242140a5250SJan Kara * The function returns the length of a hole starting at @lblk. We update @lblk 2243140a5250SJan Kara * to the beginning of the hole if we managed to find it. 2244140a5250SJan Kara */ 2245f5411b76SZhang Yi static ext4_lblk_t ext4_ext_find_hole(struct inode *inode, 2246140a5250SJan Kara struct ext4_ext_path *path, 2247140a5250SJan Kara ext4_lblk_t *lblk) 2248140a5250SJan Kara { 2249140a5250SJan Kara int depth = ext_depth(inode); 2250140a5250SJan Kara struct ext4_extent *ex; 2251140a5250SJan Kara ext4_lblk_t len; 2252140a5250SJan Kara 2253140a5250SJan Kara ex = path[depth].p_ext; 2254140a5250SJan Kara if (ex == NULL) { 2255140a5250SJan Kara /* there is no extent yet, so gap is [0;-] */ 2256140a5250SJan Kara *lblk = 0; 2257140a5250SJan Kara len = EXT_MAX_BLOCKS; 2258140a5250SJan Kara } else if (*lblk < le32_to_cpu(ex->ee_block)) { 2259140a5250SJan Kara len = le32_to_cpu(ex->ee_block) - *lblk; 2260140a5250SJan Kara } else if (*lblk >= le32_to_cpu(ex->ee_block) 2261140a5250SJan Kara + ext4_ext_get_actual_len(ex)) { 2262140a5250SJan Kara ext4_lblk_t next; 2263140a5250SJan Kara 2264140a5250SJan Kara *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); 2265140a5250SJan Kara next = ext4_ext_next_allocated_block(path); 2266140a5250SJan Kara BUG_ON(next == *lblk); 2267140a5250SJan Kara len = next - *lblk; 2268140a5250SJan Kara } else { 2269140a5250SJan Kara BUG(); 2270140a5250SJan Kara } 2271140a5250SJan Kara return len; 2272140a5250SJan Kara } 2273140a5250SJan Kara 2274140a5250SJan Kara /* 2275d0d856e8SRandy Dunlap * ext4_ext_rm_idx: 2276d0d856e8SRandy Dunlap * removes index from the index block. 
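 *
 * When the removed entry was the first in its node, the loop at the end
 * also propagates the new first ei_block upward, so each ancestor whose
 * pointer is first in its own node keeps advertising the child's first
 * logical block.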
2277a86c6181SAlex Tomas */ 22781d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2279c36575e6SForrest Liu struct ext4_ext_path *path, int depth) 2280a86c6181SAlex Tomas { 2281a86c6181SAlex Tomas int err; 2282f65e6fbaSAlex Tomas ext4_fsblk_t leaf; 2283a86c6181SAlex Tomas 2284a86c6181SAlex Tomas /* free index block */ 2285c36575e6SForrest Liu depth--; 2286c36575e6SForrest Liu path = path + depth; 2287bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx); 2288273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) { 2289273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 22906a797d27SDarrick J. Wong return -EFSCORRUPTED; 2291273df556SFrank Mayhar } 22927e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path); 22937e028976SAvantika Mathur if (err) 2294a86c6181SAlex Tomas return err; 22950e1147b0SRobin Dong 22960e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 22970e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 22980e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx); 22990e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len); 23000e1147b0SRobin Dong } 23010e1147b0SRobin Dong 2302e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1); 23037e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path); 23047e028976SAvantika Mathur if (err) 2305a86c6181SAlex Tomas return err; 230670aa1554SRitesh Harjani ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf); 2307d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf); 2308d8990240SAditya Kali 23097dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1, 2310e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2311c36575e6SForrest Liu 2312c36575e6SForrest Liu while (--depth >= 0) { 2313c36575e6SForrest Liu if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2314c36575e6SForrest Liu break; 2315c36575e6SForrest Liu path--; 2316c36575e6SForrest Liu err = ext4_ext_get_access(handle, inode, path); 2317c36575e6SForrest Liu if (err) 2318c36575e6SForrest Liu break; 2319c36575e6SForrest Liu path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2320c36575e6SForrest Liu err = ext4_ext_dirty(handle, inode, path); 2321c36575e6SForrest Liu if (err) 2322c36575e6SForrest Liu break; 2323c36575e6SForrest Liu } 2324a86c6181SAlex Tomas return err; 2325a86c6181SAlex Tomas } 2326a86c6181SAlex Tomas 2327a86c6181SAlex Tomas /* 2328ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent: 2329ee12b630SMingming Cao * This routine returns max. credits that needed to insert an extent 2330ee12b630SMingming Cao * to the extent tree. 2331ee12b630SMingming Cao * When pass the actual path, the caller should calculate credits 2332ee12b630SMingming Cao * under i_data_sem. 2333a86c6181SAlex Tomas */ 2334525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2335a86c6181SAlex Tomas struct ext4_ext_path *path) 2336a86c6181SAlex Tomas { 2337a86c6181SAlex Tomas if (path) { 2338ee12b630SMingming Cao int depth = ext_depth(inode); 2339f3bd1f3fSMingming Cao int ret = 0; 2340ee12b630SMingming Cao 2341a86c6181SAlex Tomas /* probably there is space in leaf? 
*/
2342a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2343ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2344ee12b630SMingming Cao 
2345ee12b630SMingming Cao /*
2346ee12b630SMingming Cao * There is some space in the leaf, so no need
2347ee12b630SMingming Cao * to account for a leaf block credit
2348ee12b630SMingming Cao *
2349ee12b630SMingming Cao * bitmaps and block group descriptor blocks
2350df3ab170STao Ma * and other metadata blocks still need to be
2351ee12b630SMingming Cao * accounted.
2352ee12b630SMingming Cao */
2353525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */
2354ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
23555887e98bSAneesh Kumar K.V return ret;
2356ee12b630SMingming Cao }
2357ee12b630SMingming Cao }
2358ee12b630SMingming Cao 
2359525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks);
2360a86c6181SAlex Tomas }
2361a86c6181SAlex Tomas 
2362a86c6181SAlex Tomas /*
2363fffb2739SJan Kara * How many index/leaf blocks need to be changed/allocated to add @extents extents?
2364ee12b630SMingming Cao *
2365fffb2739SJan Kara * If we add a single extent, then in the worst case, each tree level's
2366fffb2739SJan Kara * index/leaf needs to be changed in case of a tree split.
2367ee12b630SMingming Cao *
2368fffb2739SJan Kara * If more extents are inserted, they could cause the whole tree to split
2369fffb2739SJan Kara * more than once, but this is really rare.
2370a86c6181SAlex Tomas */
2371fffb2739SJan Kara int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2372ee12b630SMingming Cao {
2373ee12b630SMingming Cao int index;
2374f19d5870STao Ma int depth;
2375f19d5870STao Ma 
2376f19d5870STao Ma /* If we are converting the inline data, only one is needed here. */
2377f19d5870STao Ma if (ext4_has_inline_data(inode))
2378f19d5870STao Ma return 1;
2379f19d5870STao Ma 
2380f19d5870STao Ma depth = ext_depth(inode);
2381a86c6181SAlex Tomas 
2382fffb2739SJan Kara if (extents <= 1)
2383ee12b630SMingming Cao index = depth * 2;
2384ee12b630SMingming Cao else
2385ee12b630SMingming Cao index = depth * 3;
2386a86c6181SAlex Tomas 
2387ee12b630SMingming Cao return index;
2388a86c6181SAlex Tomas }
2389a86c6181SAlex Tomas 
2390981250caSTheodore Ts'o static inline int get_default_free_blocks_flags(struct inode *inode)
2391981250caSTheodore Ts'o {
2392ddfa17e4STahsin Erdogan if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
2393ddfa17e4STahsin Erdogan ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
2394981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2395981250caSTheodore Ts'o else if (ext4_should_journal_data(inode))
2396981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_FORGET;
2397981250caSTheodore Ts'o return 0;
2398981250caSTheodore Ts'o }
2399981250caSTheodore Ts'o 
24009fe67149SEric Whitney /*
24019fe67149SEric Whitney * ext4_rereserve_cluster - increment the reserved cluster count when
24029fe67149SEric Whitney * freeing a cluster with a pending reservation
24039fe67149SEric Whitney *
24049fe67149SEric Whitney * @inode - file containing the cluster
24059fe67149SEric Whitney * @lblk - logical block in cluster to be reserved
24069fe67149SEric Whitney *
24079fe67149SEric Whitney * Increments the reserved cluster count and adjusts quota in a bigalloc
24089fe67149SEric Whitney * file system when freeing a partial cluster containing at least one
24099fe67149SEric Whitney * delayed and unwritten block.
A partial cluster meeting that 24109fe67149SEric Whitney * requirement will have a pending reservation. If so, the 24119fe67149SEric Whitney * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to 24129fe67149SEric Whitney * defer reserved and allocated space accounting to a subsequent call 24139fe67149SEric Whitney * to this function. 24149fe67149SEric Whitney */ 24159fe67149SEric Whitney static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk) 24169fe67149SEric Whitney { 24179fe67149SEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 24189fe67149SEric Whitney struct ext4_inode_info *ei = EXT4_I(inode); 24199fe67149SEric Whitney 24209fe67149SEric Whitney dquot_reclaim_block(inode, EXT4_C2B(sbi, 1)); 24219fe67149SEric Whitney 24229fe67149SEric Whitney spin_lock(&ei->i_block_reservation_lock); 24239fe67149SEric Whitney ei->i_reserved_data_blocks++; 24249fe67149SEric Whitney percpu_counter_add(&sbi->s_dirtyclusters_counter, 1); 24259fe67149SEric Whitney spin_unlock(&ei->i_block_reservation_lock); 24269fe67149SEric Whitney 24279fe67149SEric Whitney percpu_counter_add(&sbi->s_freeclusters_counter, 1); 24289fe67149SEric Whitney ext4_remove_pending(inode, lblk); 24299fe67149SEric Whitney } 24309fe67149SEric Whitney 2431a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2432a86c6181SAlex Tomas struct ext4_extent *ex, 24339fe67149SEric Whitney struct partial_cluster *partial, 2434725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to) 2435a86c6181SAlex Tomas { 24360aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2437a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex); 24389fe67149SEric Whitney ext4_fsblk_t last_pblk, pblk; 24399fe67149SEric Whitney ext4_lblk_t num; 24409fe67149SEric Whitney int flags; 244118888cf0SAndrey Sidorov 24429fe67149SEric Whitney /* only extent tail removal is allowed */ 24439fe67149SEric Whitney if (from < le32_to_cpu(ex->ee_block) || 24449fe67149SEric Whitney to != le32_to_cpu(ex->ee_block) + ee_len - 1) { 24459fe67149SEric Whitney ext4_error(sbi->s_sb, 24469fe67149SEric Whitney "strange request: removal(2) %u-%u from %u:%u", 24479fe67149SEric Whitney from, to, le32_to_cpu(ex->ee_block), ee_len); 24489fe67149SEric Whitney return 0; 24490aa06000STheodore Ts'o } 24500aa06000STheodore Ts'o 2451a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2452a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock); 2453a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len; 2454a86c6181SAlex Tomas sbi->s_ext_extents++; 2455a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min) 2456a86c6181SAlex Tomas sbi->s_ext_min = ee_len; 2457a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max) 2458a86c6181SAlex Tomas sbi->s_ext_max = ee_len; 2459a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max) 2460a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode); 2461a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock); 2462a86c6181SAlex Tomas #endif 24639fe67149SEric Whitney 24649fe67149SEric Whitney trace_ext4_remove_blocks(inode, ex, from, to, partial); 24659fe67149SEric Whitney 24669fe67149SEric Whitney /* 24679fe67149SEric Whitney * if we have a partial cluster, and it's different from the 24689fe67149SEric Whitney * cluster of the last block in the extent, we free it 24699fe67149SEric Whitney */ 24709fe67149SEric Whitney last_pblk = ext4_ext_pblock(ex) + ee_len - 1; 24719fe67149SEric Whitney 24729fe67149SEric Whitney if (partial->state != initial && 24739fe67149SEric Whitney partial->pclu != 
EXT4_B2C(sbi, last_pblk)) { 24749fe67149SEric Whitney if (partial->state == tofree) { 24759fe67149SEric Whitney flags = get_default_free_blocks_flags(inode); 24769fe67149SEric Whitney if (ext4_is_pending(inode, partial->lblk)) 24779fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 24789fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL, 24799fe67149SEric Whitney EXT4_C2B(sbi, partial->pclu), 24809fe67149SEric Whitney sbi->s_cluster_ratio, flags); 24819fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 24829fe67149SEric Whitney ext4_rereserve_cluster(inode, partial->lblk); 24839fe67149SEric Whitney } 24849fe67149SEric Whitney partial->state = initial; 24859fe67149SEric Whitney } 2486725d26d3SAneesh Kumar K.V 2487a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from; 24880aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num; 24899fe67149SEric Whitney 2490d23142c6SLukas Czerner /* 24919fe67149SEric Whitney * We free the partial cluster at the end of the extent (if any), 24929fe67149SEric Whitney * unless the cluster is used by another extent (partial_cluster 24939fe67149SEric Whitney * state is nofree). If a partial cluster exists here, it must be 24949fe67149SEric Whitney * shared with the last block in the extent. 2495d23142c6SLukas Czerner */ 24969fe67149SEric Whitney flags = get_default_free_blocks_flags(inode); 24979fe67149SEric Whitney 24989fe67149SEric Whitney /* partial, left end cluster aligned, right end unaligned */ 24999fe67149SEric Whitney if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) && 25009fe67149SEric Whitney (EXT4_LBLK_CMASK(sbi, to) >= from) && 25019fe67149SEric Whitney (partial->state != nofree)) { 25029fe67149SEric Whitney if (ext4_is_pending(inode, to)) 25039fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 25049fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL, 25059fe67149SEric Whitney EXT4_PBLK_CMASK(sbi, last_pblk), 25069fe67149SEric Whitney sbi->s_cluster_ratio, flags); 25079fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 25089fe67149SEric Whitney ext4_rereserve_cluster(inode, to); 25099fe67149SEric Whitney partial->state = initial; 25109fe67149SEric Whitney flags = get_default_free_blocks_flags(inode); 25119fe67149SEric Whitney } 25129fe67149SEric Whitney 2513d23142c6SLukas Czerner flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2514d23142c6SLukas Czerner 25150aa06000STheodore Ts'o /* 25169fe67149SEric Whitney * For bigalloc file systems, we never free a partial cluster 25179fe67149SEric Whitney * at the beginning of the extent. Instead, we check to see if we 25189fe67149SEric Whitney * need to free it on a subsequent call to ext4_remove_blocks, 25199fe67149SEric Whitney * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. 
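 *
 * For example (an illustrative sketch, assuming s_cluster_ratio == 4):
 * removing an entire extent whose first logical block is 10 leaves the
 * cluster covering blocks 8-11 only partially freed; it is recorded in
 * @partial with state tofree and freed later, once we know blocks 8-9
 * are not shared with the extent to the left.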
25200aa06000STheodore Ts'o */ 25219fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 25229fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 25239fe67149SEric Whitney 25249fe67149SEric Whitney /* reset the partial cluster if we've freed past it */ 25259fe67149SEric Whitney if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk)) 25269fe67149SEric Whitney partial->state = initial; 25279fe67149SEric Whitney 25289fe67149SEric Whitney /* 25299fe67149SEric Whitney * If we've freed the entire extent but the beginning is not left 25309fe67149SEric Whitney * cluster aligned and is not marked as ineligible for freeing, we 25319fe67149SEric Whitney * record the partial cluster at the beginning of the extent. It 25329fe67149SEric Whitney * wasn't freed by the preceding ext4_free_blocks() call, and we 25339fe67149SEric Whitney * need to look farther to the left to determine if it's to be freed 25349fe67149SEric Whitney * (not shared with another extent). Else, reset the partial 25359fe67149SEric Whitney * cluster - we're either done freeing or the beginning of the 25369fe67149SEric Whitney * extent is left cluster aligned. 25379fe67149SEric Whitney */ 25389fe67149SEric Whitney if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) { 25399fe67149SEric Whitney if (partial->state == initial) { 25409fe67149SEric Whitney partial->pclu = EXT4_B2C(sbi, pblk); 25419fe67149SEric Whitney partial->lblk = from; 25429fe67149SEric Whitney partial->state = tofree; 2543345ee947SEric Whitney } 25449fe67149SEric Whitney } else { 25459fe67149SEric Whitney partial->state = initial; 2546a86c6181SAlex Tomas } 2547a86c6181SAlex Tomas 25489fe67149SEric Whitney return 0; 25499fe67149SEric Whitney } 2550d583fb87SAllison Henderson 2551d583fb87SAllison Henderson /* 2552d583fb87SAllison Henderson * ext4_ext_rm_leaf() Removes the extents associated with the 25535bf43760SEric Whitney * blocks appearing between "start" and "end". Both "start" 25545bf43760SEric Whitney * and "end" must appear in the same extent or EFSCORRUPTED is returned. 2555d583fb87SAllison Henderson * 2556d583fb87SAllison Henderson * @handle: The journal handle 2557d583fb87SAllison Henderson * @inode: The file's inode 2558d583fb87SAllison Henderson * @path: The path to the leaf 2559d23142c6SLukas Czerner * @partial: The partial cluster which we'll have to free if all extents 25605bf43760SEric Whitney * have been released from it. However, if its state is 25615bf43760SEric Whitney * nofree, it's a cluster just to the right of the 25625bf43760SEric Whitney * punched region and it must not be freed.
2563d583fb87SAllison Henderson * @start: The first block to remove 2564d583fb87SAllison Henderson * @end: The last block to remove 2565d583fb87SAllison Henderson */ 2566a86c6181SAlex Tomas static int 2567a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2568d23142c6SLukas Czerner struct ext4_ext_path *path, 25699fe67149SEric Whitney struct partial_cluster *partial, 25700aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end) 2571a86c6181SAlex Tomas { 25720aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2573a86c6181SAlex Tomas int err = 0, correct_index = 0; 257483448bdfSJan Kara int depth = ext_depth(inode), credits, revoke_credits; 2575a86c6181SAlex Tomas struct ext4_extent_header *eh; 2576750c9c47SDmitry Monakhov ext4_lblk_t a, b; 2577725d26d3SAneesh Kumar K.V unsigned num; 2578725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block; 2579a86c6181SAlex Tomas unsigned short ex_ee_len; 2580556615dcSLukas Czerner unsigned unwritten = 0; 2581a86c6181SAlex Tomas struct ext4_extent *ex; 2582d23142c6SLukas Czerner ext4_fsblk_t pblk; 2583a86c6181SAlex Tomas 2584c29c0ae7SAlex Tomas /* the header must be checked already in ext4_ext_remove_space() */ 258570aa1554SRitesh Harjani ext_debug(inode, "truncate since %u in leaf to %u\n", start, end); 2586a86c6181SAlex Tomas if (!path[depth].p_hdr) 2587a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2588a86c6181SAlex Tomas eh = path[depth].p_hdr; 2589273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 2590273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 25916a797d27SDarrick J. Wong return -EFSCORRUPTED; 2592273df556SFrank Mayhar } 2593a86c6181SAlex Tomas /* find where to start removing */ 25946ae06ff5SAshish Sangwan ex = path[depth].p_ext; 25956ae06ff5SAshish Sangwan if (!ex) 2596a86c6181SAlex Tomas ex = EXT_LAST_EXTENT(eh); 2597a86c6181SAlex Tomas 2598a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2599a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2600a86c6181SAlex Tomas 26019fe67149SEric Whitney trace_ext4_ext_rm_leaf(inode, start, ex, partial); 2602d8990240SAditya Kali 2603a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) && 2604a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) { 2605a41f2071SAneesh Kumar K.V 2606556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex)) 2607556615dcSLukas Czerner unwritten = 1; 2608a41f2071SAneesh Kumar K.V else 2609556615dcSLukas Czerner unwritten = 0; 2610a41f2071SAneesh Kumar K.V 261170aa1554SRitesh Harjani ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block, 2612556615dcSLukas Czerner unwritten, ex_ee_len); 2613a86c6181SAlex Tomas path[depth].p_ext = ex; 2614a86c6181SAlex Tomas 261566267814SJiangshan Yi a = max(ex_ee_block, start); 261666267814SJiangshan Yi b = min(ex_ee_block + ex_ee_len - 1, end); 2617a86c6181SAlex Tomas 261870aa1554SRitesh Harjani ext_debug(inode, " border %u:%u\n", a, b); 2619a86c6181SAlex Tomas 2620d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */ 26215f95d21fSLukas Czerner if (end < ex_ee_block) { 2622d23142c6SLukas Czerner /* 2623d23142c6SLukas Czerner * We're going to skip this extent and move to another, 2624f4226d9eSEric Whitney * so note that its first cluster is in use to avoid 2625f4226d9eSEric Whitney * freeing it when removing blocks. Eventually, the 2626f4226d9eSEric Whitney * right edge of the truncated/punched region will 2627f4226d9eSEric Whitney * be just to the left. 
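 *
 * For example (an illustrative sketch, assuming s_cluster_ratio == 4):
 * when punching logical blocks 0-9, an extent starting at block 10 is
 * skipped here, but its first cluster (blocks 8-11) is recorded with
 * state nofree so that removing blocks 8-9 from the extent to the left
 * cannot free a cluster still backing blocks 10-11.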
2628d23142c6SLukas Czerner */ 2629f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) { 2630d23142c6SLukas Czerner pblk = ext4_ext_pblock(ex); 26319fe67149SEric Whitney partial->pclu = EXT4_B2C(sbi, pblk); 26329fe67149SEric Whitney partial->state = nofree; 2633f4226d9eSEric Whitney } 2634d583fb87SAllison Henderson ex--; 2635d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block); 2636d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex); 2637d583fb87SAllison Henderson continue; 2638750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) { 2639dc1841d6SLukas Czerner EXT4_ERROR_INODE(inode, 2640dc1841d6SLukas Czerner "can not handle truncate %u:%u " 2641dc1841d6SLukas Czerner "on extent %u:%u", 2642dc1841d6SLukas Czerner start, end, ex_ee_block, 2643dc1841d6SLukas Czerner ex_ee_block + ex_ee_len - 1); 26446a797d27SDarrick J. Wong err = -EFSCORRUPTED; 2645d583fb87SAllison Henderson goto out; 2646a86c6181SAlex Tomas } else if (a != ex_ee_block) { 2647a86c6181SAlex Tomas /* remove tail of the extent */ 2648750c9c47SDmitry Monakhov num = a - ex_ee_block; 2649a86c6181SAlex Tomas } else { 2650a86c6181SAlex Tomas /* remove whole extent: excellent! */ 2651a86c6181SAlex Tomas num = 0; 2652d583fb87SAllison Henderson } 265334071da7STheodore Ts'o /* 265434071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group 265534071da7STheodore Ts'o * descriptor) for each block group; assume two block 265634071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for 265734071da7STheodore Ts'o * the worst case 265834071da7STheodore Ts'o */ 265934071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2660a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) { 2661a86c6181SAlex Tomas correct_index = 1; 2662a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1; 2663a86c6181SAlex Tomas } 26645aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 266583448bdfSJan Kara /* 266683448bdfSJan Kara * We may end up freeing some index blocks and data from the 266783448bdfSJan Kara * punched range. Note that partial clusters are accounted for 266883448bdfSJan Kara * by ext4_free_data_revoke_credits(). 
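 *
 * As a rough illustration (example numbers only): removing the tail of
 * a 32768-block extent on a file system with 32768 blocks per group
 * needs credits = 7 + 2 * (32768 / 32768) = 9 above, plus the index
 * and quota credits, while the revoke credits below scale with the
 * tree depth and with the b - a + 1 freed data blocks.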
266983448bdfSJan Kara */ 267083448bdfSJan Kara revoke_credits = 267183448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, 267283448bdfSJan Kara ext_depth(inode)) + 267383448bdfSJan Kara ext4_free_data_revoke_credits(inode, b - a + 1); 2674a86c6181SAlex Tomas 2675a4130367SJan Kara err = ext4_datasem_ensure_credits(handle, inode, credits, 267683448bdfSJan Kara credits, revoke_credits); 2677a4130367SJan Kara if (err) { 2678a4130367SJan Kara if (err > 0) 2679a4130367SJan Kara err = -EAGAIN; 2680a86c6181SAlex Tomas goto out; 2681a4130367SJan Kara } 2682a86c6181SAlex Tomas 2683a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth); 2684a86c6181SAlex Tomas if (err) 2685a86c6181SAlex Tomas goto out; 2686a86c6181SAlex Tomas 26879fe67149SEric Whitney err = ext4_remove_blocks(handle, inode, ex, partial, a, b); 2688a86c6181SAlex Tomas if (err) 2689a86c6181SAlex Tomas goto out; 2690a86c6181SAlex Tomas 2691750c9c47SDmitry Monakhov if (num == 0) 2692d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */ 2693f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0); 2694a86c6181SAlex Tomas 2695a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num); 2696749269faSAmit Arora /* 2697556615dcSLukas Czerner * Do not mark unwritten if all the blocks in the 2698749269faSAmit Arora * extent have been removed. 2699749269faSAmit Arora */ 2700556615dcSLukas Czerner if (unwritten && num) 2701556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 2702d583fb87SAllison Henderson /* 2703d583fb87SAllison Henderson * If the extent was completely released, 2704d583fb87SAllison Henderson * we need to remove it from the leaf 2705d583fb87SAllison Henderson */ 2706d583fb87SAllison Henderson if (num == 0) { 2707f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) { 2708d583fb87SAllison Henderson /* 2709d583fb87SAllison Henderson * For hole punching, we need to scoot all the 2710d583fb87SAllison Henderson * extents up when an extent is removed so that 2711d583fb87SAllison Henderson * we don't have blank extents in the middle 2712d583fb87SAllison Henderson */ 2713d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2714d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2715d583fb87SAllison Henderson 2716d583fb87SAllison Henderson /* Now get rid of the one at the end */ 2717d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0, 2718d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2719d583fb87SAllison Henderson } 2720d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1); 27215bf43760SEric Whitney } 2722d583fb87SAllison Henderson 2723750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth); 2724750c9c47SDmitry Monakhov if (err) 2725750c9c47SDmitry Monakhov goto out; 2726750c9c47SDmitry Monakhov 272770aa1554SRitesh Harjani ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num, 2728bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 2729a86c6181SAlex Tomas ex--; 2730a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2731a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2732a86c6181SAlex Tomas } 2733a86c6181SAlex Tomas 2734a86c6181SAlex Tomas if (correct_index && eh->eh_entries) 2735a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2736a86c6181SAlex Tomas 27370aa06000STheodore Ts'o /* 2738ad6599abSEric Whitney * If there's a partial cluster and at least one extent remains in 2739ad6599abSEric Whitney * the leaf, free the partial cluster if it isn't shared with the 27405bf43760SEric Whitney *
current extent. If it is shared with the current extent 27419fe67149SEric Whitney * we reset the partial cluster because we've reached the start of the 27425bf43760SEric Whitney * truncated/punched region and we're done removing blocks. 27430aa06000STheodore Ts'o */ 27449fe67149SEric Whitney if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) { 27455bf43760SEric Whitney pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 27469fe67149SEric Whitney if (partial->pclu != EXT4_B2C(sbi, pblk)) { 27479fe67149SEric Whitney int flags = get_default_free_blocks_flags(inode); 27489fe67149SEric Whitney 27499fe67149SEric Whitney if (ext4_is_pending(inode, partial->lblk)) 27509fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 27510aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 27529fe67149SEric Whitney EXT4_C2B(sbi, partial->pclu), 27539fe67149SEric Whitney sbi->s_cluster_ratio, flags); 27549fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 27559fe67149SEric Whitney ext4_rereserve_cluster(inode, partial->lblk); 27565bf43760SEric Whitney } 27579fe67149SEric Whitney partial->state = initial; 27580aa06000STheodore Ts'o } 27590aa06000STheodore Ts'o 2760a86c6181SAlex Tomas /* if this leaf is free, then we should 2761a86c6181SAlex Tomas * remove it from index block above */ 2762a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2763c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, depth); 2764a86c6181SAlex Tomas 2765a86c6181SAlex Tomas out: 2766a86c6181SAlex Tomas return err; 2767a86c6181SAlex Tomas } 2768a86c6181SAlex Tomas 2769a86c6181SAlex Tomas /* 2770d0d856e8SRandy Dunlap * ext4_ext_more_to_rm: 2771d0d856e8SRandy Dunlap * returns 1 if current index has to be freed (even partial) 2772a86c6181SAlex Tomas */ 277309b88252SAvantika Mathur static int 2774a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path) 2775a86c6181SAlex Tomas { 2776a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL); 2777a86c6181SAlex Tomas 2778a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2779a86c6181SAlex Tomas return 0; 2780a86c6181SAlex Tomas 2781a86c6181SAlex Tomas /* 2782d0d856e8SRandy Dunlap * if truncate on deeper level happened, it wasn't partial, 2783a86c6181SAlex Tomas * so we have to consider current index for truncation 2784a86c6181SAlex Tomas */ 2785a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2786a86c6181SAlex Tomas return 0; 2787a86c6181SAlex Tomas return 1; 2788a86c6181SAlex Tomas } 2789a86c6181SAlex Tomas 279026a4c0c6STheodore Ts'o int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 27915f95d21fSLukas Czerner ext4_lblk_t end) 2792a86c6181SAlex Tomas { 2793f4226d9eSEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2794a86c6181SAlex Tomas int depth = ext_depth(inode); 2795968dee77SAshish Sangwan struct ext4_ext_path *path = NULL; 27969fe67149SEric Whitney struct partial_cluster partial; 2797a86c6181SAlex Tomas handle_t *handle; 27986f2080e6SDmitry Monakhov int i = 0, err = 0; 2799a86c6181SAlex Tomas 28009fe67149SEric Whitney partial.pclu = 0; 28019fe67149SEric Whitney partial.lblk = 0; 28029fe67149SEric Whitney partial.state = initial; 28039fe67149SEric Whitney 280470aa1554SRitesh Harjani ext_debug(inode, "truncate since %u to %u\n", start, end); 2805a86c6181SAlex Tomas 2806a86c6181SAlex Tomas /* probably first extent we're gonna free will be last in block */ 280783448bdfSJan Kara handle = ext4_journal_start_with_revoke(inode, 
EXT4_HT_TRUNCATE, 280883448bdfSJan Kara depth + 1, 280983448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, depth)); 2810a86c6181SAlex Tomas if (IS_ERR(handle)) 2811a86c6181SAlex Tomas return PTR_ERR(handle); 2812a86c6181SAlex Tomas 28130617b83fSDmitry Monakhov again: 281461801325SLukas Czerner trace_ext4_ext_remove_space(inode, start, end, depth); 2815d8990240SAditya Kali 2816a86c6181SAlex Tomas /* 28175f95d21fSLukas Czerner * Check if we are removing extents inside the extent tree. If that 28185f95d21fSLukas Czerner * is the case, we are going to punch a hole inside the extent tree 28195f95d21fSLukas Czerner * so we have to check whether we need to split the extent covering 28205f95d21fSLukas Czerner * the last block to remove so we can easily remove the part of it 28215f95d21fSLukas Czerner * in ext4_ext_rm_leaf(). 28225f95d21fSLukas Czerner */ 28235f95d21fSLukas Czerner if (end < EXT_MAX_BLOCKS - 1) { 28245f95d21fSLukas Czerner struct ext4_extent *ex; 2825f4226d9eSEric Whitney ext4_lblk_t ee_block, ex_end, lblk; 2826f4226d9eSEric Whitney ext4_fsblk_t pblk; 28275f95d21fSLukas Czerner 2828f4226d9eSEric Whitney /* find the extent for, or the closest extent to, this block */ 282973c384c0STheodore Ts'o path = ext4_find_extent(inode, end, NULL, 283073c384c0STheodore Ts'o EXT4_EX_NOCACHE | EXT4_EX_NOFAIL); 28315f95d21fSLukas Czerner if (IS_ERR(path)) { 28325f95d21fSLukas Czerner ext4_journal_stop(handle); 28335f95d21fSLukas Czerner return PTR_ERR(path); 28345f95d21fSLukas Czerner } 28355f95d21fSLukas Czerner depth = ext_depth(inode); 28366f2080e6SDmitry Monakhov /* A leaf may not exist only if the inode has no blocks at all */ 28375f95d21fSLukas Czerner ex = path[depth].p_ext; 2838968dee77SAshish Sangwan if (!ex) { 28396f2080e6SDmitry Monakhov if (depth) { 28406f2080e6SDmitry Monakhov EXT4_ERROR_INODE(inode, 28416f2080e6SDmitry Monakhov "path[%d].p_hdr == NULL", 28426f2080e6SDmitry Monakhov depth); 28436a797d27SDarrick J. Wong err = -EFSCORRUPTED; 28446f2080e6SDmitry Monakhov } 28456f2080e6SDmitry Monakhov goto out; 2846968dee77SAshish Sangwan } 28475f95d21fSLukas Czerner 28485f95d21fSLukas Czerner ee_block = le32_to_cpu(ex->ee_block); 2849f4226d9eSEric Whitney ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; 28505f95d21fSLukas Czerner 28515f95d21fSLukas Czerner /* 28525f95d21fSLukas Czerner * See if the last block is inside the extent, if so split 28535f95d21fSLukas Czerner * the extent at 'end' block so we can easily remove the 28545f95d21fSLukas Czerner * tail of the first part of the split extent in 28555f95d21fSLukas Czerner * ext4_ext_rm_leaf(). 28565f95d21fSLukas Czerner */ 2857f4226d9eSEric Whitney if (end >= ee_block && end < ex_end) { 2858f4226d9eSEric Whitney 2859f4226d9eSEric Whitney /* 2860f4226d9eSEric Whitney * If we're going to split the extent, note that 2861f4226d9eSEric Whitney * the cluster containing the block after 'end' is 2862f4226d9eSEric Whitney * in use to avoid freeing it when removing blocks. 2863f4226d9eSEric Whitney */ 2864f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) { 2865cfb3c85aSJeffle Xu pblk = ext4_ext_pblock(ex) + end - ee_block + 1; 28669fe67149SEric Whitney partial.pclu = EXT4_B2C(sbi, pblk); 28679fe67149SEric Whitney partial.state = nofree; 2868f4226d9eSEric Whitney } 2869f4226d9eSEric Whitney 28705f95d21fSLukas Czerner /* 28715f95d21fSLukas Czerner * Split the extent in two so that 'end' is the last 287227dd4385SLukas Czerner * block in the first new extent.
Also we should not 287327dd4385SLukas Czerner * fail removing space due to ENOSPC so try to use 287427dd4385SLukas Czerner * reserved block if that happens. 28755f95d21fSLukas Czerner */ 2876dfe50809STheodore Ts'o err = ext4_force_split_extent_at(handle, inode, &path, 2877fcf6b1b7SDmitry Monakhov end + 1, 1); 28785f95d21fSLukas Czerner if (err < 0) 28795f95d21fSLukas Czerner goto out; 2880f4226d9eSEric Whitney 28817bd75230SEric Whitney } else if (sbi->s_cluster_ratio > 1 && end >= ex_end && 28827bd75230SEric Whitney partial.state == initial) { 2883f4226d9eSEric Whitney /* 28847bd75230SEric Whitney * If we're punching, there's an extent to the right. 28857bd75230SEric Whitney * If the partial cluster hasn't been set, set it to 28867bd75230SEric Whitney * that extent's first cluster and its state to nofree 28877bd75230SEric Whitney * so it won't be freed should it contain blocks to be 28887bd75230SEric Whitney * removed. If it's already set (tofree/nofree), we're 28897bd75230SEric Whitney * retrying and keep the original partial cluster info 28907bd75230SEric Whitney * so a cluster marked tofree as a result of earlier 28917bd75230SEric Whitney * extent removal is not lost. 2892f4226d9eSEric Whitney */ 2893f4226d9eSEric Whitney lblk = ex_end + 1; 2894f4226d9eSEric Whitney err = ext4_ext_search_right(inode, path, &lblk, &pblk, 2895d7dce9e0Syangerkun NULL); 2896d7dce9e0Syangerkun if (err < 0) 2897f4226d9eSEric Whitney goto out; 28989fe67149SEric Whitney if (pblk) { 28999fe67149SEric Whitney partial.pclu = EXT4_B2C(sbi, pblk); 29009fe67149SEric Whitney partial.state = nofree; 29019fe67149SEric Whitney } 29025f95d21fSLukas Czerner } 29035f95d21fSLukas Czerner } 29045f95d21fSLukas Czerner /* 2905d0d856e8SRandy Dunlap * We start scanning from right side, freeing all the blocks 2906d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise. 2907a86c6181SAlex Tomas */ 29080617b83fSDmitry Monakhov depth = ext_depth(inode); 2909968dee77SAshish Sangwan if (path) { 2910968dee77SAshish Sangwan int k = i = depth; 2911968dee77SAshish Sangwan while (--k > 0) 2912968dee77SAshish Sangwan path[k].p_block = 2913968dee77SAshish Sangwan le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2914968dee77SAshish Sangwan } else { 29156396bb22SKees Cook path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), 291673c384c0STheodore Ts'o GFP_NOFS | __GFP_NOFAIL); 2917a86c6181SAlex Tomas if (path == NULL) { 2918a86c6181SAlex Tomas ext4_journal_stop(handle); 2919a86c6181SAlex Tomas return -ENOMEM; 2920a86c6181SAlex Tomas } 292110809df8STheodore Ts'o path[0].p_maxdepth = path[0].p_depth = depth; 2922a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode); 292389a4e48fSTheodore Ts'o i = 0; 29245f95d21fSLukas Czerner 2925c349179bSTheodore Ts'o if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { 29266a797d27SDarrick J. 
Wong err = -EFSCORRUPTED; 2927a86c6181SAlex Tomas goto out; 2928a86c6181SAlex Tomas } 2929968dee77SAshish Sangwan } 2930968dee77SAshish Sangwan err = 0; 2931a86c6181SAlex Tomas 2932a86c6181SAlex Tomas while (i >= 0 && err == 0) { 2933a86c6181SAlex Tomas if (i == depth) { 2934a86c6181SAlex Tomas /* this is leaf block */ 2935d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 29369fe67149SEric Whitney &partial, start, end); 2937d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2938a86c6181SAlex Tomas brelse(path[i].p_bh); 2939a86c6181SAlex Tomas path[i].p_bh = NULL; 2940a86c6181SAlex Tomas i--; 2941a86c6181SAlex Tomas continue; 2942a86c6181SAlex Tomas } 2943a86c6181SAlex Tomas 2944a86c6181SAlex Tomas /* this is index block */ 2945a86c6181SAlex Tomas if (!path[i].p_hdr) { 294670aa1554SRitesh Harjani ext_debug(inode, "initialize header\n"); 2947a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2948a86c6181SAlex Tomas } 2949a86c6181SAlex Tomas 2950a86c6181SAlex Tomas if (!path[i].p_idx) { 2951d0d856e8SRandy Dunlap /* this level hasn't been touched yet */ 2952a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2953a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 295470aa1554SRitesh Harjani ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n", 2955a86c6181SAlex Tomas path[i].p_hdr, 2956a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries)); 2957a86c6181SAlex Tomas } else { 2958d0d856e8SRandy Dunlap /* we were already here, see at next index */ 2959a86c6181SAlex Tomas path[i].p_idx--; 2960a86c6181SAlex Tomas } 2961a86c6181SAlex Tomas 296270aa1554SRitesh Harjani ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n", 2963a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr), 2964a86c6181SAlex Tomas path[i].p_idx); 2965a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) { 2966c29c0ae7SAlex Tomas struct buffer_head *bh; 2967a86c6181SAlex Tomas /* go to the next level */ 296870aa1554SRitesh Harjani ext_debug(inode, "move to level %d (block %llu)\n", 2969bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx)); 2970a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path)); 29719c6e0719SZhang Yi bh = read_extent_tree_block(inode, path[i].p_idx, 29729c6e0719SZhang Yi depth - i - 1, 2973107a7bd3STheodore Ts'o EXT4_EX_NOCACHE); 29747d7ea89eSTheodore Ts'o if (IS_ERR(bh)) { 2975a86c6181SAlex Tomas /* should we reset i_size? */ 29767d7ea89eSTheodore Ts'o err = PTR_ERR(bh); 2977a86c6181SAlex Tomas break; 2978a86c6181SAlex Tomas } 297976828c88STheodore Ts'o /* Yield here to deal with large extent trees. 298076828c88STheodore Ts'o * Should be a no-op if we did IO above. */ 298176828c88STheodore Ts'o cond_resched(); 2982c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) { 29836a797d27SDarrick J. 
Wong err = -EFSCORRUPTED; 2984c29c0ae7SAlex Tomas break; 2985c29c0ae7SAlex Tomas } 2986c29c0ae7SAlex Tomas path[i + 1].p_bh = bh; 2987a86c6181SAlex Tomas 2988d0d856e8SRandy Dunlap /* save actual number of indexes since this 2989d0d856e8SRandy Dunlap * number is changed at the next iteration */ 2990a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2991a86c6181SAlex Tomas i++; 2992a86c6181SAlex Tomas } else { 2993d0d856e8SRandy Dunlap /* we finished processing this index, go up */ 2994a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2995d0d856e8SRandy Dunlap /* index is empty, remove it; 2996a86c6181SAlex Tomas * handle must be already prepared by the 2997a86c6181SAlex Tomas * leaf truncation in ext4_ext_rm_leaf() */ 2998c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, i); 2999a86c6181SAlex Tomas } 3000d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 3001a86c6181SAlex Tomas brelse(path[i].p_bh); 3002a86c6181SAlex Tomas path[i].p_bh = NULL; 3003a86c6181SAlex Tomas i--; 300470aa1554SRitesh Harjani ext_debug(inode, "return to level %d\n", i); 3005a86c6181SAlex Tomas } 3006a86c6181SAlex Tomas } 3007a86c6181SAlex Tomas 30089fe67149SEric Whitney trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, 30099fe67149SEric Whitney path->p_hdr->eh_entries); 3010d8990240SAditya Kali 30110756b908SEric Whitney /* 30129fe67149SEric Whitney * if there's a partial cluster and we have removed the first extent 30139fe67149SEric Whitney * in the file, then we also free the partial cluster, if any 30140756b908SEric Whitney */ 30159fe67149SEric Whitney if (partial.state == tofree && err == 0) { 30169fe67149SEric Whitney int flags = get_default_free_blocks_flags(inode); 30179fe67149SEric Whitney 30189fe67149SEric Whitney if (ext4_is_pending(inode, partial.lblk)) 30199fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 30207b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL, 30219fe67149SEric Whitney EXT4_C2B(sbi, partial.pclu), 30229fe67149SEric Whitney sbi->s_cluster_ratio, flags); 30239fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 30249fe67149SEric Whitney ext4_rereserve_cluster(inode, partial.lblk); 30259fe67149SEric Whitney partial.state = initial; 30267b415bf6SAditya Kali } 30277b415bf6SAditya Kali 3028a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */ 3029a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) { 3030a86c6181SAlex Tomas /* 3031d0d856e8SRandy Dunlap * truncate to zero freed the whole tree, 3032d0d856e8SRandy Dunlap * so we need to correct eh_depth 3033a86c6181SAlex Tomas */ 3034a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path); 3035a86c6181SAlex Tomas if (err == 0) { 3036a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0; 3037a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max = 303855ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0)); 3039a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path); 3040a86c6181SAlex Tomas } 3041a86c6181SAlex Tomas } 3042a86c6181SAlex Tomas out: 30437ff5fddaSYe Bin ext4_free_ext_path(path); 3044968dee77SAshish Sangwan path = NULL; 3045dfe50809STheodore Ts'o if (err == -EAGAIN) 3046dfe50809STheodore Ts'o goto again; 3047a86c6181SAlex Tomas ext4_journal_stop(handle); 3048a86c6181SAlex Tomas 3049a86c6181SAlex Tomas return err; 3050a86c6181SAlex Tomas } 3051a86c6181SAlex Tomas 3052a86c6181SAlex Tomas /* 3053a86c6181SAlex Tomas * called at mount time 3054a86c6181SAlex Tomas */ 3055a86c6181SAlex
Tomas void ext4_ext_init(struct super_block *sb) 3056a86c6181SAlex Tomas { 3057a86c6181SAlex Tomas /* 3058a86c6181SAlex Tomas * possible initialization would be here 3059a86c6181SAlex Tomas */ 3060a86c6181SAlex Tomas 3061e2b911c5SDarrick J. Wong if (ext4_has_feature_extents(sb)) { 306290576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 306392b97816STheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled" 3064bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 306592b97816STheodore Ts'o ", aggressive tests" 3066a86c6181SAlex Tomas #endif 3067a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 306892b97816STheodore Ts'o ", check binsearch" 3069a86c6181SAlex Tomas #endif 3070a86c6181SAlex Tomas #ifdef EXTENTS_STATS 307192b97816STheodore Ts'o ", stats" 3072a86c6181SAlex Tomas #endif 307392b97816STheodore Ts'o "\n"); 307490576c0bSTheodore Ts'o #endif 3075a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3076a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3077a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30; 3078a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0; 3079a86c6181SAlex Tomas #endif 3080a86c6181SAlex Tomas } 3081a86c6181SAlex Tomas } 3082a86c6181SAlex Tomas 3083a86c6181SAlex Tomas /* 3084a86c6181SAlex Tomas * called at umount time 3085a86c6181SAlex Tomas */ 3086a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb) 3087a86c6181SAlex Tomas { 3088e2b911c5SDarrick J. Wong if (!ext4_has_feature_extents(sb)) 3089a86c6181SAlex Tomas return; 3090a86c6181SAlex Tomas 3091a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3092a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3093a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 3094a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3095a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents, 3096a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents); 3097a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 3098a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 3099a86c6181SAlex Tomas } 3100a86c6181SAlex Tomas #endif 3101a86c6181SAlex Tomas } 3102a86c6181SAlex Tomas 3103ab8627e1SBaokun Li static void ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) 3104d7b2a00cSZheng Liu { 3105d7b2a00cSZheng Liu ext4_lblk_t ee_block; 3106d7b2a00cSZheng Liu ext4_fsblk_t ee_pblock; 3107d7b2a00cSZheng Liu unsigned int ee_len; 3108d7b2a00cSZheng Liu 3109d7b2a00cSZheng Liu ee_block = le32_to_cpu(ex->ee_block); 3110d7b2a00cSZheng Liu ee_len = ext4_ext_get_actual_len(ex); 3111d7b2a00cSZheng Liu ee_pblock = ext4_ext_pblock(ex); 3112d7b2a00cSZheng Liu 3113d7b2a00cSZheng Liu if (ee_len == 0) 3114ab8627e1SBaokun Li return; 3115d7b2a00cSZheng Liu 31166c120399SBaokun Li ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, 3117d7b2a00cSZheng Liu EXTENT_STATUS_WRITTEN); 3118d7b2a00cSZheng Liu } 3119d7b2a00cSZheng Liu 3120093a088bSAneesh Kumar K.V /* FIXME!! 
we need to try to merge to left or right after zero-out */ 3121093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 3122093a088bSAneesh Kumar K.V { 31232407518dSLukas Czerner ext4_fsblk_t ee_pblock; 31242407518dSLukas Czerner unsigned int ee_len; 3125093a088bSAneesh Kumar K.V 3126093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 3127bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex); 312853085facSJan Kara return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock, 312953085facSJan Kara ee_len); 3130093a088bSAneesh Kumar K.V } 3131093a088bSAneesh Kumar K.V 313247ea3bb5SYongqiang Yang /* 313347ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at a given block. 313447ea3bb5SYongqiang Yang * 313547ea3bb5SYongqiang Yang * @handle: the journal handle 313647ea3bb5SYongqiang Yang * @inode: the file inode 313747ea3bb5SYongqiang Yang * @path: the path to the extent 313847ea3bb5SYongqiang Yang * @split: the logical block where the extent is split. 313947ea3bb5SYongqiang Yang * @split_flag: indicates if the extent could be zeroed out if the split fails, and 3140556615dcSLukas Czerner * the states (initialized or unwritten) of the new extents. 314147ea3bb5SYongqiang Yang * @flags: flags used to insert the new extent into the extent tree. 314247ea3bb5SYongqiang Yang * 314347ea3bb5SYongqiang Yang * 314447ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], the states 3145e4d7f2d3SKeyur Patel * of which are determined by @split_flag. 314647ea3bb5SYongqiang Yang * 314747ea3bb5SYongqiang Yang * There are two cases: 314847ea3bb5SYongqiang Yang * a> the extent is split into two extents. 314947ea3bb5SYongqiang Yang * b> no split is needed, and the extent is just marked. 315047ea3bb5SYongqiang Yang * 315147ea3bb5SYongqiang Yang * return 0 on success.
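 *
 * For example (illustrative): splitting the extent [100, 199] at
 * @split == 150 yields [100, 149] and [150, 199] (case a); with
 * @split == 100 the boundaries are unchanged and only the extent's
 * initialized/unwritten state is updated (case b).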
315247ea3bb5SYongqiang Yang */ 315347ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle, 315447ea3bb5SYongqiang Yang struct inode *inode, 3155dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 315647ea3bb5SYongqiang Yang ext4_lblk_t split, 315747ea3bb5SYongqiang Yang int split_flag, 315847ea3bb5SYongqiang Yang int flags) 315947ea3bb5SYongqiang Yang { 3160dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 316147ea3bb5SYongqiang Yang ext4_fsblk_t newblock; 316247ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 3163adb23551SZheng Liu struct ext4_extent *ex, newex, orig_ex, zero_ex; 316447ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL; 316547ea3bb5SYongqiang Yang unsigned int ee_len, depth; 316647ea3bb5SYongqiang Yang int err = 0; 316747ea3bb5SYongqiang Yang 3168dee1f973SDmitry Monakhov BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 3169dee1f973SDmitry Monakhov (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 3170dee1f973SDmitry Monakhov 317170aa1554SRitesh Harjani ext_debug(inode, "logical block %llu\n", (unsigned long long)split); 317247ea3bb5SYongqiang Yang 317347ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 317447ea3bb5SYongqiang Yang 317547ea3bb5SYongqiang Yang depth = ext_depth(inode); 317647ea3bb5SYongqiang Yang ex = path[depth].p_ext; 317747ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 317847ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 317947ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex); 318047ea3bb5SYongqiang Yang 318147ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3182556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex) && 3183357b66fdSDmitry Monakhov split_flag & (EXT4_EXT_MAY_ZEROOUT | 3184556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT1 | 3185556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2)); 318647ea3bb5SYongqiang Yang 318747ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth); 318847ea3bb5SYongqiang Yang if (err) 318947ea3bb5SYongqiang Yang goto out; 319047ea3bb5SYongqiang Yang 319147ea3bb5SYongqiang Yang if (split == ee_block) { 319247ea3bb5SYongqiang Yang /* 319347ea3bb5SYongqiang Yang * case b: block @split is the block that the extent begins with 319447ea3bb5SYongqiang Yang * then we just change the state of the extent, and splitting 319547ea3bb5SYongqiang Yang * is not needed. 
319647ea3bb5SYongqiang Yang */ 3197556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3198556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 319947ea3bb5SYongqiang Yang else 320047ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex); 320147ea3bb5SYongqiang Yang 320247ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3203ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 320447ea3bb5SYongqiang Yang 3205ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 320647ea3bb5SYongqiang Yang goto out; 320747ea3bb5SYongqiang Yang } 320847ea3bb5SYongqiang Yang 320947ea3bb5SYongqiang Yang /* case a */ 321047ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex)); 321147ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block); 3212556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT1) 3213556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 321447ea3bb5SYongqiang Yang 321547ea3bb5SYongqiang Yang /* 321647ea3bb5SYongqiang Yang * path may lead to new leaf, not to original leaf any more 321747ea3bb5SYongqiang Yang * after ext4_ext_insert_extent() returns, 321847ea3bb5SYongqiang Yang */ 321947ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 322047ea3bb5SYongqiang Yang if (err) 322147ea3bb5SYongqiang Yang goto fix_extent_len; 322247ea3bb5SYongqiang Yang 322347ea3bb5SYongqiang Yang ex2 = &newex; 322447ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split); 322547ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 322647ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock); 3227556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3228556615dcSLukas Czerner ext4_ext_mark_unwritten(ex2); 322947ea3bb5SYongqiang Yang 3230dfe50809STheodore Ts'o err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); 32313f542479Szhanchengbin if (err != -ENOSPC && err != -EDQUOT && err != -ENOMEM) 3232082cd4ecSYe Bin goto out; 3233082cd4ecSYe Bin 32348fe11779SBaokun Li /* 32358fe11779SBaokun Li * Update path is required because previous ext4_ext_insert_extent() 32368fe11779SBaokun Li * may have freed or reallocated the path. Using EXT4_EX_NOFAIL 32378fe11779SBaokun Li * guarantees that ext4_find_extent() will not return -ENOMEM, 32388fe11779SBaokun Li * otherwise -ENOMEM will cause a retry in do_writepages(), and a 32398fe11779SBaokun Li * WARN_ON may be triggered in ext4_da_update_reserve_space() due to 32408fe11779SBaokun Li * an incorrect ee_len causing the i_reserved_data_blocks exception. 
32418fe11779SBaokun Li */ 32428fe11779SBaokun Li path = ext4_find_extent(inode, ee_block, ppath, 32438fe11779SBaokun Li flags | EXT4_EX_NOFAIL); 32448fe11779SBaokun Li if (IS_ERR(path)) { 32458fe11779SBaokun Li EXT4_ERROR_INODE(inode, "Failed split extent on %u, err %ld", 32468fe11779SBaokun Li split, PTR_ERR(path)); 32478fe11779SBaokun Li return PTR_ERR(path); 32488fe11779SBaokun Li } 32498fe11779SBaokun Li depth = ext_depth(inode); 32508fe11779SBaokun Li ex = path[depth].p_ext; 32518fe11779SBaokun Li *ppath = path; 32528fe11779SBaokun Li 3253082cd4ecSYe Bin if (EXT4_EXT_MAY_ZEROOUT & split_flag) { 3254dee1f973SDmitry Monakhov if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3255adb23551SZheng Liu if (split_flag & EXT4_EXT_DATA_VALID1) { 3256dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex2); 3257adb23551SZheng Liu zero_ex.ee_block = ex2->ee_block; 32588cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32598cde7ad1SZheng Liu ext4_ext_get_actual_len(ex2)); 3260adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3261adb23551SZheng Liu ext4_ext_pblock(ex2)); 3262adb23551SZheng Liu } else { 3263dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex); 3264adb23551SZheng Liu zero_ex.ee_block = ex->ee_block; 32658cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32668cde7ad1SZheng Liu ext4_ext_get_actual_len(ex)); 3267adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3268adb23551SZheng Liu ext4_ext_pblock(ex)); 3269adb23551SZheng Liu } 3270adb23551SZheng Liu } else { 327147ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex); 3272adb23551SZheng Liu zero_ex.ee_block = orig_ex.ee_block; 32738cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32748cde7ad1SZheng Liu ext4_ext_get_actual_len(&orig_ex)); 3275adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3276adb23551SZheng Liu ext4_ext_pblock(&orig_ex)); 3277adb23551SZheng Liu } 3278dee1f973SDmitry Monakhov 3279082cd4ecSYe Bin if (!err) { 328047ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */ 3281af1584f5SAl Viro ex->ee_len = cpu_to_le16(ee_len); 3282ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3283ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3284082cd4ecSYe Bin if (!err) 3285adb23551SZheng Liu /* update extent status tree */ 3286ab8627e1SBaokun Li ext4_zeroout_es(inode, &zero_ex); 3287082cd4ecSYe Bin /* If we failed at this point, we don't know in which 3288082cd4ecSYe Bin * state the extent tree exactly is so don't try to fix 3289082cd4ecSYe Bin * length of the original extent as it may do even more 3290082cd4ecSYe Bin * damage. 3291082cd4ecSYe Bin */ 329247ea3bb5SYongqiang Yang goto out; 3293082cd4ecSYe Bin } 3294082cd4ecSYe Bin } 329547ea3bb5SYongqiang Yang 329647ea3bb5SYongqiang Yang fix_extent_len: 329747ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len; 3298b60ca334SHarshad Shirwadkar /* 3299b60ca334SHarshad Shirwadkar * Ignore ext4_ext_dirty return value since we are already in error path 3300b60ca334SHarshad Shirwadkar * and err is a non-zero error code. 
3301b60ca334SHarshad Shirwadkar */ 330229faed16SDmitry Monakhov ext4_ext_dirty(handle, inode, path + path->p_depth); 330347ea3bb5SYongqiang Yang return err; 3304082cd4ecSYe Bin out: 33058fe11779SBaokun Li ext4_ext_show_leaf(inode, *ppath); 3306082cd4ecSYe Bin return err; 330747ea3bb5SYongqiang Yang } 330847ea3bb5SYongqiang Yang 330947ea3bb5SYongqiang Yang /* 331034b20963SBaokun Li * ext4_split_extent() splits an extent and marks the extent which is covered 331147ea3bb5SYongqiang Yang * by @map as @split_flag indicates 331247ea3bb5SYongqiang Yang * 331347ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (up to three) 331447ea3bb5SYongqiang Yang * There are three possibilities: 331547ea3bb5SYongqiang Yang * a> There is no split required 331647ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the extent 331747ea3bb5SYongqiang Yang * c> Splits in three extents: Someone is splitting in the middle of the extent 331847ea3bb5SYongqiang Yang * 331947ea3bb5SYongqiang Yang */ 332047ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle, 332147ea3bb5SYongqiang Yang struct inode *inode, 3322dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 332347ea3bb5SYongqiang Yang struct ext4_map_blocks *map, 332447ea3bb5SYongqiang Yang int split_flag, 332547ea3bb5SYongqiang Yang int flags) 332647ea3bb5SYongqiang Yang { 3327dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 332847ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 332947ea3bb5SYongqiang Yang struct ext4_extent *ex; 333047ea3bb5SYongqiang Yang unsigned int ee_len, depth; 333147ea3bb5SYongqiang Yang int err = 0; 3332556615dcSLukas Czerner int unwritten; 333347ea3bb5SYongqiang Yang int split_flag1, flags1; 33343a225670SZheng Liu int allocated = map->m_len; 333547ea3bb5SYongqiang Yang 333647ea3bb5SYongqiang Yang depth = ext_depth(inode); 333747ea3bb5SYongqiang Yang ex = path[depth].p_ext; 333847ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 333947ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 3340556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 334147ea3bb5SYongqiang Yang 334247ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) { 3343dee1f973SDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 334447ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3345556615dcSLukas Czerner if (unwritten) 3346556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1 | 3347556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2; 3348dee1f973SDmitry Monakhov if (split_flag & EXT4_EXT_DATA_VALID2) 3349dee1f973SDmitry Monakhov split_flag1 |= EXT4_EXT_DATA_VALID1; 3350dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath, 335147ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1); 335293917411SYongqiang Yang if (err) 335393917411SYongqiang Yang goto out; 33543a225670SZheng Liu } else { 33553a225670SZheng Liu allocated = ee_len - (map->m_lblk - ee_block); 335647ea3bb5SYongqiang Yang } 3357357b66fdSDmitry Monakhov /* 3358357b66fdSDmitry Monakhov * Updating the path is required because the previous ext4_split_extent_at() may 3359357b66fdSDmitry Monakhov * result in a split of the original leaf or an extent zeroout.
3360357b66fdSDmitry Monakhov */ 336173c384c0STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, flags); 336247ea3bb5SYongqiang Yang if (IS_ERR(path)) 336347ea3bb5SYongqiang Yang return PTR_ERR(path); 3364357b66fdSDmitry Monakhov depth = ext_depth(inode); 3365357b66fdSDmitry Monakhov ex = path[depth].p_ext; 3366a18ed359SDmitry Monakhov if (!ex) { 3367a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3368a18ed359SDmitry Monakhov (unsigned long) map->m_lblk); 33696a797d27SDarrick J. Wong return -EFSCORRUPTED; 3370a18ed359SDmitry Monakhov } 3371556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 337247ea3bb5SYongqiang Yang 337347ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) { 3374357b66fdSDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3375556615dcSLukas Czerner if (unwritten) { 3376556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1; 3377357b66fdSDmitry Monakhov split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3378556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2); 3379357b66fdSDmitry Monakhov } 3380dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath, 338147ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags); 338247ea3bb5SYongqiang Yang if (err) 338347ea3bb5SYongqiang Yang goto out; 338447ea3bb5SYongqiang Yang } 338547ea3bb5SYongqiang Yang 338634b20963SBaokun Li ext4_ext_show_leaf(inode, *ppath); 338747ea3bb5SYongqiang Yang out: 33883a225670SZheng Liu return err ? err : allocated; 338947ea3bb5SYongqiang Yang } 339047ea3bb5SYongqiang Yang 339156055d3aSAmit Arora /* 3392e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write 3393556615dcSLukas Czerner * to an unwritten extent. It may result in splitting the unwritten 339456055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two 3395556615dcSLukas Czerner * unwritten). 339656055d3aSAmit Arora * There are three possibilities: 339756055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized 339856055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent 339956055d3aSAmit Arora * c> Splits in three extents: Someone is writing in the middle of the extent 34006f91bc5fSEric Gouriou * 34016f91bc5fSEric Gouriou * Pre-conditions: 3402556615dcSLukas Czerner * - The extent pointed to by 'path' is unwritten. 34036f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset 34046f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 34056f91bc5fSEric Gouriou * 34066f91bc5fSEric Gouriou * Post-conditions on success: 34076f91bc5fSEric Gouriou * - the returned value is the number of blocks beyond map->m_lblk 34086f91bc5fSEric Gouriou * that are allocated and initialized. 34096f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len.
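 *
 * For example (illustrative): a write to blocks 120-129 of an
 * unwritten extent covering blocks 100-199 is case c, in the worst
 * case yielding unwritten [100, 119], initialized [120, 129], and
 * unwritten [130, 199]; the zeroout heuristics below may reduce the
 * number of pieces.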
341056055d3aSAmit Arora */ 3411725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle, 3412725d26d3SAneesh Kumar K.V struct inode *inode, 3413e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3414dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 341527dd4385SLukas Czerner int flags) 341656055d3aSAmit Arora { 3417dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 341867a5da56SZheng Liu struct ext4_sb_info *sbi; 34196f91bc5fSEric Gouriou struct ext4_extent_header *eh; 3420667eff35SYongqiang Yang struct ext4_map_blocks split_map; 34214f8caa60SJan Kara struct ext4_extent zero_ex1, zero_ex2; 3422bc2d9db4SLukas Czerner struct ext4_extent *ex, *abut_ex; 342321ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block; 3424bc2d9db4SLukas Czerner unsigned int ee_len, depth, map_len = map->m_len; 342556055d3aSAmit Arora int err = 0; 34264f8caa60SJan Kara int split_flag = EXT4_EXT_DATA_VALID2; 3427e9c0aa6cSBaokun Li int allocated = 0; 3428e9c0aa6cSBaokun Li unsigned int max_zeroout = 0; 342921ca087aSDmitry Monakhov 343070aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u\n", 3431bc2d9db4SLukas Czerner (unsigned long long)map->m_lblk, map_len); 343221ca087aSDmitry Monakhov 343367a5da56SZheng Liu sbi = EXT4_SB(inode->i_sb); 3434801674f3SJan Kara eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) 3435801674f3SJan Kara >> inode->i_sb->s_blocksize_bits; 3436bc2d9db4SLukas Czerner if (eof_block < map->m_lblk + map_len) 3437bc2d9db4SLukas Czerner eof_block = map->m_lblk + map_len; 343856055d3aSAmit Arora 343956055d3aSAmit Arora depth = ext_depth(inode); 34406f91bc5fSEric Gouriou eh = path[depth].p_hdr; 344156055d3aSAmit Arora ex = path[depth].p_ext; 344256055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block); 344356055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex); 34444f8caa60SJan Kara zero_ex1.ee_len = 0; 34454f8caa60SJan Kara zero_ex2.ee_len = 0; 344621ca087aSDmitry Monakhov 34476f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 34486f91bc5fSEric Gouriou 34496f91bc5fSEric Gouriou /* Pre-conditions */ 3450556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex)); 34516f91bc5fSEric Gouriou BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 34526f91bc5fSEric Gouriou 34536f91bc5fSEric Gouriou /* 34546f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently 3455556615dcSLukas Czerner * unwritten extent to its neighbor. This is much cheaper 34566f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly 3457bc2d9db4SLukas Czerner * memmove() calls. Transferring to the left is the common case in 3458bc2d9db4SLukas Czerner * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3459bc2d9db4SLukas Czerner * followed by append writes. 34606f91bc5fSEric Gouriou * 34616f91bc5fSEric Gouriou * Limitations of the current logic: 3462bc2d9db4SLukas Czerner * - L1: we do not deal with writes covering the whole extent. 34636f91bc5fSEric Gouriou * This would require removing the extent if the transfer 34646f91bc5fSEric Gouriou * is possible. 3465bc2d9db4SLukas Czerner * - L2: we only attempt to merge with an extent stored in the 34666f91bc5fSEric Gouriou * same extent tree node. 
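 *
 * For example (an illustrative sketch): with an initialized extent
 * [96, 99] that logically and physically abuts an unwritten extent
 * [100, 107], a write to [100, 103] can be satisfied by growing the
 * initialized extent to [96, 103] and shrinking the unwritten one to
 * [104, 107], with no extent insertion or merge required.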
34676f91bc5fSEric Gouriou */ 3468bc2d9db4SLukas Czerner if ((map->m_lblk == ee_block) && 3469bc2d9db4SLukas Czerner /* See if we can merge left */ 3470bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3471bc2d9db4SLukas Czerner (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 34726f91bc5fSEric Gouriou ext4_lblk_t prev_lblk; 34736f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk; 3474bc2d9db4SLukas Czerner unsigned int prev_len; 34756f91bc5fSEric Gouriou 3476bc2d9db4SLukas Czerner abut_ex = ex - 1; 3477bc2d9db4SLukas Czerner prev_lblk = le32_to_cpu(abut_ex->ee_block); 3478bc2d9db4SLukas Czerner prev_len = ext4_ext_get_actual_len(abut_ex); 3479bc2d9db4SLukas Czerner prev_pblk = ext4_ext_pblock(abut_ex); 34806f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex); 34816f91bc5fSEric Gouriou 34826f91bc5fSEric Gouriou /* 3483bc2d9db4SLukas Czerner * A transfer of blocks from 'ex' to 'abut_ex' is allowed 34846f91bc5fSEric Gouriou * upon those conditions: 3485bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3486bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3487bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3488bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 34896f91bc5fSEric Gouriou * overflowing the (initialized) length limit. 34906f91bc5fSEric Gouriou */ 3491556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 34926f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/ 34936f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3494bc2d9db4SLukas Czerner (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 34956f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth); 34966f91bc5fSEric Gouriou if (err) 34976f91bc5fSEric Gouriou goto out; 34986f91bc5fSEric Gouriou 34996f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode, 3500bc2d9db4SLukas Czerner map, ex, abut_ex); 35016f91bc5fSEric Gouriou 3502bc2d9db4SLukas Czerner /* Shift the start of ex by 'map_len' blocks */ 3503bc2d9db4SLukas Czerner ex->ee_block = cpu_to_le32(ee_block + map_len); 3504bc2d9db4SLukas Czerner ext4_ext_store_pblock(ex, ee_pblk + map_len); 3505bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3506556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */ 35076f91bc5fSEric Gouriou 3508bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3509bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 35106f91bc5fSEric Gouriou 3511bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3512bc2d9db4SLukas Czerner allocated = map_len; 3513bc2d9db4SLukas Czerner } 3514bc2d9db4SLukas Czerner } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3515bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3516bc2d9db4SLukas Czerner ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3517bc2d9db4SLukas Czerner /* See if we can merge right */ 3518bc2d9db4SLukas Czerner ext4_lblk_t next_lblk; 3519bc2d9db4SLukas Czerner ext4_fsblk_t next_pblk, ee_pblk; 3520bc2d9db4SLukas Czerner unsigned int next_len; 3521bc2d9db4SLukas Czerner 3522bc2d9db4SLukas Czerner abut_ex = ex + 1; 3523bc2d9db4SLukas Czerner next_lblk = le32_to_cpu(abut_ex->ee_block); 3524bc2d9db4SLukas Czerner next_len = ext4_ext_get_actual_len(abut_ex); 3525bc2d9db4SLukas Czerner next_pblk = ext4_ext_pblock(abut_ex); 3526bc2d9db4SLukas Czerner ee_pblk = ext4_ext_pblock(ex); 3527bc2d9db4SLukas Czerner 3528bc2d9db4SLukas Czerner /* 3529bc2d9db4SLukas Czerner * A 
transfer of blocks from 'ex' to 'abut_ex' is allowed 3530bc2d9db4SLukas Czerner * upon those conditions: 3531bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3532bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3533bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3534bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 3535bc2d9db4SLukas Czerner * overflowing the (initialized) length limit. 3536bc2d9db4SLukas Czerner */ 3537556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3538bc2d9db4SLukas Czerner ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3539bc2d9db4SLukas Czerner ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3540bc2d9db4SLukas Czerner (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3541bc2d9db4SLukas Czerner err = ext4_ext_get_access(handle, inode, path + depth); 3542bc2d9db4SLukas Czerner if (err) 3543bc2d9db4SLukas Czerner goto out; 3544bc2d9db4SLukas Czerner 3545bc2d9db4SLukas Czerner trace_ext4_ext_convert_to_initialized_fastpath(inode, 3546bc2d9db4SLukas Czerner map, ex, abut_ex); 3547bc2d9db4SLukas Czerner 3548bc2d9db4SLukas Czerner /* Shift the start of abut_ex by 'map_len' blocks */ 3549bc2d9db4SLukas Czerner abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3550bc2d9db4SLukas Czerner ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3551bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3552556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3553bc2d9db4SLukas Czerner 3554bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3555bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3556bc2d9db4SLukas Czerner 3557bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3558bc2d9db4SLukas Czerner allocated = map_len; 3559bc2d9db4SLukas Czerner } 3560bc2d9db4SLukas Czerner } 3561bc2d9db4SLukas Czerner if (allocated) { 35626f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */ 3563b60ca334SHarshad Shirwadkar err = ext4_ext_dirty(handle, inode, path + depth); 35646f91bc5fSEric Gouriou 35656f91bc5fSEric Gouriou /* Update path to point to the right extent */ 3566bc2d9db4SLukas Czerner path[depth].p_ext = abut_ex; 35676f91bc5fSEric Gouriou goto out; 3568bc2d9db4SLukas Czerner } else 3569bc2d9db4SLukas Czerner allocated = ee_len - (map->m_lblk - ee_block); 35706f91bc5fSEric Gouriou 3571667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block); 357221ca087aSDmitry Monakhov /* 357321ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 35749e740568SYongqiang Yang * zeroout only if extent is fully inside i_size or new_size. 357521ca087aSDmitry Monakhov */ 3576667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 357721ca087aSDmitry Monakhov 357867a5da56SZheng Liu if (EXT4_EXT_MAY_ZEROOUT & split_flag) 357967a5da56SZheng Liu max_zeroout = sbi->s_extent_max_zeroout_kb >> 35804f42f80aSLukas Czerner (inode->i_sb->s_blocksize_bits - 10); 358167a5da56SZheng Liu 3582667eff35SYongqiang Yang /* 35834f8caa60SJan Kara * five cases: 3584667eff35SYongqiang Yang * 1. split the extent into three extents. 35854f8caa60SJan Kara * 2. split the extent into two extents, zeroout the head of the first 35864f8caa60SJan Kara * extent. 35874f8caa60SJan Kara * 3. split the extent into two extents, zeroout the tail of the second 35884f8caa60SJan Kara * extent. 3589667eff35SYongqiang Yang * 4. 
split the extent into two extents without zeroout. 35904f8caa60SJan Kara * 5. no splitting needed, just possibly zeroout the head and / or the 35914f8caa60SJan Kara * tail of the extent. 3592667eff35SYongqiang Yang */ 3593667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3594667eff35SYongqiang Yang split_map.m_len = map->m_len; 3595667eff35SYongqiang Yang 35964f8caa60SJan Kara if (max_zeroout && (allocated > split_map.m_len)) { 359767a5da56SZheng Liu if (allocated <= max_zeroout) { 35984f8caa60SJan Kara /* case 3 or 5 */ 35994f8caa60SJan Kara zero_ex1.ee_block = 36004f8caa60SJan Kara cpu_to_le32(split_map.m_lblk + 36014f8caa60SJan Kara split_map.m_len); 36024f8caa60SJan Kara zero_ex1.ee_len = 36034f8caa60SJan Kara cpu_to_le16(allocated - split_map.m_len); 36044f8caa60SJan Kara ext4_ext_store_pblock(&zero_ex1, 36054f8caa60SJan Kara ext4_ext_pblock(ex) + split_map.m_lblk + 36064f8caa60SJan Kara split_map.m_len - ee_block); 36074f8caa60SJan Kara err = ext4_ext_zeroout(inode, &zero_ex1); 3608667eff35SYongqiang Yang if (err) 3609308c57ccSTheodore Ts'o goto fallback; 3610667eff35SYongqiang Yang split_map.m_len = allocated; 36114f8caa60SJan Kara } 36124f8caa60SJan Kara if (split_map.m_lblk - ee_block + split_map.m_len < 36134f8caa60SJan Kara max_zeroout) { 36144f8caa60SJan Kara /* case 2 or 5 */ 36154f8caa60SJan Kara if (split_map.m_lblk != ee_block) { 36164f8caa60SJan Kara zero_ex2.ee_block = ex->ee_block; 36174f8caa60SJan Kara zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk - 3618667eff35SYongqiang Yang ee_block); 36194f8caa60SJan Kara ext4_ext_store_pblock(&zero_ex2, 3620667eff35SYongqiang Yang ext4_ext_pblock(ex)); 36214f8caa60SJan Kara err = ext4_ext_zeroout(inode, &zero_ex2); 3622667eff35SYongqiang Yang if (err) 3623308c57ccSTheodore Ts'o goto fallback; 3624667eff35SYongqiang Yang } 3625667eff35SYongqiang Yang 36264f8caa60SJan Kara split_map.m_len += split_map.m_lblk - ee_block; 3627667eff35SYongqiang Yang split_map.m_lblk = ee_block; 36289b940f8eSAllison Henderson allocated = map->m_len; 3629667eff35SYongqiang Yang } 3630667eff35SYongqiang Yang } 3631667eff35SYongqiang Yang 3632308c57ccSTheodore Ts'o fallback: 3633ae9e9c6aSJan Kara err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag, 3634ae9e9c6aSJan Kara flags); 3635ae9e9c6aSJan Kara if (err > 0) 3636ae9e9c6aSJan Kara err = 0; 3637667eff35SYongqiang Yang out: 3638adb23551SZheng Liu /* If we have gotten a failure, don't zero out status tree */ 36394f8caa60SJan Kara if (!err) { 3640ab8627e1SBaokun Li ext4_zeroout_es(inode, &zero_ex1); 3641ab8627e1SBaokun Li ext4_zeroout_es(inode, &zero_ex2); 36424f8caa60SJan Kara } 3643667eff35SYongqiang Yang return err ? err : allocated; 364456055d3aSAmit Arora } 364556055d3aSAmit Arora 3646c278bfecSAneesh Kumar K.V /* 3647e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() from 36480031462bSMingming Cao * ext4_get_blocks_dio_write() when DIO needs to write 3649556615dcSLukas Czerner * to an unwritten extent.
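 *
 * Editor's aside, not part of the original source: a worked example of the
 * s_extent_max_zeroout_kb sizing used in the zeroout decision above. The
 * names and values below are illustrative stand-ins, written as plain,
 * compilable C.
 */

#include <assert.h>

int main(void)
{
	unsigned int extent_max_zeroout_kb = 32;	/* default tunable, in KiB */
	unsigned int blocksize_bits = 12;		/* 4 KiB blocks */

	/* mirrors: max_zeroout = s_extent_max_zeroout_kb >> (bits - 10) */
	unsigned int max_zeroout = extent_max_zeroout_kb >> (blocksize_bits - 10);

	/* 32 KiB / 4 KiB = at most 8 blocks zeroed out instead of splitting */
	assert(max_zeroout == 8);
	return 0;
}

/*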
36500031462bSMingming Cao * 3651556615dcSLukas Czerner * Writing to an unwritten extent may result in splitting the unwritten 3652556615dcSLukas Czerner * extent into multiple initialized/unwritten extents (up to three). 36530031462bSMingming Cao * There are three possibilities: 3654556615dcSLukas Czerner * a> There is no split required: Entire extent should be unwritten 36550031462bSMingming Cao * b> Splits in two extents: Write is happening at either end of the extent 36560031462bSMingming Cao * c> Splits in three extents: Someone is writing in the middle of the extent 36570031462bSMingming Cao * 3658b8a86845SLukas Czerner * This works the same way in the case of initialized -> unwritten conversion. 3659b8a86845SLukas Czerner * 36600031462bSMingming Cao * One or more index blocks may be needed if the extent tree grows after 3661556615dcSLukas Czerner * the unwritten extent split. To prevent ENOSPC from occurring when the IO 3662556615dcSLukas Czerner * completes, we need to split the unwritten extent before DIO submits 3663556615dcSLukas Czerner * the IO. The unwritten extent handled at this time will be split 3664556615dcSLukas Czerner * into at most three unwritten extents. After the IO completes, the part 36650031462bSMingming Cao * being filled will be converted to initialized by the end_io callback function 36660031462bSMingming Cao * via ext4_convert_unwritten_extents(). 3667ba230c3fSMingming * 3668556615dcSLukas Czerner * Returns the size of the unwritten extent to be written on success. 36690031462bSMingming Cao */ 3670b8a86845SLukas Czerner static int ext4_split_convert_extents(handle_t *handle, 36710031462bSMingming Cao struct inode *inode, 3672e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3673dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 36740031462bSMingming Cao int flags) 36750031462bSMingming Cao { 3676dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 3677667eff35SYongqiang Yang ext4_lblk_t eof_block; 3678667eff35SYongqiang Yang ext4_lblk_t ee_block; 3679667eff35SYongqiang Yang struct ext4_extent *ex; 3680667eff35SYongqiang Yang unsigned int ee_len; 3681667eff35SYongqiang Yang int split_flag = 0, depth; 36820031462bSMingming Cao 368370aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u\n", 3684e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len); 368521ca087aSDmitry Monakhov 3686801674f3SJan Kara eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1) 3687801674f3SJan Kara >> inode->i_sb->s_blocksize_bits; 3688e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len) 3689e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len; 36900031462bSMingming Cao /* 369121ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 3692e4d7f2d3SKeyur Patel * zeroout only if extent is fully inside i_size or new_size. 369321ca087aSDmitry Monakhov */ 3694667eff35SYongqiang Yang depth = ext_depth(inode); 36950031462bSMingming Cao ex = path[depth].p_ext; 3696667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 3697667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 36980031462bSMingming Cao 3699b8a86845SLukas Czerner /* Convert to unwritten */ 3700b8a86845SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) { 3701b8a86845SLukas Czerner split_flag |= EXT4_EXT_DATA_VALID1; 3702b8a86845SLukas Czerner /* Convert to initialized */ 3703b8a86845SLukas Czerner } else if (flags & EXT4_GET_BLOCKS_CONVERT) { 3704b8a86845SLukas Czerner split_flag |= ee_block + ee_len <= eof_block ?
3705b8a86845SLukas Czerner EXT4_EXT_MAY_ZEROOUT : 0; 3706556615dcSLukas Czerner split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); 3707b8a86845SLukas Czerner } 3708667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO; 3709dfe50809STheodore Ts'o return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); 37100031462bSMingming Cao } 3711197217a5SYongqiang Yang 3712c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle, 37130031462bSMingming Cao struct inode *inode, 3714dee1f973SDmitry Monakhov struct ext4_map_blocks *map, 3715dfe50809STheodore Ts'o struct ext4_ext_path **ppath) 37160031462bSMingming Cao { 3717dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 37180031462bSMingming Cao struct ext4_extent *ex; 3719dee1f973SDmitry Monakhov ext4_lblk_t ee_block; 3720dee1f973SDmitry Monakhov unsigned int ee_len; 37210031462bSMingming Cao int depth; 37220031462bSMingming Cao int err = 0; 37230031462bSMingming Cao 37240031462bSMingming Cao depth = ext_depth(inode); 37250031462bSMingming Cao ex = path[depth].p_ext; 3726dee1f973SDmitry Monakhov ee_block = le32_to_cpu(ex->ee_block); 3727dee1f973SDmitry Monakhov ee_len = ext4_ext_get_actual_len(ex); 37280031462bSMingming Cao 372970aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u\n", 3730dee1f973SDmitry Monakhov (unsigned long long)ee_block, ee_len); 3731dee1f973SDmitry Monakhov 3732ff95ec22SDmitry Monakhov /* If the extent is larger than requested, it is a clear sign that we still 3733ff95ec22SDmitry Monakhov * have some extent state machine issues left. So an extent split is still 3734ff95ec22SDmitry Monakhov * required. 3735ff95ec22SDmitry Monakhov * TODO: Once all related issues are fixed, this situation should be 3736ff95ec22SDmitry Monakhov * illegal.
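 *
 * Editor's aside, not part of the original source: a minimal sketch of the
 * split_flag selection performed by ext4_split_convert_extents() above.
 * The flag names and values here are stand-ins, not the kernel's
 * definitions; this is plain, compilable C for illustration only.
 */

#define SK_MAY_ZEROOUT	0x1	/* safe to zero out if the split fails */
#define SK_MARK_UNWRIT2	0x4	/* mark the second half unwritten */
#define SK_DATA_VALID1	0x8	/* first half contains valid data */
#define SK_DATA_VALID2	0x10	/* second half contains valid data */

static int sketch_pick_split_flag(int to_unwritten, int inside_eof)
{
	int split_flag = 0;

	if (to_unwritten) {
		/* initialized -> unwritten: the head keeps the valid data */
		split_flag |= SK_DATA_VALID1;
	} else {
		/* unwritten -> initialized at IO completion */
		if (inside_eof)
			split_flag |= SK_MAY_ZEROOUT;
		split_flag |= SK_MARK_UNWRIT2 | SK_DATA_VALID2;
	}
	return split_flag;
}

/*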
3737ff95ec22SDmitry Monakhov */ 3738dee1f973SDmitry Monakhov if (ee_block != map->m_lblk || ee_len > map->m_len) { 3739e3d550c2SRakesh Pandit #ifdef CONFIG_EXT4_DEBUG 3740e3d550c2SRakesh Pandit ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," 37418d2ae1cbSJakub Wilk " len %u; IO logical block %llu, len %u", 3742ff95ec22SDmitry Monakhov inode->i_ino, (unsigned long long)ee_block, ee_len, 3743ff95ec22SDmitry Monakhov (unsigned long long)map->m_lblk, map->m_len); 3744ff95ec22SDmitry Monakhov #endif 3745dfe50809STheodore Ts'o err = ext4_split_convert_extents(handle, inode, map, ppath, 3746dee1f973SDmitry Monakhov EXT4_GET_BLOCKS_CONVERT); 3747dee1f973SDmitry Monakhov if (err < 0) 3748dfe50809STheodore Ts'o return err; 3749ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3750dfe50809STheodore Ts'o if (IS_ERR(path)) 3751dfe50809STheodore Ts'o return PTR_ERR(path); 3752dee1f973SDmitry Monakhov depth = ext_depth(inode); 3753dee1f973SDmitry Monakhov ex = path[depth].p_ext; 3754dee1f973SDmitry Monakhov } 3755197217a5SYongqiang Yang 37560031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth); 37570031462bSMingming Cao if (err) 37580031462bSMingming Cao goto out; 37590031462bSMingming Cao /* first mark the extent as initialized */ 37600031462bSMingming Cao ext4_ext_mark_initialized(ex); 37610031462bSMingming Cao 3762197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because 3763197217a5SYongqiang Yang * borders are not changed 37640031462bSMingming Cao */ 3765ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3766197217a5SYongqiang Yang 37670031462bSMingming Cao /* Mark modified extent as dirty */ 3768ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 37690031462bSMingming Cao out: 37700031462bSMingming Cao ext4_ext_show_leaf(inode, path); 37710031462bSMingming Cao return err; 37720031462bSMingming Cao } 37730031462bSMingming Cao 37740031462bSMingming Cao static int 3775e8b83d93STheodore Ts'o convert_initialized_extent(handle_t *handle, struct inode *inode, 3776b8a86845SLukas Czerner struct ext4_map_blocks *map, 377729c6eaffSEric Whitney struct ext4_ext_path **ppath, 3778f064a9d6SEric Whitney unsigned int *allocated) 3779b8a86845SLukas Czerner { 37804f224b8bSTheodore Ts'o struct ext4_ext_path *path = *ppath; 3781e8b83d93STheodore Ts'o struct ext4_extent *ex; 3782e8b83d93STheodore Ts'o ext4_lblk_t ee_block; 3783e8b83d93STheodore Ts'o unsigned int ee_len; 3784e8b83d93STheodore Ts'o int depth; 3785b8a86845SLukas Czerner int err = 0; 3786b8a86845SLukas Czerner 3787b8a86845SLukas Czerner /* 3788b8a86845SLukas Czerner * Make sure that the extent is no bigger than we support with 3789556615dcSLukas Czerner * unwritten extent 3790b8a86845SLukas Czerner */ 3791556615dcSLukas Czerner if (map->m_len > EXT_UNWRITTEN_MAX_LEN) 3792556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; 3793b8a86845SLukas Czerner 3794e8b83d93STheodore Ts'o depth = ext_depth(inode); 3795e8b83d93STheodore Ts'o ex = path[depth].p_ext; 3796e8b83d93STheodore Ts'o ee_block = le32_to_cpu(ex->ee_block); 3797e8b83d93STheodore Ts'o ee_len = ext4_ext_get_actual_len(ex); 3798e8b83d93STheodore Ts'o 379970aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u\n", 3800e8b83d93STheodore Ts'o (unsigned long long)ee_block, ee_len); 3801e8b83d93STheodore Ts'o 3802e8b83d93STheodore Ts'o if (ee_block != map->m_lblk || ee_len > map->m_len) { 3803dfe50809STheodore Ts'o err = 
ext4_split_convert_extents(handle, inode, map, ppath, 3804e8b83d93STheodore Ts'o EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); 3805e8b83d93STheodore Ts'o if (err < 0) 3806e8b83d93STheodore Ts'o return err; 3807ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3808e8b83d93STheodore Ts'o if (IS_ERR(path)) 3809e8b83d93STheodore Ts'o return PTR_ERR(path); 3810e8b83d93STheodore Ts'o depth = ext_depth(inode); 3811e8b83d93STheodore Ts'o ex = path[depth].p_ext; 3812e8b83d93STheodore Ts'o if (!ex) { 3813e8b83d93STheodore Ts'o EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3814e8b83d93STheodore Ts'o (unsigned long) map->m_lblk); 38156a797d27SDarrick J. Wong return -EFSCORRUPTED; 3816e8b83d93STheodore Ts'o } 3817e8b83d93STheodore Ts'o } 3818e8b83d93STheodore Ts'o 3819e8b83d93STheodore Ts'o err = ext4_ext_get_access(handle, inode, path + depth); 3820e8b83d93STheodore Ts'o if (err) 3821e8b83d93STheodore Ts'o return err; 3822e8b83d93STheodore Ts'o /* first mark the extent as unwritten */ 3823e8b83d93STheodore Ts'o ext4_ext_mark_unwritten(ex); 3824e8b83d93STheodore Ts'o 3825e8b83d93STheodore Ts'o /* note: ext4_ext_correct_indexes() isn't needed here because 3826e8b83d93STheodore Ts'o * borders are not changed 3827e8b83d93STheodore Ts'o */ 3828e8b83d93STheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3829e8b83d93STheodore Ts'o 3830e8b83d93STheodore Ts'o /* Mark modified extent as dirty */ 3831e8b83d93STheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3832e8b83d93STheodore Ts'o if (err) 3833e8b83d93STheodore Ts'o return err; 3834e8b83d93STheodore Ts'o ext4_ext_show_leaf(inode, path); 3835e8b83d93STheodore Ts'o 3836b8a86845SLukas Czerner ext4_update_inode_fsync_trans(handle, inode, 1); 38374337ecd1SEric Whitney 3838b8a86845SLukas Czerner map->m_flags |= EXT4_MAP_UNWRITTEN; 3839f064a9d6SEric Whitney if (*allocated > map->m_len) 3840f064a9d6SEric Whitney *allocated = map->m_len; 3841f064a9d6SEric Whitney map->m_len = *allocated; 3842f064a9d6SEric Whitney return 0; 3843b8a86845SLukas Czerner } 3844b8a86845SLukas Czerner 3845b8a86845SLukas Czerner static int 3846556615dcSLukas Czerner ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, 3847e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3848dfe50809STheodore Ts'o struct ext4_ext_path **ppath, int flags, 3849e35fd660STheodore Ts'o unsigned int allocated, ext4_fsblk_t newblock) 38500031462bSMingming Cao { 38510031462bSMingming Cao int ret = 0; 38520031462bSMingming Cao int err = 0; 38530031462bSMingming Cao 385470aa1554SRitesh Harjani ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n", 385570aa1554SRitesh Harjani (unsigned long long)map->m_lblk, map->m_len, flags, 385670aa1554SRitesh Harjani allocated); 385734b20963SBaokun Li ext4_ext_show_leaf(inode, *ppath); 38580031462bSMingming Cao 385927dd4385SLukas Czerner /* 3860556615dcSLukas Czerner * When writing into unwritten space, we should not fail to 386127dd4385SLukas Czerner * allocate metadata blocks for the new extent block if needed. 
386227dd4385SLukas Czerner */ 386327dd4385SLukas Czerner flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 386427dd4385SLukas Czerner 3865556615dcSLukas Czerner trace_ext4_ext_handle_unwritten_extents(inode, map, flags, 3866b5645534SZheng Liu allocated, newblock); 3867d8990240SAditya Kali 3868779e2651SEric Whitney /* get_block() before submitting IO, split the extent */ 3869c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_PRE_IO) { 3870dfe50809STheodore Ts'o ret = ext4_split_convert_extents(handle, inode, map, ppath, 3871dfe50809STheodore Ts'o flags | EXT4_GET_BLOCKS_CONVERT); 3872779e2651SEric Whitney if (ret < 0) { 3873779e2651SEric Whitney err = ret; 3874779e2651SEric Whitney goto out2; 3875779e2651SEric Whitney } 3876779e2651SEric Whitney /* 3877779e2651SEric Whitney * shouldn't get a 0 return when splitting an extent unless 3878779e2651SEric Whitney * m_len is 0 (bug) or extent has been corrupted 3879779e2651SEric Whitney */ 3880779e2651SEric Whitney if (unlikely(ret == 0)) { 3881779e2651SEric Whitney EXT4_ERROR_INODE(inode, 3882779e2651SEric Whitney "unexpected ret == 0, m_len = %u", 3883779e2651SEric Whitney map->m_len); 3884779e2651SEric Whitney err = -EFSCORRUPTED; 3885779e2651SEric Whitney goto out2; 3886779e2651SEric Whitney } 3887a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 38880031462bSMingming Cao goto out; 38890031462bSMingming Cao } 3890c7064ef1SJiaying Zhang /* IO end_io complete, convert the filled extent to written */ 3891c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT) { 3892bee6cf00SEric Whitney err = ext4_convert_unwritten_extents_endio(handle, inode, map, 3893dfe50809STheodore Ts'o ppath); 3894bee6cf00SEric Whitney if (err < 0) 38950031462bSMingming Cao goto out2; 3896bee6cf00SEric Whitney ext4_update_inode_fsync_trans(handle, inode, 1); 3897bee6cf00SEric Whitney goto map_out; 38980031462bSMingming Cao } 3899bee6cf00SEric Whitney /* buffered IO cases */ 39000031462bSMingming Cao /* 39010031462bSMingming Cao * repeat fallocate creation request 39020031462bSMingming Cao * we already have an unwritten extent 39030031462bSMingming Cao */ 3904556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 3905a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 39060031462bSMingming Cao goto map_out; 3907a25a4e1aSZheng Liu } 39080031462bSMingming Cao 39090031462bSMingming Cao /* buffered READ or buffered write_begin() lookup */ 39100031462bSMingming Cao if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 39110031462bSMingming Cao /* 39120031462bSMingming Cao * We have blocks reserved already. We 39130031462bSMingming Cao * return allocated blocks so that delalloc 39140031462bSMingming Cao * won't do block reservation for us. But 39150031462bSMingming Cao * the buffer head will be unmapped so that 39160031462bSMingming Cao * a read from the block returns 0s. 39170031462bSMingming Cao */ 3918e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNWRITTEN; 39190031462bSMingming Cao goto out1; 39200031462bSMingming Cao } 39210031462bSMingming Cao 3922be809e12SEric Whitney /* 3923be809e12SEric Whitney * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1. 3924be809e12SEric Whitney * For buffered writes, at writepage time, etc. Convert a 3925be809e12SEric Whitney * discovered unwritten extent to written. 
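 *
 * Editor's aside, not part of the original source: the flag dispatch order
 * that ext4_ext_handle_unwritten_extents() walks through above, restated as
 * a stand-alone sketch. Flag names and values are stand-ins, written as
 * plain, compilable C.
 */

#define SK_PRE_IO	0x1
#define SK_CONVERT	0x2
#define SK_UNWRIT_EXT	0x4
#define SK_CREATE	0x8

static const char *sketch_unwritten_action(int flags)
{
	if (flags & SK_PRE_IO)
		return "split the extent before submitting the IO";
	if (flags & SK_CONVERT)
		return "IO completed: convert the filled extent to written";
	if (flags & SK_UNWRIT_EXT)
		return "repeated fallocate: keep the extent unwritten";
	if (!(flags & SK_CREATE))
		return "buffered lookup: report unwritten and unmapped";
	return "buffered write: convert the extent to initialized";
}

/*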
3926be809e12SEric Whitney */ 3927dfe50809STheodore Ts'o ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); 3928be809e12SEric Whitney if (ret < 0) { 39290031462bSMingming Cao err = ret; 39300031462bSMingming Cao goto out2; 3931779e2651SEric Whitney } 3932be809e12SEric Whitney ext4_update_inode_fsync_trans(handle, inode, 1); 3933be809e12SEric Whitney /* 3934be809e12SEric Whitney * shouldn't get a 0 return when converting an unwritten extent 3935be809e12SEric Whitney * unless m_len is 0 (bug) or extent has been corrupted 3936be809e12SEric Whitney */ 3937be809e12SEric Whitney if (unlikely(ret == 0)) { 3938be809e12SEric Whitney EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u", 3939be809e12SEric Whitney map->m_len); 3940be809e12SEric Whitney err = -EFSCORRUPTED; 3941be809e12SEric Whitney goto out2; 3942be809e12SEric Whitney } 3943be809e12SEric Whitney 3944779e2651SEric Whitney out: 39450031462bSMingming Cao allocated = ret; 3946e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 39470031462bSMingming Cao map_out: 3948e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 39490031462bSMingming Cao out1: 3950bee6cf00SEric Whitney map->m_pblk = newblock; 3951e35fd660STheodore Ts'o if (allocated > map->m_len) 3952e35fd660STheodore Ts'o allocated = map->m_len; 3953e35fd660STheodore Ts'o map->m_len = allocated; 395434b20963SBaokun Li ext4_ext_show_leaf(inode, *ppath); 39550031462bSMingming Cao out2: 39560031462bSMingming Cao return err ? err : allocated; 39570031462bSMingming Cao } 395858590b06STheodore Ts'o 39590031462bSMingming Cao /* 39604d33b1efSTheodore Ts'o * get_implied_cluster_alloc - check to see if the requested 39614d33b1efSTheodore Ts'o * allocation (in the map structure) overlaps with a cluster already 39624d33b1efSTheodore Ts'o * allocated in an extent. 3963d8990240SAditya Kali * @sb The filesystem superblock structure 39644d33b1efSTheodore Ts'o * @map The requested lblk->pblk mapping 39654d33b1efSTheodore Ts'o * @ex The extent structure which might contain an implied 39664d33b1efSTheodore Ts'o * cluster allocation 39674d33b1efSTheodore Ts'o * 39684d33b1efSTheodore Ts'o * This function is called by ext4_ext_map_blocks() after we failed to 39694d33b1efSTheodore Ts'o * find blocks that were already in the inode's extent tree. Hence, 39704d33b1efSTheodore Ts'o * we know that the beginning of the requested region cannot overlap 39714d33b1efSTheodore Ts'o * the extent from the inode's extent tree. There are three cases we 39724d33b1efSTheodore Ts'o * want to catch. 
The first is this case: 39734d33b1efSTheodore Ts'o * 39744d33b1efSTheodore Ts'o * |--- cluster # N--| 39754d33b1efSTheodore Ts'o * |--- extent ---| |---- requested region ---| 39764d33b1efSTheodore Ts'o * |==========| 39774d33b1efSTheodore Ts'o * 39784d33b1efSTheodore Ts'o * The second case that we need to test for is this one: 39794d33b1efSTheodore Ts'o * 39804d33b1efSTheodore Ts'o * |--------- cluster # N ----------------| 39814d33b1efSTheodore Ts'o * |--- requested region --| |------- extent ----| 39824d33b1efSTheodore Ts'o * |=======================| 39834d33b1efSTheodore Ts'o * 39844d33b1efSTheodore Ts'o * The third case is when the requested region lies between two extents 39854d33b1efSTheodore Ts'o * within the same cluster: 39864d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 39874d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 39884d33b1efSTheodore Ts'o * |------ requested region ------| 39894d33b1efSTheodore Ts'o * |================| 39904d33b1efSTheodore Ts'o * 39914d33b1efSTheodore Ts'o * In each of the above cases, we need to set the map->m_pblk and 39924d33b1efSTheodore Ts'o * map->m_len so they correspond to the extent labelled as 39934d33b1efSTheodore Ts'o * "|====|" from cluster #N, since it is already in use for data in 39944d33b1efSTheodore Ts'o * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 39954d33b1efSTheodore Ts'o * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 39964d33b1efSTheodore Ts'o * as a new "allocated" block region. Otherwise, we will return 0 and 39974d33b1efSTheodore Ts'o * ext4_ext_map_blocks() will then allocate one or more new clusters 39984d33b1efSTheodore Ts'o * by calling ext4_mb_new_blocks(). 39994d33b1efSTheodore Ts'o */ 4000d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb, 40014d33b1efSTheodore Ts'o struct ext4_map_blocks *map, 40024d33b1efSTheodore Ts'o struct ext4_extent *ex, 40034d33b1efSTheodore Ts'o struct ext4_ext_path *path) 40044d33b1efSTheodore Ts'o { 4005d8990240SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(sb); 4006f5a44db5STheodore Ts'o ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 40074d33b1efSTheodore Ts'o ext4_lblk_t ex_cluster_start, ex_cluster_end; 400814d7f3efSCurt Wohlgemuth ext4_lblk_t rr_cluster_start; 40094d33b1efSTheodore Ts'o ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 40104d33b1efSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 40114d33b1efSTheodore Ts'o unsigned short ee_len = ext4_ext_get_actual_len(ex); 40124d33b1efSTheodore Ts'o 40134d33b1efSTheodore Ts'o /* The extent passed in that we are trying to match */ 40144d33b1efSTheodore Ts'o ex_cluster_start = EXT4_B2C(sbi, ee_block); 40154d33b1efSTheodore Ts'o ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 40164d33b1efSTheodore Ts'o 40174d33b1efSTheodore Ts'o /* The requested region passed into ext4_map_blocks() */ 40184d33b1efSTheodore Ts'o rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 40194d33b1efSTheodore Ts'o 40204d33b1efSTheodore Ts'o if ((rr_cluster_start == ex_cluster_end) || 40214d33b1efSTheodore Ts'o (rr_cluster_start == ex_cluster_start)) { 40224d33b1efSTheodore Ts'o if (rr_cluster_start == ex_cluster_end) 40234d33b1efSTheodore Ts'o ee_start += ee_len - 1; 4024f5a44db5STheodore Ts'o map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; 40254d33b1efSTheodore Ts'o map->m_len = min(map->m_len, 40264d33b1efSTheodore Ts'o (unsigned) sbi->s_cluster_ratio - c_offset); 40274d33b1efSTheodore Ts'o /* 40284d33b1efSTheodore
Ts'o * Check for and handle this case: 40294d33b1efSTheodore Ts'o * 40304d33b1efSTheodore Ts'o * |--------- cluster # N-------------| 40314d33b1efSTheodore Ts'o * |------- extent ----| 40324d33b1efSTheodore Ts'o * |--- requested region ---| 40334d33b1efSTheodore Ts'o * |===========| 40344d33b1efSTheodore Ts'o */ 40354d33b1efSTheodore Ts'o 40364d33b1efSTheodore Ts'o if (map->m_lblk < ee_block) 40374d33b1efSTheodore Ts'o map->m_len = min(map->m_len, ee_block - map->m_lblk); 40384d33b1efSTheodore Ts'o 40394d33b1efSTheodore Ts'o /* 40404d33b1efSTheodore Ts'o * Check for the case where there is already another allocated 40414d33b1efSTheodore Ts'o * block to the right of 'ex' but before the end of the cluster. 40424d33b1efSTheodore Ts'o * 40434d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 40444d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 40454d33b1efSTheodore Ts'o * |------ requested region ------| 40464d33b1efSTheodore Ts'o * |================| 40474d33b1efSTheodore Ts'o */ 40484d33b1efSTheodore Ts'o if (map->m_lblk > ee_block) { 40494d33b1efSTheodore Ts'o ext4_lblk_t next = ext4_ext_next_allocated_block(path); 40504d33b1efSTheodore Ts'o map->m_len = min(map->m_len, next - map->m_lblk); 40514d33b1efSTheodore Ts'o } 4052d8990240SAditya Kali 4053d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 40544d33b1efSTheodore Ts'o return 1; 40554d33b1efSTheodore Ts'o } 4056d8990240SAditya Kali 4057d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 40584d33b1efSTheodore Ts'o return 0; 40594d33b1efSTheodore Ts'o } 40604d33b1efSTheodore Ts'o 4061f5411b76SZhang Yi /* 4062f5411b76SZhang Yi * Determine hole length around the given logical block, first try to 4063f5411b76SZhang Yi * locate and expand the hole from the given @path, and then adjust it 4064f5411b76SZhang Yi * if it's partially or completely converted to delayed extents, insert 4065f5411b76SZhang Yi * it into the extent cache tree if it's indeed a hole, and finally return 4066f5411b76SZhang Yi * the length of the determined extent. 4067f5411b76SZhang Yi */ 4068f5411b76SZhang Yi static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode, 4069f5411b76SZhang Yi struct ext4_ext_path *path, 4070f5411b76SZhang Yi ext4_lblk_t lblk) 4071f5411b76SZhang Yi { 4072f5411b76SZhang Yi ext4_lblk_t hole_start, len; 4073f5411b76SZhang Yi struct extent_status es; 4074f5411b76SZhang Yi 4075f5411b76SZhang Yi hole_start = lblk; 4076f5411b76SZhang Yi len = ext4_ext_find_hole(inode, path, &hole_start); 4077f5411b76SZhang Yi again: 4078f5411b76SZhang Yi ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start, 4079f5411b76SZhang Yi hole_start + len - 1, &es); 4080f5411b76SZhang Yi if (!es.es_len) 4081f5411b76SZhang Yi goto insert_hole; 4082f5411b76SZhang Yi 4083f5411b76SZhang Yi /* 4084f5411b76SZhang Yi * There's a delalloc extent in the hole; handle it whether the delalloc 4085f5411b76SZhang Yi * extent is in front of, behind, or straddles the queried range. 4086f5411b76SZhang Yi */ 4087f5411b76SZhang Yi if (lblk >= es.es_lblk + es.es_len) { 4088f5411b76SZhang Yi /* 4089f5411b76SZhang Yi * The delalloc extent is in front of the queried range, 4090f5411b76SZhang Yi * find again from the queried start block.
4091f5411b76SZhang Yi */ 4092f5411b76SZhang Yi len -= lblk - hole_start; 4093f5411b76SZhang Yi hole_start = lblk; 4094f5411b76SZhang Yi goto again; 4095f5411b76SZhang Yi } else if (in_range(lblk, es.es_lblk, es.es_len)) { 4096f5411b76SZhang Yi /* 4097f5411b76SZhang Yi * The delalloc extent contains lblk; it must have been 4098f5411b76SZhang Yi * added after ext4_map_blocks() checked the extent status 4099f5411b76SZhang Yi * tree, so adjust the length to the part of the delalloc 4100f5411b76SZhang Yi * extent after lblk. 4101f5411b76SZhang Yi */ 4102f5411b76SZhang Yi len = es.es_lblk + es.es_len - lblk; 4103f5411b76SZhang Yi return len; 4104f5411b76SZhang Yi } else { 4105f5411b76SZhang Yi /* 4106f5411b76SZhang Yi * The delalloc extent is partially or completely behind 4107f5411b76SZhang Yi * the queried range, update hole length until the 4108f5411b76SZhang Yi * beginning of the delalloc extent. 4109f5411b76SZhang Yi */ 4110f5411b76SZhang Yi len = min(es.es_lblk - hole_start, len); 4111f5411b76SZhang Yi } 4112f5411b76SZhang Yi 4113f5411b76SZhang Yi insert_hole: 4114f5411b76SZhang Yi /* Put just found gap into cache to speed up subsequent requests */ 4115f5411b76SZhang Yi ext_debug(inode, " -> %u:%u\n", hole_start, len); 4116f5411b76SZhang Yi ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE); 4117f5411b76SZhang Yi 4118f5411b76SZhang Yi /* Update hole_len to reflect hole size after lblk */ 4119f5411b76SZhang Yi if (hole_start != lblk) 4120f5411b76SZhang Yi len -= lblk - hole_start; 4121f5411b76SZhang Yi 4122f5411b76SZhang Yi return len; 4123f5411b76SZhang Yi } 41244d33b1efSTheodore Ts'o 41254d33b1efSTheodore Ts'o /* 4126f5ab0d1fSMingming Cao * Block allocation/map/preallocation routine for extent-based files 4127f5ab0d1fSMingming Cao * 4128f5ab0d1fSMingming Cao * 4129c278bfecSAneesh Kumar K.V * Need to be called with 41300e855ac8SAneesh Kumar K.V * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 41310e855ac8SAneesh Kumar K.V * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 4132f5ab0d1fSMingming Cao * 4133b483bb77SRandy Dunlap * return > 0, number of blocks already mapped/allocated 4134f5ab0d1fSMingming Cao * if create == 0 and these are pre-allocated blocks 4135f5ab0d1fSMingming Cao * buffer head is unmapped 4136f5ab0d1fSMingming Cao * otherwise blocks are mapped 4137f5ab0d1fSMingming Cao * 4138f5ab0d1fSMingming Cao * return = 0, if plain look up failed (blocks have not been allocated) 4139f5ab0d1fSMingming Cao * buffer head is unmapped 4140f5ab0d1fSMingming Cao * 4141f5ab0d1fSMingming Cao * return < 0, error case.
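 *
 * Editor's aside, not part of the original source: how a caller is expected
 * to act on the return contract spelled out above. The struct and helper
 * below are simplified stand-ins for the real ext4 types, written as plain,
 * compilable C.
 */

struct sketch_map { unsigned long long m_pblk; unsigned int m_len; int mapped; };

static int sketch_handle_map_result(int ret, struct sketch_map *map)
{
	if (ret < 0)
		return ret;		/* error: propagate to the caller */
	if (ret == 0) {
		map->mapped = 0;	/* hole: leave the buffer unmapped */
		return 0;
	}
	map->mapped = 1;		/* ret blocks are mapped at m_pblk */
	map->m_len = (unsigned int)ret;
	return 0;
}

/*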
4142c278bfecSAneesh Kumar K.V */ 4143e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4144e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 4145a86c6181SAlex Tomas { 4146a86c6181SAlex Tomas struct ext4_ext_path *path = NULL; 4147d7dce9e0Syangerkun struct ext4_extent newex, *ex, ex2; 41484d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 41498ad8d710SEric Whitney ext4_fsblk_t newblock = 0, pblk; 415034990461SEric Whitney int err = 0, depth, ret; 41514d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0; 415281fdbb4aSYongqiang Yang unsigned int allocated_clusters = 0; 4153c9de560dSAlex Tomas struct ext4_allocation_request ar; 41544d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset; 4155a86c6181SAlex Tomas 415670aa1554SRitesh Harjani ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len); 41570562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4158a86c6181SAlex Tomas 4159a86c6181SAlex Tomas /* find extent for this block */ 4160ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, NULL, 0); 4161a86c6181SAlex Tomas if (IS_ERR(path)) { 4162a86c6181SAlex Tomas err = PTR_ERR(path); 4163a86c6181SAlex Tomas path = NULL; 41648ad8d710SEric Whitney goto out; 4165a86c6181SAlex Tomas } 4166a86c6181SAlex Tomas 4167a86c6181SAlex Tomas depth = ext_depth(inode); 4168a86c6181SAlex Tomas 4169a86c6181SAlex Tomas /* 4170d0d856e8SRandy Dunlap * consistent leaf must not be empty; 4171d0d856e8SRandy Dunlap * this situation is possible, though, _during_ tree modification; 4172ed8a1a76STheodore Ts'o * this is why assert can't be put in ext4_find_extent() 4173a86c6181SAlex Tomas */ 4174273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4175273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address " 4176f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld", 4177f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth, 4178f70f362bSTheodore Ts'o path[depth].p_block); 41796a797d27SDarrick J. Wong err = -EFSCORRUPTED; 41808ad8d710SEric Whitney goto out; 4181034fb4c9SSurbhi Palande } 4182a86c6181SAlex Tomas 41837e028976SAvantika Mathur ex = path[depth].p_ext; 41847e028976SAvantika Mathur if (ex) { 4185725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4186bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4187a2df2a63SAmit Arora unsigned short ee_len; 4188471d4011SSuparna Bhattacharya 4189b8a86845SLukas Czerner 4190471d4011SSuparna Bhattacharya /* 4191556615dcSLukas Czerner * unwritten extents are treated as holes, except that 419256055d3aSAmit Arora * we split out initialized portions during a write. 
4193471d4011SSuparna Bhattacharya */ 4194a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex); 4195d8990240SAditya Kali 4196d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4197d8990240SAditya Kali 4198d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */ 4199e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) { 4200e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start; 4201d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 4202e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 420370aa1554SRitesh Harjani ext_debug(inode, "%u fit into %u:%d -> %llu\n", 420470aa1554SRitesh Harjani map->m_lblk, ee_block, ee_len, newblock); 420556055d3aSAmit Arora 4206b8a86845SLukas Czerner /* 4207b8a86845SLukas Czerner * If the extent is initialized check whether the 4208b8a86845SLukas Czerner * caller wants to convert it to unwritten. 4209b8a86845SLukas Czerner */ 4210556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(ex)) && 4211b8a86845SLukas Czerner (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4212f064a9d6SEric Whitney err = convert_initialized_extent(handle, 4213f064a9d6SEric Whitney inode, map, &path, &allocated); 42148ad8d710SEric Whitney goto out; 4215f064a9d6SEric Whitney } else if (!ext4_ext_is_unwritten(ex)) { 42168ad8d710SEric Whitney map->m_flags |= EXT4_MAP_MAPPED; 42178ad8d710SEric Whitney map->m_pblk = newblock; 42188ad8d710SEric Whitney if (allocated > map->m_len) 42198ad8d710SEric Whitney allocated = map->m_len; 42208ad8d710SEric Whitney map->m_len = allocated; 42218ad8d710SEric Whitney ext4_ext_show_leaf(inode, path); 4222a86c6181SAlex Tomas goto out; 4223f064a9d6SEric Whitney } 422469eb33dcSZheng Liu 4225556615dcSLukas Czerner ret = ext4_ext_handle_unwritten_extents( 4226dfe50809STheodore Ts'o handle, inode, map, &path, flags, 4227e861304bSAllison Henderson allocated, newblock); 4228ce37c429SEric Whitney if (ret < 0) 4229ce37c429SEric Whitney err = ret; 4230ce37c429SEric Whitney else 4231ce37c429SEric Whitney allocated = ret; 42328ad8d710SEric Whitney goto out; 423356055d3aSAmit Arora } 4234a86c6181SAlex Tomas } 4235a86c6181SAlex Tomas 4236a86c6181SAlex Tomas /* 4237d0d856e8SRandy Dunlap * requested block isn't allocated yet; 4238a86c6181SAlex Tomas * we couldn't try to create block if create flag is zero 4239a86c6181SAlex Tomas */ 4240c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4241f5411b76SZhang Yi ext4_lblk_t len; 4242140a5250SJan Kara 4243f5411b76SZhang Yi len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk); 4244facab4d9SJan Kara 4245facab4d9SJan Kara map->m_pblk = 0; 4246f5411b76SZhang Yi map->m_len = min_t(unsigned int, map->m_len, len); 42478ad8d710SEric Whitney goto out; 4248a86c6181SAlex Tomas } 42494d33b1efSTheodore Ts'o 4250a86c6181SAlex Tomas /* 4251c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation. 4252a86c6181SAlex Tomas */ 42534d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk); 4254d0abafacSEric Whitney cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 42554d33b1efSTheodore Ts'o 42564d33b1efSTheodore Ts'o /* 42574d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned 4258ed8a1a76STheodore Ts'o * by ext4_find_extent() implies a cluster we can use. 
42594d33b1efSTheodore Ts'o */ 42604d33b1efSTheodore Ts'o if (cluster_offset && ex && 4261d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 42624d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 42634d33b1efSTheodore Ts'o newblock = map->m_pblk; 42644d33b1efSTheodore Ts'o goto got_allocated_blocks; 42654d33b1efSTheodore Ts'o } 4266a86c6181SAlex Tomas 4267c9de560dSAlex Tomas /* find neighbour allocated blocks */ 4268e35fd660STheodore Ts'o ar.lleft = map->m_lblk; 4269c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4270c9de560dSAlex Tomas if (err) 42718ad8d710SEric Whitney goto out; 4272e35fd660STheodore Ts'o ar.lright = map->m_lblk; 42734d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4274d7dce9e0Syangerkun if (err < 0) 42758ad8d710SEric Whitney goto out; 427625d14f98SAmit Arora 42774d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a 42784d33b1efSTheodore Ts'o * cluster we can use. */ 4279d7dce9e0Syangerkun if ((sbi->s_cluster_ratio > 1) && err && 4280d7dce9e0Syangerkun get_implied_cluster_alloc(inode->i_sb, map, &ex2, path)) { 42814d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 42824d33b1efSTheodore Ts'o newblock = map->m_pblk; 42834d33b1efSTheodore Ts'o goto got_allocated_blocks; 42844d33b1efSTheodore Ts'o } 42854d33b1efSTheodore Ts'o 4286749269faSAmit Arora /* 4287749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in 4288749269faSAmit Arora * a single extent. For an initialized extent this limit is 4289556615dcSLukas Czerner * EXT_INIT_MAX_LEN and for an unwritten extent this limit is 4290556615dcSLukas Czerner * EXT_UNWRITTEN_MAX_LEN. 4291749269faSAmit Arora */ 4292e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN && 4293556615dcSLukas Czerner !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4294e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN; 4295556615dcSLukas Czerner else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && 4296556615dcSLukas Czerner (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4297556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN; 4298749269faSAmit Arora 4299e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4300e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len); 43014d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path); 430225d14f98SAmit Arora if (err) 4303b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 430425d14f98SAmit Arora else 4305e35fd660STheodore Ts'o allocated = map->m_len; 4306c9de560dSAlex Tomas 4307c9de560dSAlex Tomas /* allocate new block */ 4308c9de560dSAlex Tomas ar.inode = inode; 4309e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4310e35fd660STheodore Ts'o ar.logical = map->m_lblk; 43114d33b1efSTheodore Ts'o /* 43124d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster 43134d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a 43144d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the 43154d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is 43164d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc() 43174d33b1efSTheodore Ts'o * work correctly. 
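 *
 * Editor's aside, not part of the original source: a worked example of the
 * in-cluster offset alignment described above and performed just below,
 * using stand-in values (cluster ratio 16 blocks, request at logical block
 * 35), written as plain, compilable C.
 */

#include <assert.h>

int main(void)
{
	unsigned int cluster_ratio = 16;	/* blocks per bigalloc cluster */
	unsigned int lblk = 35, allocated = 10;

	/* mirrors EXT4_LBLK_COFF(): offset within the cluster */
	unsigned int offset = lblk & (cluster_ratio - 1);
	/* mirrors EXT4_NUM_B2C(): clusters covering offset + allocated */
	unsigned int clusters = (offset + allocated + cluster_ratio - 1) /
				cluster_ratio;

	assert(offset == 3);	/* goal and logical are both moved back by 3 */
	assert(clusters == 1);	/* 3 + 10 = 13 blocks fit in one cluster */
	return 0;
}

/*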
43184d33b1efSTheodore Ts'o */ 4319f5a44db5STheodore Ts'o offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 43204d33b1efSTheodore Ts'o ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 43214d33b1efSTheodore Ts'o ar.goal -= offset; 43224d33b1efSTheodore Ts'o ar.logical -= offset; 4323c9de560dSAlex Tomas if (S_ISREG(inode->i_mode)) 4324c9de560dSAlex Tomas ar.flags = EXT4_MB_HINT_DATA; 4325c9de560dSAlex Tomas else 4326c9de560dSAlex Tomas /* disable in-core preallocation for non-regular files */ 4327c9de560dSAlex Tomas ar.flags = 0; 4328556b27abSVivek Haldar if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4329556b27abSVivek Haldar ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4330e3cf5d5dSTheodore Ts'o if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4331e3cf5d5dSTheodore Ts'o ar.flags |= EXT4_MB_DELALLOC_RESERVED; 4332c5e298aeSTheodore Ts'o if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 4333c5e298aeSTheodore Ts'o ar.flags |= EXT4_MB_USE_RESERVED; 4334c9de560dSAlex Tomas newblock = ext4_mb_new_blocks(handle, &ar, &err); 4335a86c6181SAlex Tomas if (!newblock) 43368ad8d710SEric Whitney goto out; 43377b415bf6SAditya Kali allocated_clusters = ar.len; 43384d33b1efSTheodore Ts'o ar.len = EXT4_C2B(sbi, ar.len) - offset; 433970aa1554SRitesh Harjani ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n", 4340ec8c60beSRitesh Harjani ar.goal, newblock, ar.len, allocated); 43414d33b1efSTheodore Ts'o if (ar.len > allocated) 43424d33b1efSTheodore Ts'o ar.len = allocated; 4343a86c6181SAlex Tomas 43444d33b1efSTheodore Ts'o got_allocated_blocks: 4345a86c6181SAlex Tomas /* try to insert new extent into found leaf and return */ 43468ad8d710SEric Whitney pblk = newblock + offset; 43478ad8d710SEric Whitney ext4_ext_store_pblock(&newex, pblk); 4348c9de560dSAlex Tomas newex.ee_len = cpu_to_le16(ar.len); 4349556615dcSLukas Czerner /* Mark unwritten */ 4350556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 4351556615dcSLukas Czerner ext4_ext_mark_unwritten(&newex); 4352a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 43538d5d02e6SMingming Cao } 4354c8d46e41SJiaying Zhang 43554337ecd1SEric Whitney err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); 435634990461SEric Whitney if (err) { 435734990461SEric Whitney if (allocated_clusters) { 435834990461SEric Whitney int fb_flags = 0; 435982e54229SDmitry Monakhov 436034990461SEric Whitney /* 436134990461SEric Whitney * free data blocks we just allocated. 436234990461SEric Whitney * not a good idea to call discard here directly, 436334990461SEric Whitney * but otherwise we'd need to call it every free(). 436434990461SEric Whitney */ 436527bc446eSbrookxu ext4_discard_preallocations(inode, 0); 436634990461SEric Whitney if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 436734990461SEric Whitney fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; 4368c8e15130STheodore Ts'o ext4_free_blocks(handle, inode, NULL, newblock, 436934990461SEric Whitney EXT4_C2B(sbi, allocated_clusters), 437034990461SEric Whitney fb_flags); 437134990461SEric Whitney } 43728ad8d710SEric Whitney goto out; 4373315054f0SAlex Tomas } 4374a86c6181SAlex Tomas 4375b436b9beSJan Kara /* 4376b6bf9171SEric Whitney * Reduce the reserved cluster count to reflect successful deferred 4377b6bf9171SEric Whitney * allocation of delayed allocated clusters or direct allocation of 4378b6bf9171SEric Whitney * clusters discovered to be delayed allocated. Once allocated, a 4379b6bf9171SEric Whitney * cluster is not included in the reserved count. 
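 *
 * Editor's aside, not part of the original source: the block-range
 * arithmetic used just below when scanning for previously delayed
 * allocated clusters, with stand-in values (cluster bits 4, i.e. 16 blocks
 * per cluster), written as plain, compilable C.
 */

#include <assert.h>

int main(void)
{
	unsigned int cluster_bits = 4;
	unsigned int cluster_ratio = 1u << cluster_bits;
	unsigned int m_lblk = 35, allocated_clusters = 2;

	/* mirrors EXT4_LBLK_CMASK(): round down to a cluster boundary */
	unsigned int lblk = m_lblk & ~(cluster_ratio - 1);
	/* mirrors: len = allocated_clusters << s_cluster_bits */
	unsigned int len = allocated_clusters << cluster_bits;

	assert(lblk == 32);
	assert(len == 32);	/* 2 clusters = 32 blocks are examined */
	return 0;
}

/*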
43805f634d06SAneesh Kumar K.V */ 43812971148dSEric Whitney if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) { 43827b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 43837b415bf6SAditya Kali /* 4384b6bf9171SEric Whitney * When allocating delayed allocated clusters, simply 4385b6bf9171SEric Whitney * reduce the reserved cluster count and claim quota 4386232ec872SLukas Czerner */ 4387232ec872SLukas Czerner ext4_da_update_reserve_space(inode, allocated_clusters, 4388232ec872SLukas Czerner 1); 4389b6bf9171SEric Whitney } else { 4390b6bf9171SEric Whitney ext4_lblk_t lblk, len; 4391b6bf9171SEric Whitney unsigned int n; 4392b6bf9171SEric Whitney 4393b6bf9171SEric Whitney /* 4394b6bf9171SEric Whitney * When allocating non-delayed allocated clusters 4395b6bf9171SEric Whitney * (from fallocate, filemap, DIO, or clusters 4396b6bf9171SEric Whitney * allocated when delalloc has been disabled by 4397b6bf9171SEric Whitney * ext4_nonda_switch), reduce the reserved cluster 4398b6bf9171SEric Whitney * count by the number of allocated clusters that 4399b6bf9171SEric Whitney * have previously been delayed allocated. Quota 4400b6bf9171SEric Whitney * has been claimed by ext4_mb_new_blocks() above, 4401b6bf9171SEric Whitney * so release the quota reservations made for any 4402b6bf9171SEric Whitney * previously delayed allocated clusters. 4403b6bf9171SEric Whitney */ 4404b6bf9171SEric Whitney lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk); 4405b6bf9171SEric Whitney len = allocated_clusters << sbi->s_cluster_bits; 4406b6bf9171SEric Whitney n = ext4_es_delayed_clu(inode, lblk, len); 4407b6bf9171SEric Whitney if (n > 0) 4408b6bf9171SEric Whitney ext4_da_update_reserve_space(inode, (int) n, 0); 44097b415bf6SAditya Kali } 44107b415bf6SAditya Kali } 44115f634d06SAneesh Kumar K.V 44125f634d06SAneesh Kumar K.V /* 4413b436b9beSJan Kara * Cache the extent and update transaction to commit on fdatasync only 4414556615dcSLukas Czerner * when it is _not_ an unwritten extent. 4415b436b9beSJan Kara */ 4416556615dcSLukas Czerner if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) 4417b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 441869eb33dcSZheng Liu else 4419b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0); 44208ad8d710SEric Whitney 44218ad8d710SEric Whitney map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED); 44228ad8d710SEric Whitney map->m_pblk = pblk; 44238ad8d710SEric Whitney map->m_len = ar.len; 4424e35fd660STheodore Ts'o allocated = map->m_len; 4425a86c6181SAlex Tomas ext4_ext_show_leaf(inode, path); 44268ad8d710SEric Whitney out: 44277ff5fddaSYe Bin ext4_free_ext_path(path); 4428e861304bSAllison Henderson 442963b99968STheodore Ts'o trace_ext4_ext_map_blocks_exit(inode, flags, map, 443063b99968STheodore Ts'o err ? err : allocated); 44317877191cSLukas Czerner return err ? err : allocated; 4432a86c6181SAlex Tomas } 4433a86c6181SAlex Tomas 4434d0abb36dSTheodore Ts'o int ext4_ext_truncate(handle_t *handle, struct inode *inode) 4435a86c6181SAlex Tomas { 4436a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 4437725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 4438a86c6181SAlex Tomas int err = 0; 4439a86c6181SAlex Tomas 4440a86c6181SAlex Tomas /* 4441d0d856e8SRandy Dunlap * TODO: optimization is possible here. 4442d0d856e8SRandy Dunlap * Probably we need not scan at all, 4443d0d856e8SRandy Dunlap * because page truncation is enough. 
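 *
 * Editor's aside, not part of the original source: a worked example of the
 * last_block rounding performed just below in ext4_ext_truncate(), with
 * stand-in values (i_size = 10000 bytes, 4 KiB blocks), written as plain,
 * compilable C.
 */

#include <assert.h>

int main(void)
{
	unsigned long long i_size = 10000;
	unsigned int blocksize_bits = 12;
	unsigned long long blocksize = 1ULL << blocksize_bits;

	/* round the size up to whole blocks, then convert to a block number */
	unsigned long long last_block = (i_size + blocksize - 1) >>
					blocksize_bits;

	/* blocks 0..2 hold data; extents are removed from block 3 onwards */
	assert(last_block == 3);
	return 0;
}

/*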
4444a86c6181SAlex Tomas */ 4445a86c6181SAlex Tomas 4446a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */ 4447a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size; 4448d0abb36dSTheodore Ts'o err = ext4_mark_inode_dirty(handle, inode); 4449d0abb36dSTheodore Ts'o if (err) 4450d0abb36dSTheodore Ts'o return err; 4451a86c6181SAlex Tomas 4452a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1) 4453a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb); 4454ed5d285bSBaokun Li ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block); 4455ed5d285bSBaokun Li 445673c384c0STheodore Ts'o retry_remove_space: 445773c384c0STheodore Ts'o err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 445873c384c0STheodore Ts'o if (err == -ENOMEM) { 44594034247aSNeilBrown memalloc_retry_wait(GFP_ATOMIC); 446073c384c0STheodore Ts'o goto retry_remove_space; 446173c384c0STheodore Ts'o } 446273c384c0STheodore Ts'o return err; 4463a86c6181SAlex Tomas } 4464a86c6181SAlex Tomas 44650e8b6879SLukas Czerner static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4466c174e6d6SDmitry Monakhov ext4_lblk_t len, loff_t new_size, 446777a2e84dSTahsin Erdogan int flags) 4468a2df2a63SAmit Arora { 4469496ad9aaSAl Viro struct inode *inode = file_inode(file); 4470a2df2a63SAmit Arora handle_t *handle; 447164395d95STheodore Ts'o int ret = 0, ret2 = 0, ret3 = 0; 4472a2df2a63SAmit Arora int retries = 0; 44734134f5c8SLukas Czerner int depth = 0; 44742ed88685STheodore Ts'o struct ext4_map_blocks map; 44750e8b6879SLukas Czerner unsigned int credits; 4476c174e6d6SDmitry Monakhov loff_t epos; 4477a2df2a63SAmit Arora 4478c3fe493cSFabian Frederick BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); 44790e8b6879SLukas Czerner map.m_lblk = offset; 4480c174e6d6SDmitry Monakhov map.m_len = len; 44813c6fe770SGreg Harm /* 44823c6fe770SGreg Harm * Don't normalize the request if it can fit in one extent so 44833c6fe770SGreg Harm * that it doesn't get unnecessarily split into multiple 44843c6fe770SGreg Harm * extents. 44853c6fe770SGreg Harm */ 4486556615dcSLukas Czerner if (len <= EXT_UNWRITTEN_MAX_LEN) 44873c6fe770SGreg Harm flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 448860d4616fSDmitry Monakhov 44890e8b6879SLukas Czerner /* 44900e8b6879SLukas Czerner * credits to insert 1 extent into extent tree 44910e8b6879SLukas Czerner */ 44920e8b6879SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len); 44934134f5c8SLukas Czerner depth = ext_depth(inode); 44940e8b6879SLukas Czerner 4495a2df2a63SAmit Arora retry: 44963258386aSEric Whitney while (len) { 44974134f5c8SLukas Czerner /* 44984134f5c8SLukas Czerner * Recalculate credits when extent tree depth changes. 
44994134f5c8SLukas Czerner */ 4500011c88e3SDan Carpenter if (depth != ext_depth(inode)) { 45014134f5c8SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len); 45024134f5c8SLukas Czerner depth = ext_depth(inode); 45034134f5c8SLukas Czerner } 45044134f5c8SLukas Czerner 45059924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 45069924a92aSTheodore Ts'o credits); 4507a2df2a63SAmit Arora if (IS_ERR(handle)) { 4508a2df2a63SAmit Arora ret = PTR_ERR(handle); 4509a2df2a63SAmit Arora break; 4510a2df2a63SAmit Arora } 4511a4e5d88bSDmitry Monakhov ret = ext4_map_blocks(handle, inode, &map, flags); 4512221879c9SAneesh Kumar K.V if (ret <= 0) { 4513f282ac19SLukas Czerner ext4_debug("inode #%lu: block %u: len %u: " 4514b06acd38SLukas Czerner "ext4_ext_map_blocks returned %d", 4515b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 4516b06acd38SLukas Czerner map.m_len, ret); 4517a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode); 45183258386aSEric Whitney ext4_journal_stop(handle); 4519a2df2a63SAmit Arora break; 4520a2df2a63SAmit Arora } 45213258386aSEric Whitney /* 45223258386aSEric Whitney * allow a full retry cycle for any remaining allocations 45233258386aSEric Whitney */ 45243258386aSEric Whitney retries = 0; 4525c174e6d6SDmitry Monakhov map.m_lblk += ret; 4526c174e6d6SDmitry Monakhov map.m_len = len = len - ret; 4527c174e6d6SDmitry Monakhov epos = (loff_t)map.m_lblk << inode->i_blkbits; 45281bc33893SJeff Layton inode_set_ctime_current(inode); 4529c174e6d6SDmitry Monakhov if (new_size) { 4530c174e6d6SDmitry Monakhov if (epos > new_size) 4531c174e6d6SDmitry Monakhov epos = new_size; 4532c174e6d6SDmitry Monakhov if (ext4_update_inode_size(inode, epos) & 0x1) 45331bc33893SJeff Layton inode->i_mtime = inode_get_ctime(inode); 4534c174e6d6SDmitry Monakhov } 45354209ae12SHarshad Shirwadkar ret2 = ext4_mark_inode_dirty(handle, inode); 4536c894aa97SEryu Guan ext4_update_inode_fsync_trans(handle, inode, 1); 45374209ae12SHarshad Shirwadkar ret3 = ext4_journal_stop(handle); 45384209ae12SHarshad Shirwadkar ret2 = ret3 ? ret3 : ret2; 45394209ae12SHarshad Shirwadkar if (unlikely(ret2)) 4540a2df2a63SAmit Arora break; 4541a2df2a63SAmit Arora } 45423258386aSEric Whitney if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 4543a2df2a63SAmit Arora goto retry; 4544f282ac19SLukas Czerner 45450e8b6879SLukas Czerner return ret > 0 ? ret2 : ret; 45460e8b6879SLukas Czerner } 45470e8b6879SLukas Czerner 4548ad5cd4f4SDarrick J. Wong static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len); 454943f81677SEric Biggers 4550ad5cd4f4SDarrick J. 
Wong static int ext4_insert_range(struct file *file, loff_t offset, loff_t len); 455143f81677SEric Biggers 4552b8a86845SLukas Czerner static long ext4_zero_range(struct file *file, loff_t offset, 4553b8a86845SLukas Czerner loff_t len, int mode) 4554b8a86845SLukas Czerner { 4555b8a86845SLukas Czerner struct inode *inode = file_inode(file); 4556d4f5258eSJan Kara struct address_space *mapping = file->f_mapping; 4557b8a86845SLukas Czerner handle_t *handle = NULL; 4558b8a86845SLukas Czerner unsigned int max_blocks; 4559b8a86845SLukas Czerner loff_t new_size = 0; 4560b8a86845SLukas Czerner int ret = 0; 4561b8a86845SLukas Czerner int flags; 456269dc9536SDmitry Monakhov int credits; 4563c174e6d6SDmitry Monakhov int partial_begin, partial_end; 4564b8a86845SLukas Czerner loff_t start, end; 4565b8a86845SLukas Czerner ext4_lblk_t lblk; 4566b8a86845SLukas Czerner unsigned int blkbits = inode->i_blkbits; 4567b8a86845SLukas Czerner 4568b8a86845SLukas Czerner trace_ext4_zero_range(inode, offset, len, mode); 4569b8a86845SLukas Czerner 4570b8a86845SLukas Czerner /* 4571e4d7f2d3SKeyur Patel * Round up offset. This is not fallocate, we need to zero out 4572b8a86845SLukas Czerner * blocks, so convert interior block aligned part of the range to 4573b8a86845SLukas Czerner * unwritten and possibly manually zero out unaligned parts of the 4574d91ecb89SOjaswin Mujoo * range. Here, start and partial_begin are inclusive, end and 4575d91ecb89SOjaswin Mujoo * partial_end are exclusive (e.g. with 4K blocks, offset 1000 and len 10000: start = 4096, end = 8192, partial_begin = 1000, partial_end = 2808). 4576b8a86845SLukas Czerner */ 4577b8a86845SLukas Czerner start = round_up(offset, 1 << blkbits); 4578b8a86845SLukas Czerner end = round_down((offset + len), 1 << blkbits); 4579b8a86845SLukas Czerner 4580b8a86845SLukas Czerner if (start < offset || end > offset + len) 4581b8a86845SLukas Czerner return -EINVAL; 4582c174e6d6SDmitry Monakhov partial_begin = offset & ((1 << blkbits) - 1); 4583c174e6d6SDmitry Monakhov partial_end = (offset + len) & ((1 << blkbits) - 1); 4584b8a86845SLukas Czerner 4585b8a86845SLukas Czerner lblk = start >> blkbits; 4586b8a86845SLukas Czerner max_blocks = (end >> blkbits); 4587b8a86845SLukas Czerner if (max_blocks < lblk) 4588b8a86845SLukas Czerner max_blocks = 0; 4589b8a86845SLukas Czerner else 4590b8a86845SLukas Czerner max_blocks -= lblk; 4591b8a86845SLukas Czerner 45925955102cSAl Viro inode_lock(inode); 4593b8a86845SLukas Czerner 4594b8a86845SLukas Czerner /* 459580dd4978SChristophe JAILLET * Indirect files do not support unwritten extents 4596b8a86845SLukas Czerner */ 4597b8a86845SLukas Czerner if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4598b8a86845SLukas Czerner ret = -EOPNOTSUPP; 4599b8a86845SLukas Czerner goto out_mutex; 4600b8a86845SLukas Czerner } 4601b8a86845SLukas Czerner 4602b8a86845SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) && 46039b02e498SEric Biggers (offset + len > inode->i_size || 460451e3ae81STheodore Ts'o offset + len > EXT4_I(inode)->i_disksize)) { 4605b8a86845SLukas Czerner new_size = offset + len; 4606b8a86845SLukas Czerner ret = inode_newsize_ok(inode, new_size); 4607b8a86845SLukas Czerner if (ret) 4608b8a86845SLukas Czerner goto out_mutex; 4609b8a86845SLukas Czerner } 4610b8a86845SLukas Czerner 46110f2af21aSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 46120f2af21aSLukas Czerner 4613f340b3d9Shongnanli /* Wait for all existing dio workers, newcomers will block on i_rwsem */ 461417048e8aSJan Kara inode_dio_wait(inode); 461517048e8aSJan Kara 4616ad5cd4f4SDarrick J. Wong ret = file_modified(file); 4617ad5cd4f4SDarrick J. Wong if (ret) 4618ad5cd4f4SDarrick J.
Wong goto out_mutex; 4619ad5cd4f4SDarrick J. Wong 46200f2af21aSLukas Czerner /* Preallocate the range including the unaligned edges */ 46210f2af21aSLukas Czerner if (partial_begin || partial_end) { 46220f2af21aSLukas Czerner ret = ext4_alloc_file_blocks(file, 46230f2af21aSLukas Czerner round_down(offset, 1 << blkbits) >> blkbits, 46240f2af21aSLukas Czerner (round_up((offset + len), 1 << blkbits) - 46250f2af21aSLukas Czerner round_down(offset, 1 << blkbits)) >> blkbits, 462677a2e84dSTahsin Erdogan new_size, flags); 46270f2af21aSLukas Czerner if (ret) 46281d39834fSNikolay Borisov goto out_mutex; 46290f2af21aSLukas Czerner 46300f2af21aSLukas Czerner } 46310f2af21aSLukas Czerner 46320f2af21aSLukas Czerner /* Zero range excluding the unaligned edges */ 4633b8a86845SLukas Czerner if (max_blocks > 0) { 46340f2af21aSLukas Czerner flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | 46350f2af21aSLukas Czerner EXT4_EX_NOCACHE); 4636b8a86845SLukas Czerner 4637ea3d7209SJan Kara /* 4638ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have 4639ea3d7209SJan Kara * released from page cache. 4640ea3d7209SJan Kara */ 4641d4f5258eSJan Kara filemap_invalidate_lock(mapping); 4642430657b6SRoss Zwisler 4643430657b6SRoss Zwisler ret = ext4_break_layouts(inode); 4644430657b6SRoss Zwisler if (ret) { 4645d4f5258eSJan Kara filemap_invalidate_unlock(mapping); 4646430657b6SRoss Zwisler goto out_mutex; 4647430657b6SRoss Zwisler } 4648430657b6SRoss Zwisler 464901127848SJan Kara ret = ext4_update_disksize_before_punch(inode, offset, len); 465001127848SJan Kara if (ret) { 4651d4f5258eSJan Kara filemap_invalidate_unlock(mapping); 46521d39834fSNikolay Borisov goto out_mutex; 465301127848SJan Kara } 4654783ae448SJan Kara 4655783ae448SJan Kara /* 4656783ae448SJan Kara * For journalled data we need to write (and checkpoint) pages 4657783ae448SJan Kara * before discarding page cache to avoid inconsistent data on 4658783ae448SJan Kara * disk in case of crash before zeroing trans is committed.
4659783ae448SJan Kara */ 4660783ae448SJan Kara if (ext4_should_journal_data(inode)) { 4661d91ecb89SOjaswin Mujoo ret = filemap_write_and_wait_range(mapping, start, 4662d91ecb89SOjaswin Mujoo end - 1); 4663783ae448SJan Kara if (ret) { 4664783ae448SJan Kara filemap_invalidate_unlock(mapping); 4665783ae448SJan Kara goto out_mutex; 4666783ae448SJan Kara } 4667783ae448SJan Kara } 4668783ae448SJan Kara 4669ea3d7209SJan Kara /* Now release the pages and zero block aligned part of pages */ 4670ea3d7209SJan Kara truncate_pagecache_range(inode, start, end - 1); 46711bc33893SJeff Layton inode->i_mtime = inode_set_ctime_current(inode); 4672ea3d7209SJan Kara 4673c174e6d6SDmitry Monakhov ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 467477a2e84dSTahsin Erdogan flags); 4675d4f5258eSJan Kara filemap_invalidate_unlock(mapping); 4676b8a86845SLukas Czerner if (ret) 46771d39834fSNikolay Borisov goto out_mutex; 4678b8a86845SLukas Czerner } 4679c174e6d6SDmitry Monakhov if (!partial_begin && !partial_end) 46801d39834fSNikolay Borisov goto out_mutex; 4681c174e6d6SDmitry Monakhov 468269dc9536SDmitry Monakhov /* 468369dc9536SDmitry Monakhov * In worst case we have to writeout two nonadjacent unwritten 468469dc9536SDmitry Monakhov * blocks and update the inode 468569dc9536SDmitry Monakhov */ 468669dc9536SDmitry Monakhov credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; 468769dc9536SDmitry Monakhov if (ext4_should_journal_data(inode)) 468869dc9536SDmitry Monakhov credits += 2; 468969dc9536SDmitry Monakhov handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); 4690b8a86845SLukas Czerner if (IS_ERR(handle)) { 4691b8a86845SLukas Czerner ret = PTR_ERR(handle); 4692b8a86845SLukas Czerner ext4_std_error(inode->i_sb, ret); 46931d39834fSNikolay Borisov goto out_mutex; 4694b8a86845SLukas Czerner } 4695b8a86845SLukas Czerner 46961bc33893SJeff Layton inode->i_mtime = inode_set_ctime_current(inode); 46974337ecd1SEric Whitney if (new_size) 46984631dbf6SDmitry Monakhov ext4_update_inode_size(inode, new_size); 46994209ae12SHarshad Shirwadkar ret = ext4_mark_inode_dirty(handle, inode); 47004209ae12SHarshad Shirwadkar if (unlikely(ret)) 47014209ae12SHarshad Shirwadkar goto out_handle; 4702b8a86845SLukas Czerner /* Zero out partial block at the edges of the range */ 4703b8a86845SLukas Czerner ret = ext4_zero_partial_blocks(handle, inode, offset, len); 470467a7d5f5SJan Kara if (ret >= 0) 470567a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 4706b8a86845SLukas Czerner 4707b8a86845SLukas Czerner if (file->f_flags & O_SYNC) 4708b8a86845SLukas Czerner ext4_handle_sync(handle); 4709b8a86845SLukas Czerner 47104209ae12SHarshad Shirwadkar out_handle: 4711b8a86845SLukas Czerner ext4_journal_stop(handle); 4712b8a86845SLukas Czerner out_mutex: 47135955102cSAl Viro inode_unlock(inode); 4714b8a86845SLukas Czerner return ret; 4715b8a86845SLukas Czerner } 4716b8a86845SLukas Czerner 47170e8b6879SLukas Czerner /* 47180e8b6879SLukas Czerner * preallocate space for a file. This implements ext4's fallocate file 47190e8b6879SLukas Czerner * operation, which gets called from sys_fallocate system call. 47200e8b6879SLukas Czerner * For block-mapped files, posix_fallocate should fall back to the method 47210e8b6879SLukas Czerner * of writing zeroes to the required new blocks (the same behavior which is 47220e8b6879SLukas Czerner * expected for file systems which do not support fallocate() system call). 
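 *
 * A minimal user-space sketch of how this entry point is reached (our
 * illustration, not upstream text; the file name is hypothetical):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("/mnt/ext4/file", O_RDWR | O_CREAT, 0644);
 *	fallocate(fd, 0, 0, 1 << 20);			// extends i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);	// keeps i_size
 *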
47230e8b6879SLukas Czerner */ 47240e8b6879SLukas Czerner long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 47250e8b6879SLukas Czerner { 47260e8b6879SLukas Czerner struct inode *inode = file_inode(file); 47270e8b6879SLukas Czerner loff_t new_size = 0; 47280e8b6879SLukas Czerner unsigned int max_blocks; 47290e8b6879SLukas Czerner int ret = 0; 47300e8b6879SLukas Czerner int flags; 47310e8b6879SLukas Czerner ext4_lblk_t lblk; 47320e8b6879SLukas Czerner unsigned int blkbits = inode->i_blkbits; 47330e8b6879SLukas Czerner 47342058f83aSMichael Halcrow /* 47352058f83aSMichael Halcrow * Encrypted inodes can't handle collapse range or insert 47362058f83aSMichael Halcrow * range since we would need to re-encrypt blocks with a 47372058f83aSMichael Halcrow * different IV or XTS tweak (which are based on the logical 47382058f83aSMichael Halcrow * block number). 47392058f83aSMichael Halcrow */ 4740592ddec7SChandan Rajendra if (IS_ENCRYPTED(inode) && 4741457b1e35SEric Biggers (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) 47422058f83aSMichael Halcrow return -EOPNOTSUPP; 47432058f83aSMichael Halcrow 47440e8b6879SLukas Czerner /* Return error if mode is not supported */ 47450e8b6879SLukas Czerner if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4746331573feSNamjae Jeon FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | 4747331573feSNamjae Jeon FALLOC_FL_INSERT_RANGE)) 47480e8b6879SLukas Czerner return -EOPNOTSUPP; 47490e8b6879SLukas Czerner 4750f87c7a4bSBaokun Li inode_lock(inode); 4751f87c7a4bSBaokun Li ret = ext4_convert_inline_data(inode); 4752f87c7a4bSBaokun Li inode_unlock(inode); 4753f87c7a4bSBaokun Li if (ret) 4754f87c7a4bSBaokun Li goto exit; 4755f87c7a4bSBaokun Li 4756aa75f4d3SHarshad Shirwadkar if (mode & FALLOC_FL_PUNCH_HOLE) { 4757ad5cd4f4SDarrick J. Wong ret = ext4_punch_hole(file, offset, len); 4758aa75f4d3SHarshad Shirwadkar goto exit; 4759aa75f4d3SHarshad Shirwadkar } 47600e8b6879SLukas Czerner 4761aa75f4d3SHarshad Shirwadkar if (mode & FALLOC_FL_COLLAPSE_RANGE) { 4762ad5cd4f4SDarrick J. Wong ret = ext4_collapse_range(file, offset, len); 4763aa75f4d3SHarshad Shirwadkar goto exit; 4764aa75f4d3SHarshad Shirwadkar } 476540c406c7STheodore Ts'o 4766aa75f4d3SHarshad Shirwadkar if (mode & FALLOC_FL_INSERT_RANGE) { 4767ad5cd4f4SDarrick J. 
Wong ret = ext4_insert_range(file, offset, len); 4768aa75f4d3SHarshad Shirwadkar goto exit; 4769aa75f4d3SHarshad Shirwadkar } 4770331573feSNamjae Jeon 4771aa75f4d3SHarshad Shirwadkar if (mode & FALLOC_FL_ZERO_RANGE) { 4772aa75f4d3SHarshad Shirwadkar ret = ext4_zero_range(file, offset, len, mode); 4773aa75f4d3SHarshad Shirwadkar goto exit; 4774aa75f4d3SHarshad Shirwadkar } 47750e8b6879SLukas Czerner trace_ext4_fallocate_enter(inode, offset, len, mode); 47760e8b6879SLukas Czerner lblk = offset >> blkbits; 47770e8b6879SLukas Czerner 4778518eaa63SFabian Frederick max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4779556615dcSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 47800e8b6879SLukas Czerner 47815955102cSAl Viro inode_lock(inode); 47820e8b6879SLukas Czerner 4783280227a7SDavide Italiano /* 4784280227a7SDavide Italiano * We only support preallocation for extent-based files 4785280227a7SDavide Italiano */ 4786280227a7SDavide Italiano if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4787280227a7SDavide Italiano ret = -EOPNOTSUPP; 4788280227a7SDavide Italiano goto out; 4789280227a7SDavide Italiano } 4790280227a7SDavide Italiano 47910e8b6879SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) && 47929b02e498SEric Biggers (offset + len > inode->i_size || 479351e3ae81STheodore Ts'o offset + len > EXT4_I(inode)->i_disksize)) { 47940e8b6879SLukas Czerner new_size = offset + len; 47950e8b6879SLukas Czerner ret = inode_newsize_ok(inode, new_size); 47960e8b6879SLukas Czerner if (ret) 47970e8b6879SLukas Czerner goto out; 47980e8b6879SLukas Czerner } 47990e8b6879SLukas Czerner 4800f340b3d9Shongnanli /* Wait for all existing dio workers, newcomers will block on i_rwsem */ 480117048e8aSJan Kara inode_dio_wait(inode); 480217048e8aSJan Kara 4803ad5cd4f4SDarrick J. Wong ret = file_modified(file); 4804ad5cd4f4SDarrick J. Wong if (ret) 4805ad5cd4f4SDarrick J. Wong goto out; 4806ad5cd4f4SDarrick J. Wong 480777a2e84dSTahsin Erdogan ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); 48080e8b6879SLukas Czerner if (ret) 48090e8b6879SLukas Czerner goto out; 48100e8b6879SLukas Czerner 4811c174e6d6SDmitry Monakhov if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { 4812aa75f4d3SHarshad Shirwadkar ret = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal, 4813c174e6d6SDmitry Monakhov EXT4_I(inode)->i_sync_tid); 4814f282ac19SLukas Czerner } 4815f282ac19SLukas Czerner out: 48165955102cSAl Viro inode_unlock(inode); 48170e8b6879SLukas Czerner trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 4818aa75f4d3SHarshad Shirwadkar exit: 48190e8b6879SLukas Czerner return ret; 4820a2df2a63SAmit Arora } 48216873fa0dSEric Sandeen 48226873fa0dSEric Sandeen /* 48230031462bSMingming Cao * This function converts a range of blocks to written extents. 48240031462bSMingming Cao * The caller of this function will pass the start offset and the size. 48250031462bSMingming Cao * All unwritten extents within this range will be converted to 48260031462bSMingming Cao * written extents. 48270031462bSMingming Cao * 48280031462bSMingming Cao * This function is called from the direct IO end io callback 48290031462bSMingming Cao * function, to convert the fallocated extents after IO is completed. 4830109f5565SMingming * Returns 0 on success.
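 *
 * Worked example (our addition): with 4K blocks, offset 6144 and len
 * 8192 give map.m_lblk = 1 and, assuming the usual round-to-block
 * behaviour of EXT4_MAX_BLOCKS(), max_blocks = 3, i.e. logical blocks
 * 1..3 are converted.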
48310031462bSMingming Cao */ 48326b523df4SJan Kara int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 48336b523df4SJan Kara loff_t offset, ssize_t len) 48340031462bSMingming Cao { 48350031462bSMingming Cao unsigned int max_blocks; 48364209ae12SHarshad Shirwadkar int ret = 0, ret2 = 0, ret3 = 0; 48372ed88685STheodore Ts'o struct ext4_map_blocks map; 4838a00713eaSRitesh Harjani unsigned int blkbits = inode->i_blkbits; 4839a00713eaSRitesh Harjani unsigned int credits = 0; 48400031462bSMingming Cao 48412ed88685STheodore Ts'o map.m_lblk = offset >> blkbits; 4842518eaa63SFabian Frederick max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4843518eaa63SFabian Frederick 4844a00713eaSRitesh Harjani if (!handle) { 48456b523df4SJan Kara /* 48460031462bSMingming Cao * credits to insert 1 extent into extent tree 48470031462bSMingming Cao */ 48480031462bSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks); 48496b523df4SJan Kara } 48500031462bSMingming Cao while (ret >= 0 && ret < max_blocks) { 48512ed88685STheodore Ts'o map.m_lblk += ret; 48522ed88685STheodore Ts'o map.m_len = (max_blocks -= ret); 48536b523df4SJan Kara if (credits) { 48546b523df4SJan Kara handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 48556b523df4SJan Kara credits); 48560031462bSMingming Cao if (IS_ERR(handle)) { 48570031462bSMingming Cao ret = PTR_ERR(handle); 48580031462bSMingming Cao break; 48590031462bSMingming Cao } 48606b523df4SJan Kara } 48612ed88685STheodore Ts'o ret = ext4_map_blocks(handle, inode, &map, 4862c7064ef1SJiaying Zhang EXT4_GET_BLOCKS_IO_CONVERT_EXT); 4863b06acd38SLukas Czerner if (ret <= 0) 4864b06acd38SLukas Czerner ext4_warning(inode->i_sb, 4865b06acd38SLukas Czerner "inode #%lu: block %u: len %u: " 486692b97816STheodore Ts'o "ext4_ext_map_blocks returned %d", 4867b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 486892b97816STheodore Ts'o map.m_len, ret); 48694209ae12SHarshad Shirwadkar ret2 = ext4_mark_inode_dirty(handle, inode); 48704209ae12SHarshad Shirwadkar if (credits) { 48714209ae12SHarshad Shirwadkar ret3 = ext4_journal_stop(handle); 48724209ae12SHarshad Shirwadkar if (unlikely(ret3)) 48734209ae12SHarshad Shirwadkar ret2 = ret3; 48744209ae12SHarshad Shirwadkar } 48754209ae12SHarshad Shirwadkar 48760031462bSMingming Cao if (ret <= 0 || ret2) 48770031462bSMingming Cao break; 48780031462bSMingming Cao } 48790031462bSMingming Cao return ret > 0 ? ret2 : ret; 48800031462bSMingming Cao } 48816d9c85ebSYongqiang Yang 4882a00713eaSRitesh Harjani int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end) 4883a00713eaSRitesh Harjani { 4884d1e18b88SRitesh Harjani int ret = 0, err = 0; 4885c8cc8816SRitesh Harjani struct ext4_io_end_vec *io_end_vec; 4886a00713eaSRitesh Harjani 4887a00713eaSRitesh Harjani /* 4888a00713eaSRitesh Harjani * This is somewhat ugly but the idea is clear: When transaction is 4889a00713eaSRitesh Harjani * reserved, everything goes into it. Otherwise we rather start several 4890a00713eaSRitesh Harjani * smaller transactions for conversion of each extent separately. 
4891a00713eaSRitesh Harjani */ 4892a00713eaSRitesh Harjani if (handle) { 4893a00713eaSRitesh Harjani handle = ext4_journal_start_reserved(handle, 4894a00713eaSRitesh Harjani EXT4_HT_EXT_CONVERT); 4895a00713eaSRitesh Harjani if (IS_ERR(handle)) 4896a00713eaSRitesh Harjani return PTR_ERR(handle); 4897a00713eaSRitesh Harjani } 4898a00713eaSRitesh Harjani 4899c8cc8816SRitesh Harjani list_for_each_entry(io_end_vec, &io_end->list_vec, list) { 4900a00713eaSRitesh Harjani ret = ext4_convert_unwritten_extents(handle, io_end->inode, 4901c8cc8816SRitesh Harjani io_end_vec->offset, 4902c8cc8816SRitesh Harjani io_end_vec->size); 4903c8cc8816SRitesh Harjani if (ret) 4904c8cc8816SRitesh Harjani break; 4905c8cc8816SRitesh Harjani } 4906c8cc8816SRitesh Harjani 4907a00713eaSRitesh Harjani if (handle) 4908a00713eaSRitesh Harjani err = ext4_journal_stop(handle); 4909a00713eaSRitesh Harjani 4910a00713eaSRitesh Harjani return ret < 0 ? ret : err; 4911a00713eaSRitesh Harjani } 4912a00713eaSRitesh Harjani 4913d3b6f23fSRitesh Harjani static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap) 49146873fa0dSEric Sandeen { 49156873fa0dSEric Sandeen __u64 physical = 0; 4916d3b6f23fSRitesh Harjani __u64 length = 0; 49176873fa0dSEric Sandeen int blockbits = inode->i_sb->s_blocksize_bits; 49186873fa0dSEric Sandeen int error = 0; 4919d3b6f23fSRitesh Harjani u16 iomap_type; 49206873fa0dSEric Sandeen 49216873fa0dSEric Sandeen /* in-inode? */ 492219f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 49236873fa0dSEric Sandeen struct ext4_iloc iloc; 49246873fa0dSEric Sandeen int offset; /* offset of xattr in inode */ 49256873fa0dSEric Sandeen 49266873fa0dSEric Sandeen error = ext4_get_inode_loc(inode, &iloc); 49276873fa0dSEric Sandeen if (error) 49286873fa0dSEric Sandeen return error; 4929a60697f4SJan Kara physical = (__u64)iloc.bh->b_blocknr << blockbits; 49306873fa0dSEric Sandeen offset = EXT4_GOOD_OLD_INODE_SIZE + 49316873fa0dSEric Sandeen EXT4_I(inode)->i_extra_isize; 49326873fa0dSEric Sandeen physical += offset; 49336873fa0dSEric Sandeen length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 4934fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 4935d3b6f23fSRitesh Harjani iomap_type = IOMAP_INLINE; 4936d3b6f23fSRitesh Harjani } else if (EXT4_I(inode)->i_file_acl) { /* external block */ 4937a60697f4SJan Kara physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 49386873fa0dSEric Sandeen length = inode->i_sb->s_blocksize; 4939d3b6f23fSRitesh Harjani iomap_type = IOMAP_MAPPED; 4940d3b6f23fSRitesh Harjani } else { 4941d3b6f23fSRitesh Harjani /* no in-inode or external block for xattr, so return -ENOENT */ 4942d3b6f23fSRitesh Harjani error = -ENOENT; 4943d3b6f23fSRitesh Harjani goto out; 49446873fa0dSEric Sandeen } 49456873fa0dSEric Sandeen 4946d3b6f23fSRitesh Harjani iomap->addr = physical; 4947d3b6f23fSRitesh Harjani iomap->offset = 0; 4948d3b6f23fSRitesh Harjani iomap->length = length; 4949d3b6f23fSRitesh Harjani iomap->type = iomap_type; 4950d3b6f23fSRitesh Harjani iomap->flags = 0; 4951d3b6f23fSRitesh Harjani out: 4952d3b6f23fSRitesh Harjani return error; 49536873fa0dSEric Sandeen } 49546873fa0dSEric Sandeen 4955d3b6f23fSRitesh Harjani static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset, 4956d3b6f23fSRitesh Harjani loff_t length, unsigned flags, 4957d3b6f23fSRitesh Harjani struct iomap *iomap, struct iomap *srcmap) 4958d3b6f23fSRitesh Harjani { 4959d3b6f23fSRitesh Harjani int error; 4960d3b6f23fSRitesh Harjani 4961d3b6f23fSRitesh Harjani error = 
ext4_iomap_xattr_fiemap(inode, iomap); 4962d3b6f23fSRitesh Harjani if (error == 0 && (offset >= iomap->length)) 4963d3b6f23fSRitesh Harjani error = -ENOENT; 4964d3b6f23fSRitesh Harjani return error; 4965d3b6f23fSRitesh Harjani } 4966d3b6f23fSRitesh Harjani 4967d3b6f23fSRitesh Harjani static const struct iomap_ops ext4_iomap_xattr_ops = { 4968d3b6f23fSRitesh Harjani .iomap_begin = ext4_iomap_xattr_begin, 4969d3b6f23fSRitesh Harjani }; 4970d3b6f23fSRitesh Harjani 4971328e24aeSChristoph Hellwig static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len) 4972328e24aeSChristoph Hellwig { 4973328e24aeSChristoph Hellwig u64 maxbytes; 4974328e24aeSChristoph Hellwig 4975328e24aeSChristoph Hellwig if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 4976328e24aeSChristoph Hellwig maxbytes = inode->i_sb->s_maxbytes; 4977328e24aeSChristoph Hellwig else 4978328e24aeSChristoph Hellwig maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; 4979328e24aeSChristoph Hellwig 4980328e24aeSChristoph Hellwig if (*len == 0) 4981328e24aeSChristoph Hellwig return -EINVAL; 4982328e24aeSChristoph Hellwig if (start > maxbytes) 4983328e24aeSChristoph Hellwig return -EFBIG; 4984328e24aeSChristoph Hellwig 4985328e24aeSChristoph Hellwig /* 4986328e24aeSChristoph Hellwig * Shrink request scope to what the fs can actually handle. 4987328e24aeSChristoph Hellwig */ 4988328e24aeSChristoph Hellwig if (*len > maxbytes || (maxbytes - *len) < start) 4989328e24aeSChristoph Hellwig *len = maxbytes - start; 4990328e24aeSChristoph Hellwig return 0; 4991328e24aeSChristoph Hellwig } 4992328e24aeSChristoph Hellwig 499303a5ed24SChristoph Hellwig int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 499403a5ed24SChristoph Hellwig u64 start, u64 len) 49956873fa0dSEric Sandeen { 49966873fa0dSEric Sandeen int error = 0; 49976873fa0dSEric Sandeen 49987869a4a6STheodore Ts'o if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 49997869a4a6STheodore Ts'o error = ext4_ext_precache(inode); 50007869a4a6STheodore Ts'o if (error) 50017869a4a6STheodore Ts'o return error; 5002bb5835edSTheodore Ts'o fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 50037869a4a6STheodore Ts'o } 50047869a4a6STheodore Ts'o 5005328e24aeSChristoph Hellwig /* 5006328e24aeSChristoph Hellwig * For bitmap files the maximum size limit could be smaller than 5007328e24aeSChristoph Hellwig * s_maxbytes, so check len here manually instead of just relying on the 5008328e24aeSChristoph Hellwig * generic check. 
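 * (Worked example, ours: with maxbytes = 2^32, a request of start = 2^31
 * and len = 2^32 passes the -EFBIG check but is clamped by
 * ext4_fiemap_check_ranges() above to len = maxbytes - start = 2^31.)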
5009328e24aeSChristoph Hellwig */ 5010328e24aeSChristoph Hellwig error = ext4_fiemap_check_ranges(inode, start, &len); 5011328e24aeSChristoph Hellwig if (error) 5012328e24aeSChristoph Hellwig return error; 5013328e24aeSChristoph Hellwig 50146873fa0dSEric Sandeen if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 5015d3b6f23fSRitesh Harjani fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; 501603a5ed24SChristoph Hellwig return iomap_fiemap(inode, fieinfo, start, len, 5017d3b6f23fSRitesh Harjani &ext4_iomap_xattr_ops); 501803a5ed24SChristoph Hellwig } 501903a5ed24SChristoph Hellwig 502003a5ed24SChristoph Hellwig return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops); 502103a5ed24SChristoph Hellwig } 502203a5ed24SChristoph Hellwig 502303a5ed24SChristoph Hellwig int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, 502403a5ed24SChristoph Hellwig __u64 start, __u64 len) 502503a5ed24SChristoph Hellwig { 502603a5ed24SChristoph Hellwig ext4_lblk_t start_blk, len_blks; 5027aca92ff6SLeonard Michlmayr __u64 last_blk; 502803a5ed24SChristoph Hellwig int error = 0; 502903a5ed24SChristoph Hellwig 503003a5ed24SChristoph Hellwig if (ext4_has_inline_data(inode)) { 503103a5ed24SChristoph Hellwig int has_inline; 503203a5ed24SChristoph Hellwig 503303a5ed24SChristoph Hellwig down_read(&EXT4_I(inode)->xattr_sem); 503403a5ed24SChristoph Hellwig has_inline = ext4_has_inline_data(inode); 503503a5ed24SChristoph Hellwig up_read(&EXT4_I(inode)->xattr_sem); 503603a5ed24SChristoph Hellwig if (has_inline) 503703a5ed24SChristoph Hellwig return 0; 503803a5ed24SChristoph Hellwig } 503903a5ed24SChristoph Hellwig 504003a5ed24SChristoph Hellwig if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 504103a5ed24SChristoph Hellwig error = ext4_ext_precache(inode); 504203a5ed24SChristoph Hellwig if (error) 504303a5ed24SChristoph Hellwig return error; 504403a5ed24SChristoph Hellwig fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 504503a5ed24SChristoph Hellwig } 504603a5ed24SChristoph Hellwig 504745dd052eSChristoph Hellwig error = fiemap_prep(inode, fieinfo, start, &len, 0); 5048cddf8a2cSChristoph Hellwig if (error) 5049cddf8a2cSChristoph Hellwig return error; 505003a5ed24SChristoph Hellwig 505103a5ed24SChristoph Hellwig error = ext4_fiemap_check_ranges(inode, start, &len); 505203a5ed24SChristoph Hellwig if (error) 505303a5ed24SChristoph Hellwig return error; 5054aca92ff6SLeonard Michlmayr 50556873fa0dSEric Sandeen start_blk = start >> inode->i_sb->s_blocksize_bits; 5056aca92ff6SLeonard Michlmayr last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 5057f17722f9SLukas Czerner if (last_blk >= EXT_MAX_BLOCKS) 5058f17722f9SLukas Czerner last_blk = EXT_MAX_BLOCKS-1; 5059aca92ff6SLeonard Michlmayr len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 50606873fa0dSEric Sandeen 50616873fa0dSEric Sandeen /* 506291dd8c11SLukas Czerner * Walk the extent tree gathering extent information 506391dd8c11SLukas Czerner * and pushing extents back to the user. 50646873fa0dSEric Sandeen */ 506503a5ed24SChristoph Hellwig return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo); 50666873fa0dSEric Sandeen } 5067bb5835edSTheodore Ts'o 50689eb79482SNamjae Jeon /* 50699eb79482SNamjae Jeon * ext4_ext_shift_path_extents: 50709eb79482SNamjae Jeon * Shift the extents of a path structure lying between path[depth].p_ext 5071331573feSNamjae Jeon * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells 5072331573feSNamjae Jeon * if it is right shift or left shift operation. 
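 * For SHIFT_LEFT every ee_block in the leaf is decreased by @shift and
 * neighbouring extents are merged where possible; for SHIFT_RIGHT it is
 * increased. The parent index entries (ei_block) are updated to match.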
50739eb79482SNamjae Jeon */ 50749eb79482SNamjae Jeon static int 50759eb79482SNamjae Jeon ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 50769eb79482SNamjae Jeon struct inode *inode, handle_t *handle, 5077331573feSNamjae Jeon enum SHIFT_DIRECTION SHIFT) 50789eb79482SNamjae Jeon { 50799eb79482SNamjae Jeon int depth, err = 0; 50809eb79482SNamjae Jeon struct ext4_extent *ex_start, *ex_last; 50814756ee18Szhengbin bool update = false; 50824268496eSyangerkun int credits, restart_credits; 50839eb79482SNamjae Jeon depth = path->p_depth; 50849eb79482SNamjae Jeon 50859eb79482SNamjae Jeon while (depth >= 0) { 50869eb79482SNamjae Jeon if (depth == path->p_depth) { 50879eb79482SNamjae Jeon ex_start = path[depth].p_ext; 50889eb79482SNamjae Jeon if (!ex_start) 50896a797d27SDarrick J. Wong return -EFSCORRUPTED; 50909eb79482SNamjae Jeon 50919eb79482SNamjae Jeon ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 50924268496eSyangerkun /* leaf + sb + inode */ 50934268496eSyangerkun credits = 3; 50944268496eSyangerkun if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) { 50954268496eSyangerkun update = true; 50964268496eSyangerkun /* extent tree + sb + inode */ 50974268496eSyangerkun credits = depth + 2; 50984268496eSyangerkun } 50999eb79482SNamjae Jeon 51004268496eSyangerkun restart_credits = ext4_writepage_trans_blocks(inode); 51014268496eSyangerkun err = ext4_datasem_ensure_credits(handle, inode, credits, 51024268496eSyangerkun restart_credits, 0); 51031811bc40Syangerkun if (err) { 51041811bc40Syangerkun if (err > 0) 51051811bc40Syangerkun err = -EAGAIN; 51069eb79482SNamjae Jeon goto out; 51071811bc40Syangerkun } 51089eb79482SNamjae Jeon 51094268496eSyangerkun err = ext4_ext_get_access(handle, inode, path + depth); 51104268496eSyangerkun if (err) 51114268496eSyangerkun goto out; 51129eb79482SNamjae Jeon 51139eb79482SNamjae Jeon while (ex_start <= ex_last) { 5114331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) { 5115331573feSNamjae Jeon le32_add_cpu(&ex_start->ee_block, 5116331573feSNamjae Jeon -shift); 51176dd834efSLukas Czerner /* Try to merge to the left. 
*/ 51186dd834efSLukas Czerner if ((ex_start > 5119331573feSNamjae Jeon EXT_FIRST_EXTENT(path[depth].p_hdr)) 5120331573feSNamjae Jeon && 51216dd834efSLukas Czerner ext4_ext_try_to_merge_right(inode, 51229eb79482SNamjae Jeon path, ex_start - 1)) 51239eb79482SNamjae Jeon ex_last--; 51246dd834efSLukas Czerner else 51259eb79482SNamjae Jeon ex_start++; 5126331573feSNamjae Jeon } else { 5127331573feSNamjae Jeon le32_add_cpu(&ex_last->ee_block, shift); 5128331573feSNamjae Jeon ext4_ext_try_to_merge_right(inode, path, 5129331573feSNamjae Jeon ex_last); 5130331573feSNamjae Jeon ex_last--; 5131331573feSNamjae Jeon } 51329eb79482SNamjae Jeon } 51339eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth); 51349eb79482SNamjae Jeon if (err) 51359eb79482SNamjae Jeon goto out; 51369eb79482SNamjae Jeon 51379eb79482SNamjae Jeon if (--depth < 0 || !update) 51389eb79482SNamjae Jeon break; 51399eb79482SNamjae Jeon } 51409eb79482SNamjae Jeon 51419eb79482SNamjae Jeon /* Update index too */ 51424268496eSyangerkun err = ext4_ext_get_access(handle, inode, path + depth); 51439eb79482SNamjae Jeon if (err) 51449eb79482SNamjae Jeon goto out; 51459eb79482SNamjae Jeon 5146331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) 5147847c6c42SZheng Liu le32_add_cpu(&path[depth].p_idx->ei_block, -shift); 5148331573feSNamjae Jeon else 5149331573feSNamjae Jeon le32_add_cpu(&path[depth].p_idx->ei_block, shift); 51509eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth); 51519eb79482SNamjae Jeon if (err) 51529eb79482SNamjae Jeon goto out; 51539eb79482SNamjae Jeon 51549eb79482SNamjae Jeon /* we are done if current index is not a starting index */ 51559eb79482SNamjae Jeon if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) 51569eb79482SNamjae Jeon break; 51579eb79482SNamjae Jeon 51589eb79482SNamjae Jeon depth--; 51599eb79482SNamjae Jeon } 51609eb79482SNamjae Jeon 51619eb79482SNamjae Jeon out: 51629eb79482SNamjae Jeon return err; 51639eb79482SNamjae Jeon } 51649eb79482SNamjae Jeon 51659eb79482SNamjae Jeon /* 51669eb79482SNamjae Jeon * ext4_ext_shift_extents: 5167331573feSNamjae Jeon * All the extents which lie in the range from @start to the last allocated 5168331573feSNamjae Jeon * block for the @inode are shifted either towards the left or the right 5169331573feSNamjae Jeon * (depending upon @SHIFT) by @shift blocks. 51709eb79482SNamjae Jeon * On success, 0 is returned, error otherwise.
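 * Within this file, ext4_collapse_range() shifts with SHIFT_LEFT and
 * ext4_insert_range() with SHIFT_RIGHT.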
51719eb79482SNamjae Jeon */ 51729eb79482SNamjae Jeon static int 51739eb79482SNamjae Jeon ext4_ext_shift_extents(struct inode *inode, handle_t *handle, 5174331573feSNamjae Jeon ext4_lblk_t start, ext4_lblk_t shift, 5175331573feSNamjae Jeon enum SHIFT_DIRECTION SHIFT) 51769eb79482SNamjae Jeon { 51779eb79482SNamjae Jeon struct ext4_ext_path *path; 51789eb79482SNamjae Jeon int ret = 0, depth; 51799eb79482SNamjae Jeon struct ext4_extent *extent; 5180331573feSNamjae Jeon ext4_lblk_t stop, *iterator, ex_start, ex_end; 51811811bc40Syangerkun ext4_lblk_t tmp = EXT_MAX_BLOCKS; 51829eb79482SNamjae Jeon 51839eb79482SNamjae Jeon /* Let path point to the last extent */ 518403e916faSRoman Pen path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 518503e916faSRoman Pen EXT4_EX_NOCACHE); 51869eb79482SNamjae Jeon if (IS_ERR(path)) 51879eb79482SNamjae Jeon return PTR_ERR(path); 51889eb79482SNamjae Jeon 51899eb79482SNamjae Jeon depth = path->p_depth; 51909eb79482SNamjae Jeon extent = path[depth].p_ext; 5191ee4bd0d9STheodore Ts'o if (!extent) 5192ee4bd0d9STheodore Ts'o goto out; 51939eb79482SNamjae Jeon 51942a9b8cbaSRoman Pen stop = le32_to_cpu(extent->ee_block); 51959eb79482SNamjae Jeon 51969eb79482SNamjae Jeon /* 5197349fa7d6SEric Biggers * For left shifts, make sure the hole on the left is big enough to 5198349fa7d6SEric Biggers * accommodate the shift. For right shifts, make sure the last extent 5199349fa7d6SEric Biggers * won't be shifted beyond EXT_MAX_BLOCKS. 52009eb79482SNamjae Jeon */ 5201331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) { 520203e916faSRoman Pen path = ext4_find_extent(inode, start - 1, &path, 520303e916faSRoman Pen EXT4_EX_NOCACHE); 52048dc79ec4SDmitry Monakhov if (IS_ERR(path)) 52058dc79ec4SDmitry Monakhov return PTR_ERR(path); 52069eb79482SNamjae Jeon depth = path->p_depth; 52079eb79482SNamjae Jeon extent = path[depth].p_ext; 52088dc79ec4SDmitry Monakhov if (extent) { 5209847c6c42SZheng Liu ex_start = le32_to_cpu(extent->ee_block); 5210847c6c42SZheng Liu ex_end = le32_to_cpu(extent->ee_block) + 5211847c6c42SZheng Liu ext4_ext_get_actual_len(extent); 52128dc79ec4SDmitry Monakhov } else { 52138dc79ec4SDmitry Monakhov ex_start = 0; 52148dc79ec4SDmitry Monakhov ex_end = 0; 52158dc79ec4SDmitry Monakhov } 52169eb79482SNamjae Jeon 52179eb79482SNamjae Jeon if ((start == ex_start && shift > ex_start) || 5218331573feSNamjae Jeon (shift > start - ex_end)) { 5219349fa7d6SEric Biggers ret = -EINVAL; 5220349fa7d6SEric Biggers goto out; 5221349fa7d6SEric Biggers } 5222349fa7d6SEric Biggers } else { 5223349fa7d6SEric Biggers if (shift > EXT_MAX_BLOCKS - 5224349fa7d6SEric Biggers (stop + ext4_ext_get_actual_len(extent))) { 5225349fa7d6SEric Biggers ret = -EINVAL; 5226349fa7d6SEric Biggers goto out; 5227331573feSNamjae Jeon } 5228331573feSNamjae Jeon } 5229331573feSNamjae Jeon 5230331573feSNamjae Jeon /* 5231331573feSNamjae Jeon * In case of left shift, iterator points to start and it is increased 5232331573feSNamjae Jeon * till we reach stop. In case of right shift, iterator points to stop 5233331573feSNamjae Jeon * and it is decreased till we reach start. 
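 * The resume point is cached in tmp, so when the shift helper returns
 * -EAGAIN after a transaction restart the loop below continues from
 * where the previous pass stopped.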
5234331573feSNamjae Jeon */ 52351811bc40Syangerkun again: 5236f6b1a1cfSBaokun Li ret = 0; 5237331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) 5238331573feSNamjae Jeon iterator = &start; 5239331573feSNamjae Jeon else 5240331573feSNamjae Jeon iterator = &stop; 52419eb79482SNamjae Jeon 52421811bc40Syangerkun if (tmp != EXT_MAX_BLOCKS) 52431811bc40Syangerkun *iterator = tmp; 52441811bc40Syangerkun 52452a9b8cbaSRoman Pen /* 52462a9b8cbaSRoman Pen * It's safe to start updating extents. Start and stop are unsigned, so 52472a9b8cbaSRoman Pen * in the case of a right shift, once the extent with block 0 is reached, 52482a9b8cbaSRoman Pen * iterator becomes NULL to indicate the end of the loop. 52492a9b8cbaSRoman Pen */ 52502a9b8cbaSRoman Pen while (iterator && start <= stop) { 525103e916faSRoman Pen path = ext4_find_extent(inode, *iterator, &path, 525203e916faSRoman Pen EXT4_EX_NOCACHE); 52539eb79482SNamjae Jeon if (IS_ERR(path)) 52549eb79482SNamjae Jeon return PTR_ERR(path); 52559eb79482SNamjae Jeon depth = path->p_depth; 52569eb79482SNamjae Jeon extent = path[depth].p_ext; 5257a18ed359SDmitry Monakhov if (!extent) { 5258a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 5259331573feSNamjae Jeon (unsigned long) *iterator); 52606a797d27SDarrick J. Wong return -EFSCORRUPTED; 5261a18ed359SDmitry Monakhov } 5262331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT && *iterator > 5263331573feSNamjae Jeon le32_to_cpu(extent->ee_block)) { 52649eb79482SNamjae Jeon /* Hole, move to the next extent */ 5265f8fb4f41SDmitry Monakhov if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { 5266f8fb4f41SDmitry Monakhov path[depth].p_ext++; 5267f8fb4f41SDmitry Monakhov } else { 5268331573feSNamjae Jeon *iterator = ext4_ext_next_allocated_block(path); 5269f8fb4f41SDmitry Monakhov continue; 52709eb79482SNamjae Jeon } 52719eb79482SNamjae Jeon } 5272331573feSNamjae Jeon 52731811bc40Syangerkun tmp = *iterator; 5274331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) { 5275331573feSNamjae Jeon extent = EXT_LAST_EXTENT(path[depth].p_hdr); 5276331573feSNamjae Jeon *iterator = le32_to_cpu(extent->ee_block) + 5277331573feSNamjae Jeon ext4_ext_get_actual_len(extent); 5278331573feSNamjae Jeon } else { 5279331573feSNamjae Jeon extent = EXT_FIRST_EXTENT(path[depth].p_hdr); 5280f6b1a1cfSBaokun Li if (le32_to_cpu(extent->ee_block) > start) 52812a9b8cbaSRoman Pen *iterator = le32_to_cpu(extent->ee_block) - 1; 5282f6b1a1cfSBaokun Li else if (le32_to_cpu(extent->ee_block) == start) 52832a9b8cbaSRoman Pen iterator = NULL; 5284f6b1a1cfSBaokun Li else { 5285f6b1a1cfSBaokun Li extent = EXT_LAST_EXTENT(path[depth].p_hdr); 5286f6b1a1cfSBaokun Li while (le32_to_cpu(extent->ee_block) >= start) 5287f6b1a1cfSBaokun Li extent--; 5288f6b1a1cfSBaokun Li 5289f6b1a1cfSBaokun Li if (extent == EXT_LAST_EXTENT(path[depth].p_hdr)) 5290f6b1a1cfSBaokun Li break; 5291f6b1a1cfSBaokun Li 5292331573feSNamjae Jeon extent++; 5293f6b1a1cfSBaokun Li iterator = NULL; 5294f6b1a1cfSBaokun Li } 5295331573feSNamjae Jeon path[depth].p_ext = extent; 5296331573feSNamjae Jeon } 52979eb79482SNamjae Jeon ret = ext4_ext_shift_path_extents(path, shift, inode, 5298331573feSNamjae Jeon handle, SHIFT); 52991811bc40Syangerkun /* iterator can be NULL which means we should break */ 53001811bc40Syangerkun if (ret == -EAGAIN) 53011811bc40Syangerkun goto again; 53029eb79482SNamjae Jeon if (ret) 53039eb79482SNamjae Jeon break; 53049eb79482SNamjae Jeon } 5305ee4bd0d9STheodore Ts'o out: 53067ff5fddaSYe Bin ext4_free_ext_path(path); 53079eb79482SNamjae Jeon return ret; 53089eb79482SNamjae Jeon }
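
/*
 * Illustrative sketch (our addition, never built): how a collapse maps
 * onto ext4_ext_shift_extents(). It mirrors the arithmetic used by
 * ext4_collapse_range() below; the helper name is hypothetical.
 */
#if 0
static int example_collapse_shift(handle_t *handle, struct inode *inode,
				  loff_t offset, loff_t len)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
	ext4_lblk_t punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* everything from punch_stop onwards moves left by the hole size */
	return ext4_ext_shift_extents(inode, handle, punch_stop,
				      punch_stop - punch_start, SHIFT_LEFT);
}
#endif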
53099eb79482SNamjae Jeon 53109eb79482SNamjae Jeon /* 53119eb79482SNamjae Jeon * ext4_collapse_range: 53129eb79482SNamjae Jeon * This implements fallocate's collapse range functionality for ext4. 53139eb79482SNamjae Jeon * Returns 0 on success, non-zero on error. 53149eb79482SNamjae Jeon */ 5315ad5cd4f4SDarrick J. Wong static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len) 53169eb79482SNamjae Jeon { 5317ad5cd4f4SDarrick J. Wong struct inode *inode = file_inode(file); 53189eb79482SNamjae Jeon struct super_block *sb = inode->i_sb; 5319d4f5258eSJan Kara struct address_space *mapping = inode->i_mapping; 53209eb79482SNamjae Jeon ext4_lblk_t punch_start, punch_stop; 53219eb79482SNamjae Jeon handle_t *handle; 53229eb79482SNamjae Jeon unsigned int credits; 5323a8680e0dSNamjae Jeon loff_t new_size, ioffset; 53249eb79482SNamjae Jeon int ret; 53259eb79482SNamjae Jeon 5326b9576fc3STheodore Ts'o /* 5327b9576fc3STheodore Ts'o * We need to test this early because xfstests assumes that a 5328b9576fc3STheodore Ts'o * collapse range of (0, 1) will return EOPNOTSUPP if the file 5329b9576fc3STheodore Ts'o * system does not support collapse range. 5330b9576fc3STheodore Ts'o */ 5331b9576fc3STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5332b9576fc3STheodore Ts'o return -EOPNOTSUPP; 5333b9576fc3STheodore Ts'o 53349b02e498SEric Biggers /* Collapse range works only on fs cluster size aligned regions. */ 53359b02e498SEric Biggers if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 53369eb79482SNamjae Jeon return -EINVAL; 53379eb79482SNamjae Jeon 53389eb79482SNamjae Jeon trace_ext4_collapse_range(inode, offset, len); 53399eb79482SNamjae Jeon 53409eb79482SNamjae Jeon punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 53419eb79482SNamjae Jeon punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 53429eb79482SNamjae Jeon 53435955102cSAl Viro inode_lock(inode); 534423fffa92SLukas Czerner /* 534523fffa92SLukas Czerner * There is no need to let the collapse range overlap with EOF; in that 534623fffa92SLukas Czerner * case it is effectively a truncate operation 534723fffa92SLukas Czerner */ 53489b02e498SEric Biggers if (offset + len >= inode->i_size) { 534923fffa92SLukas Czerner ret = -EINVAL; 535023fffa92SLukas Czerner goto out_mutex; 535123fffa92SLukas Czerner } 535223fffa92SLukas Czerner 53539eb79482SNamjae Jeon /* Currently just for extent based files */ 53549eb79482SNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 53559eb79482SNamjae Jeon ret = -EOPNOTSUPP; 53569eb79482SNamjae Jeon goto out_mutex; 53579eb79482SNamjae Jeon } 53589eb79482SNamjae Jeon 53599eb79482SNamjae Jeon /* Wait for existing dio to complete */ 53609eb79482SNamjae Jeon inode_dio_wait(inode); 53619eb79482SNamjae Jeon 5362ad5cd4f4SDarrick J. Wong ret = file_modified(file); 5363ad5cd4f4SDarrick J. Wong if (ret) 5364ad5cd4f4SDarrick J. Wong goto out_mutex; 5365ad5cd4f4SDarrick J. Wong 5366ea3d7209SJan Kara /* 5367ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have released from 5368ea3d7209SJan Kara * page cache. 5369ea3d7209SJan Kara */ 5370d4f5258eSJan Kara filemap_invalidate_lock(mapping); 5371430657b6SRoss Zwisler 5372430657b6SRoss Zwisler ret = ext4_break_layouts(inode); 5373430657b6SRoss Zwisler if (ret) 5374430657b6SRoss Zwisler goto out_mmap; 5375430657b6SRoss Zwisler 537632ebffd3SJan Kara /* 537732ebffd3SJan Kara * Need to round down offset to be aligned with page size boundary 537832ebffd3SJan Kara * for page size > block size.
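 * (e.g. 64K pages over a 4K block size: a block-aligned offset can sit
 * in the middle of a page, so writeback and truncation below start at
 * the rounded-down ioffset).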
537932ebffd3SJan Kara */ 538032ebffd3SJan Kara ioffset = round_down(offset, PAGE_SIZE); 538132ebffd3SJan Kara /* 538232ebffd3SJan Kara * Write tail of the last page before removed range since it will get 538332ebffd3SJan Kara * removed from the page cache below. 538432ebffd3SJan Kara */ 5385d4f5258eSJan Kara ret = filemap_write_and_wait_range(mapping, ioffset, offset); 538632ebffd3SJan Kara if (ret) 538732ebffd3SJan Kara goto out_mmap; 538832ebffd3SJan Kara /* 538932ebffd3SJan Kara * Write data that will be shifted to preserve them when discarding 539032ebffd3SJan Kara * page cache below. We are also protected from pages becoming dirty 5391d4f5258eSJan Kara * by i_rwsem and invalidate_lock. 539232ebffd3SJan Kara */ 5393d4f5258eSJan Kara ret = filemap_write_and_wait_range(mapping, offset + len, 539432ebffd3SJan Kara LLONG_MAX); 539532ebffd3SJan Kara if (ret) 539632ebffd3SJan Kara goto out_mmap; 5397ea3d7209SJan Kara truncate_pagecache(inode, ioffset); 5398ea3d7209SJan Kara 53999eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 54009eb79482SNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 54019eb79482SNamjae Jeon if (IS_ERR(handle)) { 54029eb79482SNamjae Jeon ret = PTR_ERR(handle); 5403ea3d7209SJan Kara goto out_mmap; 54049eb79482SNamjae Jeon } 5405e85c81baSXin Yin ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle); 54069eb79482SNamjae Jeon 54079eb79482SNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem); 540827bc446eSbrookxu ext4_discard_preallocations(inode, 0); 5409ed5d285bSBaokun Li ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start); 54109eb79482SNamjae Jeon 54119eb79482SNamjae Jeon ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); 54129eb79482SNamjae Jeon if (ret) { 54139eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54149eb79482SNamjae Jeon goto out_stop; 54159eb79482SNamjae Jeon } 541627bc446eSbrookxu ext4_discard_preallocations(inode, 0); 54179eb79482SNamjae Jeon 54189eb79482SNamjae Jeon ret = ext4_ext_shift_extents(inode, handle, punch_stop, 5419331573feSNamjae Jeon punch_stop - punch_start, SHIFT_LEFT); 54209eb79482SNamjae Jeon if (ret) { 54219eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54229eb79482SNamjae Jeon goto out_stop; 54239eb79482SNamjae Jeon } 54249eb79482SNamjae Jeon 54259b02e498SEric Biggers new_size = inode->i_size - len; 54269337d5d3SLukas Czerner i_size_write(inode, new_size); 54279eb79482SNamjae Jeon EXT4_I(inode)->i_disksize = new_size; 54289eb79482SNamjae Jeon 54299eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54309eb79482SNamjae Jeon if (IS_SYNC(inode)) 54319eb79482SNamjae Jeon ext4_handle_sync(handle); 54321bc33893SJeff Layton inode->i_mtime = inode_set_ctime_current(inode); 54334209ae12SHarshad Shirwadkar ret = ext4_mark_inode_dirty(handle, inode); 543467a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 54359eb79482SNamjae Jeon 54369eb79482SNamjae Jeon out_stop: 54379eb79482SNamjae Jeon ext4_journal_stop(handle); 5438ea3d7209SJan Kara out_mmap: 5439d4f5258eSJan Kara filemap_invalidate_unlock(mapping); 54409eb79482SNamjae Jeon out_mutex: 54415955102cSAl Viro inode_unlock(inode); 54429eb79482SNamjae Jeon return ret; 54439eb79482SNamjae Jeon } 5444fcf6b1b7SDmitry Monakhov 5445331573feSNamjae Jeon /* 5446331573feSNamjae Jeon * ext4_insert_range: 5447331573feSNamjae Jeon * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate. 
5448331573feSNamjae Jeon * The data blocks starting from @offset to the EOF are shifted by @len 5449331573feSNamjae Jeon * towards right to create a hole in the @inode. Inode size is increased 5450331573feSNamjae Jeon * by len bytes. 5451331573feSNamjae Jeon * Returns 0 on success, error otherwise. 5452331573feSNamjae Jeon */ 5453ad5cd4f4SDarrick J. Wong static int ext4_insert_range(struct file *file, loff_t offset, loff_t len) 5454331573feSNamjae Jeon { 5455ad5cd4f4SDarrick J. Wong struct inode *inode = file_inode(file); 5456331573feSNamjae Jeon struct super_block *sb = inode->i_sb; 5457d4f5258eSJan Kara struct address_space *mapping = inode->i_mapping; 5458331573feSNamjae Jeon handle_t *handle; 5459331573feSNamjae Jeon struct ext4_ext_path *path; 5460331573feSNamjae Jeon struct ext4_extent *extent; 5461331573feSNamjae Jeon ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; 5462331573feSNamjae Jeon unsigned int credits, ee_len; 5463331573feSNamjae Jeon int ret = 0, depth, split_flag = 0; 5464331573feSNamjae Jeon loff_t ioffset; 5465331573feSNamjae Jeon 5466331573feSNamjae Jeon /* 5467331573feSNamjae Jeon * We need to test this early because xfstests assumes that an 5468331573feSNamjae Jeon * insert range of (0, 1) will return EOPNOTSUPP if the file 5469331573feSNamjae Jeon * system does not support insert range. 5470331573feSNamjae Jeon */ 5471331573feSNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5472331573feSNamjae Jeon return -EOPNOTSUPP; 5473331573feSNamjae Jeon 54749b02e498SEric Biggers /* Insert range works only on fs cluster size aligned regions. */ 54759b02e498SEric Biggers if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 5476331573feSNamjae Jeon return -EINVAL; 5477331573feSNamjae Jeon 5478331573feSNamjae Jeon trace_ext4_insert_range(inode, offset, len); 5479331573feSNamjae Jeon 5480331573feSNamjae Jeon offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5481331573feSNamjae Jeon len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); 5482331573feSNamjae Jeon 54835955102cSAl Viro inode_lock(inode); 5484331573feSNamjae Jeon /* Currently just for extent based files */ 5485331573feSNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5486331573feSNamjae Jeon ret = -EOPNOTSUPP; 5487331573feSNamjae Jeon goto out_mutex; 5488331573feSNamjae Jeon } 5489331573feSNamjae Jeon 54909b02e498SEric Biggers /* Check whether the maximum file size would be exceeded */ 54919b02e498SEric Biggers if (len > inode->i_sb->s_maxbytes - inode->i_size) { 5492331573feSNamjae Jeon ret = -EFBIG; 5493331573feSNamjae Jeon goto out_mutex; 5494331573feSNamjae Jeon } 5495331573feSNamjae Jeon 54969b02e498SEric Biggers /* Offset must be less than i_size */ 54979b02e498SEric Biggers if (offset >= inode->i_size) { 5498331573feSNamjae Jeon ret = -EINVAL; 5499331573feSNamjae Jeon goto out_mutex; 5500331573feSNamjae Jeon } 5501331573feSNamjae Jeon 5502331573feSNamjae Jeon /* Wait for existing dio to complete */ 5503331573feSNamjae Jeon inode_dio_wait(inode); 5504331573feSNamjae Jeon 5505ad5cd4f4SDarrick J. Wong ret = file_modified(file); 5506ad5cd4f4SDarrick J. Wong if (ret) 5507ad5cd4f4SDarrick J. Wong goto out_mutex; 5508ad5cd4f4SDarrick J. Wong 5509ea3d7209SJan Kara /* 5510ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have released from 5511ea3d7209SJan Kara * page cache. 
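 * (Same protocol as in ext4_collapse_range() above: invalidate_lock
 * keeps new faults out while the shifted range is written back and
 * truncated from the page cache.)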
5512ea3d7209SJan Kara */ 5513d4f5258eSJan Kara filemap_invalidate_lock(mapping); 5514430657b6SRoss Zwisler 5515430657b6SRoss Zwisler ret = ext4_break_layouts(inode); 5516430657b6SRoss Zwisler if (ret) 5517430657b6SRoss Zwisler goto out_mmap; 5518430657b6SRoss Zwisler 551932ebffd3SJan Kara /* 552032ebffd3SJan Kara * Need to round down to align start offset to page size boundary 552132ebffd3SJan Kara * for page size > block size. 552232ebffd3SJan Kara */ 552332ebffd3SJan Kara ioffset = round_down(offset, PAGE_SIZE); 552432ebffd3SJan Kara /* Write out all dirty pages */ 552532ebffd3SJan Kara ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, 552632ebffd3SJan Kara LLONG_MAX); 552732ebffd3SJan Kara if (ret) 552832ebffd3SJan Kara goto out_mmap; 5529ea3d7209SJan Kara truncate_pagecache(inode, ioffset); 5530ea3d7209SJan Kara 5531331573feSNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 5532331573feSNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5533331573feSNamjae Jeon if (IS_ERR(handle)) { 5534331573feSNamjae Jeon ret = PTR_ERR(handle); 5535ea3d7209SJan Kara goto out_mmap; 5536331573feSNamjae Jeon } 5537e85c81baSXin Yin ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle); 5538331573feSNamjae Jeon 5539331573feSNamjae Jeon /* Expand file to avoid data loss if there is error while shifting */ 5540331573feSNamjae Jeon inode->i_size += len; 5541331573feSNamjae Jeon EXT4_I(inode)->i_disksize += len; 55421bc33893SJeff Layton inode->i_mtime = inode_set_ctime_current(inode); 5543331573feSNamjae Jeon ret = ext4_mark_inode_dirty(handle, inode); 5544331573feSNamjae Jeon if (ret) 5545331573feSNamjae Jeon goto out_stop; 5546331573feSNamjae Jeon 5547331573feSNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem); 554827bc446eSbrookxu ext4_discard_preallocations(inode, 0); 5549331573feSNamjae Jeon 5550331573feSNamjae Jeon path = ext4_find_extent(inode, offset_lblk, NULL, 0); 5551331573feSNamjae Jeon if (IS_ERR(path)) { 5552331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5553f4308d8eSBaokun Li ret = PTR_ERR(path); 5554331573feSNamjae Jeon goto out_stop; 5555331573feSNamjae Jeon } 5556331573feSNamjae Jeon 5557331573feSNamjae Jeon depth = ext_depth(inode); 5558331573feSNamjae Jeon extent = path[depth].p_ext; 5559331573feSNamjae Jeon if (extent) { 5560331573feSNamjae Jeon ee_start_lblk = le32_to_cpu(extent->ee_block); 5561331573feSNamjae Jeon ee_len = ext4_ext_get_actual_len(extent); 5562331573feSNamjae Jeon 5563331573feSNamjae Jeon /* 5564331573feSNamjae Jeon * If offset_lblk is not the starting block of extent, split 5565331573feSNamjae Jeon * the extent @offset_lblk 5566331573feSNamjae Jeon */ 5567331573feSNamjae Jeon if ((offset_lblk > ee_start_lblk) && 5568331573feSNamjae Jeon (offset_lblk < (ee_start_lblk + ee_len))) { 5569331573feSNamjae Jeon if (ext4_ext_is_unwritten(extent)) 5570331573feSNamjae Jeon split_flag = EXT4_EXT_MARK_UNWRIT1 | 5571331573feSNamjae Jeon EXT4_EXT_MARK_UNWRIT2; 5572331573feSNamjae Jeon ret = ext4_split_extent_at(handle, inode, &path, 5573331573feSNamjae Jeon offset_lblk, split_flag, 5574331573feSNamjae Jeon EXT4_EX_NOCACHE | 5575331573feSNamjae Jeon EXT4_GET_BLOCKS_PRE_IO | 5576331573feSNamjae Jeon EXT4_GET_BLOCKS_METADATA_NOFAIL); 5577331573feSNamjae Jeon } 5578331573feSNamjae Jeon 55797ff5fddaSYe Bin ext4_free_ext_path(path); 5580331573feSNamjae Jeon if (ret < 0) { 5581331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5582331573feSNamjae Jeon goto out_stop; 5583331573feSNamjae Jeon } 5584edf15aa1SFabian 
Frederick } else { 55857ff5fddaSYe Bin ext4_free_ext_path(path); 5586331573feSNamjae Jeon } 5587331573feSNamjae Jeon 5588ed5d285bSBaokun Li ext4_es_remove_extent(inode, offset_lblk, EXT_MAX_BLOCKS - offset_lblk); 5589331573feSNamjae Jeon 5590331573feSNamjae Jeon /* 5591331573feSNamjae Jeon * If offset_lblk lies in a hole at the start of the file, use 5592331573feSNamjae Jeon * ee_start_lblk to shift extents 5593331573feSNamjae Jeon */ 5594331573feSNamjae Jeon ret = ext4_ext_shift_extents(inode, handle, 559566267814SJiangshan Yi max(ee_start_lblk, offset_lblk), len_lblk, SHIFT_RIGHT); 5596331573feSNamjae Jeon 5597331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5598331573feSNamjae Jeon if (IS_SYNC(inode)) 5599331573feSNamjae Jeon ext4_handle_sync(handle); 560067a7d5f5SJan Kara if (ret >= 0) 560167a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 5602331573feSNamjae Jeon 5603331573feSNamjae Jeon out_stop: 5604331573feSNamjae Jeon ext4_journal_stop(handle); 5605ea3d7209SJan Kara out_mmap: 5606d4f5258eSJan Kara filemap_invalidate_unlock(mapping); 5607331573feSNamjae Jeon out_mutex: 56085955102cSAl Viro inode_unlock(inode); 5609331573feSNamjae Jeon return ret; 5610331573feSNamjae Jeon } 5611331573feSNamjae Jeon 5612fcf6b1b7SDmitry Monakhov /** 5613c60990b3STheodore Ts'o * ext4_swap_extents() - Swap extents between two inodes 5614c60990b3STheodore Ts'o * @handle: handle for this transaction 5615fcf6b1b7SDmitry Monakhov * @inode1: First inode 5616fcf6b1b7SDmitry Monakhov * @inode2: Second inode 5617fcf6b1b7SDmitry Monakhov * @lblk1: Start block for first inode 5618fcf6b1b7SDmitry Monakhov * @lblk2: Start block for second inode 5619fcf6b1b7SDmitry Monakhov * @count: Number of blocks to swap 5620dcae058aSzhenwei.pi * @unwritten: Mark second inode's extents as unwritten after swap 5621fcf6b1b7SDmitry Monakhov * @erp: Pointer to save error value 5622fcf6b1b7SDmitry Monakhov * 5623fcf6b1b7SDmitry Monakhov * This helper routine does exactly what it promises: "swap extents". All other 5624fcf6b1b7SDmitry Monakhov * stuff such as page-cache locking consistency, bh mapping consistency or 5625fcf6b1b7SDmitry Monakhov * extent's data copying must be performed by the caller.
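 * (In-tree callers include the online defragmentation code in
 * move_extent.c and the EXT4_IOC_SWAP_BOOT ioctl.)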
5626fcf6b1b7SDmitry Monakhov * Locking: 5627f340b3d9Shongnanli * i_rwsem is held for both inodes 5628fcf6b1b7SDmitry Monakhov * i_data_sem is locked for write for both inodes 5629fcf6b1b7SDmitry Monakhov * Assumptions: 5630fcf6b1b7SDmitry Monakhov * All pages from requested range are locked for both inodes 5631fcf6b1b7SDmitry Monakhov */ 5632fcf6b1b7SDmitry Monakhov int 5633fcf6b1b7SDmitry Monakhov ext4_swap_extents(handle_t *handle, struct inode *inode1, 5634fcf6b1b7SDmitry Monakhov struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2, 5635fcf6b1b7SDmitry Monakhov ext4_lblk_t count, int unwritten, int *erp) 5636fcf6b1b7SDmitry Monakhov { 5637fcf6b1b7SDmitry Monakhov struct ext4_ext_path *path1 = NULL; 5638fcf6b1b7SDmitry Monakhov struct ext4_ext_path *path2 = NULL; 5639fcf6b1b7SDmitry Monakhov int replaced_count = 0; 5640fcf6b1b7SDmitry Monakhov 5641fcf6b1b7SDmitry Monakhov BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem)); 5642fcf6b1b7SDmitry Monakhov BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem)); 56435955102cSAl Viro BUG_ON(!inode_is_locked(inode1)); 56445955102cSAl Viro BUG_ON(!inode_is_locked(inode2)); 5645fcf6b1b7SDmitry Monakhov 5646ed5d285bSBaokun Li ext4_es_remove_extent(inode1, lblk1, count); 5647ed5d285bSBaokun Li ext4_es_remove_extent(inode2, lblk2, count); 5648fcf6b1b7SDmitry Monakhov 5649fcf6b1b7SDmitry Monakhov while (count) { 5650fcf6b1b7SDmitry Monakhov struct ext4_extent *ex1, *ex2, tmp_ex; 5651fcf6b1b7SDmitry Monakhov ext4_lblk_t e1_blk, e2_blk; 5652fcf6b1b7SDmitry Monakhov int e1_len, e2_len, len; 5653fcf6b1b7SDmitry Monakhov int split = 0; 5654fcf6b1b7SDmitry Monakhov 5655ed8a1a76STheodore Ts'o path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE); 5656a1c83681SViresh Kumar if (IS_ERR(path1)) { 5657fcf6b1b7SDmitry Monakhov *erp = PTR_ERR(path1); 565819008f6dSTheodore Ts'o path1 = NULL; 565919008f6dSTheodore Ts'o finish: 566019008f6dSTheodore Ts'o count = 0; 566119008f6dSTheodore Ts'o goto repeat; 5662fcf6b1b7SDmitry Monakhov } 5663ed8a1a76STheodore Ts'o path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE); 5664a1c83681SViresh Kumar if (IS_ERR(path2)) { 5665fcf6b1b7SDmitry Monakhov *erp = PTR_ERR(path2); 566619008f6dSTheodore Ts'o path2 = NULL; 566719008f6dSTheodore Ts'o goto finish; 5668fcf6b1b7SDmitry Monakhov } 5669fcf6b1b7SDmitry Monakhov ex1 = path1[path1->p_depth].p_ext; 5670fcf6b1b7SDmitry Monakhov ex2 = path2[path2->p_depth].p_ext; 5671e4d7f2d3SKeyur Patel /* Do we have something to swap ? 
5672fcf6b1b7SDmitry Monakhov 		if (unlikely(!ex2 || !ex1))
567319008f6dSTheodore Ts'o 			goto finish;
5674fcf6b1b7SDmitry Monakhov 
5675fcf6b1b7SDmitry Monakhov 		e1_blk = le32_to_cpu(ex1->ee_block);
5676fcf6b1b7SDmitry Monakhov 		e2_blk = le32_to_cpu(ex2->ee_block);
5677fcf6b1b7SDmitry Monakhov 		e1_len = ext4_ext_get_actual_len(ex1);
5678fcf6b1b7SDmitry Monakhov 		e2_len = ext4_ext_get_actual_len(ex2);
5679fcf6b1b7SDmitry Monakhov 
5680fcf6b1b7SDmitry Monakhov 		/* Hole handling */
5681fcf6b1b7SDmitry Monakhov 		if (!in_range(lblk1, e1_blk, e1_len) ||
5682fcf6b1b7SDmitry Monakhov 		    !in_range(lblk2, e2_blk, e2_len)) {
5683fcf6b1b7SDmitry Monakhov 			ext4_lblk_t next1, next2;
5684fcf6b1b7SDmitry Monakhov 
5685fcf6b1b7SDmitry Monakhov 			/* if hole after extent, then go to next extent */
5686fcf6b1b7SDmitry Monakhov 			next1 = ext4_ext_next_allocated_block(path1);
5687fcf6b1b7SDmitry Monakhov 			next2 = ext4_ext_next_allocated_block(path2);
5688fcf6b1b7SDmitry Monakhov 			/* If hole before extent, then shift to that extent */
5689fcf6b1b7SDmitry Monakhov 			if (e1_blk > lblk1)
5690fcf6b1b7SDmitry Monakhov 				next1 = e1_blk;
5691fcf6b1b7SDmitry Monakhov 			if (e2_blk > lblk2)
56924e562013SManinder Singh 				next2 = e2_blk;
5693fcf6b1b7SDmitry Monakhov 			/* Do we have something to swap */
5694fcf6b1b7SDmitry Monakhov 			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
569519008f6dSTheodore Ts'o 				goto finish;
5696fcf6b1b7SDmitry Monakhov 			/* Move to the rightmost boundary */
5697fcf6b1b7SDmitry Monakhov 			len = next1 - lblk1;
5698fcf6b1b7SDmitry Monakhov 			if (len < next2 - lblk2)
5699fcf6b1b7SDmitry Monakhov 				len = next2 - lblk2;
5700fcf6b1b7SDmitry Monakhov 			if (len > count)
5701fcf6b1b7SDmitry Monakhov 				len = count;
5702fcf6b1b7SDmitry Monakhov 			lblk1 += len;
5703fcf6b1b7SDmitry Monakhov 			lblk2 += len;
5704fcf6b1b7SDmitry Monakhov 			count -= len;
5705fcf6b1b7SDmitry Monakhov 			goto repeat;
5706fcf6b1b7SDmitry Monakhov 		}
5707fcf6b1b7SDmitry Monakhov 
5708fcf6b1b7SDmitry Monakhov 		/* Prepare left boundary */
5709fcf6b1b7SDmitry Monakhov 		if (e1_blk < lblk1) {
5710fcf6b1b7SDmitry Monakhov 			split = 1;
5711fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode1,
5712dfe50809STheodore Ts'o 						&path1, lblk1, 0);
571319008f6dSTheodore Ts'o 			if (unlikely(*erp))
571419008f6dSTheodore Ts'o 				goto finish;
5715fcf6b1b7SDmitry Monakhov 		}
5716fcf6b1b7SDmitry Monakhov 		if (e2_blk < lblk2) {
5717fcf6b1b7SDmitry Monakhov 			split = 1;
5718fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode2,
5719dfe50809STheodore Ts'o 						&path2, lblk2, 0);
572019008f6dSTheodore Ts'o 			if (unlikely(*erp))
572119008f6dSTheodore Ts'o 				goto finish;
5722fcf6b1b7SDmitry Monakhov 		}
5723dfe50809STheodore Ts'o 		/* ext4_split_extent_at() may result in leaf extent split,
5724fcf6b1b7SDmitry Monakhov 		 * path must be revalidated.
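		 * (The split may allocate index blocks and shift entries
		 * within a leaf, so the cached ex1/ex2 pointers and path
		 * arrays may now be stale; "repeat" drops both paths and
		 * the loop looks them up again with ext4_find_extent().)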
		 */
5725fcf6b1b7SDmitry Monakhov 		if (split)
5726fcf6b1b7SDmitry Monakhov 			goto repeat;
5727fcf6b1b7SDmitry Monakhov 
5728fcf6b1b7SDmitry Monakhov 		/* Prepare right boundary */
5729fcf6b1b7SDmitry Monakhov 		len = count;
5730fcf6b1b7SDmitry Monakhov 		if (len > e1_blk + e1_len - lblk1)
5731fcf6b1b7SDmitry Monakhov 			len = e1_blk + e1_len - lblk1;
5732fcf6b1b7SDmitry Monakhov 		if (len > e2_blk + e2_len - lblk2)
5733fcf6b1b7SDmitry Monakhov 			len = e2_blk + e2_len - lblk2;
5734fcf6b1b7SDmitry Monakhov 
5735fcf6b1b7SDmitry Monakhov 		if (len != e1_len) {
5736fcf6b1b7SDmitry Monakhov 			split = 1;
5737fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode1,
5738dfe50809STheodore Ts'o 						&path1, lblk1 + len, 0);
573919008f6dSTheodore Ts'o 			if (unlikely(*erp))
574019008f6dSTheodore Ts'o 				goto finish;
5741fcf6b1b7SDmitry Monakhov 		}
5742fcf6b1b7SDmitry Monakhov 		if (len != e2_len) {
5743fcf6b1b7SDmitry Monakhov 			split = 1;
5744fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode2,
5745dfe50809STheodore Ts'o 						&path2, lblk2 + len, 0);
5746fcf6b1b7SDmitry Monakhov 			if (*erp)
574719008f6dSTheodore Ts'o 				goto finish;
5748fcf6b1b7SDmitry Monakhov 		}
5749dfe50809STheodore Ts'o 		/* ext4_split_extent_at() may result in leaf extent split,
5750fcf6b1b7SDmitry Monakhov 		 * path must be revalidated. */
5751fcf6b1b7SDmitry Monakhov 		if (split)
5752fcf6b1b7SDmitry Monakhov 			goto repeat;
5753fcf6b1b7SDmitry Monakhov 
5754fcf6b1b7SDmitry Monakhov 		BUG_ON(e2_len != e1_len);
5755fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
575619008f6dSTheodore Ts'o 		if (unlikely(*erp))
575719008f6dSTheodore Ts'o 			goto finish;
5758fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
575919008f6dSTheodore Ts'o 		if (unlikely(*erp))
576019008f6dSTheodore Ts'o 			goto finish;
5761fcf6b1b7SDmitry Monakhov 
5762fcf6b1b7SDmitry Monakhov 		/* Both extents are fully inside boundaries. Swap them now */
5763fcf6b1b7SDmitry Monakhov 		tmp_ex = *ex1;
5764fcf6b1b7SDmitry Monakhov 		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5765fcf6b1b7SDmitry Monakhov 		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5766fcf6b1b7SDmitry Monakhov 		ex1->ee_len = cpu_to_le16(e2_len);
5767fcf6b1b7SDmitry Monakhov 		ex2->ee_len = cpu_to_le16(e1_len);
5768fcf6b1b7SDmitry Monakhov 		if (unwritten)
5769fcf6b1b7SDmitry Monakhov 			ext4_ext_mark_unwritten(ex2);
5770fcf6b1b7SDmitry Monakhov 		if (ext4_ext_is_unwritten(&tmp_ex))
5771fcf6b1b7SDmitry Monakhov 			ext4_ext_mark_unwritten(ex1);
5772fcf6b1b7SDmitry Monakhov 
5773fcf6b1b7SDmitry Monakhov 		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5774fcf6b1b7SDmitry Monakhov 		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5775fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_dirty(handle, inode2, path2 +
5776fcf6b1b7SDmitry Monakhov 				      path2->p_depth);
577719008f6dSTheodore Ts'o 		if (unlikely(*erp))
577819008f6dSTheodore Ts'o 			goto finish;
5779fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_dirty(handle, inode1, path1 +
5780fcf6b1b7SDmitry Monakhov 				      path1->p_depth);
5781fcf6b1b7SDmitry Monakhov 		/*
5782fcf6b1b7SDmitry Monakhov 		 * Looks scary, eh? The second inode already points to the
5783fcf6b1b7SDmitry Monakhov 		 * new blocks, and it was successfully dirtied. But luckily
5784fcf6b1b7SDmitry Monakhov 		 * an error can happen here only due to a journal error, so
5785fcf6b1b7SDmitry Monakhov 		 * the full transaction will be aborted anyway.
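		 * (A failed ext4_ext_dirty() here means the handle has hit
		 * a journal error, which aborts the whole journal, so the
		 * half-swapped state can never be committed to disk.)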
5786fcf6b1b7SDmitry Monakhov 		 */
578719008f6dSTheodore Ts'o 		if (unlikely(*erp))
578819008f6dSTheodore Ts'o 			goto finish;
5789fcf6b1b7SDmitry Monakhov 		lblk1 += len;
5790fcf6b1b7SDmitry Monakhov 		lblk2 += len;
5791fcf6b1b7SDmitry Monakhov 		replaced_count += len;
5792fcf6b1b7SDmitry Monakhov 		count -= len;
5793fcf6b1b7SDmitry Monakhov 
5794fcf6b1b7SDmitry Monakhov 	repeat:
57957ff5fddaSYe Bin 		ext4_free_ext_path(path1);
57967ff5fddaSYe Bin 		ext4_free_ext_path(path2);
5797b7ea89adSTheodore Ts'o 		path1 = path2 = NULL;
5798fcf6b1b7SDmitry Monakhov 	}
5799fcf6b1b7SDmitry Monakhov 	return replaced_count;
5800fcf6b1b7SDmitry Monakhov }
58010b02f4c0SEric Whitney 
58020b02f4c0SEric Whitney /*
58030b02f4c0SEric Whitney  * ext4_clu_mapped - determine whether any block in a logical cluster has
58040b02f4c0SEric Whitney  *                   been mapped to a physical cluster
58050b02f4c0SEric Whitney  *
58060b02f4c0SEric Whitney  * @inode - file containing the logical cluster
58070b02f4c0SEric Whitney  * @lclu - logical cluster of interest
58080b02f4c0SEric Whitney  *
58090b02f4c0SEric Whitney  * Returns 1 if any block in the logical cluster is mapped, signifying
58100b02f4c0SEric Whitney  * that a physical cluster has been allocated for it. Otherwise,
58110b02f4c0SEric Whitney  * returns 0. Can also return negative error codes. Derived from
58120b02f4c0SEric Whitney  * ext4_ext_map_blocks().
58130b02f4c0SEric Whitney  */
58140b02f4c0SEric Whitney int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
58150b02f4c0SEric Whitney {
58160b02f4c0SEric Whitney 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
58170b02f4c0SEric Whitney 	struct ext4_ext_path *path;
58180b02f4c0SEric Whitney 	int depth, mapped = 0, err = 0;
58190b02f4c0SEric Whitney 	struct ext4_extent *extent;
58200b02f4c0SEric Whitney 	ext4_lblk_t first_lblk, first_lclu, last_lclu;
58210b02f4c0SEric Whitney 
5822131294c3SEric Whitney 	/*
5823131294c3SEric Whitney 	 * if data can be stored inline, the logical cluster isn't
5824131294c3SEric Whitney 	 * mapped - no physical clusters have been allocated, and the
5825131294c3SEric Whitney 	 * file has no extents
5826131294c3SEric Whitney 	 */
582783565959SYe Bin 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
582883565959SYe Bin 	    ext4_has_inline_data(inode))
5829131294c3SEric Whitney 		return 0;
5830131294c3SEric Whitney 
58310b02f4c0SEric Whitney 	/* search for the extent closest to the first block in the cluster */
58320b02f4c0SEric Whitney 	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
58330b02f4c0SEric Whitney 	if (IS_ERR(path)) {
58340b02f4c0SEric Whitney 		err = PTR_ERR(path);
58350b02f4c0SEric Whitney 		path = NULL;
58360b02f4c0SEric Whitney 		goto out;
58370b02f4c0SEric Whitney 	}
58380b02f4c0SEric Whitney 
58390b02f4c0SEric Whitney 	depth = ext_depth(inode);
58400b02f4c0SEric Whitney 
58410b02f4c0SEric Whitney 	/*
58420b02f4c0SEric Whitney 	 * A consistent leaf must not be empty. This situation is possible,
58430b02f4c0SEric Whitney 	 * though, _during_ tree modification, and it's why an assert can't
58440b02f4c0SEric Whitney 	 * be put in ext4_find_extent().
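	 * Finding an empty leaf here, outside of any tree modification,
	 * therefore indicates on-disk corruption; it is reported below via
	 * EXT4_ERROR_INODE() and -EFSCORRUPTED.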
58450b02f4c0SEric Whitney 	 */
58460b02f4c0SEric Whitney 	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
58470b02f4c0SEric Whitney 		EXT4_ERROR_INODE(inode,
58480b02f4c0SEric Whitney 		"bad extent address - lblock: %lu, depth: %d, pblock: %lld",
58490b02f4c0SEric Whitney 				 (unsigned long) EXT4_C2B(sbi, lclu),
58500b02f4c0SEric Whitney 				 depth, path[depth].p_block);
58510b02f4c0SEric Whitney 		err = -EFSCORRUPTED;
58520b02f4c0SEric Whitney 		goto out;
58530b02f4c0SEric Whitney 	}
58540b02f4c0SEric Whitney 
58550b02f4c0SEric Whitney 	extent = path[depth].p_ext;
58560b02f4c0SEric Whitney 
58570b02f4c0SEric Whitney 	/* can't be mapped if the extent tree is empty */
58580b02f4c0SEric Whitney 	if (extent == NULL)
58590b02f4c0SEric Whitney 		goto out;
58600b02f4c0SEric Whitney 
58610b02f4c0SEric Whitney 	first_lblk = le32_to_cpu(extent->ee_block);
58620b02f4c0SEric Whitney 	first_lclu = EXT4_B2C(sbi, first_lblk);
58630b02f4c0SEric Whitney 
58640b02f4c0SEric Whitney 	/*
58650b02f4c0SEric Whitney 	 * Three possible outcomes at this point - found extent spanning
58660b02f4c0SEric Whitney 	 * the target cluster, to the left of the target cluster, or to the
58670b02f4c0SEric Whitney 	 * right of the target cluster. The first two cases are handled here.
58680b02f4c0SEric Whitney 	 * The last case indicates the target cluster is not mapped.
58690b02f4c0SEric Whitney 	 */
58700b02f4c0SEric Whitney 	if (lclu >= first_lclu) {
58710b02f4c0SEric Whitney 		last_lclu = EXT4_B2C(sbi, first_lblk +
58720b02f4c0SEric Whitney 				     ext4_ext_get_actual_len(extent) - 1);
58730b02f4c0SEric Whitney 		if (lclu <= last_lclu) {
58740b02f4c0SEric Whitney 			mapped = 1;
58750b02f4c0SEric Whitney 		} else {
58760b02f4c0SEric Whitney 			first_lblk = ext4_ext_next_allocated_block(path);
58770b02f4c0SEric Whitney 			first_lclu = EXT4_B2C(sbi, first_lblk);
58780b02f4c0SEric Whitney 			if (lclu == first_lclu)
58790b02f4c0SEric Whitney 				mapped = 1;
58800b02f4c0SEric Whitney 		}
58810b02f4c0SEric Whitney 	}
58820b02f4c0SEric Whitney 
58830b02f4c0SEric Whitney out:
58847ff5fddaSYe Bin 	ext4_free_ext_path(path);
58850b02f4c0SEric Whitney 
58860b02f4c0SEric Whitney 	return err ? err : mapped;
58870b02f4c0SEric Whitney }
58888016e29fSHarshad Shirwadkar 
58898016e29fSHarshad Shirwadkar /*
58908016e29fSHarshad Shirwadkar  * Updates the physical block address and unwritten status of the extent
58918016e29fSHarshad Shirwadkar  * starting at logical block 'start' and spanning 'len' blocks. If no such
58928016e29fSHarshad Shirwadkar  * extent exists, this function splits the extent tree appropriately to
58938016e29fSHarshad Shirwadkar  * create one. This function is called in the fast commit
58948016e29fSHarshad Shirwadkar  * replay path. Returns 0 on success and an error on failure.
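 *
 * A minimal, hypothetical sketch of a replay-path call (illustrative
 * only; 'tl' and its fields are made-up names, not a confirmed call
 * site):
 *
 *	ret = ext4_ext_replay_update_ex(inode, tl.lblk, tl.len,
 *					tl.unwritten, tl.pblk);
 *	if (ret)
 *		return ret;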
58958016e29fSHarshad Shirwadkar  */
58968016e29fSHarshad Shirwadkar int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
58978016e29fSHarshad Shirwadkar 			      int len, int unwritten, ext4_fsblk_t pblk)
58988016e29fSHarshad Shirwadkar {
58991b558006SBaokun Li 	struct ext4_ext_path *path;
59008016e29fSHarshad Shirwadkar 	struct ext4_extent *ex;
59018016e29fSHarshad Shirwadkar 	int ret;
59028016e29fSHarshad Shirwadkar 
59038016e29fSHarshad Shirwadkar 	path = ext4_find_extent(inode, start, NULL, 0);
5904bc18546bSDan Carpenter 	if (IS_ERR(path))
5905bc18546bSDan Carpenter 		return PTR_ERR(path);
59068016e29fSHarshad Shirwadkar 	ex = path[path->p_depth].p_ext;
59078016e29fSHarshad Shirwadkar 	if (!ex) {
59088016e29fSHarshad Shirwadkar 		ret = -EFSCORRUPTED;
59098016e29fSHarshad Shirwadkar 		goto out;
59108016e29fSHarshad Shirwadkar 	}
59118016e29fSHarshad Shirwadkar 
59128016e29fSHarshad Shirwadkar 	if (le32_to_cpu(ex->ee_block) != start ||
59138016e29fSHarshad Shirwadkar 	    ext4_ext_get_actual_len(ex) != len) {
59148016e29fSHarshad Shirwadkar 		/* We need to split this extent to match our extent first */
59158016e29fSHarshad Shirwadkar 		down_write(&EXT4_I(inode)->i_data_sem);
59161b558006SBaokun Li 		ret = ext4_force_split_extent_at(NULL, inode, &path, start, 1);
59178016e29fSHarshad Shirwadkar 		up_write(&EXT4_I(inode)->i_data_sem);
59188016e29fSHarshad Shirwadkar 		if (ret)
59198016e29fSHarshad Shirwadkar 			goto out;
59201b558006SBaokun Li 
59211b558006SBaokun Li 		path = ext4_find_extent(inode, start, &path, 0);
59228016e29fSHarshad Shirwadkar 		if (IS_ERR(path))
59231b558006SBaokun Li 			return PTR_ERR(path);
59248016e29fSHarshad Shirwadkar 		ex = path[path->p_depth].p_ext;
59258016e29fSHarshad Shirwadkar 		WARN_ON(le32_to_cpu(ex->ee_block) != start);
59261b558006SBaokun Li 
59278016e29fSHarshad Shirwadkar 		if (ext4_ext_get_actual_len(ex) != len) {
59288016e29fSHarshad Shirwadkar 			down_write(&EXT4_I(inode)->i_data_sem);
59291b558006SBaokun Li 			ret = ext4_force_split_extent_at(NULL, inode, &path,
59308016e29fSHarshad Shirwadkar 							 start + len, 1);
59318016e29fSHarshad Shirwadkar 			up_write(&EXT4_I(inode)->i_data_sem);
59328016e29fSHarshad Shirwadkar 			if (ret)
59338016e29fSHarshad Shirwadkar 				goto out;
59341b558006SBaokun Li 
59351b558006SBaokun Li 			path = ext4_find_extent(inode, start, &path, 0);
59368016e29fSHarshad Shirwadkar 			if (IS_ERR(path))
59371b558006SBaokun Li 				return PTR_ERR(path);
59388016e29fSHarshad Shirwadkar 			ex = path[path->p_depth].p_ext;
59398016e29fSHarshad Shirwadkar 		}
59408016e29fSHarshad Shirwadkar 	}
59418016e29fSHarshad Shirwadkar 	if (unwritten)
59428016e29fSHarshad Shirwadkar 		ext4_ext_mark_unwritten(ex);
59438016e29fSHarshad Shirwadkar 	else
59448016e29fSHarshad Shirwadkar 		ext4_ext_mark_initialized(ex);
59458016e29fSHarshad Shirwadkar 	ext4_ext_store_pblock(ex, pblk);
59468016e29fSHarshad Shirwadkar 	down_write(&EXT4_I(inode)->i_data_sem);
59478016e29fSHarshad Shirwadkar 	ret = ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
59488016e29fSHarshad Shirwadkar 	up_write(&EXT4_I(inode)->i_data_sem);
59498016e29fSHarshad Shirwadkar out:
59507ff5fddaSYe Bin 	ext4_free_ext_path(path);
59518016e29fSHarshad Shirwadkar 	ext4_mark_inode_dirty(NULL, inode);
59528016e29fSHarshad Shirwadkar 	return ret;
59538016e29fSHarshad Shirwadkar }
59548016e29fSHarshad Shirwadkar 
59558016e29fSHarshad Shirwadkar /* Try to shrink the extent tree */
59568016e29fSHarshad Shirwadkar void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
59578016e29fSHarshad Shirwadkar {
59588016e29fSHarshad Shirwadkar 	struct ext4_ext_path *path = NULL;
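	/*
	 * Walk the tree from logical block 0 up to 'end', merging each
	 * extent with its neighbours where possible; 'cur' is forced to
	 * advance on every iteration, so a corrupted ee_block cannot make
	 * the loop spin forever.
	 */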
59598016e29fSHarshad Shirwadkar 	struct ext4_extent *ex;
59608016e29fSHarshad Shirwadkar 	ext4_lblk_t old_cur, cur = 0;
59618016e29fSHarshad Shirwadkar 
59628016e29fSHarshad Shirwadkar 	while (cur < end) {
59638016e29fSHarshad Shirwadkar 		path = ext4_find_extent(inode, cur, NULL, 0);
59648016e29fSHarshad Shirwadkar 		if (IS_ERR(path))
59658016e29fSHarshad Shirwadkar 			return;
59668016e29fSHarshad Shirwadkar 		ex = path[path->p_depth].p_ext;
59678016e29fSHarshad Shirwadkar 		if (!ex) {
59687ff5fddaSYe Bin 			ext4_free_ext_path(path);
59698016e29fSHarshad Shirwadkar 			ext4_mark_inode_dirty(NULL, inode);
59708016e29fSHarshad Shirwadkar 			return;
59718016e29fSHarshad Shirwadkar 		}
59728016e29fSHarshad Shirwadkar 		old_cur = cur;
59738016e29fSHarshad Shirwadkar 		cur = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
59748016e29fSHarshad Shirwadkar 		if (cur <= old_cur)
59758016e29fSHarshad Shirwadkar 			cur = old_cur + 1;
59768016e29fSHarshad Shirwadkar 		ext4_ext_try_to_merge(NULL, inode, path, ex);
59778016e29fSHarshad Shirwadkar 		down_write(&EXT4_I(inode)->i_data_sem);
59788016e29fSHarshad Shirwadkar 		ext4_ext_dirty(NULL, inode, &path[path->p_depth]);
59798016e29fSHarshad Shirwadkar 		up_write(&EXT4_I(inode)->i_data_sem);
59808016e29fSHarshad Shirwadkar 		ext4_mark_inode_dirty(NULL, inode);
59817ff5fddaSYe Bin 		ext4_free_ext_path(path);
59828016e29fSHarshad Shirwadkar 	}
59838016e29fSHarshad Shirwadkar }
59848016e29fSHarshad Shirwadkar 
59858016e29fSHarshad Shirwadkar /* Check if *cur is a hole and if it is, skip it */
59861fd95c05STheodore Ts'o static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
59878016e29fSHarshad Shirwadkar {
59888016e29fSHarshad Shirwadkar 	int ret;
59898016e29fSHarshad Shirwadkar 	struct ext4_map_blocks map;
59908016e29fSHarshad Shirwadkar 
59918016e29fSHarshad Shirwadkar 	map.m_lblk = *cur;
59928016e29fSHarshad Shirwadkar 	map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
59938016e29fSHarshad Shirwadkar 
59948016e29fSHarshad Shirwadkar 	ret = ext4_map_blocks(NULL, inode, &map, 0);
59951fd95c05STheodore Ts'o 	if (ret < 0)
59961fd95c05STheodore Ts'o 		return ret;
59978016e29fSHarshad Shirwadkar 	if (ret != 0)
59981fd95c05STheodore Ts'o 		return 0;
59998016e29fSHarshad Shirwadkar 	*cur = *cur + map.m_len;
60001fd95c05STheodore Ts'o 	return 0;
60018016e29fSHarshad Shirwadkar }
60028016e29fSHarshad Shirwadkar 
60038016e29fSHarshad Shirwadkar /* Count number of blocks used by this inode and update i_blocks */
60048016e29fSHarshad Shirwadkar int ext4_ext_replay_set_iblocks(struct inode *inode)
60058016e29fSHarshad Shirwadkar {
60068016e29fSHarshad Shirwadkar 	struct ext4_ext_path *path = NULL, *path2 = NULL;
60078016e29fSHarshad Shirwadkar 	struct ext4_extent *ex;
60088016e29fSHarshad Shirwadkar 	ext4_lblk_t cur = 0, end;
60098016e29fSHarshad Shirwadkar 	int numblks = 0, i, ret = 0;
60108016e29fSHarshad Shirwadkar 	ext4_fsblk_t cmp1, cmp2;
60118016e29fSHarshad Shirwadkar 	struct ext4_map_blocks map;
60128016e29fSHarshad Shirwadkar 
60138016e29fSHarshad Shirwadkar 	/* Determine the size of the file first */
60148016e29fSHarshad Shirwadkar 	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
60158016e29fSHarshad Shirwadkar 				EXT4_EX_NOCACHE);
60168016e29fSHarshad Shirwadkar 	if (IS_ERR(path))
60178016e29fSHarshad Shirwadkar 		return PTR_ERR(path);
60188016e29fSHarshad Shirwadkar 	ex = path[path->p_depth].p_ext;
60198016e29fSHarshad Shirwadkar 	if (!ex) {
60207ff5fddaSYe Bin 		ext4_free_ext_path(path);
60218016e29fSHarshad Shirwadkar 		goto out;
60228016e29fSHarshad Shirwadkar 	}
60238016e29fSHarshad Shirwadkar 	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
60247ff5fddaSYe Bin 	ext4_free_ext_path(path);
60258016e29fSHarshad Shirwadkar 
60268016e29fSHarshad Shirwadkar 	/* Count the number of data blocks */
60278016e29fSHarshad Shirwadkar 	cur = 0;
60288016e29fSHarshad Shirwadkar 	while (cur < end) {
60298016e29fSHarshad Shirwadkar 		map.m_lblk = cur;
60308016e29fSHarshad Shirwadkar 		map.m_len = end - cur;
60318016e29fSHarshad Shirwadkar 		ret = ext4_map_blocks(NULL, inode, &map, 0);
60328016e29fSHarshad Shirwadkar 		if (ret < 0)
60338016e29fSHarshad Shirwadkar 			break;
60348016e29fSHarshad Shirwadkar 		if (ret > 0)
60358016e29fSHarshad Shirwadkar 			numblks += ret;
60368016e29fSHarshad Shirwadkar 		cur = cur + map.m_len;
60378016e29fSHarshad Shirwadkar 	}
60388016e29fSHarshad Shirwadkar 
60398016e29fSHarshad Shirwadkar 	/*
60408016e29fSHarshad Shirwadkar 	 * Count the number of extent tree blocks. We do it by looking up
60418016e29fSHarshad Shirwadkar 	 * two successive extents and determining the difference between
60428016e29fSHarshad Shirwadkar 	 * their paths. When the paths differ for two successive extents,
60438016e29fSHarshad Shirwadkar 	 * we compare the blocks in the path at each level and increment
60448016e29fSHarshad Shirwadkar 	 * iblocks by the total number of differences found.
60458016e29fSHarshad Shirwadkar 	 */
60468016e29fSHarshad Shirwadkar 	cur = 0;
60471fd95c05STheodore Ts'o 	ret = skip_hole(inode, &cur);
60481fd95c05STheodore Ts'o 	if (ret < 0)
60491fd95c05STheodore Ts'o 		goto out;
60508016e29fSHarshad Shirwadkar 	path = ext4_find_extent(inode, cur, NULL, 0);
60518016e29fSHarshad Shirwadkar 	if (IS_ERR(path))
60528016e29fSHarshad Shirwadkar 		goto out;
60538016e29fSHarshad Shirwadkar 	numblks += path->p_depth;
60547ff5fddaSYe Bin 	ext4_free_ext_path(path);
60558016e29fSHarshad Shirwadkar 	while (cur < end) {
60568016e29fSHarshad Shirwadkar 		path = ext4_find_extent(inode, cur, NULL, 0);
60578016e29fSHarshad Shirwadkar 		if (IS_ERR(path))
60588016e29fSHarshad Shirwadkar 			break;
60598016e29fSHarshad Shirwadkar 		ex = path[path->p_depth].p_ext;
60608016e29fSHarshad Shirwadkar 		if (!ex) {
60617ff5fddaSYe Bin 			ext4_free_ext_path(path);
60628016e29fSHarshad Shirwadkar 			return 0;
60638016e29fSHarshad Shirwadkar 		}
60648016e29fSHarshad Shirwadkar 		cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
60658016e29fSHarshad Shirwadkar 			  ext4_ext_get_actual_len(ex));
60661fd95c05STheodore Ts'o 		ret = skip_hole(inode, &cur);
60671fd95c05STheodore Ts'o 		if (ret < 0) {
60687ff5fddaSYe Bin 			ext4_free_ext_path(path);
60691fd95c05STheodore Ts'o 			break;
60701fd95c05STheodore Ts'o 		}
60718016e29fSHarshad Shirwadkar 		path2 = ext4_find_extent(inode, cur, NULL, 0);
60728016e29fSHarshad Shirwadkar 		if (IS_ERR(path2)) {
60737ff5fddaSYe Bin 			ext4_free_ext_path(path);
60748016e29fSHarshad Shirwadkar 			break;
60758016e29fSHarshad Shirwadkar 		}
60768016e29fSHarshad Shirwadkar 		for (i = 0; i <= max(path->p_depth, path2->p_depth); i++) {
60778016e29fSHarshad Shirwadkar 			cmp1 = cmp2 = 0;
60788016e29fSHarshad Shirwadkar 			if (i <= path->p_depth)
60798016e29fSHarshad Shirwadkar 				cmp1 = path[i].p_bh ?
60808016e29fSHarshad Shirwadkar 					path[i].p_bh->b_blocknr : 0;
60818016e29fSHarshad Shirwadkar 			if (i <= path2->p_depth)
60828016e29fSHarshad Shirwadkar 				cmp2 = path2[i].p_bh ?
60838016e29fSHarshad Shirwadkar 					path2[i].p_bh->b_blocknr : 0;
60848016e29fSHarshad Shirwadkar 			if (cmp1 != cmp2 && cmp2 != 0)
60858016e29fSHarshad Shirwadkar 				numblks++;
60868016e29fSHarshad Shirwadkar 		}
60877ff5fddaSYe Bin 		ext4_free_ext_path(path);
60887ff5fddaSYe Bin 		ext4_free_ext_path(path2);
60898016e29fSHarshad Shirwadkar 	}
60908016e29fSHarshad Shirwadkar 
60918016e29fSHarshad Shirwadkar out:
60928016e29fSHarshad Shirwadkar 	inode->i_blocks = numblks << (inode->i_sb->s_blocksize_bits - 9);
60938016e29fSHarshad Shirwadkar 	ext4_mark_inode_dirty(NULL, inode);
60948016e29fSHarshad Shirwadkar 	return 0;
60958016e29fSHarshad Shirwadkar }
60968016e29fSHarshad Shirwadkar 
60978016e29fSHarshad Shirwadkar int ext4_ext_clear_bb(struct inode *inode)
60988016e29fSHarshad Shirwadkar {
60998016e29fSHarshad Shirwadkar 	struct ext4_ext_path *path = NULL;
61008016e29fSHarshad Shirwadkar 	struct ext4_extent *ex;
61018016e29fSHarshad Shirwadkar 	ext4_lblk_t cur = 0, end;
61028016e29fSHarshad Shirwadkar 	int j, ret = 0;
61038016e29fSHarshad Shirwadkar 	struct ext4_map_blocks map;
61048016e29fSHarshad Shirwadkar 
61051ebf2178SHarshad Shirwadkar 	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
61061ebf2178SHarshad Shirwadkar 		return 0;
61071ebf2178SHarshad Shirwadkar 
61088016e29fSHarshad Shirwadkar 	/* Determine the size of the file first */
61098016e29fSHarshad Shirwadkar 	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
61108016e29fSHarshad Shirwadkar 				EXT4_EX_NOCACHE);
61118016e29fSHarshad Shirwadkar 	if (IS_ERR(path))
61128016e29fSHarshad Shirwadkar 		return PTR_ERR(path);
61138016e29fSHarshad Shirwadkar 	ex = path[path->p_depth].p_ext;
61148016e29fSHarshad Shirwadkar 	if (!ex) {
61157ff5fddaSYe Bin 		ext4_free_ext_path(path);
61168016e29fSHarshad Shirwadkar 		return 0;
61178016e29fSHarshad Shirwadkar 	}
61188016e29fSHarshad Shirwadkar 	end = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex);
61197ff5fddaSYe Bin 	ext4_free_ext_path(path);
61208016e29fSHarshad Shirwadkar 
61218016e29fSHarshad Shirwadkar 	cur = 0;
61228016e29fSHarshad Shirwadkar 	while (cur < end) {
61238016e29fSHarshad Shirwadkar 		map.m_lblk = cur;
61248016e29fSHarshad Shirwadkar 		map.m_len = end - cur;
61258016e29fSHarshad Shirwadkar 		ret = ext4_map_blocks(NULL, inode, &map, 0);
61268016e29fSHarshad Shirwadkar 		if (ret < 0)
61278016e29fSHarshad Shirwadkar 			break;
61288016e29fSHarshad Shirwadkar 		if (ret > 0) {
61298016e29fSHarshad Shirwadkar 			path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
61308016e29fSHarshad Shirwadkar 			if (!IS_ERR_OR_NULL(path)) {
61318016e29fSHarshad Shirwadkar 				for (j = 0; j < path->p_depth; j++) {
61328016e29fSHarshad Shirwadkar 
61338016e29fSHarshad Shirwadkar 					ext4_mb_mark_bb(inode->i_sb,
61348016e29fSHarshad Shirwadkar 							path[j].p_block, 1, 0);
6135599ea31dSXin Yin 					ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6136599ea31dSXin Yin 							0, path[j].p_block, 1, 1);
61378016e29fSHarshad Shirwadkar 				}
61387ff5fddaSYe Bin 				ext4_free_ext_path(path);
61398016e29fSHarshad Shirwadkar 			}
61408016e29fSHarshad Shirwadkar 			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
6141599ea31dSXin Yin 			ext4_fc_record_regions(inode->i_sb, inode->i_ino,
6142599ea31dSXin Yin 					map.m_lblk, map.m_pblk, map.m_len, 1);
61438016e29fSHarshad Shirwadkar 		}
61448016e29fSHarshad Shirwadkar 		cur = cur + map.m_len;
61458016e29fSHarshad Shirwadkar 	}
61468016e29fSHarshad Shirwadkar 
61478016e29fSHarshad Shirwadkar 	return 0;
61488016e29fSHarshad Shirwadkar }
6149
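/*
 * Illustrative sketch only, not part of the original file: one way a
 * caller in the style of the move-extent path might drive
 * ext4_swap_extents(). "example_swap_range" is a made-up name, journal
 * setup is omitted, and the caller is assumed to already satisfy the
 * locking rules documented above ext4_swap_extents().
 */
static inline int example_swap_range(handle_t *handle, struct inode *a,
				     struct inode *b, ext4_lblk_t lblk,
				     ext4_lblk_t count, int unwritten)
{
	int err = 0;
	int replaced;

	/* i_rwsem and i_data_sem must already be held on both inodes */
	replaced = ext4_swap_extents(handle, a, b, lblk, lblk, count,
				     unwritten, &err);
	if (err)
		return err;
	/* a short swap means the trees ended early; report it to the caller */
	return replaced == (int)count ? 0 : -EIO;
}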