// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}
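
/*
 * Stamp the checksum into the ext4_extent_tail at the end of the extent
 * block.  Like the verify helper above, this is a no-op on filesystems
 * without metadata checksums.
 */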
static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				ext4_lblk_t split,
				int split_flag,
				int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

/*
 * Make sure 'handle' has at least 'check_cred' credits.  If not, restart
 * the transaction with 'restart_cred' credits.  The function drops
 * i_data_sem when restarting the transaction and re-acquires it after the
 * transaction is restarted.
 *
 * The function returns 0 on success, 1 if the transaction had to be
 * restarted, and < 0 in case of a fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
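
/*
 * Usage sketch (illustrative only; the 'again' and 'out' labels are
 * hypothetical): a caller looping over tree modifications under
 * i_data_sem would typically do
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, credits,
 *					  restart_credits, revoke_credits);
 *	if (err > 0)
 *		goto again;	(restarted: re-read the extent path)
 *	else if (err < 0)
 *		goto out;	(fatal error)
 *
 * since a restart temporarily drops i_data_sem, after which any cached
 * ext4_ext_path may be stale.
 */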

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
			       struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like the index is empty;
		 * try to find the starting block from the index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
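
/*
 * Capacity examples for the helpers above (assuming the usual 12-byte
 * extent header and 12-byte extent/index entries): a 4096-byte tree
 * block holds (4096 - 12) / 12 = 340 entries, while the 60-byte i_data
 * root inside the inode holds (60 - 12) / 12 = 4.
 */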

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
			(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL : 0));
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}
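
/*
 * Reminder of the on-disk extent layout the validators below rely on
 * (see ext4_extents.h): ee_block is the first logical block, ee_len is
 * the length (values above EXT_INIT_MAX_LEN encode unwritten extents;
 * ext4_ext_get_actual_len() decodes them), and ee_start_hi/ee_start_lo
 * form the 48-bit physical start block.
 */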
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				 struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				     struct ext4_extent_header *eh,
				     int depth)
{
	unsigned short entries;

	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
		ext4_fsblk_t pblock = 0;
		ext4_lblk_t lblock = 0;
		ext4_lblk_t prev = 0;
		int len = 0;

		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			len = ext4_ext_get_actual_len(ext);
			if ((lblock <= prev) && prev) {
				pblock = ext4_ext_pblock(ext);
				es->s_last_error_block = cpu_to_le64(pblock);
				return 0;
			}
			ext++;
			entries--;
			prev = lblock + len - 1;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);

		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}
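
/*
 * Sanity-check one extent tree node: verify the header magic and depth,
 * eh_max against the capacity helpers, eh_entries against eh_max, each
 * entry via the validators above, and the block checksum for on-disk
 * (non-root) nodes.  Corruption is reported via ext4_error_inode().
 */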
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_set_errno(inode->i_sb, -err);
	ext4_error_inode(inode, function, line, 0,
			 "pblk %llu bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
			 (unsigned long long) pblk, error_msg,
			 le16_to_cpu(eh->eh_magic),
			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			 max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
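
/*
 * Populate the extent status tree from one leaf node: each on-disk
 * extent is cached as written (or unwritten), and the gap before an
 * extent, if any, is cached as a hole.
 */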
static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, ext4_fsblk_t pblk, int depth,
			 int flags)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = bh_submit_read(bh);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	if (!ext4_has_feature_journal(inode->i_sb) ||
	    (inode->i_ino !=
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
		err = __ext4_ext_check(function, line, inode,
				       ext_block_hdr(bh), depth, pblk);
		if (err)
			goto errout;
	}
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);

		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);
}

#define read_extent_tree_block(inode, pblk, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),	\
				 (depth), (flags))

/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
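	/*
	 * Walk the tree depth-first, using path[] as an explicit stack
	 * of per-level cursors: descend while index entries remain, and
	 * pop back up at a leaf level or at the end of an index block.
	 * The leaf entries themselves are cached by
	 * read_extent_tree_block(); EXT4_EX_FORCE_CACHE makes it refresh
	 * even already-verified blocks.
	 */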
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode,
					    ext4_idx_pblock(path[i].p_idx++),
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}
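
/*
 * Note: ext4_ext_precache() is typically reached via the
 * EXT4_IOC_PRECACHE_EXTENTS ioctl (and, with FIEMAP_FLAG_CACHE, from
 * fiemap), both of which want the inode's extents cached up front.
 */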

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			       ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;

		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n",
				  level, le32_to_cpu(idx->ei_block),
				  ext4_idx_pblock(idx), newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
			  le32_to_cpu(ex->ee_block),
			  ext4_ext_pblock(ex),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex),
			  newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
	}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
		       struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
			  m, le32_to_cpu(m->ei_block),
			  r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
				    <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for the closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		   struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
			  m, le32_to_cpu(m->ee_block),
			  r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
		  le32_to_cpu(path->p_ext->ee_block),
		  ext4_ext_pblock(path->p_ext),
		  ext4_ext_is_unwritten(path->p_ext),
		  ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
				    <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
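
/*
 * Worked example for the two searches above, with made-up numbers:
 * given entries starting at logical blocks {0, 100, 200}, a lookup of
 * block 150 terminates with l pointing at the entry for 200, so
 * p_idx/p_ext is set to l - 1, the entry starting at 100: the
 * rightmost entry whose start does not exceed the target block.
 */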

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	return 0;
}
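
/*
 * ext4_find_extent:
 * look up @block in the extent tree and return the path from the root
 * down to the leaf as an array of ext4_ext_path entries, one per level,
 * each level below the root pinning its node's buffer_head.  An
 * existing path passed in via @orig_path is reused if it is deep
 * enough, otherwise freed.  On error the path is released and an
 * ERR_PTR is returned; on success the caller eventually releases the
 * path with ext4_ext_drop_refs() plus kfree().
 */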
struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
			       GFP_NOFS);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries),
			  le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
					    flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
		     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
			  "move %d indices from 0x%p to 0x%p\n",
			  logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;
	size_t ext_size = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
			  " next leaf starts at %d\n",
			  le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the filesystem
	 * read-only.  The index won't be inserted and the tree will be left
	 * in a consistent state.  The next mount will repair buffers too.
	 */

	/*
	 * Get an array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;

		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
			  i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
			     EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
Wong err = -EFSCORRUPTED; 1180273df556SFrank Mayhar goto cleanup; 1181273df556SFrank Mayhar } 11821b16da77SYongqiang Yang /* start copy indexes */ 11831b16da77SYongqiang Yang m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; 11841b16da77SYongqiang Yang ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 11851b16da77SYongqiang Yang EXT_MAX_INDEX(path[i].p_hdr)); 11861b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, i); 1187a86c6181SAlex Tomas if (m) { 11881b16da77SYongqiang Yang memmove(++fidx, path[i].p_idx, 1189a86c6181SAlex Tomas sizeof(struct ext4_extent_idx) * m); 1190e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 1191a86c6181SAlex Tomas } 1192592acbf1SSriram Rajagopalan /* zero out unused area in the extent block */ 1193592acbf1SSriram Rajagopalan ext_size = sizeof(struct ext4_extent_header) + 1194592acbf1SSriram Rajagopalan (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries)); 1195592acbf1SSriram Rajagopalan memset(bh->b_data + ext_size, 0, 1196592acbf1SSriram Rajagopalan inode->i_sb->s_blocksize - ext_size); 11977ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, neh); 1198a86c6181SAlex Tomas set_buffer_uptodate(bh); 1199a86c6181SAlex Tomas unlock_buffer(bh); 1200a86c6181SAlex Tomas 12010390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 12027e028976SAvantika Mathur if (err) 1203a86c6181SAlex Tomas goto cleanup; 1204a86c6181SAlex Tomas brelse(bh); 1205a86c6181SAlex Tomas bh = NULL; 1206a86c6181SAlex Tomas 1207a86c6181SAlex Tomas /* correct old index */ 1208a86c6181SAlex Tomas if (m) { 1209a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + i); 1210a86c6181SAlex Tomas if (err) 1211a86c6181SAlex Tomas goto cleanup; 1212e8546d06SMarcin Slusarz le16_add_cpu(&path[i].p_hdr->eh_entries, -m); 1213a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path + i); 1214a86c6181SAlex Tomas if (err) 1215a86c6181SAlex Tomas goto cleanup; 1216a86c6181SAlex Tomas } 1217a86c6181SAlex Tomas 1218a86c6181SAlex Tomas i--; 1219a86c6181SAlex Tomas } 1220a86c6181SAlex Tomas 1221a86c6181SAlex Tomas /* insert new index */ 1222a86c6181SAlex Tomas err = ext4_ext_insert_index(handle, inode, path + at, 1223a86c6181SAlex Tomas le32_to_cpu(border), newblock); 1224a86c6181SAlex Tomas 1225a86c6181SAlex Tomas cleanup: 1226a86c6181SAlex Tomas if (bh) { 1227a86c6181SAlex Tomas if (buffer_locked(bh)) 1228a86c6181SAlex Tomas unlock_buffer(bh); 1229a86c6181SAlex Tomas brelse(bh); 1230a86c6181SAlex Tomas } 1231a86c6181SAlex Tomas 1232a86c6181SAlex Tomas if (err) { 1233a86c6181SAlex Tomas /* free all allocated blocks in error case */ 1234a86c6181SAlex Tomas for (i = 0; i < depth; i++) { 1235a86c6181SAlex Tomas if (!ablocks[i]) 1236a86c6181SAlex Tomas continue; 12377dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, 1238e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA); 1239a86c6181SAlex Tomas } 1240a86c6181SAlex Tomas } 1241a86c6181SAlex Tomas kfree(ablocks); 1242a86c6181SAlex Tomas 1243a86c6181SAlex Tomas return err; 1244a86c6181SAlex Tomas } 1245a86c6181SAlex Tomas 1246a86c6181SAlex Tomas /* 1247d0d856e8SRandy Dunlap * ext4_ext_grow_indepth: 1248d0d856e8SRandy Dunlap * implements tree growing procedure: 1249a86c6181SAlex Tomas * - allocates new block 1250a86c6181SAlex Tomas * - moves top-level data (index block or leaf) into the new block 1251d0d856e8SRandy Dunlap * - initializes new top-level, creating index that points to the 1252a86c6181SAlex Tomas * just created block 1253a86c6181SAlex Tomas */ 1254a86c6181SAlex 
Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 1255be5cd90dSDmitry Monakhov unsigned int flags) 1256a86c6181SAlex Tomas { 1257a86c6181SAlex Tomas struct ext4_extent_header *neh; 1258a86c6181SAlex Tomas struct buffer_head *bh; 1259be5cd90dSDmitry Monakhov ext4_fsblk_t newblock, goal = 0; 1260be5cd90dSDmitry Monakhov struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 1261a86c6181SAlex Tomas int err = 0; 1262592acbf1SSriram Rajagopalan size_t ext_size = 0; 1263a86c6181SAlex Tomas 1264be5cd90dSDmitry Monakhov /* Try to prepend new index to old one */ 1265be5cd90dSDmitry Monakhov if (ext_depth(inode)) 1266be5cd90dSDmitry Monakhov goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode))); 1267be5cd90dSDmitry Monakhov if (goal > le32_to_cpu(es->s_first_data_block)) { 1268be5cd90dSDmitry Monakhov flags |= EXT4_MB_HINT_TRY_GOAL; 1269be5cd90dSDmitry Monakhov goal--; 1270be5cd90dSDmitry Monakhov } else 1271be5cd90dSDmitry Monakhov goal = ext4_inode_to_goal_block(inode); 1272be5cd90dSDmitry Monakhov newblock = ext4_new_meta_blocks(handle, inode, goal, flags, 1273be5cd90dSDmitry Monakhov NULL, &err); 1274a86c6181SAlex Tomas if (newblock == 0) 1275a86c6181SAlex Tomas return err; 1276a86c6181SAlex Tomas 1277c45653c3SNikolay Borisov bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); 1278aebf0243SWang Shilong if (unlikely(!bh)) 1279860d21e2STheodore Ts'o return -ENOMEM; 1280a86c6181SAlex Tomas lock_buffer(bh); 1281a86c6181SAlex Tomas 12827e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 12837e028976SAvantika Mathur if (err) { 1284a86c6181SAlex Tomas unlock_buffer(bh); 1285a86c6181SAlex Tomas goto out; 1286a86c6181SAlex Tomas } 1287a86c6181SAlex Tomas 1288592acbf1SSriram Rajagopalan ext_size = sizeof(EXT4_I(inode)->i_data); 1289a86c6181SAlex Tomas /* move top-level index/leaf into new block */ 1290592acbf1SSriram Rajagopalan memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size); 1291592acbf1SSriram Rajagopalan /* zero out unused area in the extent block */ 1292592acbf1SSriram Rajagopalan memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size); 1293a86c6181SAlex Tomas 1294a86c6181SAlex Tomas /* set size of new block */ 1295a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1296a86c6181SAlex Tomas /* old root could have indexes or leaves 1297a86c6181SAlex Tomas * so calculate eh_max the right way */ 1298a86c6181SAlex Tomas if (ext_depth(inode)) 129955ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1300a86c6181SAlex Tomas else 130155ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1302a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 13037ac5990dSDarrick J.
Wong ext4_extent_block_csum_set(inode, neh); 1304a86c6181SAlex Tomas set_buffer_uptodate(bh); 1305a86c6181SAlex Tomas unlock_buffer(bh); 1306a86c6181SAlex Tomas 13070390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 13087e028976SAvantika Mathur if (err) 1309a86c6181SAlex Tomas goto out; 1310a86c6181SAlex Tomas 13111939dd84SDmitry Monakhov /* Update top-level index: num,max,pointer */ 1312a86c6181SAlex Tomas neh = ext_inode_hdr(inode); 13131939dd84SDmitry Monakhov neh->eh_entries = cpu_to_le16(1); 13141939dd84SDmitry Monakhov ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); 13151939dd84SDmitry Monakhov if (neh->eh_depth == 0) { 13161939dd84SDmitry Monakhov /* Root extent block becomes index block */ 13171939dd84SDmitry Monakhov neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); 13181939dd84SDmitry Monakhov EXT_FIRST_INDEX(neh)->ei_block = 13191939dd84SDmitry Monakhov EXT_FIRST_EXTENT(neh)->ee_block; 13201939dd84SDmitry Monakhov } 13212ae02107SMingming Cao ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", 1322a86c6181SAlex Tomas le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 13235a0790c2SAndi Kleen le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1324bf89d16fSTheodore Ts'o ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1325a86c6181SAlex Tomas 1326ba39ebb6SWei Yongjun le16_add_cpu(&neh->eh_depth, 1); 13271939dd84SDmitry Monakhov ext4_mark_inode_dirty(handle, inode); 1328a86c6181SAlex Tomas out: 1329a86c6181SAlex Tomas brelse(bh); 1330a86c6181SAlex Tomas 1331a86c6181SAlex Tomas return err; 1332a86c6181SAlex Tomas } 1333a86c6181SAlex Tomas 1334a86c6181SAlex Tomas /* 1335d0d856e8SRandy Dunlap * ext4_ext_create_new_leaf: 1336d0d856e8SRandy Dunlap * finds empty index and adds new leaf. 1337d0d856e8SRandy Dunlap * if no free index is found, then it requests in-depth growing. 
1338a86c6181SAlex Tomas */ 1339a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 1340107a7bd3STheodore Ts'o unsigned int mb_flags, 1341107a7bd3STheodore Ts'o unsigned int gb_flags, 1342dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 1343a86c6181SAlex Tomas struct ext4_extent *newext) 1344a86c6181SAlex Tomas { 1345dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 1346a86c6181SAlex Tomas struct ext4_ext_path *curp; 1347a86c6181SAlex Tomas int depth, i, err = 0; 1348a86c6181SAlex Tomas 1349a86c6181SAlex Tomas repeat: 1350a86c6181SAlex Tomas i = depth = ext_depth(inode); 1351a86c6181SAlex Tomas 1352a86c6181SAlex Tomas /* walk up the tree and look for a free index entry */ 1353a86c6181SAlex Tomas curp = path + depth; 1354a86c6181SAlex Tomas while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1355a86c6181SAlex Tomas i--; 1356a86c6181SAlex Tomas curp--; 1357a86c6181SAlex Tomas } 1358a86c6181SAlex Tomas 1359d0d856e8SRandy Dunlap /* we use an already allocated block for the index block, 1360d0d856e8SRandy Dunlap * so subsequent data blocks should be contiguous */ 1361a86c6181SAlex Tomas if (EXT_HAS_FREE_INDEX(curp)) { 1362a86c6181SAlex Tomas /* if we found index with free entry, then use that 1363a86c6181SAlex Tomas * entry: create all needed subtree and add new leaf */ 1364107a7bd3STheodore Ts'o err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); 1365787e0981SShen Feng if (err) 1366787e0981SShen Feng goto out; 1367a86c6181SAlex Tomas 1368a86c6181SAlex Tomas /* refill path */ 1369ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, 1370725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1371dfe50809STheodore Ts'o ppath, gb_flags); 1372a86c6181SAlex Tomas if (IS_ERR(path)) 1373a86c6181SAlex Tomas err = PTR_ERR(path); 1374a86c6181SAlex Tomas } else { 1375a86c6181SAlex Tomas /* tree is full, time to grow in depth */ 1376be5cd90dSDmitry Monakhov err = ext4_ext_grow_indepth(handle, inode, mb_flags); 1377a86c6181SAlex Tomas if (err) 1378a86c6181SAlex Tomas goto out; 1379a86c6181SAlex Tomas 1380a86c6181SAlex Tomas /* refill path */ 1381ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, 1382725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1383dfe50809STheodore Ts'o ppath, gb_flags); 1384a86c6181SAlex Tomas if (IS_ERR(path)) { 1385a86c6181SAlex Tomas err = PTR_ERR(path); 1386a86c6181SAlex Tomas goto out; 1387a86c6181SAlex Tomas } 1388a86c6181SAlex Tomas 1389a86c6181SAlex Tomas /* 1390d0d856e8SRandy Dunlap * only first (depth 0 -> 1) produces free space; 1391d0d856e8SRandy Dunlap * in all other cases we have to split the grown tree 1392a86c6181SAlex Tomas */ 1393a86c6181SAlex Tomas depth = ext_depth(inode); 1394a86c6181SAlex Tomas if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1395d0d856e8SRandy Dunlap /* now we need to split */ 1396a86c6181SAlex Tomas goto repeat; 1397a86c6181SAlex Tomas } 1398a86c6181SAlex Tomas } 1399a86c6181SAlex Tomas 1400a86c6181SAlex Tomas out: 1401a86c6181SAlex Tomas return err; 1402a86c6181SAlex Tomas } 1403a86c6181SAlex Tomas 1404a86c6181SAlex Tomas /* 14051988b51eSAlex Tomas * search the closest allocated block to the left for *logical 14061988b51eSAlex Tomas * and returns it at @logical + its physical address at @phys 14071988b51eSAlex Tomas * if *logical is the smallest allocated block, the function 14081988b51eSAlex Tomas * returns 0 at @phys 14091988b51eSAlex Tomas * return value contains 0 (success) or error code 14101988b51eSAlex Tomas */
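/*
 * [Editor's illustration -- not part of the original source; the block
 * numbers are invented.] A worked example of what the function below
 * computes. Suppose a leaf holds extents covering logical blocks
 * [0..9] and [20..29], and the caller asks about *logical == 15 with
 * path[depth].p_ext pointing at [0..9]. Since 15 >= 0 + 10, the helper
 * reports the closest allocated block on the left: *logical becomes 9
 * and *phys becomes ext4_ext_pblock(ex) + 9. If instead *logical
 * precedes the first extent of the file, the path must be leftmost at
 * every level, and the helper returns with *phys == 0.
 */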
14111f109d5aSTheodore Ts'o static int ext4_ext_search_left(struct inode *inode, 14121f109d5aSTheodore Ts'o struct ext4_ext_path *path, 14131988b51eSAlex Tomas ext4_lblk_t *logical, ext4_fsblk_t *phys) 14141988b51eSAlex Tomas { 14151988b51eSAlex Tomas struct ext4_extent_idx *ix; 14161988b51eSAlex Tomas struct ext4_extent *ex; 1417b939e376SAneesh Kumar K.V int depth, ee_len; 14181988b51eSAlex Tomas 1419273df556SFrank Mayhar if (unlikely(path == NULL)) { 1420273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 14216a797d27SDarrick J. Wong return -EFSCORRUPTED; 1422273df556SFrank Mayhar } 14231988b51eSAlex Tomas depth = path->p_depth; 14241988b51eSAlex Tomas *phys = 0; 14251988b51eSAlex Tomas 14261988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 14271988b51eSAlex Tomas return 0; 14281988b51eSAlex Tomas 14291988b51eSAlex Tomas /* usually extent in the path covers blocks smaller 14301988b51eSAlex Tomas * than *logical, but it can be that the extent is the 14311988b51eSAlex Tomas * first one in the file */ 14321988b51eSAlex Tomas 14331988b51eSAlex Tomas ex = path[depth].p_ext; 1434b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 14351988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1436273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1437273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1438273df556SFrank Mayhar "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", 1439273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block)); 14406a797d27SDarrick J. Wong return -EFSCORRUPTED; 1441273df556SFrank Mayhar } 14421988b51eSAlex Tomas while (--depth >= 0) { 14431988b51eSAlex Tomas ix = path[depth].p_idx; 1444273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1445273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1446273df556SFrank Mayhar "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", 14476ee3b212STao Ma ix != NULL ? le32_to_cpu(ix->ei_block) : 0, 1448273df556SFrank Mayhar EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? 14496ee3b212STao Ma le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, 1450273df556SFrank Mayhar depth); 14516a797d27SDarrick J. Wong return -EFSCORRUPTED; 1452273df556SFrank Mayhar } 14531988b51eSAlex Tomas } 14541988b51eSAlex Tomas return 0; 14551988b51eSAlex Tomas } 14561988b51eSAlex Tomas 1457273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1458273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1459273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1460273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 14616a797d27SDarrick J.
Wong return -EFSCORRUPTED; 1462273df556SFrank Mayhar } 14631988b51eSAlex Tomas 1464b939e376SAneesh Kumar K.V *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1465bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex) + ee_len - 1; 14661988b51eSAlex Tomas return 0; 14671988b51eSAlex Tomas } 14681988b51eSAlex Tomas 14691988b51eSAlex Tomas /* 14701988b51eSAlex Tomas * search the closest allocated block to the right for *logical 14711988b51eSAlex Tomas * and returns it at @logical + its physical address at @phys 1472df3ab170STao Ma * if *logical is the largest allocated block, the function 14731988b51eSAlex Tomas * returns 0 at @phys 14741988b51eSAlex Tomas * return value contains 0 (success) or error code 14751988b51eSAlex Tomas */ 14761f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode, 14771f109d5aSTheodore Ts'o struct ext4_ext_path *path, 14784d33b1efSTheodore Ts'o ext4_lblk_t *logical, ext4_fsblk_t *phys, 14794d33b1efSTheodore Ts'o struct ext4_extent **ret_ex) 14801988b51eSAlex Tomas { 14811988b51eSAlex Tomas struct buffer_head *bh = NULL; 14821988b51eSAlex Tomas struct ext4_extent_header *eh; 14831988b51eSAlex Tomas struct ext4_extent_idx *ix; 14841988b51eSAlex Tomas struct ext4_extent *ex; 14851988b51eSAlex Tomas ext4_fsblk_t block; 1486395a87bfSEric Sandeen int depth; /* Note, NOT eh_depth; depth from top of tree */ 1487395a87bfSEric Sandeen int ee_len; 14881988b51eSAlex Tomas 1489273df556SFrank Mayhar if (unlikely(path == NULL)) { 1490273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 14916a797d27SDarrick J. Wong return -EFSCORRUPTED; 1492273df556SFrank Mayhar } 14931988b51eSAlex Tomas depth = path->p_depth; 14941988b51eSAlex Tomas *phys = 0; 14951988b51eSAlex Tomas 14961988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 14971988b51eSAlex Tomas return 0; 14981988b51eSAlex Tomas 14991988b51eSAlex Tomas /* usually extent in the path covers blocks smaller 15001988b51eSAlex Tomas * than *logical, but it can be that the extent is the 15011988b51eSAlex Tomas * first one in the file */ 15021988b51eSAlex Tomas 15031988b51eSAlex Tomas ex = path[depth].p_ext; 1504b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 15051988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1506273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1507273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1508273df556SFrank Mayhar "first_extent(path[%d].p_hdr) != ex", 1509273df556SFrank Mayhar depth); 15106a797d27SDarrick J. Wong return -EFSCORRUPTED; 1511273df556SFrank Mayhar } 15121988b51eSAlex Tomas while (--depth >= 0) { 15131988b51eSAlex Tomas ix = path[depth].p_idx; 1514273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1515273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1516273df556SFrank Mayhar "ix != EXT_FIRST_INDEX *logical %d!", 1517273df556SFrank Mayhar *logical); 15186a797d27SDarrick J. Wong return -EFSCORRUPTED; 1519273df556SFrank Mayhar } 15201988b51eSAlex Tomas } 15214d33b1efSTheodore Ts'o goto found_extent; 15221988b51eSAlex Tomas } 15231988b51eSAlex Tomas 1524273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1525273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1526273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1527273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 15286a797d27SDarrick J.
Wong return -EFSCORRUPTED; 1529273df556SFrank Mayhar } 15301988b51eSAlex Tomas 15311988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 15321988b51eSAlex Tomas /* next allocated block in this leaf */ 15331988b51eSAlex Tomas ex++; 15344d33b1efSTheodore Ts'o goto found_extent; 15351988b51eSAlex Tomas } 15361988b51eSAlex Tomas 15371988b51eSAlex Tomas /* go up and search for index to the right */ 15381988b51eSAlex Tomas while (--depth >= 0) { 15391988b51eSAlex Tomas ix = path[depth].p_idx; 15401988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 154125f1ee3aSWu Fengguang goto got_index; 15421988b51eSAlex Tomas } 15431988b51eSAlex Tomas 154425f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */ 15451988b51eSAlex Tomas return 0; 15461988b51eSAlex Tomas 154725f1ee3aSWu Fengguang got_index: 15481988b51eSAlex Tomas /* we've found index to the right, let's 15491988b51eSAlex Tomas * follow it and find the closest allocated 15501988b51eSAlex Tomas * block to the right */ 15511988b51eSAlex Tomas ix++; 1552bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 15531988b51eSAlex Tomas while (++depth < path->p_depth) { 1554395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */ 15557d7ea89eSTheodore Ts'o bh = read_extent_tree_block(inode, block, 1556107a7bd3STheodore Ts'o path->p_depth - depth, 0); 15577d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 15587d7ea89eSTheodore Ts'o return PTR_ERR(bh); 15597d7ea89eSTheodore Ts'o eh = ext_block_hdr(bh); 15601988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh); 1561bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 15621988b51eSAlex Tomas put_bh(bh); 15631988b51eSAlex Tomas } 15641988b51eSAlex Tomas 1565107a7bd3STheodore Ts'o bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); 15667d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 15677d7ea89eSTheodore Ts'o return PTR_ERR(bh); 15681988b51eSAlex Tomas eh = ext_block_hdr(bh); 15691988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh); 15704d33b1efSTheodore Ts'o found_extent: 15711988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block); 1572bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex); 15734d33b1efSTheodore Ts'o *ret_ex = ex; 15744d33b1efSTheodore Ts'o if (bh) 15751988b51eSAlex Tomas put_bh(bh); 15761988b51eSAlex Tomas return 0; 15771988b51eSAlex Tomas } 15781988b51eSAlex Tomas 15791988b51eSAlex Tomas /* 1580d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block: 1581f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1582d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as 1583d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent 1584d0d856e8SRandy Dunlap * with leaves. 
1585a86c6181SAlex Tomas */ 1586fcf6b1b7SDmitry Monakhov ext4_lblk_t 1587a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1588a86c6181SAlex Tomas { 1589a86c6181SAlex Tomas int depth; 1590a86c6181SAlex Tomas 1591a86c6181SAlex Tomas BUG_ON(path == NULL); 1592a86c6181SAlex Tomas depth = path->p_depth; 1593a86c6181SAlex Tomas 1594a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL) 1595f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1596a86c6181SAlex Tomas 1597a86c6181SAlex Tomas while (depth >= 0) { 15986e89bbb7SEric Biggers struct ext4_ext_path *p = &path[depth]; 15996e89bbb7SEric Biggers 1600a86c6181SAlex Tomas if (depth == path->p_depth) { 1601a86c6181SAlex Tomas /* leaf */ 16026e89bbb7SEric Biggers if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr)) 16036e89bbb7SEric Biggers return le32_to_cpu(p->p_ext[1].ee_block); 1604a86c6181SAlex Tomas } else { 1605a86c6181SAlex Tomas /* index */ 16066e89bbb7SEric Biggers if (p->p_idx != EXT_LAST_INDEX(p->p_hdr)) 16076e89bbb7SEric Biggers return le32_to_cpu(p->p_idx[1].ei_block); 1608a86c6181SAlex Tomas } 1609a86c6181SAlex Tomas depth--; 1610a86c6181SAlex Tomas } 1611a86c6181SAlex Tomas 1612f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1613a86c6181SAlex Tomas } 1614a86c6181SAlex Tomas 1615a86c6181SAlex Tomas /* 1616d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block: 1617f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1618a86c6181SAlex Tomas */ 16195718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1620a86c6181SAlex Tomas { 1621a86c6181SAlex Tomas int depth; 1622a86c6181SAlex Tomas 1623a86c6181SAlex Tomas BUG_ON(path == NULL); 1624a86c6181SAlex Tomas depth = path->p_depth; 1625a86c6181SAlex Tomas 1626a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */ 1627a86c6181SAlex Tomas if (depth == 0) 1628f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1629a86c6181SAlex Tomas 1630a86c6181SAlex Tomas /* go to index block */ 1631a86c6181SAlex Tomas depth--; 1632a86c6181SAlex Tomas 1633a86c6181SAlex Tomas while (depth >= 0) { 1634a86c6181SAlex Tomas if (path[depth].p_idx != 1635a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1636725d26d3SAneesh Kumar K.V return (ext4_lblk_t) 1637725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block); 1638a86c6181SAlex Tomas depth--; 1639a86c6181SAlex Tomas } 1640a86c6181SAlex Tomas 1641f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1642a86c6181SAlex Tomas } 1643a86c6181SAlex Tomas 1644a86c6181SAlex Tomas /* 1645d0d856e8SRandy Dunlap * ext4_ext_correct_indexes: 1646d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf, 1647d0d856e8SRandy Dunlap * then we have to correct all indexes above. 1648a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases? 
1649a86c6181SAlex Tomas */ 16501d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1651a86c6181SAlex Tomas struct ext4_ext_path *path) 1652a86c6181SAlex Tomas { 1653a86c6181SAlex Tomas struct ext4_extent_header *eh; 1654a86c6181SAlex Tomas int depth = ext_depth(inode); 1655a86c6181SAlex Tomas struct ext4_extent *ex; 1656a86c6181SAlex Tomas __le32 border; 1657a86c6181SAlex Tomas int k, err = 0; 1658a86c6181SAlex Tomas 1659a86c6181SAlex Tomas eh = path[depth].p_hdr; 1660a86c6181SAlex Tomas ex = path[depth].p_ext; 1661273df556SFrank Mayhar 1662273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) { 1663273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1664273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh); 16656a797d27SDarrick J. Wong return -EFSCORRUPTED; 1666273df556SFrank Mayhar } 1667a86c6181SAlex Tomas 1668a86c6181SAlex Tomas if (depth == 0) { 1669a86c6181SAlex Tomas /* there is no tree at all */ 1670a86c6181SAlex Tomas return 0; 1671a86c6181SAlex Tomas } 1672a86c6181SAlex Tomas 1673a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) { 1674a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */ 1675a86c6181SAlex Tomas return 0; 1676a86c6181SAlex Tomas } 1677a86c6181SAlex Tomas 1678a86c6181SAlex Tomas /* 1679d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one 1680a86c6181SAlex Tomas */ 1681a86c6181SAlex Tomas k = depth - 1; 1682a86c6181SAlex Tomas border = path[depth].p_ext->ee_block; 16837e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 16847e028976SAvantika Mathur if (err) 1685a86c6181SAlex Tomas return err; 1686a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 16877e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 16887e028976SAvantika Mathur if (err) 1689a86c6181SAlex Tomas return err; 1690a86c6181SAlex Tomas 1691a86c6181SAlex Tomas while (k--) { 1692a86c6181SAlex Tomas /* change all left-side indexes */ 1693a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1694a86c6181SAlex Tomas break; 16957e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 16967e028976SAvantika Mathur if (err) 1697a86c6181SAlex Tomas break; 1698a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 16997e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 17007e028976SAvantika Mathur if (err) 1701a86c6181SAlex Tomas break; 1702a86c6181SAlex Tomas } 1703a86c6181SAlex Tomas 1704a86c6181SAlex Tomas return err; 1705a86c6181SAlex Tomas } 1706a86c6181SAlex Tomas 170743f81677SEric Biggers static int ext4_can_extents_be_merged(struct inode *inode, 170843f81677SEric Biggers struct ext4_extent *ex1, 1709a86c6181SAlex Tomas struct ext4_extent *ex2) 1710a86c6181SAlex Tomas { 1711da0169b3SEric Sandeen unsigned short ext1_ee_len, ext2_ee_len; 1712a2df2a63SAmit Arora 1713556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) 1714a2df2a63SAmit Arora return 0; 1715a2df2a63SAmit Arora 1716a2df2a63SAmit Arora ext1_ee_len = ext4_ext_get_actual_len(ex1); 1717a2df2a63SAmit Arora ext2_ee_len = ext4_ext_get_actual_len(ex2); 1718a2df2a63SAmit Arora 1719a2df2a63SAmit Arora if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 172063f57933SAndrew Morton le32_to_cpu(ex2->ee_block)) 1721a86c6181SAlex Tomas return 0; 1722a86c6181SAlex Tomas 1723da0169b3SEric Sandeen if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) 1724471d4011SSuparna Bhattacharya return 0; 1725378f32baSMatthew 
Bobrowski 1726556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex1) && 1727378f32baSMatthew Bobrowski ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN) 1728a9b82415SDarrick J. Wong return 0; 1729bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 1730b939e376SAneesh Kumar K.V if (ext1_ee_len >= 4) 1731a86c6181SAlex Tomas return 0; 1732a86c6181SAlex Tomas #endif 1733a86c6181SAlex Tomas 1734bf89d16fSTheodore Ts'o if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1735a86c6181SAlex Tomas return 1; 1736a86c6181SAlex Tomas return 0; 1737a86c6181SAlex Tomas } 1738a86c6181SAlex Tomas 1739a86c6181SAlex Tomas /* 174056055d3aSAmit Arora * This function tries to merge the "ex" extent to the next extent in the tree. 174156055d3aSAmit Arora * It always tries to merge towards right. If you want to merge towards 174256055d3aSAmit Arora * left, pass "ex - 1" as argument instead of "ex". 174356055d3aSAmit Arora * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 174456055d3aSAmit Arora * 1 if they got merged. 174556055d3aSAmit Arora */ 1746197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode, 174756055d3aSAmit Arora struct ext4_ext_path *path, 174856055d3aSAmit Arora struct ext4_extent *ex) 174956055d3aSAmit Arora { 175056055d3aSAmit Arora struct ext4_extent_header *eh; 175156055d3aSAmit Arora unsigned int depth, len; 1752556615dcSLukas Czerner int merge_done = 0, unwritten; 175356055d3aSAmit Arora 175456055d3aSAmit Arora depth = ext_depth(inode); 175556055d3aSAmit Arora BUG_ON(path[depth].p_hdr == NULL); 175656055d3aSAmit Arora eh = path[depth].p_hdr; 175756055d3aSAmit Arora 175856055d3aSAmit Arora while (ex < EXT_LAST_EXTENT(eh)) { 175956055d3aSAmit Arora if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 176056055d3aSAmit Arora break; 176156055d3aSAmit Arora /* merge with next extent! */ 1762556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 176356055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 176456055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1)); 1765556615dcSLukas Czerner if (unwritten) 1766556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 176756055d3aSAmit Arora 176856055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) { 176956055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1) 177056055d3aSAmit Arora * sizeof(struct ext4_extent); 177156055d3aSAmit Arora memmove(ex + 1, ex + 2, len); 177256055d3aSAmit Arora } 1773e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1); 177456055d3aSAmit Arora merge_done = 1; 177556055d3aSAmit Arora WARN_ON(eh->eh_entries == 0); 177656055d3aSAmit Arora if (!eh->eh_entries) 177724676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 177856055d3aSAmit Arora } 177956055d3aSAmit Arora 178056055d3aSAmit Arora return merge_done; 178156055d3aSAmit Arora } 178256055d3aSAmit Arora 178356055d3aSAmit Arora /* 1784ecb94f5fSTheodore Ts'o * This function does a very simple check to see if we can collapse 1785ecb94f5fSTheodore Ts'o * an extent tree with a single extent tree leaf block into the inode. 
1786ecb94f5fSTheodore Ts'o */ 1787ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle, 1788ecb94f5fSTheodore Ts'o struct inode *inode, 1789ecb94f5fSTheodore Ts'o struct ext4_ext_path *path) 1790ecb94f5fSTheodore Ts'o { 1791ecb94f5fSTheodore Ts'o size_t s; 1792ecb94f5fSTheodore Ts'o unsigned max_root = ext4_ext_space_root(inode, 0); 1793ecb94f5fSTheodore Ts'o ext4_fsblk_t blk; 1794ecb94f5fSTheodore Ts'o 1795ecb94f5fSTheodore Ts'o if ((path[0].p_depth != 1) || 1796ecb94f5fSTheodore Ts'o (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || 1797ecb94f5fSTheodore Ts'o (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) 1798ecb94f5fSTheodore Ts'o return; 1799ecb94f5fSTheodore Ts'o 1800ecb94f5fSTheodore Ts'o /* 1801ecb94f5fSTheodore Ts'o * We need to modify the block allocation bitmap and the block 1802ecb94f5fSTheodore Ts'o * group descriptor to release the extent tree block. If we 1803ecb94f5fSTheodore Ts'o * can't get the journal credits, give up. 1804ecb94f5fSTheodore Ts'o */ 180583448bdfSJan Kara if (ext4_journal_extend(handle, 2, 180683448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, 1))) 1807ecb94f5fSTheodore Ts'o return; 1808ecb94f5fSTheodore Ts'o 1809ecb94f5fSTheodore Ts'o /* 1810ecb94f5fSTheodore Ts'o * Copy the extent data up to the inode 1811ecb94f5fSTheodore Ts'o */ 1812ecb94f5fSTheodore Ts'o blk = ext4_idx_pblock(path[0].p_idx); 1813ecb94f5fSTheodore Ts'o s = le16_to_cpu(path[1].p_hdr->eh_entries) * 1814ecb94f5fSTheodore Ts'o sizeof(struct ext4_extent_idx); 1815ecb94f5fSTheodore Ts'o s += sizeof(struct ext4_extent_header); 1816ecb94f5fSTheodore Ts'o 181710809df8STheodore Ts'o path[1].p_maxdepth = path[0].p_maxdepth; 1818ecb94f5fSTheodore Ts'o memcpy(path[0].p_hdr, path[1].p_hdr, s); 1819ecb94f5fSTheodore Ts'o path[0].p_depth = 0; 1820ecb94f5fSTheodore Ts'o path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + 1821ecb94f5fSTheodore Ts'o (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); 1822ecb94f5fSTheodore Ts'o path[0].p_hdr->eh_max = cpu_to_le16(max_root); 1823ecb94f5fSTheodore Ts'o 1824ecb94f5fSTheodore Ts'o brelse(path[1].p_bh); 1825ecb94f5fSTheodore Ts'o ext4_free_blocks(handle, inode, NULL, blk, 1, 182671d4f7d0STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 1827ecb94f5fSTheodore Ts'o } 1828ecb94f5fSTheodore Ts'o 1829ecb94f5fSTheodore Ts'o /* 1830adde81cfSEric Biggers * This function tries to merge the @ex extent to neighbours in the tree, then 1831adde81cfSEric Biggers * tries to collapse the extent tree into the inode. 
1832197217a5SYongqiang Yang */ 1833ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle, 1834ecb94f5fSTheodore Ts'o struct inode *inode, 1835197217a5SYongqiang Yang struct ext4_ext_path *path, 1836adde81cfSEric Biggers struct ext4_extent *ex) 1837adde81cfSEric Biggers { 1838197217a5SYongqiang Yang struct ext4_extent_header *eh; 1839197217a5SYongqiang Yang unsigned int depth; 1840197217a5SYongqiang Yang int merge_done = 0; 1841197217a5SYongqiang Yang 1842197217a5SYongqiang Yang depth = ext_depth(inode); 1843197217a5SYongqiang Yang BUG_ON(path[depth].p_hdr == NULL); 1844197217a5SYongqiang Yang eh = path[depth].p_hdr; 1845197217a5SYongqiang Yang 1846197217a5SYongqiang Yang if (ex > EXT_FIRST_EXTENT(eh)) 1847197217a5SYongqiang Yang merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1848197217a5SYongqiang Yang 1849197217a5SYongqiang Yang if (!merge_done) 1850ecb94f5fSTheodore Ts'o (void) ext4_ext_try_to_merge_right(inode, path, ex); 1851197217a5SYongqiang Yang 1852ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge_up(handle, inode, path); 1853197217a5SYongqiang Yang } 1854197217a5SYongqiang Yang 1855197217a5SYongqiang Yang /* 185625d14f98SAmit Arora * check if a portion of the "newext" extent overlaps with an 185725d14f98SAmit Arora * existing extent. 185825d14f98SAmit Arora * 185925d14f98SAmit Arora * If there is an overlap discovered, it updates the length of the newext 186025d14f98SAmit Arora * such that there will be no overlap, and then returns 1. 186125d14f98SAmit Arora * If there is no overlap found, it returns 0. 186225d14f98SAmit Arora */ 18634d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 18644d33b1efSTheodore Ts'o struct inode *inode, 186525d14f98SAmit Arora struct ext4_extent *newext, 186625d14f98SAmit Arora struct ext4_ext_path *path) 186725d14f98SAmit Arora { 1868725d26d3SAneesh Kumar K.V ext4_lblk_t b1, b2; 186925d14f98SAmit Arora unsigned int depth, len1; 187025d14f98SAmit Arora unsigned int ret = 0; 187125d14f98SAmit Arora 187225d14f98SAmit Arora b1 = le32_to_cpu(newext->ee_block); 1873a2df2a63SAmit Arora len1 = ext4_ext_get_actual_len(newext); 187425d14f98SAmit Arora depth = ext_depth(inode); 187525d14f98SAmit Arora if (!path[depth].p_ext) 187625d14f98SAmit Arora goto out; 1877f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); 187825d14f98SAmit Arora 187925d14f98SAmit Arora /* 188025d14f98SAmit Arora * get the next allocated block if the extent in the path 188125d14f98SAmit Arora * is before the requested block(s) 188225d14f98SAmit Arora */ 188325d14f98SAmit Arora if (b2 < b1) { 188425d14f98SAmit Arora b2 = ext4_ext_next_allocated_block(path); 1885f17722f9SLukas Czerner if (b2 == EXT_MAX_BLOCKS) 188625d14f98SAmit Arora goto out; 1887f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, b2); 188825d14f98SAmit Arora } 188925d14f98SAmit Arora 1890725d26d3SAneesh Kumar K.V /* check for wrap through zero on extent logical start block*/ 189125d14f98SAmit Arora if (b1 + len1 < b1) { 1892f17722f9SLukas Czerner len1 = EXT_MAX_BLOCKS - b1; 189325d14f98SAmit Arora newext->ee_len = cpu_to_le16(len1); 189425d14f98SAmit Arora ret = 1; 189525d14f98SAmit Arora } 189625d14f98SAmit Arora 189725d14f98SAmit Arora /* check for overlap */ 189825d14f98SAmit Arora if (b1 + len1 > b2) { 189925d14f98SAmit Arora newext->ee_len = cpu_to_le16(b2 - b1); 190025d14f98SAmit Arora ret = 1; 190125d14f98SAmit Arora } 190225d14f98SAmit Arora out: 190325d14f98SAmit Arora return ret; 190425d14f98SAmit Arora } 
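/*
 * [Editor's illustration -- assumed numbers, not from the source.]
 * How the trimming in ext4_ext_check_overlap() above plays out: with a
 * neighbouring extent starting at b2 == 120 and a request newext of
 * b1 == 100, len1 == 50, the test b1 + len1 > b2 fires and
 * newext->ee_len is trimmed to cpu_to_le16(120 - 100), i.e. 20 blocks,
 * so the insert no longer collides. The earlier wrap test handles the
 * corner where b1 + len1 overflows the 32-bit logical space; len1 is
 * then clamped to EXT_MAX_BLOCKS - b1.
 */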
190525d14f98SAmit Arora 190625d14f98SAmit Arora /* 1907d0d856e8SRandy Dunlap * ext4_ext_insert_extent: 1908d0d856e8SRandy Dunlap * tries to merge requested extent into the existing extent or 1909d0d856e8SRandy Dunlap * inserts requested extent as new one into the tree, 1910d0d856e8SRandy Dunlap * creating new leaf in the no-space case. 1911a86c6181SAlex Tomas */ 1912a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1913dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 1914107a7bd3STheodore Ts'o struct ext4_extent *newext, int gb_flags) 1915a86c6181SAlex Tomas { 1916dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 1917a86c6181SAlex Tomas struct ext4_extent_header *eh; 1918a86c6181SAlex Tomas struct ext4_extent *ex, *fex; 1919a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */ 1920a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL; 1921725d26d3SAneesh Kumar K.V int depth, len, err; 1922725d26d3SAneesh Kumar K.V ext4_lblk_t next; 1923556615dcSLukas Czerner int mb_flags = 0, unwritten; 1924a86c6181SAlex Tomas 1925e3cf5d5dSTheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 1926e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_DELALLOC_RESERVED; 1927273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1928273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 19296a797d27SDarrick J. Wong return -EFSCORRUPTED; 1930273df556SFrank Mayhar } 1931a86c6181SAlex Tomas depth = ext_depth(inode); 1932a86c6181SAlex Tomas ex = path[depth].p_ext; 1933be8981beSLukas Czerner eh = path[depth].p_hdr; 1934273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1935273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 19366a797d27SDarrick J. Wong return -EFSCORRUPTED; 1937273df556SFrank Mayhar } 1938a86c6181SAlex Tomas 1939a86c6181SAlex Tomas /* try to insert block into found extent and return */ 1940107a7bd3STheodore Ts'o if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { 1941be8981beSLukas Czerner 1942be8981beSLukas Czerner /* 1943be8981beSLukas Czerner * Try to see whether we should rather test the extent on 1944be8981beSLukas Czerner * right from ex, or from the left of ex. This is because 1945ed8a1a76STheodore Ts'o * ext4_find_extent() can return either extent on the 1946be8981beSLukas Czerner * left, or on the right from the searched position. This 1947be8981beSLukas Czerner * will make merging more effective.
1948be8981beSLukas Czerner */ 1949be8981beSLukas Czerner if (ex < EXT_LAST_EXTENT(eh) && 1950be8981beSLukas Czerner (le32_to_cpu(ex->ee_block) + 1951be8981beSLukas Czerner ext4_ext_get_actual_len(ex) < 1952be8981beSLukas Czerner le32_to_cpu(newext->ee_block))) { 1953be8981beSLukas Czerner ex += 1; 1954be8981beSLukas Czerner goto prepend; 1955be8981beSLukas Czerner } else if ((ex > EXT_FIRST_EXTENT(eh)) && 1956be8981beSLukas Czerner (le32_to_cpu(newext->ee_block) + 1957be8981beSLukas Czerner ext4_ext_get_actual_len(newext) < 1958be8981beSLukas Czerner le32_to_cpu(ex->ee_block))) 1959be8981beSLukas Czerner ex -= 1; 1960be8981beSLukas Czerner 1961be8981beSLukas Czerner /* Try to append newex to the ex */ 1962be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, ex, newext)) { 1963be8981beSLukas Czerner ext_debug("append [%d]%d block to %u:[%d]%d" 1964be8981beSLukas Czerner "(from %llu)\n", 1965556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 1966a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 1967a86c6181SAlex Tomas le32_to_cpu(ex->ee_block), 1968556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 1969bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), 1970bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 1971be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 1972be8981beSLukas Czerner path + depth); 19737e028976SAvantika Mathur if (err) 1974a86c6181SAlex Tomas return err; 1975556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 1976a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1977a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext)); 1978556615dcSLukas Czerner if (unwritten) 1979556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 1980a86c6181SAlex Tomas eh = path[depth].p_hdr; 1981a86c6181SAlex Tomas nearex = ex; 1982a86c6181SAlex Tomas goto merge; 1983a86c6181SAlex Tomas } 1984a86c6181SAlex Tomas 1985be8981beSLukas Czerner prepend: 1986be8981beSLukas Czerner /* Try to prepend newex to the ex */ 1987be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, newext, ex)) { 1988be8981beSLukas Czerner ext_debug("prepend %u[%d]%d block to %u:[%d]%d" 1989be8981beSLukas Czerner "(from %llu)\n", 1990be8981beSLukas Czerner le32_to_cpu(newext->ee_block), 1991556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 1992be8981beSLukas Czerner ext4_ext_get_actual_len(newext), 1993be8981beSLukas Czerner le32_to_cpu(ex->ee_block), 1994556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 1995be8981beSLukas Czerner ext4_ext_get_actual_len(ex), 1996be8981beSLukas Czerner ext4_ext_pblock(ex)); 1997be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 1998be8981beSLukas Czerner path + depth); 1999be8981beSLukas Czerner if (err) 2000be8981beSLukas Czerner return err; 2001be8981beSLukas Czerner 2002556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 2003be8981beSLukas Czerner ex->ee_block = newext->ee_block; 2004be8981beSLukas Czerner ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 2005be8981beSLukas Czerner ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2006be8981beSLukas Czerner + ext4_ext_get_actual_len(newext)); 2007556615dcSLukas Czerner if (unwritten) 2008556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 2009be8981beSLukas Czerner eh = path[depth].p_hdr; 2010be8981beSLukas Czerner nearex = ex; 2011be8981beSLukas Czerner goto merge; 2012be8981beSLukas Czerner } 2013be8981beSLukas Czerner } 2014be8981beSLukas Czerner 2015a86c6181SAlex Tomas depth = ext_depth(inode); 2016a86c6181SAlex Tomas eh = path[depth].p_hdr; 
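/*
 * [Editor's note.] No in-place merge with ex was possible at this
 * point, so the code below falls back in order: use free room in the
 * current leaf (has_space), then probe the next leaf, and finally
 * grow the tree through ext4_ext_create_new_leaf().
 */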
2017a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 2018a86c6181SAlex Tomas goto has_space; 2019a86c6181SAlex Tomas 2020a86c6181SAlex Tomas /* probably next leaf has space for us? */ 2021a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh); 2022598dbdf2SRobin Dong next = EXT_MAX_BLOCKS; 2023598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 20245718789dSRobin Dong next = ext4_ext_next_leaf_block(path); 2025598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) { 202632de6756SYongqiang Yang ext_debug("next leaf block - %u\n", next); 2027a86c6181SAlex Tomas BUG_ON(npath != NULL); 2028ed8a1a76STheodore Ts'o npath = ext4_find_extent(inode, next, NULL, 0); 2029a86c6181SAlex Tomas if (IS_ERR(npath)) 2030a86c6181SAlex Tomas return PTR_ERR(npath); 2031a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth); 2032a86c6181SAlex Tomas eh = npath[depth].p_hdr; 2033a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 203425985edcSLucas De Marchi ext_debug("next leaf isn't full(%d)\n", 2035a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries)); 2036a86c6181SAlex Tomas path = npath; 2037ffb505ffSRobin Dong goto has_space; 2038a86c6181SAlex Tomas } 2039a86c6181SAlex Tomas ext_debug("next leaf has no free space(%d,%d)\n", 2040a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2041a86c6181SAlex Tomas } 2042a86c6181SAlex Tomas 2043a86c6181SAlex Tomas /* 2044d0d856e8SRandy Dunlap * There is no free space in the found leaf. 2045d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree. 2046a86c6181SAlex Tomas */ 2047107a7bd3STheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2048e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_USE_RESERVED; 2049107a7bd3STheodore Ts'o err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2050dfe50809STheodore Ts'o ppath, newext); 2051a86c6181SAlex Tomas if (err) 2052a86c6181SAlex Tomas goto cleanup; 2053a86c6181SAlex Tomas depth = ext_depth(inode); 2054a86c6181SAlex Tomas eh = path[depth].p_hdr; 2055a86c6181SAlex Tomas 2056a86c6181SAlex Tomas has_space: 2057a86c6181SAlex Tomas nearex = path[depth].p_ext; 2058a86c6181SAlex Tomas 20597e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 20607e028976SAvantika Mathur if (err) 2061a86c6181SAlex Tomas goto cleanup; 2062a86c6181SAlex Tomas 2063a86c6181SAlex Tomas if (!nearex) { 2064a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */ 206532de6756SYongqiang Yang ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", 2066a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2067bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2068556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2069a2df2a63SAmit Arora ext4_ext_get_actual_len(newext)); 207080e675f9SEric Gouriou nearex = EXT_FIRST_EXTENT(eh); 2071a86c6181SAlex Tomas } else { 207280e675f9SEric Gouriou if (le32_to_cpu(newext->ee_block) 207380e675f9SEric Gouriou > le32_to_cpu(nearex->ee_block)) { 207480e675f9SEric Gouriou /* Insert after */ 207532de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d before: " 207632de6756SYongqiang Yang "nearest %p\n", 2077a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2078bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2079556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2080a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 208180e675f9SEric Gouriou nearex); 208280e675f9SEric Gouriou nearex++; 208380e675f9SEric Gouriou } else { 208480e675f9SEric Gouriou /* Insert before */ 
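/*
 * [Editor's note.] In both the insert-after and insert-before cases,
 * nearex ends up at the slot the new extent should occupy; the
 * memmove below shifts the remaining "len" extents right by one to
 * open that slot before newext's fields are copied in.
 */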
208580e675f9SEric Gouriou BUG_ON(newext->ee_block == nearex->ee_block); 208632de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d after: " 208732de6756SYongqiang Yang "nearest %p\n", 208880e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 208980e675f9SEric Gouriou ext4_ext_pblock(newext), 2090556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 209180e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 209280e675f9SEric Gouriou nearex); 209380e675f9SEric Gouriou } 209480e675f9SEric Gouriou len = EXT_LAST_EXTENT(eh) - nearex + 1; 209580e675f9SEric Gouriou if (len > 0) { 209632de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d: " 209780e675f9SEric Gouriou "move %d extents from 0x%p to 0x%p\n", 209880e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 209980e675f9SEric Gouriou ext4_ext_pblock(newext), 2100556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 210180e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 210280e675f9SEric Gouriou len, nearex, nearex + 1); 210380e675f9SEric Gouriou memmove(nearex + 1, nearex, 210480e675f9SEric Gouriou len * sizeof(struct ext4_extent)); 210580e675f9SEric Gouriou } 2106a86c6181SAlex Tomas } 2107a86c6181SAlex Tomas 2108e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1); 210980e675f9SEric Gouriou path[depth].p_ext = nearex; 2110a86c6181SAlex Tomas nearex->ee_block = newext->ee_block; 2111bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2112a86c6181SAlex Tomas nearex->ee_len = newext->ee_len; 2113a86c6181SAlex Tomas 2114a86c6181SAlex Tomas merge: 2115e7bcf823SHaiboLiu /* try to merge extents */ 2116107a7bd3STheodore Ts'o if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) 2117ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, nearex); 2118a86c6181SAlex Tomas 2119a86c6181SAlex Tomas 2120a86c6181SAlex Tomas /* time to correct all indexes above */ 2121a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2122a86c6181SAlex Tomas if (err) 2123a86c6181SAlex Tomas goto cleanup; 2124a86c6181SAlex Tomas 2125ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2126a86c6181SAlex Tomas 2127a86c6181SAlex Tomas cleanup: 2128a86c6181SAlex Tomas ext4_ext_drop_refs(npath); 2129a86c6181SAlex Tomas kfree(npath); 2130a86c6181SAlex Tomas return err; 2131a86c6181SAlex Tomas } 2132a86c6181SAlex Tomas 213391dd8c11SLukas Czerner static int ext4_fill_fiemap_extents(struct inode *inode, 213491dd8c11SLukas Czerner ext4_lblk_t block, ext4_lblk_t num, 213591dd8c11SLukas Czerner struct fiemap_extent_info *fieinfo) 21366873fa0dSEric Sandeen { 21376873fa0dSEric Sandeen struct ext4_ext_path *path = NULL; 21386873fa0dSEric Sandeen struct ext4_extent *ex; 213969eb33dcSZheng Liu struct extent_status es; 214091dd8c11SLukas Czerner ext4_lblk_t next, next_del, start = 0, end = 0; 21416873fa0dSEric Sandeen ext4_lblk_t last = block + num; 214291dd8c11SLukas Czerner int exists, depth = 0, err = 0; 214391dd8c11SLukas Czerner unsigned int flags = 0; 214491dd8c11SLukas Czerner unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 21456873fa0dSEric Sandeen 2146f17722f9SLukas Czerner while (block < last && block != EXT_MAX_BLOCKS) { 21476873fa0dSEric Sandeen num = last - block; 21486873fa0dSEric Sandeen /* find extent for this block */ 2149fab3a549STheodore Ts'o down_read(&EXT4_I(inode)->i_data_sem); 215091dd8c11SLukas Czerner 2151ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, block, &path, 0); 21526873fa0dSEric Sandeen if (IS_ERR(path)) { 215391dd8c11SLukas Czerner 
up_read(&EXT4_I(inode)->i_data_sem); 21546873fa0dSEric Sandeen err = PTR_ERR(path); 21556873fa0dSEric Sandeen path = NULL; 21566873fa0dSEric Sandeen break; 21576873fa0dSEric Sandeen } 21586873fa0dSEric Sandeen 21596873fa0dSEric Sandeen depth = ext_depth(inode); 2160273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 216191dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 2162273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 21636a797d27SDarrick J. Wong err = -EFSCORRUPTED; 2164273df556SFrank Mayhar break; 2165273df556SFrank Mayhar } 21666873fa0dSEric Sandeen ex = path[depth].p_ext; 21676873fa0dSEric Sandeen next = ext4_ext_next_allocated_block(path); 21686873fa0dSEric Sandeen 216991dd8c11SLukas Czerner flags = 0; 21706873fa0dSEric Sandeen exists = 0; 21716873fa0dSEric Sandeen if (!ex) { 21726873fa0dSEric Sandeen /* there is no extent yet, so try to allocate 21736873fa0dSEric Sandeen * all requested space */ 21746873fa0dSEric Sandeen start = block; 21756873fa0dSEric Sandeen end = block + num; 21766873fa0dSEric Sandeen } else if (le32_to_cpu(ex->ee_block) > block) { 21776873fa0dSEric Sandeen /* need to allocate space before found extent */ 21786873fa0dSEric Sandeen start = block; 21796873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block); 21806873fa0dSEric Sandeen if (block + num < end) 21816873fa0dSEric Sandeen end = block + num; 21826873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block) 21836873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex)) { 21846873fa0dSEric Sandeen /* need to allocate space after found extent */ 21856873fa0dSEric Sandeen start = block; 21866873fa0dSEric Sandeen end = block + num; 21876873fa0dSEric Sandeen if (end >= next) 21886873fa0dSEric Sandeen end = next; 21896873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block)) { 21906873fa0dSEric Sandeen /* 21916873fa0dSEric Sandeen * some part of requested space is covered 21926873fa0dSEric Sandeen * by found extent 21936873fa0dSEric Sandeen */ 21946873fa0dSEric Sandeen start = block; 21956873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block) 21966873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex); 21976873fa0dSEric Sandeen if (block + num < end) 21986873fa0dSEric Sandeen end = block + num; 21996873fa0dSEric Sandeen exists = 1; 22006873fa0dSEric Sandeen } else { 22016873fa0dSEric Sandeen BUG(); 22026873fa0dSEric Sandeen } 22036873fa0dSEric Sandeen BUG_ON(end <= start); 22046873fa0dSEric Sandeen 22056873fa0dSEric Sandeen if (!exists) { 220669eb33dcSZheng Liu es.es_lblk = start; 220769eb33dcSZheng Liu es.es_len = end - start; 220869eb33dcSZheng Liu es.es_pblk = 0; 22096873fa0dSEric Sandeen } else { 221069eb33dcSZheng Liu es.es_lblk = le32_to_cpu(ex->ee_block); 221169eb33dcSZheng Liu es.es_len = ext4_ext_get_actual_len(ex); 221269eb33dcSZheng Liu es.es_pblk = ext4_ext_pblock(ex); 2213556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex)) 221491dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_UNWRITTEN; 22156873fa0dSEric Sandeen } 22166873fa0dSEric Sandeen 221791dd8c11SLukas Czerner /* 221869eb33dcSZheng Liu * Find delayed extent and update es accordingly. We call 221969eb33dcSZheng Liu * it even in !exists case to find out whether es is the 222091dd8c11SLukas Czerner * last existing extent or not. 
222191dd8c11SLukas Czerner */ 222269eb33dcSZheng Liu next_del = ext4_find_delayed_extent(inode, &es); 222391dd8c11SLukas Czerner if (!exists && next_del) { 222491dd8c11SLukas Czerner exists = 1; 222572dac95dSJie Liu flags |= (FIEMAP_EXTENT_DELALLOC | 222672dac95dSJie Liu FIEMAP_EXTENT_UNKNOWN); 222791dd8c11SLukas Czerner } 222891dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 222991dd8c11SLukas Czerner 223069eb33dcSZheng Liu if (unlikely(es.es_len == 0)) { 223169eb33dcSZheng Liu EXT4_ERROR_INODE(inode, "es.es_len == 0"); 22326a797d27SDarrick J. Wong err = -EFSCORRUPTED; 2233273df556SFrank Mayhar break; 2234273df556SFrank Mayhar } 22356873fa0dSEric Sandeen 2236f7fec032SZheng Liu /* 2237f7fec032SZheng Liu * This is possible iff next == next_del == EXT_MAX_BLOCKS. 2238f7fec032SZheng Liu * We need to check next == EXT_MAX_BLOCKS because an extent 2239f7fec032SZheng Liu * can have both unwritten and delayed status at the same 2240f7fec032SZheng Liu * time: when a delayed-allocated extent is then allocated 2241f7fec032SZheng Liu * by fallocate, the status tree tracks both states in a 2242f7fec032SZheng Liu * single extent. 2243f7fec032SZheng Liu * 2244f7fec032SZheng Liu * So we could return an unwritten and delayed extent whose 2245f7fec032SZheng Liu * block is equal to 'next'. 2246f7fec032SZheng Liu */ 2247f7fec032SZheng Liu if (next == next_del && next == EXT_MAX_BLOCKS) { 224891dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_LAST; 224991dd8c11SLukas Czerner if (unlikely(next_del != EXT_MAX_BLOCKS || 225091dd8c11SLukas Czerner next != EXT_MAX_BLOCKS)) { 225191dd8c11SLukas Czerner EXT4_ERROR_INODE(inode, 225291dd8c11SLukas Czerner "next extent == %u, next " 225391dd8c11SLukas Czerner "delalloc extent = %u", 225491dd8c11SLukas Czerner next, next_del); 22556a797d27SDarrick J.
Wong err = -EFSCORRUPTED; 225691dd8c11SLukas Czerner break; 225791dd8c11SLukas Czerner } 225891dd8c11SLukas Czerner } 225991dd8c11SLukas Czerner 226091dd8c11SLukas Czerner if (exists) { 226191dd8c11SLukas Czerner err = fiemap_fill_next_extent(fieinfo, 226269eb33dcSZheng Liu (__u64)es.es_lblk << blksize_bits, 226369eb33dcSZheng Liu (__u64)es.es_pblk << blksize_bits, 226469eb33dcSZheng Liu (__u64)es.es_len << blksize_bits, 226591dd8c11SLukas Czerner flags); 22666873fa0dSEric Sandeen if (err < 0) 22676873fa0dSEric Sandeen break; 226891dd8c11SLukas Czerner if (err == 1) { 22696873fa0dSEric Sandeen err = 0; 22706873fa0dSEric Sandeen break; 22716873fa0dSEric Sandeen } 22726873fa0dSEric Sandeen } 22736873fa0dSEric Sandeen 227469eb33dcSZheng Liu block = es.es_lblk + es.es_len; 22756873fa0dSEric Sandeen } 22766873fa0dSEric Sandeen 22776873fa0dSEric Sandeen ext4_ext_drop_refs(path); 22786873fa0dSEric Sandeen kfree(path); 22796873fa0dSEric Sandeen return err; 22806873fa0dSEric Sandeen } 22816873fa0dSEric Sandeen 2282bb5835edSTheodore Ts'o static int ext4_fill_es_cache_info(struct inode *inode, 2283bb5835edSTheodore Ts'o ext4_lblk_t block, ext4_lblk_t num, 2284bb5835edSTheodore Ts'o struct fiemap_extent_info *fieinfo) 2285bb5835edSTheodore Ts'o { 2286bb5835edSTheodore Ts'o ext4_lblk_t next, end = block + num - 1; 2287bb5835edSTheodore Ts'o struct extent_status es; 2288bb5835edSTheodore Ts'o unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 2289bb5835edSTheodore Ts'o unsigned int flags; 2290bb5835edSTheodore Ts'o int err; 2291bb5835edSTheodore Ts'o 2292bb5835edSTheodore Ts'o while (block <= end) { 2293bb5835edSTheodore Ts'o next = 0; 2294bb5835edSTheodore Ts'o flags = 0; 2295bb5835edSTheodore Ts'o if (!ext4_es_lookup_extent(inode, block, &next, &es)) 2296bb5835edSTheodore Ts'o break; 2297bb5835edSTheodore Ts'o if (ext4_es_is_unwritten(&es)) 2298bb5835edSTheodore Ts'o flags |= FIEMAP_EXTENT_UNWRITTEN; 2299bb5835edSTheodore Ts'o if (ext4_es_is_delayed(&es)) 2300bb5835edSTheodore Ts'o flags |= (FIEMAP_EXTENT_DELALLOC | 2301bb5835edSTheodore Ts'o FIEMAP_EXTENT_UNKNOWN); 2302bb5835edSTheodore Ts'o if (ext4_es_is_hole(&es)) 2303bb5835edSTheodore Ts'o flags |= EXT4_FIEMAP_EXTENT_HOLE; 2304bb5835edSTheodore Ts'o if (next == 0) 2305bb5835edSTheodore Ts'o flags |= FIEMAP_EXTENT_LAST; 2306bb5835edSTheodore Ts'o if (flags & (FIEMAP_EXTENT_DELALLOC| 2307bb5835edSTheodore Ts'o EXT4_FIEMAP_EXTENT_HOLE)) 2308bb5835edSTheodore Ts'o es.es_pblk = 0; 2309bb5835edSTheodore Ts'o else 2310bb5835edSTheodore Ts'o es.es_pblk = ext4_es_pblock(&es); 2311bb5835edSTheodore Ts'o err = fiemap_fill_next_extent(fieinfo, 2312bb5835edSTheodore Ts'o (__u64)es.es_lblk << blksize_bits, 2313bb5835edSTheodore Ts'o (__u64)es.es_pblk << blksize_bits, 2314bb5835edSTheodore Ts'o (__u64)es.es_len << blksize_bits, 2315bb5835edSTheodore Ts'o flags); 2316bb5835edSTheodore Ts'o if (next == 0) 2317bb5835edSTheodore Ts'o break; 2318bb5835edSTheodore Ts'o block = next; 2319bb5835edSTheodore Ts'o if (err < 0) 2320bb5835edSTheodore Ts'o return err; 2321bb5835edSTheodore Ts'o if (err == 1) 2322bb5835edSTheodore Ts'o return 0; 2323bb5835edSTheodore Ts'o } 2324bb5835edSTheodore Ts'o return 0; 2325bb5835edSTheodore Ts'o } 2326bb5835edSTheodore Ts'o 2327bb5835edSTheodore Ts'o 2328a86c6181SAlex Tomas /* 2329140a5250SJan Kara * ext4_ext_determine_hole - determine hole around given block 2330140a5250SJan Kara * @inode: inode we lookup in 2331140a5250SJan Kara * @path: path in extent tree to @lblk 2332140a5250SJan Kara * @lblk: pointer to logical block 
2328a86c6181SAlex Tomas /* 2329140a5250SJan Kara * ext4_ext_determine_hole - determine hole around given block 2330140a5250SJan Kara * @inode: inode we lookup in 2331140a5250SJan Kara * @path: path in extent tree to @lblk 2332140a5250SJan Kara * @lblk: pointer to logical block around which we want to determine hole 2333140a5250SJan Kara * 2334140a5250SJan Kara * Determine hole length (and start if easily possible) around given logical 2335140a5250SJan Kara * block. We don't try too hard to find the beginning of the hole, but when 2336140a5250SJan Kara * @path actually points to the extent just before @lblk, we provide it. 2337140a5250SJan Kara * 2338140a5250SJan Kara * The function returns the length of a hole starting at @lblk. We update @lblk 2339140a5250SJan Kara * to the beginning of the hole if we managed to find it. 2340140a5250SJan Kara */ 2341140a5250SJan Kara static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode, 2342140a5250SJan Kara struct ext4_ext_path *path, 2343140a5250SJan Kara ext4_lblk_t *lblk) 2344140a5250SJan Kara { 2345140a5250SJan Kara int depth = ext_depth(inode); 2346140a5250SJan Kara struct ext4_extent *ex; 2347140a5250SJan Kara ext4_lblk_t len; 2348140a5250SJan Kara 2349140a5250SJan Kara ex = path[depth].p_ext; 2350140a5250SJan Kara if (ex == NULL) { 2351140a5250SJan Kara /* there is no extent yet, so gap is [0;-] */ 2352140a5250SJan Kara *lblk = 0; 2353140a5250SJan Kara len = EXT_MAX_BLOCKS; 2354140a5250SJan Kara } else if (*lblk < le32_to_cpu(ex->ee_block)) { 2355140a5250SJan Kara len = le32_to_cpu(ex->ee_block) - *lblk; 2356140a5250SJan Kara } else if (*lblk >= le32_to_cpu(ex->ee_block) 2357140a5250SJan Kara + ext4_ext_get_actual_len(ex)) { 2358140a5250SJan Kara ext4_lblk_t next; 2359140a5250SJan Kara 2360140a5250SJan Kara *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); 2361140a5250SJan Kara next = ext4_ext_next_allocated_block(path); 2362140a5250SJan Kara BUG_ON(next == *lblk); 2363140a5250SJan Kara len = next - *lblk; 2364140a5250SJan Kara } else { 2365140a5250SJan Kara BUG(); 2366140a5250SJan Kara } 2367140a5250SJan Kara return len; 2368140a5250SJan Kara } 2369140a5250SJan Kara 2370140a5250SJan Kara /* 2371d0d856e8SRandy Dunlap * ext4_ext_put_gap_in_cache: 2372d0d856e8SRandy Dunlap * calculate boundaries of the gap that the requested block fits into 2373a86c6181SAlex Tomas * and cache this gap 2374a86c6181SAlex Tomas */ 237509b88252SAvantika Mathur static void 2376140a5250SJan Kara ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start, 2377140a5250SJan Kara ext4_lblk_t hole_len) 2378a86c6181SAlex Tomas { 23792f8e0a7cSZheng Liu struct extent_status es; 2380a86c6181SAlex Tomas 2381ad431025SEric Whitney ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start, 2382140a5250SJan Kara hole_start + hole_len - 1, &es); 23832f8e0a7cSZheng Liu if (es.es_len) { 23842f8e0a7cSZheng Liu /* Is there a delayed extent containing lblock? */ 2385140a5250SJan Kara if (es.es_lblk <= hole_start) 23862f8e0a7cSZheng Liu return; 2387140a5250SJan Kara hole_len = min(es.es_lblk - hole_start, hole_len); 23882f8e0a7cSZheng Liu } 2389140a5250SJan Kara ext_debug(" -> %u:%u\n", hole_start, hole_len); 2390140a5250SJan Kara ext4_es_insert_extent(inode, hole_start, hole_len, ~0, 2391140a5250SJan Kara EXTENT_STATUS_HOLE); 2392a86c6181SAlex Tomas } 2393a86c6181SAlex Tomas
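/*
 * (Editor's illustrative sketch, not part of the original file.) The
 * three cases in ext4_ext_determine_hole() above, reduced to plain
 * integers; the toy names and the stand-in maximum are assumptions.
 */
#if 0
enum { TOY_MAX_BLOCKS = 100 };	/* stand-in for EXT_MAX_BLOCKS */

/* one toy extent covering [start, start + len); next_alloc is the
 * first allocated block after it */
static unsigned int toy_hole_len(unsigned int *lblk, unsigned int start,
				 unsigned int len, unsigned int next_alloc)
{
	if (len == 0) {			/* no extent at all: hole is [0, max) */
		*lblk = 0;
		return TOY_MAX_BLOCKS;
	}
	if (*lblk < start)		/* hole before the extent */
		return start - *lblk;
	/* hole after the extent, up to the next allocated block;
	 * a lookup inside the extent itself would be a caller bug */
	*lblk = start + len;
	return next_alloc - *lblk;
}
/* e.g. extent [20, 30), next allocation at 50: a lookup of block 35
 * yields *lblk = 30 and a hole of 20 blocks. */
#endif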
2394a86c6181SAlex Tomas /* 2395d0d856e8SRandy Dunlap * ext4_ext_rm_idx: 2396d0d856e8SRandy Dunlap * removes index from the index block. 2397a86c6181SAlex Tomas */ 23981d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2399c36575e6SForrest Liu struct ext4_ext_path *path, int depth) 2400a86c6181SAlex Tomas { 2401a86c6181SAlex Tomas int err; 2402f65e6fbaSAlex Tomas ext4_fsblk_t leaf; 2403a86c6181SAlex Tomas 2404a86c6181SAlex Tomas /* free index block */ 2405c36575e6SForrest Liu depth--; 2406c36575e6SForrest Liu path = path + depth; 2407bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx); 2408273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) { 2409273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 24106a797d27SDarrick J. Wong return -EFSCORRUPTED; 2411273df556SFrank Mayhar } 24127e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path); 24137e028976SAvantika Mathur if (err) 2414a86c6181SAlex Tomas return err; 24150e1147b0SRobin Dong 24160e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 24170e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 24180e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx); 24190e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len); 24200e1147b0SRobin Dong } 24210e1147b0SRobin Dong 2422e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1); 24237e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path); 24247e028976SAvantika Mathur if (err) 2425a86c6181SAlex Tomas return err; 24262ae02107SMingming Cao ext_debug("index is empty, remove it, free block %llu\n", leaf); 2427d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf); 2428d8990240SAditya Kali 24297dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1, 2430e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2431c36575e6SForrest Liu 2432c36575e6SForrest Liu while (--depth >= 0) { 2433c36575e6SForrest Liu if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2434c36575e6SForrest Liu break; 2435c36575e6SForrest Liu path--; 2436c36575e6SForrest Liu err = ext4_ext_get_access(handle, inode, path); 2437c36575e6SForrest Liu if (err) 2438c36575e6SForrest Liu break; 2439c36575e6SForrest Liu path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2440c36575e6SForrest Liu err = ext4_ext_dirty(handle, inode, path); 2441c36575e6SForrest Liu if (err) 2442c36575e6SForrest Liu break; 2443c36575e6SForrest Liu } 2444a86c6181SAlex Tomas return err; 2445a86c6181SAlex Tomas } 2446a86c6181SAlex Tomas
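/*
 * (Editor's illustrative sketch, not part of the original file.) The
 * memmove() in ext4_ext_rm_idx() above is ordinary array deletion:
 * everything to the right of the removed entry slides left one slot.
 * The same idiom on a toy array:
 */
#if 0
#include <string.h>

static void toy_remove_entry(int *arr, int nr, int pos)
{
	/* slide arr[pos + 1 .. nr - 1] down over arr[pos] */
	if (pos < nr - 1)
		memmove(&arr[pos], &arr[pos + 1],
			(nr - 1 - pos) * sizeof(arr[0]));
}
#endif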
2447a86c6181SAlex Tomas /* 2448ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent: 2449ee12b630SMingming Cao * This routine returns the max. credits needed to insert an extent 2450ee12b630SMingming Cao * into the extent tree. 2451ee12b630SMingming Cao * When the actual path is passed, the caller should calculate credits 2452ee12b630SMingming Cao * under i_data_sem. 2453a86c6181SAlex Tomas */ 2454525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2455a86c6181SAlex Tomas struct ext4_ext_path *path) 2456a86c6181SAlex Tomas { 2457a86c6181SAlex Tomas if (path) { 2458ee12b630SMingming Cao int depth = ext_depth(inode); 2459f3bd1f3fSMingming Cao int ret = 0; 2460ee12b630SMingming Cao 2461a86c6181SAlex Tomas /* probably there is space in the leaf? */ 2462a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2463ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2464ee12b630SMingming Cao 2465ee12b630SMingming Cao /* 2466ee12b630SMingming Cao * There is some space in the leaf; no 2467ee12b630SMingming Cao * need to account for the leaf block credit. 2468ee12b630SMingming Cao * 2469ee12b630SMingming Cao * Bitmaps and block group descriptor blocks 2470df3ab170STao Ma * and other metadata blocks still need to be 2471ee12b630SMingming Cao * accounted for. 2472ee12b630SMingming Cao */ 2473525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */ 2474ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 24755887e98bSAneesh Kumar K.V return ret; 2476ee12b630SMingming Cao } 2477ee12b630SMingming Cao } 2478ee12b630SMingming Cao 2479525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks); 2480a86c6181SAlex Tomas } 2481a86c6181SAlex Tomas 2482a86c6181SAlex Tomas /* 2483fffb2739SJan Kara * How many index/leaf blocks need to change/allocate to add @extents extents? 2484ee12b630SMingming Cao * 2485fffb2739SJan Kara * If we add a single extent, then in the worst case, each tree level 2486fffb2739SJan Kara * index/leaf needs to be changed in case of a tree split. 2487ee12b630SMingming Cao * 2488fffb2739SJan Kara * If more extents are inserted, they could cause the whole tree to split more 2489fffb2739SJan Kara * than once, but this is really rare. 2490a86c6181SAlex Tomas */ 2491fffb2739SJan Kara int ext4_ext_index_trans_blocks(struct inode *inode, int extents) 2492ee12b630SMingming Cao { 2493ee12b630SMingming Cao int index; 2494f19d5870STao Ma int depth; 2495f19d5870STao Ma 2496f19d5870STao Ma /* If we are converting the inline data, only one is needed here. */ 2497f19d5870STao Ma if (ext4_has_inline_data(inode)) 2498f19d5870STao Ma return 1; 2499f19d5870STao Ma 2500f19d5870STao Ma depth = ext_depth(inode); 2501a86c6181SAlex Tomas 2502fffb2739SJan Kara if (extents <= 1) 2503ee12b630SMingming Cao index = depth * 2; 2504ee12b630SMingming Cao else 2505ee12b630SMingming Cao index = depth * 3; 2506a86c6181SAlex Tomas 2507ee12b630SMingming Cao return index; 2508a86c6181SAlex Tomas } 2509a86c6181SAlex Tomas 2510981250caSTheodore Ts'o static inline int get_default_free_blocks_flags(struct inode *inode) 2511981250caSTheodore Ts'o { 2512ddfa17e4STahsin Erdogan if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || 2513ddfa17e4STahsin Erdogan ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE)) 2514981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 2515981250caSTheodore Ts'o else if (ext4_should_journal_data(inode)) 2516981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_FORGET; 2517981250caSTheodore Ts'o return 0; 2518981250caSTheodore Ts'o } 2519981250caSTheodore Ts'o 25209fe67149SEric Whitney /* 25219fe67149SEric Whitney * ext4_rereserve_cluster - increment the reserved cluster count when 25229fe67149SEric Whitney * freeing a cluster with a pending reservation 25239fe67149SEric Whitney * 25249fe67149SEric Whitney * @inode - file containing the cluster 25259fe67149SEric Whitney * @lblk - logical block in cluster to be reserved 25269fe67149SEric Whitney * 25279fe67149SEric Whitney * Increments the reserved cluster count and adjusts quota in a bigalloc 25289fe67149SEric Whitney * file system when freeing a partial cluster containing at least one 25299fe67149SEric Whitney * delayed and unwritten block.
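 * (Editor's aside, sketch only, not part of the original file: the
 * depth * 2 / depth * 3 estimate in ext4_ext_index_trans_blocks()
 * above, worked through.)
 */
#if 0
/* For tree depth 2, a single insert may touch one index/leaf block per
 * level on the insert path plus one more per level if that level
 * splits: 2 * 2 = 4 blocks. Bulk inserts budget 2 * 3 = 6 so repeated
 * splits are absorbed; the toy helper just mirrors that heuristic. */
static int toy_index_credits(int depth, int extents)
{
	return extents <= 1 ? depth * 2 : depth * 3;
}
#endif
/*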
A partial cluster meeting that 25309fe67149SEric Whitney * requirement will have a pending reservation. If so, the 25319fe67149SEric Whitney * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to 25329fe67149SEric Whitney * defer reserved and allocated space accounting to a subsequent call 25339fe67149SEric Whitney * to this function. 25349fe67149SEric Whitney */ 25359fe67149SEric Whitney static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk) 25369fe67149SEric Whitney { 25379fe67149SEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 25389fe67149SEric Whitney struct ext4_inode_info *ei = EXT4_I(inode); 25399fe67149SEric Whitney 25409fe67149SEric Whitney dquot_reclaim_block(inode, EXT4_C2B(sbi, 1)); 25419fe67149SEric Whitney 25429fe67149SEric Whitney spin_lock(&ei->i_block_reservation_lock); 25439fe67149SEric Whitney ei->i_reserved_data_blocks++; 25449fe67149SEric Whitney percpu_counter_add(&sbi->s_dirtyclusters_counter, 1); 25459fe67149SEric Whitney spin_unlock(&ei->i_block_reservation_lock); 25469fe67149SEric Whitney 25479fe67149SEric Whitney percpu_counter_add(&sbi->s_freeclusters_counter, 1); 25489fe67149SEric Whitney ext4_remove_pending(inode, lblk); 25499fe67149SEric Whitney } 25509fe67149SEric Whitney 2551a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2552a86c6181SAlex Tomas struct ext4_extent *ex, 25539fe67149SEric Whitney struct partial_cluster *partial, 2554725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to) 2555a86c6181SAlex Tomas { 25560aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2557a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex); 25589fe67149SEric Whitney ext4_fsblk_t last_pblk, pblk; 25599fe67149SEric Whitney ext4_lblk_t num; 25609fe67149SEric Whitney int flags; 256118888cf0SAndrey Sidorov 25629fe67149SEric Whitney /* only extent tail removal is allowed */ 25639fe67149SEric Whitney if (from < le32_to_cpu(ex->ee_block) || 25649fe67149SEric Whitney to != le32_to_cpu(ex->ee_block) + ee_len - 1) { 25659fe67149SEric Whitney ext4_error(sbi->s_sb, 25669fe67149SEric Whitney "strange request: removal(2) %u-%u from %u:%u", 25679fe67149SEric Whitney from, to, le32_to_cpu(ex->ee_block), ee_len); 25689fe67149SEric Whitney return 0; 25690aa06000STheodore Ts'o } 25700aa06000STheodore Ts'o 2571a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2572a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock); 2573a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len; 2574a86c6181SAlex Tomas sbi->s_ext_extents++; 2575a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min) 2576a86c6181SAlex Tomas sbi->s_ext_min = ee_len; 2577a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max) 2578a86c6181SAlex Tomas sbi->s_ext_max = ee_len; 2579a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max) 2580a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode); 2581a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock); 2582a86c6181SAlex Tomas #endif 25839fe67149SEric Whitney 25849fe67149SEric Whitney trace_ext4_remove_blocks(inode, ex, from, to, partial); 25859fe67149SEric Whitney 25869fe67149SEric Whitney /* 25879fe67149SEric Whitney * if we have a partial cluster, and it's different from the 25889fe67149SEric Whitney * cluster of the last block in the extent, we free it 25899fe67149SEric Whitney */ 25909fe67149SEric Whitney last_pblk = ext4_ext_pblock(ex) + ee_len - 1; 25919fe67149SEric Whitney 25929fe67149SEric Whitney if (partial->state != initial && 25939fe67149SEric Whitney partial->pclu != 
EXT4_B2C(sbi, last_pblk)) { 25949fe67149SEric Whitney if (partial->state == tofree) { 25959fe67149SEric Whitney flags = get_default_free_blocks_flags(inode); 25969fe67149SEric Whitney if (ext4_is_pending(inode, partial->lblk)) 25979fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 25989fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL, 25999fe67149SEric Whitney EXT4_C2B(sbi, partial->pclu), 26009fe67149SEric Whitney sbi->s_cluster_ratio, flags); 26019fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 26029fe67149SEric Whitney ext4_rereserve_cluster(inode, partial->lblk); 26039fe67149SEric Whitney } 26049fe67149SEric Whitney partial->state = initial; 26059fe67149SEric Whitney } 2606725d26d3SAneesh Kumar K.V 2607a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from; 26080aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num; 26099fe67149SEric Whitney 2610d23142c6SLukas Czerner /* 26119fe67149SEric Whitney * We free the partial cluster at the end of the extent (if any), 26129fe67149SEric Whitney * unless the cluster is used by another extent (partial_cluster 26139fe67149SEric Whitney * state is nofree). If a partial cluster exists here, it must be 26149fe67149SEric Whitney * shared with the last block in the extent. 2615d23142c6SLukas Czerner */ 26169fe67149SEric Whitney flags = get_default_free_blocks_flags(inode); 26179fe67149SEric Whitney 26189fe67149SEric Whitney /* partial, left end cluster aligned, right end unaligned */ 26199fe67149SEric Whitney if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) && 26209fe67149SEric Whitney (EXT4_LBLK_CMASK(sbi, to) >= from) && 26219fe67149SEric Whitney (partial->state != nofree)) { 26229fe67149SEric Whitney if (ext4_is_pending(inode, to)) 26239fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 26249fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL, 26259fe67149SEric Whitney EXT4_PBLK_CMASK(sbi, last_pblk), 26269fe67149SEric Whitney sbi->s_cluster_ratio, flags); 26279fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 26289fe67149SEric Whitney ext4_rereserve_cluster(inode, to); 26299fe67149SEric Whitney partial->state = initial; 26309fe67149SEric Whitney flags = get_default_free_blocks_flags(inode); 26319fe67149SEric Whitney } 26329fe67149SEric Whitney 2633d23142c6SLukas Czerner flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2634d23142c6SLukas Czerner 26350aa06000STheodore Ts'o /* 26369fe67149SEric Whitney * For bigalloc file systems, we never free a partial cluster 26379fe67149SEric Whitney * at the beginning of the extent. Instead, we check to see if we 26389fe67149SEric Whitney * need to free it on a subsequent call to ext4_remove_blocks, 26399fe67149SEric Whitney * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. 
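 * (Editor's aside, sketch only, not part of the original file: the
 * cluster arithmetic used throughout this function.)
 */
#if 0
/* With a bigalloc cluster ratio of 16 blocks per cluster, helpers such
 * as EXT4_LBLK_COFF() and EXT4_LBLK_CMASK() reduce to mask arithmetic;
 * the TOY_* names are stand-ins for the sketch: */
#define TOY_CLUSTER_RATIO	16
#define TOY_CLUSTER_MASK	(TOY_CLUSTER_RATIO - 1)

/* offset of a logical block within its cluster, like EXT4_LBLK_COFF */
static unsigned int toy_lblk_coff(unsigned int lblk)
{
	return lblk & TOY_CLUSTER_MASK;
}

/* first block of the cluster containing lblk, like EXT4_LBLK_CMASK */
static unsigned int toy_lblk_cmask(unsigned int lblk)
{
	return lblk & ~TOY_CLUSTER_MASK;
}
/* e.g. lblk 37: coff = 5, cmask = 32, cluster number = 37 / 16 = 2 */
#endif
/*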
26400aa06000STheodore Ts'o */ 26419fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 26429fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 26439fe67149SEric Whitney 26449fe67149SEric Whitney /* reset the partial cluster if we've freed past it */ 26459fe67149SEric Whitney if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk)) 26469fe67149SEric Whitney partial->state = initial; 26479fe67149SEric Whitney 26489fe67149SEric Whitney /* 26499fe67149SEric Whitney * If we've freed the entire extent but the beginning is not left 26509fe67149SEric Whitney * cluster aligned and is not marked as ineligible for freeing we 26519fe67149SEric Whitney * record the partial cluster at the beginning of the extent. It 26529fe67149SEric Whitney * wasn't freed by the preceding ext4_free_blocks() call, and we 26539fe67149SEric Whitney * need to look farther to the left to determine if it's to be freed 26549fe67149SEric Whitney * (not shared with another extent). Else, reset the partial 26559fe67149SEric Whitney * cluster - we're either done freeing or the beginning of the 26569fe67149SEric Whitney * extent is left cluster aligned. 26579fe67149SEric Whitney */ 26589fe67149SEric Whitney if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) { 26599fe67149SEric Whitney if (partial->state == initial) { 26609fe67149SEric Whitney partial->pclu = EXT4_B2C(sbi, pblk); 26619fe67149SEric Whitney partial->lblk = from; 26629fe67149SEric Whitney partial->state = tofree; 2663345ee947SEric Whitney } 26649fe67149SEric Whitney } else { 26659fe67149SEric Whitney partial->state = initial; 2666a86c6181SAlex Tomas } 2667a86c6181SAlex Tomas 26689fe67149SEric Whitney return 0; 26699fe67149SEric Whitney } 2670d583fb87SAllison Henderson 2671d583fb87SAllison Henderson /* 2672d583fb87SAllison Henderson * ext4_ext_rm_leaf() Removes the extents associated with the 26735bf43760SEric Whitney * blocks appearing between "start" and "end". Both "start" 26745bf43760SEric Whitney * and "end" must appear in the same extent or EFSCORRUPTED is returned. 2675d583fb87SAllison Henderson * 2676d583fb87SAllison Henderson * @handle: The journal handle 2677d583fb87SAllison Henderson * @inode: The file's inode 2678d583fb87SAllison Henderson * @path: The path to the leaf 2679d23142c6SLukas Czerner * @partial: Tracks a partial cluster which we may have to free once all 26805bf43760SEric Whitney * extents have been released from it (state tofree). If its 26815bf43760SEric Whitney * state is nofree, it's a cluster just to the right of the 26825bf43760SEric Whitney * punched region and it must not be freed.
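 * (Editor's aside, sketch only, not part of the original file: the
 * partial-cluster bookkeeping threaded through this function.)
 */
#if 0
/* A toy rendering of struct partial_cluster (declared elsewhere in
 * ext4); the toy_* field and state names mirror the real ones: */
struct toy_partial_cluster {
	unsigned long long pclu;	/* physical cluster number */
	unsigned int lblk;		/* first logical block in cluster */
	enum { toy_initial,		/* no partial cluster tracked */
	       toy_tofree,		/* free it once known unshared */
	       toy_nofree		/* shared; must be preserved */
	} state;
};
#endif
/*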
2683d583fb87SAllison Henderson * @start: The first block to remove 2684d583fb87SAllison Henderson * @end: The last block to remove 2685d583fb87SAllison Henderson */ 2686a86c6181SAlex Tomas static int 2687a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2688d23142c6SLukas Czerner struct ext4_ext_path *path, 26899fe67149SEric Whitney struct partial_cluster *partial, 26900aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end) 2691a86c6181SAlex Tomas { 26920aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2693a86c6181SAlex Tomas int err = 0, correct_index = 0; 269483448bdfSJan Kara int depth = ext_depth(inode), credits, revoke_credits; 2695a86c6181SAlex Tomas struct ext4_extent_header *eh; 2696750c9c47SDmitry Monakhov ext4_lblk_t a, b; 2697725d26d3SAneesh Kumar K.V unsigned num; 2698725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block; 2699a86c6181SAlex Tomas unsigned short ex_ee_len; 2700556615dcSLukas Czerner unsigned unwritten = 0; 2701a86c6181SAlex Tomas struct ext4_extent *ex; 2702d23142c6SLukas Czerner ext4_fsblk_t pblk; 2703a86c6181SAlex Tomas 2704c29c0ae7SAlex Tomas /* the header must be checked already in ext4_ext_remove_space() */ 27055f95d21fSLukas Czerner ext_debug("truncate since %u in leaf to %u\n", start, end); 2706a86c6181SAlex Tomas if (!path[depth].p_hdr) 2707a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2708a86c6181SAlex Tomas eh = path[depth].p_hdr; 2709273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 2710273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 27116a797d27SDarrick J. Wong return -EFSCORRUPTED; 2712273df556SFrank Mayhar } 2713a86c6181SAlex Tomas /* find where to start removing */ 27146ae06ff5SAshish Sangwan ex = path[depth].p_ext; 27156ae06ff5SAshish Sangwan if (!ex) 2716a86c6181SAlex Tomas ex = EXT_LAST_EXTENT(eh); 2717a86c6181SAlex Tomas 2718a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2719a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2720a86c6181SAlex Tomas 27219fe67149SEric Whitney trace_ext4_ext_rm_leaf(inode, start, ex, partial); 2722d8990240SAditya Kali 2723a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) && 2724a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) { 2725a41f2071SAneesh Kumar K.V 2726556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex)) 2727556615dcSLukas Czerner unwritten = 1; 2728a41f2071SAneesh Kumar K.V else 2729556615dcSLukas Czerner unwritten = 0; 2730a41f2071SAneesh Kumar K.V 2731553f9008SMingming ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2732556615dcSLukas Czerner unwritten, ex_ee_len); 2733a86c6181SAlex Tomas path[depth].p_ext = ex; 2734a86c6181SAlex Tomas 2735a86c6181SAlex Tomas a = ex_ee_block > start ? ex_ee_block : start; 2736d583fb87SAllison Henderson b = ex_ee_block+ex_ee_len - 1 < end ? 2737d583fb87SAllison Henderson ex_ee_block+ex_ee_len - 1 : end; 2738a86c6181SAlex Tomas 2739a86c6181SAlex Tomas ext_debug(" border %u:%u\n", a, b); 2740a86c6181SAlex Tomas 2741d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */ 27425f95d21fSLukas Czerner if (end < ex_ee_block) { 2743d23142c6SLukas Czerner /* 2744d23142c6SLukas Czerner * We're going to skip this extent and move to another, 2745f4226d9eSEric Whitney * so note that its first cluster is in use to avoid 2746f4226d9eSEric Whitney * freeing it when removing blocks. 
Eventually, the 2747f4226d9eSEric Whitney * right edge of the truncated/punched region will 2748f4226d9eSEric Whitney * be just to the left. 2749d23142c6SLukas Czerner */ 2750f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) { 2751d23142c6SLukas Czerner pblk = ext4_ext_pblock(ex); 27529fe67149SEric Whitney partial->pclu = EXT4_B2C(sbi, pblk); 27539fe67149SEric Whitney partial->state = nofree; 2754f4226d9eSEric Whitney } 2755d583fb87SAllison Henderson ex--; 2756d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block); 2757d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex); 2758d583fb87SAllison Henderson continue; 2759750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) { 2760dc1841d6SLukas Czerner EXT4_ERROR_INODE(inode, 2761dc1841d6SLukas Czerner "can not handle truncate %u:%u " 2762dc1841d6SLukas Czerner "on extent %u:%u", 2763dc1841d6SLukas Czerner start, end, ex_ee_block, 2764dc1841d6SLukas Czerner ex_ee_block + ex_ee_len - 1); 27656a797d27SDarrick J. Wong err = -EFSCORRUPTED; 2766d583fb87SAllison Henderson goto out; 2767a86c6181SAlex Tomas } else if (a != ex_ee_block) { 2768a86c6181SAlex Tomas /* remove tail of the extent */ 2769750c9c47SDmitry Monakhov num = a - ex_ee_block; 2770a86c6181SAlex Tomas } else { 2771a86c6181SAlex Tomas /* remove whole extent: excellent! */ 2772a86c6181SAlex Tomas num = 0; 2773d583fb87SAllison Henderson } 277434071da7STheodore Ts'o /* 277534071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group 277634071da7STheodore Ts'o * descriptor) for each block group; assume two block 277734071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for 277834071da7STheodore Ts'o * the worst case 277934071da7STheodore Ts'o */ 278034071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2781a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) { 2782a86c6181SAlex Tomas correct_index = 1; 2783a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1; 2784a86c6181SAlex Tomas } 27855aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 278683448bdfSJan Kara /* 278783448bdfSJan Kara * We may end up freeing some index blocks and data from the 278883448bdfSJan Kara * punched range. Note that partial clusters are accounted for 278983448bdfSJan Kara * by ext4_free_data_revoke_credits(). 
279083448bdfSJan Kara */ 279183448bdfSJan Kara revoke_credits = 279283448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, 279383448bdfSJan Kara ext_depth(inode)) + 279483448bdfSJan Kara ext4_free_data_revoke_credits(inode, b - a + 1); 2795a86c6181SAlex Tomas 2796a4130367SJan Kara err = ext4_datasem_ensure_credits(handle, inode, credits, 279783448bdfSJan Kara credits, revoke_credits); 2798a4130367SJan Kara if (err) { 2799a4130367SJan Kara if (err > 0) 2800a4130367SJan Kara err = -EAGAIN; 2801a86c6181SAlex Tomas goto out; 2802a4130367SJan Kara } 2803a86c6181SAlex Tomas 2804a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth); 2805a86c6181SAlex Tomas if (err) 2806a86c6181SAlex Tomas goto out; 2807a86c6181SAlex Tomas 28089fe67149SEric Whitney err = ext4_remove_blocks(handle, inode, ex, partial, a, b); 2809a86c6181SAlex Tomas if (err) 2810a86c6181SAlex Tomas goto out; 2811a86c6181SAlex Tomas 2812750c9c47SDmitry Monakhov if (num == 0) 2813d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */ 2814f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0); 2815a86c6181SAlex Tomas 2816a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num); 2817749269faSAmit Arora /* 2818556615dcSLukas Czerner * Do not mark unwritten if all the blocks in the 2819749269faSAmit Arora * extent have been removed. 2820749269faSAmit Arora */ 2821556615dcSLukas Czerner if (unwritten && num) 2822556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 2823d583fb87SAllison Henderson /* 2824d583fb87SAllison Henderson * If the extent was completely released, 2825d583fb87SAllison Henderson * we need to remove it from the leaf 2826d583fb87SAllison Henderson */ 2827d583fb87SAllison Henderson if (num == 0) { 2828f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) { 2829d583fb87SAllison Henderson /* 2830d583fb87SAllison Henderson * For hole punching, we need to scoot all the 2831d583fb87SAllison Henderson * extents up when an extent is removed so that 2832d583fb87SAllison Henderson * we don't have blank extents in the middle 2833d583fb87SAllison Henderson */ 2834d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2835d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2836d583fb87SAllison Henderson 2837d583fb87SAllison Henderson /* Now get rid of the one at the end */ 2838d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0, 2839d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2840d583fb87SAllison Henderson } 2841d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1); 28425bf43760SEric Whitney } 2843d583fb87SAllison Henderson 2844750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth); 2845750c9c47SDmitry Monakhov if (err) 2846750c9c47SDmitry Monakhov goto out; 2847750c9c47SDmitry Monakhov 2848bf52c6f7SYongqiang Yang ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2849bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 2850a86c6181SAlex Tomas ex--; 2851a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2852a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2853a86c6181SAlex Tomas } 2854a86c6181SAlex Tomas 2855a86c6181SAlex Tomas if (correct_index && eh->eh_entries) 2856a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2857a86c6181SAlex Tomas 28580aa06000STheodore Ts'o /* 2859ad6599abSEric Whitney * If there's a partial cluster and at least one extent remains in 2860ad6599abSEric Whitney * the leaf, free the partial cluster if it isn't shared with the 28615bf43760SEric Whitney * current
extent. If it is shared with the current extent 28629fe67149SEric Whitney * we reset the partial cluster because we've reached the start of the 28635bf43760SEric Whitney * truncated/punched region and we're done removing blocks. 28640aa06000STheodore Ts'o */ 28659fe67149SEric Whitney if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) { 28665bf43760SEric Whitney pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 28679fe67149SEric Whitney if (partial->pclu != EXT4_B2C(sbi, pblk)) { 28689fe67149SEric Whitney int flags = get_default_free_blocks_flags(inode); 28699fe67149SEric Whitney 28709fe67149SEric Whitney if (ext4_is_pending(inode, partial->lblk)) 28719fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 28720aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 28739fe67149SEric Whitney EXT4_C2B(sbi, partial->pclu), 28749fe67149SEric Whitney sbi->s_cluster_ratio, flags); 28759fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 28769fe67149SEric Whitney ext4_rereserve_cluster(inode, partial->lblk); 28775bf43760SEric Whitney } 28789fe67149SEric Whitney partial->state = initial; 28790aa06000STheodore Ts'o } 28800aa06000STheodore Ts'o 2881a86c6181SAlex Tomas /* if this leaf is free, then we should 2882a86c6181SAlex Tomas * remove it from index block above */ 2883a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2884c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, depth); 2885a86c6181SAlex Tomas 2886a86c6181SAlex Tomas out: 2887a86c6181SAlex Tomas return err; 2888a86c6181SAlex Tomas } 2889a86c6181SAlex Tomas 2890a86c6181SAlex Tomas /* 2891d0d856e8SRandy Dunlap * ext4_ext_more_to_rm: 2892d0d856e8SRandy Dunlap * returns 1 if current index has to be freed (even partial) 2893a86c6181SAlex Tomas */ 289409b88252SAvantika Mathur static int 2895a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path) 2896a86c6181SAlex Tomas { 2897a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL); 2898a86c6181SAlex Tomas 2899a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2900a86c6181SAlex Tomas return 0; 2901a86c6181SAlex Tomas 2902a86c6181SAlex Tomas /* 2903d0d856e8SRandy Dunlap * if truncate on deeper level happened, it wasn't partial, 2904a86c6181SAlex Tomas * so we have to consider current index for truncation 2905a86c6181SAlex Tomas */ 2906a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2907a86c6181SAlex Tomas return 0; 2908a86c6181SAlex Tomas return 1; 2909a86c6181SAlex Tomas } 2910a86c6181SAlex Tomas 291126a4c0c6STheodore Ts'o int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 29125f95d21fSLukas Czerner ext4_lblk_t end) 2913a86c6181SAlex Tomas { 2914f4226d9eSEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2915a86c6181SAlex Tomas int depth = ext_depth(inode); 2916968dee77SAshish Sangwan struct ext4_ext_path *path = NULL; 29179fe67149SEric Whitney struct partial_cluster partial; 2918a86c6181SAlex Tomas handle_t *handle; 29196f2080e6SDmitry Monakhov int i = 0, err = 0; 2920a86c6181SAlex Tomas 29219fe67149SEric Whitney partial.pclu = 0; 29229fe67149SEric Whitney partial.lblk = 0; 29239fe67149SEric Whitney partial.state = initial; 29249fe67149SEric Whitney 29255f95d21fSLukas Czerner ext_debug("truncate since %u to %u\n", start, end); 2926a86c6181SAlex Tomas 2927a86c6181SAlex Tomas /* probably first extent we're gonna free will be last in block */ 292883448bdfSJan Kara handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE, 
292983448bdfSJan Kara depth + 1, 293083448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, depth)); 2931a86c6181SAlex Tomas if (IS_ERR(handle)) 2932a86c6181SAlex Tomas return PTR_ERR(handle); 2933a86c6181SAlex Tomas 29340617b83fSDmitry Monakhov again: 293561801325SLukas Czerner trace_ext4_ext_remove_space(inode, start, end, depth); 2936d8990240SAditya Kali 2937a86c6181SAlex Tomas /* 29385f95d21fSLukas Czerner * Check if we are removing extents inside the extent tree. If that 29395f95d21fSLukas Czerner * is the case, we are going to punch a hole inside the extent tree 29405f95d21fSLukas Czerner * so we have to check whether we need to split the extent covering 29415f95d21fSLukas Czerner * the last block to remove so we can easily remove the part of it 29425f95d21fSLukas Czerner * in ext4_ext_rm_leaf(). 29435f95d21fSLukas Czerner */ 29445f95d21fSLukas Czerner if (end < EXT_MAX_BLOCKS - 1) { 29455f95d21fSLukas Czerner struct ext4_extent *ex; 2946f4226d9eSEric Whitney ext4_lblk_t ee_block, ex_end, lblk; 2947f4226d9eSEric Whitney ext4_fsblk_t pblk; 29485f95d21fSLukas Czerner 2949f4226d9eSEric Whitney /* find extent for or closest extent to this block */ 2950ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); 29515f95d21fSLukas Czerner if (IS_ERR(path)) { 29525f95d21fSLukas Czerner ext4_journal_stop(handle); 29535f95d21fSLukas Czerner return PTR_ERR(path); 29545f95d21fSLukas Czerner } 29555f95d21fSLukas Czerner depth = ext_depth(inode); 29566f2080e6SDmitry Monakhov /* A leaf may be missing only if the inode has no blocks at all */ 29575f95d21fSLukas Czerner ex = path[depth].p_ext; 2958968dee77SAshish Sangwan if (!ex) { 29596f2080e6SDmitry Monakhov if (depth) { 29606f2080e6SDmitry Monakhov EXT4_ERROR_INODE(inode, 29616f2080e6SDmitry Monakhov "path[%d].p_hdr == NULL", 29626f2080e6SDmitry Monakhov depth); 29636a797d27SDarrick J. Wong err = -EFSCORRUPTED; 29646f2080e6SDmitry Monakhov } 29656f2080e6SDmitry Monakhov goto out; 2966968dee77SAshish Sangwan } 29675f95d21fSLukas Czerner 29685f95d21fSLukas Czerner ee_block = le32_to_cpu(ex->ee_block); 2969f4226d9eSEric Whitney ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; 29705f95d21fSLukas Czerner 29715f95d21fSLukas Czerner /* 29725f95d21fSLukas Czerner * See if the last block is inside the extent, if so split 29735f95d21fSLukas Czerner * the extent at 'end' block so we can easily remove the 29745f95d21fSLukas Czerner * tail of the first part of the split extent in 29755f95d21fSLukas Czerner * ext4_ext_rm_leaf(). 29765f95d21fSLukas Czerner */ 2977f4226d9eSEric Whitney if (end >= ee_block && end < ex_end) { 2978f4226d9eSEric Whitney 2979f4226d9eSEric Whitney /* 2980f4226d9eSEric Whitney * If we're going to split the extent, note that 2981f4226d9eSEric Whitney * the cluster containing the block after 'end' is 2982f4226d9eSEric Whitney * in use to avoid freeing it when removing blocks. 2983f4226d9eSEric Whitney */ 2984f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) { 2985f4226d9eSEric Whitney pblk = ext4_ext_pblock(ex) + end - ee_block + 1; 29869fe67149SEric Whitney partial.pclu = EXT4_B2C(sbi, pblk); 29879fe67149SEric Whitney partial.state = nofree; 2988f4226d9eSEric Whitney } 2989f4226d9eSEric Whitney 29905f95d21fSLukas Czerner /* 29915f95d21fSLukas Czerner * Split the extent in two so that 'end' is the last 299227dd4385SLukas Czerner * block in the first new extent.
Also we should not 299327dd4385SLukas Czerner * fail removing space due to ENOSPC so try to use 299427dd4385SLukas Czerner * reserved block if that happens. 29955f95d21fSLukas Czerner */ 2996dfe50809STheodore Ts'o err = ext4_force_split_extent_at(handle, inode, &path, 2997fcf6b1b7SDmitry Monakhov end + 1, 1); 29985f95d21fSLukas Czerner if (err < 0) 29995f95d21fSLukas Czerner goto out; 3000f4226d9eSEric Whitney 30017bd75230SEric Whitney } else if (sbi->s_cluster_ratio > 1 && end >= ex_end && 30027bd75230SEric Whitney partial.state == initial) { 3003f4226d9eSEric Whitney /* 30047bd75230SEric Whitney * If we're punching, there's an extent to the right. 30057bd75230SEric Whitney * If the partial cluster hasn't been set, set it to 30067bd75230SEric Whitney * that extent's first cluster and its state to nofree 30077bd75230SEric Whitney * so it won't be freed should it contain blocks to be 30087bd75230SEric Whitney * removed. If it's already set (tofree/nofree), we're 30097bd75230SEric Whitney * retrying and keep the original partial cluster info 30107bd75230SEric Whitney * so a cluster marked tofree as a result of earlier 30117bd75230SEric Whitney * extent removal is not lost. 3012f4226d9eSEric Whitney */ 3013f4226d9eSEric Whitney lblk = ex_end + 1; 3014f4226d9eSEric Whitney err = ext4_ext_search_right(inode, path, &lblk, &pblk, 3015f4226d9eSEric Whitney &ex); 3016f4226d9eSEric Whitney if (err) 3017f4226d9eSEric Whitney goto out; 30189fe67149SEric Whitney if (pblk) { 30199fe67149SEric Whitney partial.pclu = EXT4_B2C(sbi, pblk); 30209fe67149SEric Whitney partial.state = nofree; 30219fe67149SEric Whitney } 30225f95d21fSLukas Czerner } 30235f95d21fSLukas Czerner } 30245f95d21fSLukas Czerner /* 3025d0d856e8SRandy Dunlap * We start scanning from right side, freeing all the blocks 3026d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise. 3027a86c6181SAlex Tomas */ 30280617b83fSDmitry Monakhov depth = ext_depth(inode); 3029968dee77SAshish Sangwan if (path) { 3030968dee77SAshish Sangwan int k = i = depth; 3031968dee77SAshish Sangwan while (--k > 0) 3032968dee77SAshish Sangwan path[k].p_block = 3033968dee77SAshish Sangwan le16_to_cpu(path[k].p_hdr->eh_entries)+1; 3034968dee77SAshish Sangwan } else { 30356396bb22SKees Cook path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), 3036968dee77SAshish Sangwan GFP_NOFS); 3037a86c6181SAlex Tomas if (path == NULL) { 3038a86c6181SAlex Tomas ext4_journal_stop(handle); 3039a86c6181SAlex Tomas return -ENOMEM; 3040a86c6181SAlex Tomas } 304110809df8STheodore Ts'o path[0].p_maxdepth = path[0].p_depth = depth; 3042a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode); 304389a4e48fSTheodore Ts'o i = 0; 30445f95d21fSLukas Czerner 3045c349179bSTheodore Ts'o if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { 30466a797d27SDarrick J. 
Wong err = -EFSCORRUPTED; 3047a86c6181SAlex Tomas goto out; 3048a86c6181SAlex Tomas } 3049968dee77SAshish Sangwan } 3050968dee77SAshish Sangwan err = 0; 3051a86c6181SAlex Tomas 3052a86c6181SAlex Tomas while (i >= 0 && err == 0) { 3053a86c6181SAlex Tomas if (i == depth) { 3054a86c6181SAlex Tomas /* this is leaf block */ 3055d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 30569fe67149SEric Whitney &partial, start, end); 3057d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 3058a86c6181SAlex Tomas brelse(path[i].p_bh); 3059a86c6181SAlex Tomas path[i].p_bh = NULL; 3060a86c6181SAlex Tomas i--; 3061a86c6181SAlex Tomas continue; 3062a86c6181SAlex Tomas } 3063a86c6181SAlex Tomas 3064a86c6181SAlex Tomas /* this is index block */ 3065a86c6181SAlex Tomas if (!path[i].p_hdr) { 3066a86c6181SAlex Tomas ext_debug("initialize header\n"); 3067a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh); 3068a86c6181SAlex Tomas } 3069a86c6181SAlex Tomas 3070a86c6181SAlex Tomas if (!path[i].p_idx) { 3071d0d856e8SRandy Dunlap /* this level hasn't been touched yet */ 3072a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 3073a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 3074a86c6181SAlex Tomas ext_debug("init index ptr: hdr 0x%p, num %d\n", 3075a86c6181SAlex Tomas path[i].p_hdr, 3076a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries)); 3077a86c6181SAlex Tomas } else { 3078d0d856e8SRandy Dunlap /* we were already here, see at next index */ 3079a86c6181SAlex Tomas path[i].p_idx--; 3080a86c6181SAlex Tomas } 3081a86c6181SAlex Tomas 3082a86c6181SAlex Tomas ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 3083a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr), 3084a86c6181SAlex Tomas path[i].p_idx); 3085a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) { 3086c29c0ae7SAlex Tomas struct buffer_head *bh; 3087a86c6181SAlex Tomas /* go to the next level */ 30882ae02107SMingming Cao ext_debug("move to level %d (block %llu)\n", 3089bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx)); 3090a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path)); 30917d7ea89eSTheodore Ts'o bh = read_extent_tree_block(inode, 3092107a7bd3STheodore Ts'o ext4_idx_pblock(path[i].p_idx), depth - i - 1, 3093107a7bd3STheodore Ts'o EXT4_EX_NOCACHE); 30947d7ea89eSTheodore Ts'o if (IS_ERR(bh)) { 3095a86c6181SAlex Tomas /* should we reset i_size? */ 30967d7ea89eSTheodore Ts'o err = PTR_ERR(bh); 3097a86c6181SAlex Tomas break; 3098a86c6181SAlex Tomas } 309976828c88STheodore Ts'o /* Yield here to deal with large extent trees. 310076828c88STheodore Ts'o * Should be a no-op if we did IO above. */ 310176828c88STheodore Ts'o cond_resched(); 3102c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) { 31036a797d27SDarrick J. 
Wong err = -EFSCORRUPTED; 3104c29c0ae7SAlex Tomas break; 3105c29c0ae7SAlex Tomas } 3106c29c0ae7SAlex Tomas path[i + 1].p_bh = bh; 3107a86c6181SAlex Tomas 3108d0d856e8SRandy Dunlap /* save actual number of indexes since this 3109d0d856e8SRandy Dunlap * number is changed at the next iteration */ 3110a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 3111a86c6181SAlex Tomas i++; 3112a86c6181SAlex Tomas } else { 3113d0d856e8SRandy Dunlap /* we finished processing this index, go up */ 3114a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) { 3115d0d856e8SRandy Dunlap /* index is empty, remove it; 3116a86c6181SAlex Tomas * handle must be already prepared by the 3117a86c6181SAlex Tomas * truncatei_leaf() */ 3118c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, i); 3119a86c6181SAlex Tomas } 3120d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 3121a86c6181SAlex Tomas brelse(path[i].p_bh); 3122a86c6181SAlex Tomas path[i].p_bh = NULL; 3123a86c6181SAlex Tomas i--; 3124a86c6181SAlex Tomas ext_debug("return to level %d\n", i); 3125a86c6181SAlex Tomas } 3126a86c6181SAlex Tomas } 3127a86c6181SAlex Tomas 31289fe67149SEric Whitney trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, 31299fe67149SEric Whitney path->p_hdr->eh_entries); 3130d8990240SAditya Kali 31310756b908SEric Whitney /* 31329fe67149SEric Whitney * if there's a partial cluster and we have removed the first extent 31339fe67149SEric Whitney * in the file, then we also free the partial cluster, if any 31340756b908SEric Whitney */ 31359fe67149SEric Whitney if (partial.state == tofree && err == 0) { 31369fe67149SEric Whitney int flags = get_default_free_blocks_flags(inode); 31379fe67149SEric Whitney 31389fe67149SEric Whitney if (ext4_is_pending(inode, partial.lblk)) 31399fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 31407b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL, 31419fe67149SEric Whitney EXT4_C2B(sbi, partial.pclu), 31429fe67149SEric Whitney sbi->s_cluster_ratio, flags); 31439fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 31449fe67149SEric Whitney ext4_rereserve_cluster(inode, partial.lblk); 31459fe67149SEric Whitney partial.state = initial; 31467b415bf6SAditya Kali } 31477b415bf6SAditya Kali 3148a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */ 3149a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) { 3150a86c6181SAlex Tomas /* 3151d0d856e8SRandy Dunlap * truncate to zero freed all the tree, 3152d0d856e8SRandy Dunlap * so we need to correct eh_depth 3153a86c6181SAlex Tomas */ 3154a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path); 3155a86c6181SAlex Tomas if (err == 0) { 3156a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0; 3157a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max = 315855ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0)); 3159a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path); 3160a86c6181SAlex Tomas } 3161a86c6181SAlex Tomas } 3162a86c6181SAlex Tomas out: 3163a86c6181SAlex Tomas ext4_ext_drop_refs(path); 3164a86c6181SAlex Tomas kfree(path); 3165968dee77SAshish Sangwan path = NULL; 3166dfe50809STheodore Ts'o if (err == -EAGAIN) 3167dfe50809STheodore Ts'o goto again; 3168a86c6181SAlex Tomas ext4_journal_stop(handle); 3169a86c6181SAlex Tomas 3170a86c6181SAlex Tomas return err; 3171a86c6181SAlex Tomas } 3172a86c6181SAlex Tomas 3173a86c6181SAlex Tomas /* 3174a86c6181SAlex Tomas * called at mount time 3175a86c6181SAlex 
Tomas */ 3176a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb) 3177a86c6181SAlex Tomas { 3178a86c6181SAlex Tomas /* 3179a86c6181SAlex Tomas * possible initialization would be here 3180a86c6181SAlex Tomas */ 3181a86c6181SAlex Tomas 3182e2b911c5SDarrick J. Wong if (ext4_has_feature_extents(sb)) { 318390576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 318492b97816STheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled" 3185bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 318692b97816STheodore Ts'o ", aggressive tests" 3187a86c6181SAlex Tomas #endif 3188a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 318992b97816STheodore Ts'o ", check binsearch" 3190a86c6181SAlex Tomas #endif 3191a86c6181SAlex Tomas #ifdef EXTENTS_STATS 319292b97816STheodore Ts'o ", stats" 3193a86c6181SAlex Tomas #endif 319492b97816STheodore Ts'o "\n"); 319590576c0bSTheodore Ts'o #endif 3196a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3197a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3198a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30; 3199a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0; 3200a86c6181SAlex Tomas #endif 3201a86c6181SAlex Tomas } 3202a86c6181SAlex Tomas } 3203a86c6181SAlex Tomas 3204a86c6181SAlex Tomas /* 3205a86c6181SAlex Tomas * called at umount time 3206a86c6181SAlex Tomas */ 3207a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb) 3208a86c6181SAlex Tomas { 3209e2b911c5SDarrick J. Wong if (!ext4_has_feature_extents(sb)) 3210a86c6181SAlex Tomas return; 3211a86c6181SAlex Tomas 3212a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3213a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3214a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 3215a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3216a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents, 3217a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents); 3218a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 3219a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 3220a86c6181SAlex Tomas } 3221a86c6181SAlex Tomas #endif 3222a86c6181SAlex Tomas } 3223a86c6181SAlex Tomas 3224d7b2a00cSZheng Liu static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) 3225d7b2a00cSZheng Liu { 3226d7b2a00cSZheng Liu ext4_lblk_t ee_block; 3227d7b2a00cSZheng Liu ext4_fsblk_t ee_pblock; 3228d7b2a00cSZheng Liu unsigned int ee_len; 3229d7b2a00cSZheng Liu 3230d7b2a00cSZheng Liu ee_block = le32_to_cpu(ex->ee_block); 3231d7b2a00cSZheng Liu ee_len = ext4_ext_get_actual_len(ex); 3232d7b2a00cSZheng Liu ee_pblock = ext4_ext_pblock(ex); 3233d7b2a00cSZheng Liu 3234d7b2a00cSZheng Liu if (ee_len == 0) 3235d7b2a00cSZheng Liu return 0; 3236d7b2a00cSZheng Liu 3237d7b2a00cSZheng Liu return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, 3238d7b2a00cSZheng Liu EXTENT_STATUS_WRITTEN); 3239d7b2a00cSZheng Liu } 3240d7b2a00cSZheng Liu 3241093a088bSAneesh Kumar K.V /* FIXME!! 
we need to try to merge to left or right after zero-out */ 3242093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 3243093a088bSAneesh Kumar K.V { 32442407518dSLukas Czerner ext4_fsblk_t ee_pblock; 32452407518dSLukas Czerner unsigned int ee_len; 3246093a088bSAneesh Kumar K.V 3247093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 3248bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex); 324953085facSJan Kara return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock, 325053085facSJan Kara ee_len); 3251093a088bSAneesh Kumar K.V } 3252093a088bSAneesh Kumar K.V 325347ea3bb5SYongqiang Yang /* 325447ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at given block. 325547ea3bb5SYongqiang Yang * 325647ea3bb5SYongqiang Yang * @handle: the journal handle 325747ea3bb5SYongqiang Yang * @inode: the file inode 325847ea3bb5SYongqiang Yang * @path: the path to the extent 325947ea3bb5SYongqiang Yang * @split: the logical block where the extent is split. 326047ea3bb5SYongqiang Yang * @split_flags: indicates whether the extent could be zeroed out if the split 3261556615dcSLukas Czerner * fails, and the states (initialized or unwritten) of the new extents. 326247ea3bb5SYongqiang Yang * @flags: flags used to insert the new extent into the extent tree. 326347ea3bb5SYongqiang Yang * 326447ea3bb5SYongqiang Yang * 326547ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose 326647ea3bb5SYongqiang Yang * states are determined by split_flag. 326747ea3bb5SYongqiang Yang * 326847ea3bb5SYongqiang Yang * There are two cases: 326947ea3bb5SYongqiang Yang * a> the extent is split into two extents. 327047ea3bb5SYongqiang Yang * b> no split is needed, and we just mark the extent. 327147ea3bb5SYongqiang Yang * 327247ea3bb5SYongqiang Yang * return 0 on success.
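 * (Editor's aside, sketch only, not part of the original file: the two
 * cases above on plain integers.)
 */
#if 0
struct toy_extent {
	unsigned int start;	/* first logical block */
	unsigned int len;	/* number of blocks */
};

/* case a: split [start, start + len) at 'split' into two extents;
 * 'split' is assumed to lie strictly inside the extent */
static void toy_split_at(struct toy_extent *ex, unsigned int split,
			 struct toy_extent *second)
{
	second->start = split;				/* [split, b) */
	second->len = ex->start + ex->len - split;
	ex->len = split - ex->start;			/* [a, split) */
}
/* case b (split == ex->start): the extent stays whole and only its
 * initialized/unwritten state changes. */
#endif
/*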
327347ea3bb5SYongqiang Yang */ 327447ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle, 327547ea3bb5SYongqiang Yang struct inode *inode, 3276dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 327747ea3bb5SYongqiang Yang ext4_lblk_t split, 327847ea3bb5SYongqiang Yang int split_flag, 327947ea3bb5SYongqiang Yang int flags) 328047ea3bb5SYongqiang Yang { 3281dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 328247ea3bb5SYongqiang Yang ext4_fsblk_t newblock; 328347ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 3284adb23551SZheng Liu struct ext4_extent *ex, newex, orig_ex, zero_ex; 328547ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL; 328647ea3bb5SYongqiang Yang unsigned int ee_len, depth; 328747ea3bb5SYongqiang Yang int err = 0; 328847ea3bb5SYongqiang Yang 3289dee1f973SDmitry Monakhov BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 3290dee1f973SDmitry Monakhov (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 3291dee1f973SDmitry Monakhov 329247ea3bb5SYongqiang Yang ext_debug("ext4_split_extent_at: inode %lu, logical " 329347ea3bb5SYongqiang Yang "block %llu\n", inode->i_ino, (unsigned long long)split); 329447ea3bb5SYongqiang Yang 329547ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 329647ea3bb5SYongqiang Yang 329747ea3bb5SYongqiang Yang depth = ext_depth(inode); 329847ea3bb5SYongqiang Yang ex = path[depth].p_ext; 329947ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 330047ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 330147ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex); 330247ea3bb5SYongqiang Yang 330347ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3304556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex) && 3305357b66fdSDmitry Monakhov split_flag & (EXT4_EXT_MAY_ZEROOUT | 3306556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT1 | 3307556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2)); 330847ea3bb5SYongqiang Yang 330947ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth); 331047ea3bb5SYongqiang Yang if (err) 331147ea3bb5SYongqiang Yang goto out; 331247ea3bb5SYongqiang Yang 331347ea3bb5SYongqiang Yang if (split == ee_block) { 331447ea3bb5SYongqiang Yang /* 331547ea3bb5SYongqiang Yang * case b: block @split is the block that the extent begins with; 331647ea3bb5SYongqiang Yang * we just change the state of the extent, and splitting 331747ea3bb5SYongqiang Yang * is not needed.
331847ea3bb5SYongqiang Yang */ 3319556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3320556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 332147ea3bb5SYongqiang Yang else 332247ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex); 332347ea3bb5SYongqiang Yang 332447ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3325ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 332647ea3bb5SYongqiang Yang 3327ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 332847ea3bb5SYongqiang Yang goto out; 332947ea3bb5SYongqiang Yang } 333047ea3bb5SYongqiang Yang 333147ea3bb5SYongqiang Yang /* case a */ 333247ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex)); 333347ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block); 3334556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT1) 3335556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 333647ea3bb5SYongqiang Yang 333747ea3bb5SYongqiang Yang /* 333847ea3bb5SYongqiang Yang * the path may lead to a new leaf, not to the original leaf 333947ea3bb5SYongqiang Yang * any more, after ext4_ext_insert_extent() returns. 334047ea3bb5SYongqiang Yang */ 334147ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 334247ea3bb5SYongqiang Yang if (err) 334347ea3bb5SYongqiang Yang goto fix_extent_len; 334447ea3bb5SYongqiang Yang 334547ea3bb5SYongqiang Yang ex2 = &newex; 334647ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split); 334747ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 334847ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock); 3349556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3350556615dcSLukas Czerner ext4_ext_mark_unwritten(ex2); 335147ea3bb5SYongqiang Yang 3352dfe50809STheodore Ts'o err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); 335347ea3bb5SYongqiang Yang if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3354dee1f973SDmitry Monakhov if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3355adb23551SZheng Liu if (split_flag & EXT4_EXT_DATA_VALID1) { 3356dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex2); 3357adb23551SZheng Liu zero_ex.ee_block = ex2->ee_block; 33588cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 33598cde7ad1SZheng Liu ext4_ext_get_actual_len(ex2)); 3360adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3361adb23551SZheng Liu ext4_ext_pblock(ex2)); 3362adb23551SZheng Liu } else { 3363dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex); 3364adb23551SZheng Liu zero_ex.ee_block = ex->ee_block; 33658cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 33668cde7ad1SZheng Liu ext4_ext_get_actual_len(ex)); 3367adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3368adb23551SZheng Liu ext4_ext_pblock(ex)); 3369adb23551SZheng Liu } 3370adb23551SZheng Liu } else { 337147ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex); 3372adb23551SZheng Liu zero_ex.ee_block = orig_ex.ee_block; 33738cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 33748cde7ad1SZheng Liu ext4_ext_get_actual_len(&orig_ex)); 3375adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3376adb23551SZheng Liu ext4_ext_pblock(&orig_ex)); 3377adb23551SZheng Liu } 3378dee1f973SDmitry Monakhov 337947ea3bb5SYongqiang Yang if (err) 338047ea3bb5SYongqiang Yang goto fix_extent_len; 338147ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */ 3382af1584f5SAl Viro ex->ee_len = cpu_to_le16(ee_len); 3383ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode,
path, ex); 3384ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3385adb23551SZheng Liu if (err) 3386adb23551SZheng Liu goto fix_extent_len; 3387adb23551SZheng Liu 3388adb23551SZheng Liu /* update extent status tree */ 3389d7b2a00cSZheng Liu err = ext4_zeroout_es(inode, &zero_ex); 3390adb23551SZheng Liu 339147ea3bb5SYongqiang Yang goto out; 339247ea3bb5SYongqiang Yang } else if (err) 339347ea3bb5SYongqiang Yang goto fix_extent_len; 339447ea3bb5SYongqiang Yang 339547ea3bb5SYongqiang Yang out: 339647ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 339747ea3bb5SYongqiang Yang return err; 339847ea3bb5SYongqiang Yang 339947ea3bb5SYongqiang Yang fix_extent_len: 340047ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len; 340129faed16SDmitry Monakhov ext4_ext_dirty(handle, inode, path + path->p_depth); 340247ea3bb5SYongqiang Yang return err; 340347ea3bb5SYongqiang Yang } 340447ea3bb5SYongqiang Yang 340547ea3bb5SYongqiang Yang /* 340647ea3bb5SYongqiang Yang * ext4_split_extent() splits an extent and marks the extent covered 340747ea3bb5SYongqiang Yang * by @map as @split_flag indicates 340847ea3bb5SYongqiang Yang * 340947ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (up to three) 341047ea3bb5SYongqiang Yang * There are three possibilities: 341147ea3bb5SYongqiang Yang * a> There is no split required 341247ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the extent 341347ea3bb5SYongqiang Yang * c> Splits in three extents: Someone is splitting in the middle of the extent 341447ea3bb5SYongqiang Yang * 341547ea3bb5SYongqiang Yang */ 341647ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle, 341747ea3bb5SYongqiang Yang struct inode *inode, 3418dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 341947ea3bb5SYongqiang Yang struct ext4_map_blocks *map, 342047ea3bb5SYongqiang Yang int split_flag, 342147ea3bb5SYongqiang Yang int flags) 342247ea3bb5SYongqiang Yang { 3423dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 342447ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 342547ea3bb5SYongqiang Yang struct ext4_extent *ex; 342647ea3bb5SYongqiang Yang unsigned int ee_len, depth; 342747ea3bb5SYongqiang Yang int err = 0; 3428556615dcSLukas Czerner int unwritten; 342947ea3bb5SYongqiang Yang int split_flag1, flags1; 34303a225670SZheng Liu int allocated = map->m_len; 343147ea3bb5SYongqiang Yang 343247ea3bb5SYongqiang Yang depth = ext_depth(inode); 343347ea3bb5SYongqiang Yang ex = path[depth].p_ext; 343447ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 343547ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 3436556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 343747ea3bb5SYongqiang Yang 343847ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) { 3439dee1f973SDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 344047ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3441556615dcSLukas Czerner if (unwritten) 3442556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1 | 3443556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2; 3444dee1f973SDmitry Monakhov if (split_flag & EXT4_EXT_DATA_VALID2) 3445dee1f973SDmitry Monakhov split_flag1 |= EXT4_EXT_DATA_VALID1; 3446dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath, 344747ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1); 344893917411SYongqiang Yang if (err) 344993917411SYongqiang Yang goto out; 34503a225670SZheng Liu } else {
34513a225670SZheng Liu allocated = ee_len - (map->m_lblk - ee_block); 345247ea3bb5SYongqiang Yang } 3453357b66fdSDmitry Monakhov /* 3454357b66fdSDmitry Monakhov * Updating the path is required because the previous ext4_split_extent_at() 3455357b66fdSDmitry Monakhov * may result in a split of the original leaf or an extent zeroout. 3456357b66fdSDmitry Monakhov */ 3457ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 345847ea3bb5SYongqiang Yang if (IS_ERR(path)) 345947ea3bb5SYongqiang Yang return PTR_ERR(path); 3460357b66fdSDmitry Monakhov depth = ext_depth(inode); 3461357b66fdSDmitry Monakhov ex = path[depth].p_ext; 3462a18ed359SDmitry Monakhov if (!ex) { 3463a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3464a18ed359SDmitry Monakhov (unsigned long) map->m_lblk); 34656a797d27SDarrick J. Wong return -EFSCORRUPTED; 3466a18ed359SDmitry Monakhov } 3467556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 3468357b66fdSDmitry Monakhov split_flag1 = 0; 346947ea3bb5SYongqiang Yang 347047ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) { 3471357b66fdSDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3472556615dcSLukas Czerner if (unwritten) { 3473556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1; 3474357b66fdSDmitry Monakhov split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3475556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2); 3476357b66fdSDmitry Monakhov } 3477dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath, 347847ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags); 347947ea3bb5SYongqiang Yang if (err) 348047ea3bb5SYongqiang Yang goto out; 348147ea3bb5SYongqiang Yang } 348247ea3bb5SYongqiang Yang 348347ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 348447ea3bb5SYongqiang Yang out: 34853a225670SZheng Liu return err ? err : allocated; 348647ea3bb5SYongqiang Yang } 348747ea3bb5SYongqiang Yang 348856055d3aSAmit Arora /* 3489e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write 3490556615dcSLukas Czerner * to an unwritten extent. It may result in splitting the unwritten 349156055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two 3492556615dcSLukas Czerner * unwritten). 349356055d3aSAmit Arora * There are three possibilities: 349456055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized 349556055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent 349656055d3aSAmit Arora * c> Splits in three extents: Someone is writing in the middle of the extent 34976f91bc5fSEric Gouriou * 34986f91bc5fSEric Gouriou * Pre-conditions: 3499556615dcSLukas Czerner * - The extent pointed to by 'path' is unwritten. 35006f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset 35016f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 35026f91bc5fSEric Gouriou * 35036f91bc5fSEric Gouriou * Post-conditions on success: 35046f91bc5fSEric Gouriou * - the returned value is the number of blocks beyond map->m_lblk 35056f91bc5fSEric Gouriou * that are allocated and initialized. 35066f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len.
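 *
 * Illustrative example (hypothetical numbers, not taken from the code):
 * writing blocks [104, 108) into an unwritten extent covering [100, 116)
 * is case c> above: the extent is split into [100, 104) unwritten,
 * [104, 108) initialized and [108, 116) unwritten, and the function
 * returns at least 4.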
350756055d3aSAmit Arora */ 3508725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle, 3509725d26d3SAneesh Kumar K.V struct inode *inode, 3510e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3511dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 351227dd4385SLukas Czerner int flags) 351356055d3aSAmit Arora { 3514dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 351567a5da56SZheng Liu struct ext4_sb_info *sbi; 35166f91bc5fSEric Gouriou struct ext4_extent_header *eh; 3517667eff35SYongqiang Yang struct ext4_map_blocks split_map; 35184f8caa60SJan Kara struct ext4_extent zero_ex1, zero_ex2; 3519bc2d9db4SLukas Czerner struct ext4_extent *ex, *abut_ex; 352021ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block; 3521bc2d9db4SLukas Czerner unsigned int ee_len, depth, map_len = map->m_len; 3522bc2d9db4SLukas Czerner int allocated = 0, max_zeroout = 0; 352356055d3aSAmit Arora int err = 0; 35244f8caa60SJan Kara int split_flag = EXT4_EXT_DATA_VALID2; 352521ca087aSDmitry Monakhov 352621ca087aSDmitry Monakhov ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 352721ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3528bc2d9db4SLukas Czerner (unsigned long long)map->m_lblk, map_len); 352921ca087aSDmitry Monakhov 353067a5da56SZheng Liu sbi = EXT4_SB(inode->i_sb); 353121ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 353221ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3533bc2d9db4SLukas Czerner if (eof_block < map->m_lblk + map_len) 3534bc2d9db4SLukas Czerner eof_block = map->m_lblk + map_len; 353556055d3aSAmit Arora 353656055d3aSAmit Arora depth = ext_depth(inode); 35376f91bc5fSEric Gouriou eh = path[depth].p_hdr; 353856055d3aSAmit Arora ex = path[depth].p_ext; 353956055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block); 354056055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex); 35414f8caa60SJan Kara zero_ex1.ee_len = 0; 35424f8caa60SJan Kara zero_ex2.ee_len = 0; 354321ca087aSDmitry Monakhov 35446f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 35456f91bc5fSEric Gouriou 35466f91bc5fSEric Gouriou /* Pre-conditions */ 3547556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex)); 35486f91bc5fSEric Gouriou BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 35496f91bc5fSEric Gouriou 35506f91bc5fSEric Gouriou /* 35516f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently 3552556615dcSLukas Czerner * unwritten extent to its neighbor. This is much cheaper 35536f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly 3554bc2d9db4SLukas Czerner * memmove() calls. Transferring to the left is the common case in 3555bc2d9db4SLukas Czerner * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3556bc2d9db4SLukas Czerner * followed by append writes. 35576f91bc5fSEric Gouriou * 35586f91bc5fSEric Gouriou * Limitations of the current logic: 3559bc2d9db4SLukas Czerner * - L1: we do not deal with writes covering the whole extent. 35606f91bc5fSEric Gouriou * This would require removing the extent if the transfer 35616f91bc5fSEric Gouriou * is possible. 3562bc2d9db4SLukas Czerner * - L2: we only attempt to merge with an extent stored in the 35636f91bc5fSEric Gouriou * same extent tree node. 
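 *
 * Sketch of that fast path (hypothetical numbers): with an initialized
 * neighbor [96, 100) and an unwritten extent [100, 116), a write of
 * [100, 104) grows the neighbor to [96, 104) and shrinks the unwritten
 * extent to [104, 116); no new extent is inserted and no memmove() of
 * the leaf is needed.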
35646f91bc5fSEric Gouriou */ 3565bc2d9db4SLukas Czerner if ((map->m_lblk == ee_block) && 3566bc2d9db4SLukas Czerner /* See if we can merge left */ 3567bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3568bc2d9db4SLukas Czerner (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 35696f91bc5fSEric Gouriou ext4_lblk_t prev_lblk; 35706f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk; 3571bc2d9db4SLukas Czerner unsigned int prev_len; 35726f91bc5fSEric Gouriou 3573bc2d9db4SLukas Czerner abut_ex = ex - 1; 3574bc2d9db4SLukas Czerner prev_lblk = le32_to_cpu(abut_ex->ee_block); 3575bc2d9db4SLukas Czerner prev_len = ext4_ext_get_actual_len(abut_ex); 3576bc2d9db4SLukas Czerner prev_pblk = ext4_ext_pblock(abut_ex); 35776f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex); 35786f91bc5fSEric Gouriou 35796f91bc5fSEric Gouriou /* 3580bc2d9db4SLukas Czerner * A transfer of blocks from 'ex' to 'abut_ex' is allowed 35816f91bc5fSEric Gouriou * upon those conditions: 3582bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3583bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3584bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3585bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 35866f91bc5fSEric Gouriou * overflowing the (initialized) length limit. 35876f91bc5fSEric Gouriou */ 3588556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 35896f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/ 35906f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3591bc2d9db4SLukas Czerner (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 35926f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth); 35936f91bc5fSEric Gouriou if (err) 35946f91bc5fSEric Gouriou goto out; 35956f91bc5fSEric Gouriou 35966f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode, 3597bc2d9db4SLukas Czerner map, ex, abut_ex); 35986f91bc5fSEric Gouriou 3599bc2d9db4SLukas Czerner /* Shift the start of ex by 'map_len' blocks */ 3600bc2d9db4SLukas Czerner ex->ee_block = cpu_to_le32(ee_block + map_len); 3601bc2d9db4SLukas Czerner ext4_ext_store_pblock(ex, ee_pblk + map_len); 3602bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3603556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */ 36046f91bc5fSEric Gouriou 3605bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3606bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 36076f91bc5fSEric Gouriou 3608bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3609bc2d9db4SLukas Czerner allocated = map_len; 3610bc2d9db4SLukas Czerner } 3611bc2d9db4SLukas Czerner } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3612bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3613bc2d9db4SLukas Czerner ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3614bc2d9db4SLukas Czerner /* See if we can merge right */ 3615bc2d9db4SLukas Czerner ext4_lblk_t next_lblk; 3616bc2d9db4SLukas Czerner ext4_fsblk_t next_pblk, ee_pblk; 3617bc2d9db4SLukas Czerner unsigned int next_len; 3618bc2d9db4SLukas Czerner 3619bc2d9db4SLukas Czerner abut_ex = ex + 1; 3620bc2d9db4SLukas Czerner next_lblk = le32_to_cpu(abut_ex->ee_block); 3621bc2d9db4SLukas Czerner next_len = ext4_ext_get_actual_len(abut_ex); 3622bc2d9db4SLukas Czerner next_pblk = ext4_ext_pblock(abut_ex); 3623bc2d9db4SLukas Czerner ee_pblk = ext4_ext_pblock(ex); 3624bc2d9db4SLukas Czerner 3625bc2d9db4SLukas Czerner /* 3626bc2d9db4SLukas Czerner * A 
transfer of blocks from 'ex' to 'abut_ex' is allowed 3627bc2d9db4SLukas Czerner * upon those conditions: 3628bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3629bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3630bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3631bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 3632bc2d9db4SLukas Czerner * overflowing the (initialized) length limit. 3633bc2d9db4SLukas Czerner */ 3634556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3635bc2d9db4SLukas Czerner ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3636bc2d9db4SLukas Czerner ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3637bc2d9db4SLukas Czerner (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3638bc2d9db4SLukas Czerner err = ext4_ext_get_access(handle, inode, path + depth); 3639bc2d9db4SLukas Czerner if (err) 3640bc2d9db4SLukas Czerner goto out; 3641bc2d9db4SLukas Czerner 3642bc2d9db4SLukas Czerner trace_ext4_ext_convert_to_initialized_fastpath(inode, 3643bc2d9db4SLukas Czerner map, ex, abut_ex); 3644bc2d9db4SLukas Czerner 3645bc2d9db4SLukas Czerner /* Shift the start of abut_ex by 'map_len' blocks */ 3646bc2d9db4SLukas Czerner abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3647bc2d9db4SLukas Czerner ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3648bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3649556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3650bc2d9db4SLukas Czerner 3651bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3652bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3653bc2d9db4SLukas Czerner 3654bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3655bc2d9db4SLukas Czerner allocated = map_len; 3656bc2d9db4SLukas Czerner } 3657bc2d9db4SLukas Czerner } 3658bc2d9db4SLukas Czerner if (allocated) { 36596f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */ 36606f91bc5fSEric Gouriou ext4_ext_dirty(handle, inode, path + depth); 36616f91bc5fSEric Gouriou 36626f91bc5fSEric Gouriou /* Update path to point to the right extent */ 3663bc2d9db4SLukas Czerner path[depth].p_ext = abut_ex; 36646f91bc5fSEric Gouriou goto out; 3665bc2d9db4SLukas Czerner } else 3666bc2d9db4SLukas Czerner allocated = ee_len - (map->m_lblk - ee_block); 36676f91bc5fSEric Gouriou 3668667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block); 366921ca087aSDmitry Monakhov /* 367021ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 36719e740568SYongqiang Yang * zeroout only if extent is fully inside i_size or new_size. 367221ca087aSDmitry Monakhov */ 3673667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 367421ca087aSDmitry Monakhov 367567a5da56SZheng Liu if (EXT4_EXT_MAY_ZEROOUT & split_flag) 367667a5da56SZheng Liu max_zeroout = sbi->s_extent_max_zeroout_kb >> 36774f42f80aSLukas Czerner (inode->i_sb->s_blocksize_bits - 10); 367867a5da56SZheng Liu 3679667eff35SYongqiang Yang /* 36804f8caa60SJan Kara * five cases: 3681667eff35SYongqiang Yang * 1. split the extent into three extents. 36824f8caa60SJan Kara * 2. split the extent into two extents, zeroout the head of the first 36834f8caa60SJan Kara * extent. 36844f8caa60SJan Kara * 3. split the extent into two extents, zeroout the tail of the second 36854f8caa60SJan Kara * extent. 3686667eff35SYongqiang Yang * 4. split the extent into two extents without zeroout.
36874f8caa60SJan Kara * 5. no splitting needed, just possibly zeroout the head and/or the 36884f8caa60SJan Kara * tail of the extent. 3689667eff35SYongqiang Yang */ 3690667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3691667eff35SYongqiang Yang split_map.m_len = map->m_len; 3692667eff35SYongqiang Yang 36934f8caa60SJan Kara if (max_zeroout && (allocated > split_map.m_len)) { 369467a5da56SZheng Liu if (allocated <= max_zeroout) { 36954f8caa60SJan Kara /* case 3 or 5 */ 36964f8caa60SJan Kara zero_ex1.ee_block = 36974f8caa60SJan Kara cpu_to_le32(split_map.m_lblk + 36984f8caa60SJan Kara split_map.m_len); 36994f8caa60SJan Kara zero_ex1.ee_len = 37004f8caa60SJan Kara cpu_to_le16(allocated - split_map.m_len); 37014f8caa60SJan Kara ext4_ext_store_pblock(&zero_ex1, 37024f8caa60SJan Kara ext4_ext_pblock(ex) + split_map.m_lblk + 37034f8caa60SJan Kara split_map.m_len - ee_block); 37044f8caa60SJan Kara err = ext4_ext_zeroout(inode, &zero_ex1); 3705667eff35SYongqiang Yang if (err) 3706667eff35SYongqiang Yang goto out; 3707667eff35SYongqiang Yang split_map.m_len = allocated; 37084f8caa60SJan Kara } 37094f8caa60SJan Kara if (split_map.m_lblk - ee_block + split_map.m_len < 37104f8caa60SJan Kara max_zeroout) { 37114f8caa60SJan Kara /* case 2 or 5 */ 37124f8caa60SJan Kara if (split_map.m_lblk != ee_block) { 37134f8caa60SJan Kara zero_ex2.ee_block = ex->ee_block; 37144f8caa60SJan Kara zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk - 3715667eff35SYongqiang Yang ee_block); 37164f8caa60SJan Kara ext4_ext_store_pblock(&zero_ex2, 3717667eff35SYongqiang Yang ext4_ext_pblock(ex)); 37184f8caa60SJan Kara err = ext4_ext_zeroout(inode, &zero_ex2); 3719667eff35SYongqiang Yang if (err) 3720667eff35SYongqiang Yang goto out; 3721667eff35SYongqiang Yang } 3722667eff35SYongqiang Yang 37234f8caa60SJan Kara split_map.m_len += split_map.m_lblk - ee_block; 3724667eff35SYongqiang Yang split_map.m_lblk = ee_block; 37259b940f8eSAllison Henderson allocated = map->m_len; 3726667eff35SYongqiang Yang } 3727667eff35SYongqiang Yang } 3728667eff35SYongqiang Yang 3729ae9e9c6aSJan Kara err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag, 3730ae9e9c6aSJan Kara flags); 3731ae9e9c6aSJan Kara if (err > 0) 3732ae9e9c6aSJan Kara err = 0; 3733667eff35SYongqiang Yang out: 3734adb23551SZheng Liu /* If we have gotten a failure, don't zero out status tree */ 37354f8caa60SJan Kara if (!err) { 37364f8caa60SJan Kara err = ext4_zeroout_es(inode, &zero_ex1); 3737adb23551SZheng Liu if (!err) 37384f8caa60SJan Kara err = ext4_zeroout_es(inode, &zero_ex2); 37394f8caa60SJan Kara } 3740667eff35SYongqiang Yang return err ? err : allocated; 374156055d3aSAmit Arora } 374256055d3aSAmit Arora 3743c278bfecSAneesh Kumar K.V /* 3744e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() from 37450031462bSMingming Cao * ext4_get_blocks_dio_write() when DIO is used to write 3746556615dcSLukas Czerner * to an unwritten extent.
37470031462bSMingming Cao * 3748556615dcSLukas Czerner * Writing to an unwritten extent may result in splitting the unwritten 3749556615dcSLukas Czerner * extent into multiple initialized/unwritten extents (up to three). 37500031462bSMingming Cao * There are three possibilities: 3751556615dcSLukas Czerner * a> There is no split required: Entire extent should be unwritten 37520031462bSMingming Cao * b> Splits in two extents: Write is happening at either end of the extent 37530031462bSMingming Cao * c> Splits in three extents: Someone is writing in the middle of the extent 37540031462bSMingming Cao * 3755b8a86845SLukas Czerner * This works the same way in the case of initialized -> unwritten conversion. 3756b8a86845SLukas Czerner * 37570031462bSMingming Cao * One or more index blocks may be needed if the extent tree grows after 3758556615dcSLukas Czerner * the unwritten extent split. To prevent ENOSPC from occurring when the 3759556615dcSLukas Czerner * IO completes, we need to split the unwritten extent before DIO submits 3760556615dcSLukas Czerner * the IO. The unwritten extent being written at this time will be split 3761556615dcSLukas Czerner * into three unwritten extents (at most). After the IO completes, the part 37620031462bSMingming Cao * being filled will be converted to initialized by the end_io callback 37630031462bSMingming Cao * via ext4_convert_unwritten_extents(). 3764ba230c3fSMingming * 3765556615dcSLukas Czerner * Returns the size of the unwritten extent to be written on success. 37660031462bSMingming Cao */ 3767b8a86845SLukas Czerner static int ext4_split_convert_extents(handle_t *handle, 37680031462bSMingming Cao struct inode *inode, 3769e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3770dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 37710031462bSMingming Cao int flags) 37720031462bSMingming Cao { 3773dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 3774667eff35SYongqiang Yang ext4_lblk_t eof_block; 3775667eff35SYongqiang Yang ext4_lblk_t ee_block; 3776667eff35SYongqiang Yang struct ext4_extent *ex; 3777667eff35SYongqiang Yang unsigned int ee_len; 3778667eff35SYongqiang Yang int split_flag = 0, depth; 37790031462bSMingming Cao 3780b8a86845SLukas Czerner ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n", 3781b8a86845SLukas Czerner __func__, inode->i_ino, 3782e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len); 378321ca087aSDmitry Monakhov 378421ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 378521ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3786e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len) 3787e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len; 37880031462bSMingming Cao /* 378921ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 379021ca087aSDmitry Monakhov * zeroout only if extent is fully inside i_size or new_size.
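 *
 * For illustration (hypothetical numbers): with a 4KiB block size and
 * i_size = 10000 bytes, eof_block = (10000 + 4095) >> 12 = 3, so only
 * extents ending at or before logical block 3 may set
 * EXT4_EXT_MAY_ZEROOUT here.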
379121ca087aSDmitry Monakhov */ 3792667eff35SYongqiang Yang depth = ext_depth(inode); 37930031462bSMingming Cao ex = path[depth].p_ext; 3794667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 3795667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 37960031462bSMingming Cao 3797b8a86845SLukas Czerner /* Convert to unwritten */ 3798b8a86845SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) { 3799b8a86845SLukas Czerner split_flag |= EXT4_EXT_DATA_VALID1; 3800b8a86845SLukas Czerner /* Convert to initialized */ 3801b8a86845SLukas Czerner } else if (flags & EXT4_GET_BLOCKS_CONVERT) { 3802b8a86845SLukas Czerner split_flag |= ee_block + ee_len <= eof_block ? 3803b8a86845SLukas Czerner EXT4_EXT_MAY_ZEROOUT : 0; 3804556615dcSLukas Czerner split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); 3805b8a86845SLukas Czerner } 3806667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO; 3807dfe50809STheodore Ts'o return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); 38080031462bSMingming Cao } 3809197217a5SYongqiang Yang 3810c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle, 38110031462bSMingming Cao struct inode *inode, 3812dee1f973SDmitry Monakhov struct ext4_map_blocks *map, 3813dfe50809STheodore Ts'o struct ext4_ext_path **ppath) 38140031462bSMingming Cao { 3815dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 38160031462bSMingming Cao struct ext4_extent *ex; 3817dee1f973SDmitry Monakhov ext4_lblk_t ee_block; 3818dee1f973SDmitry Monakhov unsigned int ee_len; 38190031462bSMingming Cao int depth; 38200031462bSMingming Cao int err = 0; 38210031462bSMingming Cao 38220031462bSMingming Cao depth = ext_depth(inode); 38230031462bSMingming Cao ex = path[depth].p_ext; 3824dee1f973SDmitry Monakhov ee_block = le32_to_cpu(ex->ee_block); 3825dee1f973SDmitry Monakhov ee_len = ext4_ext_get_actual_len(ex); 38260031462bSMingming Cao 3827197217a5SYongqiang Yang ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical " 3828197217a5SYongqiang Yang "block %llu, max_blocks %u\n", inode->i_ino, 3829dee1f973SDmitry Monakhov (unsigned long long)ee_block, ee_len); 3830dee1f973SDmitry Monakhov 3831ff95ec22SDmitry Monakhov /* If the extent is larger than requested, it is a clear sign that we still 3832ff95ec22SDmitry Monakhov * have some extent state machine issues left. So extent_split is still 3833ff95ec22SDmitry Monakhov * required. 3834ff95ec22SDmitry Monakhov * TODO: Once all related issues are fixed, this situation should be 3835ff95ec22SDmitry Monakhov * illegal.
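 *
 * For example (hypothetical numbers): if the completed IO covered
 * blocks [100, 104) but the extent found here is [100, 112), the split
 * below ensures that only [100, 104) is marked initialized.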
3836ff95ec22SDmitry Monakhov */ 3837dee1f973SDmitry Monakhov if (ee_block != map->m_lblk || ee_len > map->m_len) { 3838e3d550c2SRakesh Pandit #ifdef CONFIG_EXT4_DEBUG 3839e3d550c2SRakesh Pandit ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," 38408d2ae1cbSJakub Wilk " len %u; IO logical block %llu, len %u", 3841ff95ec22SDmitry Monakhov inode->i_ino, (unsigned long long)ee_block, ee_len, 3842ff95ec22SDmitry Monakhov (unsigned long long)map->m_lblk, map->m_len); 3843ff95ec22SDmitry Monakhov #endif 3844dfe50809STheodore Ts'o err = ext4_split_convert_extents(handle, inode, map, ppath, 3845dee1f973SDmitry Monakhov EXT4_GET_BLOCKS_CONVERT); 3846dee1f973SDmitry Monakhov if (err < 0) 3847dfe50809STheodore Ts'o return err; 3848ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3849dfe50809STheodore Ts'o if (IS_ERR(path)) 3850dfe50809STheodore Ts'o return PTR_ERR(path); 3851dee1f973SDmitry Monakhov depth = ext_depth(inode); 3852dee1f973SDmitry Monakhov ex = path[depth].p_ext; 3853dee1f973SDmitry Monakhov } 3854197217a5SYongqiang Yang 38550031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth); 38560031462bSMingming Cao if (err) 38570031462bSMingming Cao goto out; 38580031462bSMingming Cao /* first mark the extent as initialized */ 38590031462bSMingming Cao ext4_ext_mark_initialized(ex); 38600031462bSMingming Cao 3861197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because 3862197217a5SYongqiang Yang * borders are not changed 38630031462bSMingming Cao */ 3864ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3865197217a5SYongqiang Yang 38660031462bSMingming Cao /* Mark modified extent as dirty */ 3867ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 38680031462bSMingming Cao out: 38690031462bSMingming Cao ext4_ext_show_leaf(inode, path); 38700031462bSMingming Cao return err; 38710031462bSMingming Cao } 38720031462bSMingming Cao 38730031462bSMingming Cao static int 3874e8b83d93STheodore Ts'o convert_initialized_extent(handle_t *handle, struct inode *inode, 3875b8a86845SLukas Czerner struct ext4_map_blocks *map, 387629c6eaffSEric Whitney struct ext4_ext_path **ppath, 3877f064a9d6SEric Whitney unsigned int *allocated) 3878b8a86845SLukas Czerner { 38794f224b8bSTheodore Ts'o struct ext4_ext_path *path = *ppath; 3880e8b83d93STheodore Ts'o struct ext4_extent *ex; 3881e8b83d93STheodore Ts'o ext4_lblk_t ee_block; 3882e8b83d93STheodore Ts'o unsigned int ee_len; 3883e8b83d93STheodore Ts'o int depth; 3884b8a86845SLukas Czerner int err = 0; 3885b8a86845SLukas Czerner 3886b8a86845SLukas Czerner /* 3887b8a86845SLukas Czerner * Make sure that the extent is no bigger than we support with 3888556615dcSLukas Czerner * unwritten extent 3889b8a86845SLukas Czerner */ 3890556615dcSLukas Czerner if (map->m_len > EXT_UNWRITTEN_MAX_LEN) 3891556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; 3892b8a86845SLukas Czerner 3893e8b83d93STheodore Ts'o depth = ext_depth(inode); 3894e8b83d93STheodore Ts'o ex = path[depth].p_ext; 3895e8b83d93STheodore Ts'o ee_block = le32_to_cpu(ex->ee_block); 3896e8b83d93STheodore Ts'o ee_len = ext4_ext_get_actual_len(ex); 3897e8b83d93STheodore Ts'o 3898e8b83d93STheodore Ts'o ext_debug("%s: inode %lu, logical" 3899e8b83d93STheodore Ts'o "block %llu, max_blocks %u\n", __func__, inode->i_ino, 3900e8b83d93STheodore Ts'o (unsigned long long)ee_block, ee_len); 3901e8b83d93STheodore Ts'o 3902e8b83d93STheodore Ts'o if (ee_block != map->m_lblk 
|| ee_len > map->m_len) { 3903dfe50809STheodore Ts'o err = ext4_split_convert_extents(handle, inode, map, ppath, 3904e8b83d93STheodore Ts'o EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); 3905e8b83d93STheodore Ts'o if (err < 0) 3906e8b83d93STheodore Ts'o return err; 3907ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3908e8b83d93STheodore Ts'o if (IS_ERR(path)) 3909e8b83d93STheodore Ts'o return PTR_ERR(path); 3910e8b83d93STheodore Ts'o depth = ext_depth(inode); 3911e8b83d93STheodore Ts'o ex = path[depth].p_ext; 3912e8b83d93STheodore Ts'o if (!ex) { 3913e8b83d93STheodore Ts'o EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3914e8b83d93STheodore Ts'o (unsigned long) map->m_lblk); 39156a797d27SDarrick J. Wong return -EFSCORRUPTED; 3916e8b83d93STheodore Ts'o } 3917e8b83d93STheodore Ts'o } 3918e8b83d93STheodore Ts'o 3919e8b83d93STheodore Ts'o err = ext4_ext_get_access(handle, inode, path + depth); 3920e8b83d93STheodore Ts'o if (err) 3921e8b83d93STheodore Ts'o return err; 3922e8b83d93STheodore Ts'o /* first mark the extent as unwritten */ 3923e8b83d93STheodore Ts'o ext4_ext_mark_unwritten(ex); 3924e8b83d93STheodore Ts'o 3925e8b83d93STheodore Ts'o /* note: ext4_ext_correct_indexes() isn't needed here because 3926e8b83d93STheodore Ts'o * borders are not changed 3927e8b83d93STheodore Ts'o */ 3928e8b83d93STheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3929e8b83d93STheodore Ts'o 3930e8b83d93STheodore Ts'o /* Mark modified extent as dirty */ 3931e8b83d93STheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3932e8b83d93STheodore Ts'o if (err) 3933e8b83d93STheodore Ts'o return err; 3934e8b83d93STheodore Ts'o ext4_ext_show_leaf(inode, path); 3935e8b83d93STheodore Ts'o 3936b8a86845SLukas Czerner ext4_update_inode_fsync_trans(handle, inode, 1); 39374337ecd1SEric Whitney 3938b8a86845SLukas Czerner map->m_flags |= EXT4_MAP_UNWRITTEN; 3939f064a9d6SEric Whitney if (*allocated > map->m_len) 3940f064a9d6SEric Whitney *allocated = map->m_len; 3941f064a9d6SEric Whitney map->m_len = *allocated; 3942f064a9d6SEric Whitney return 0; 3943b8a86845SLukas Czerner } 3944b8a86845SLukas Czerner 3945b8a86845SLukas Czerner static int 3946556615dcSLukas Czerner ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, 3947e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3948dfe50809STheodore Ts'o struct ext4_ext_path **ppath, int flags, 3949e35fd660STheodore Ts'o unsigned int allocated, ext4_fsblk_t newblock) 39500031462bSMingming Cao { 39514337ecd1SEric Whitney #ifdef EXT_DEBUG 3952dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 39534337ecd1SEric Whitney #endif 39540031462bSMingming Cao int ret = 0; 39550031462bSMingming Cao int err = 0; 39560031462bSMingming Cao 3957556615dcSLukas Czerner ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical " 395888635ca2SZheng Liu "block %llu, max_blocks %u, flags %x, allocated %u\n", 3959e35fd660STheodore Ts'o inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 39600031462bSMingming Cao flags, allocated); 39610031462bSMingming Cao ext4_ext_show_leaf(inode, path); 39620031462bSMingming Cao 396327dd4385SLukas Czerner /* 3964556615dcSLukas Czerner * When writing into unwritten space, we should not fail to 396527dd4385SLukas Czerner * allocate metadata blocks for the new extent block if needed. 
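 *
 * (Sketch of why, reasoning from the split logic above: converting the
 * middle of one unwritten extent can add up to two more extents to the
 * leaf, which may force allocation of a new extent block; hence
 * EXT4_GET_BLOCKS_METADATA_NOFAIL is added below.)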
396627dd4385SLukas Czerner */ 396727dd4385SLukas Czerner flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 396827dd4385SLukas Czerner 3969556615dcSLukas Czerner trace_ext4_ext_handle_unwritten_extents(inode, map, flags, 3970b5645534SZheng Liu allocated, newblock); 3971d8990240SAditya Kali 3972c7064ef1SJiaying Zhang /* get_block() before submit the IO, split the extent */ 3973c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_PRE_IO) { 3974dfe50809STheodore Ts'o ret = ext4_split_convert_extents(handle, inode, map, ppath, 3975dfe50809STheodore Ts'o flags | EXT4_GET_BLOCKS_CONVERT); 397682e54229SDmitry Monakhov if (ret <= 0) 397782e54229SDmitry Monakhov goto out; 3978a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 39790031462bSMingming Cao goto out; 39800031462bSMingming Cao } 3981c7064ef1SJiaying Zhang /* IO end_io complete, convert the filled extent to written */ 3982c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT) { 3983c86d8db3SJan Kara if (flags & EXT4_GET_BLOCKS_ZERO) { 3984c86d8db3SJan Kara if (allocated > map->m_len) 3985c86d8db3SJan Kara allocated = map->m_len; 3986c86d8db3SJan Kara err = ext4_issue_zeroout(inode, map->m_lblk, newblock, 3987c86d8db3SJan Kara allocated); 3988c86d8db3SJan Kara if (err < 0) 3989c86d8db3SJan Kara goto out2; 3990c86d8db3SJan Kara } 3991dee1f973SDmitry Monakhov ret = ext4_convert_unwritten_extents_endio(handle, inode, map, 3992dfe50809STheodore Ts'o ppath); 39934337ecd1SEric Whitney if (ret >= 0) 3994b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 39954337ecd1SEric Whitney else 399658590b06STheodore Ts'o err = ret; 3997cdee7843SZheng Liu map->m_flags |= EXT4_MAP_MAPPED; 399815cc1767SEric Whitney map->m_pblk = newblock; 3999cdee7843SZheng Liu if (allocated > map->m_len) 4000cdee7843SZheng Liu allocated = map->m_len; 4001cdee7843SZheng Liu map->m_len = allocated; 40020031462bSMingming Cao goto out2; 40030031462bSMingming Cao } 40040031462bSMingming Cao /* buffered IO case */ 40050031462bSMingming Cao /* 40060031462bSMingming Cao * repeat fallocate creation request 40070031462bSMingming Cao * we already have an unwritten extent 40080031462bSMingming Cao */ 4009556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 4010a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 40110031462bSMingming Cao goto map_out; 4012a25a4e1aSZheng Liu } 40130031462bSMingming Cao 40140031462bSMingming Cao /* buffered READ or buffered write_begin() lookup */ 40150031462bSMingming Cao if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 40160031462bSMingming Cao /* 40170031462bSMingming Cao * We have blocks reserved already. We 40180031462bSMingming Cao * return allocated blocks so that delalloc 40190031462bSMingming Cao * won't do block reservation for us. But 40200031462bSMingming Cao * the buffer head will be unmapped so that 40210031462bSMingming Cao * a read from the block returns 0s. 
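 *
 * e.g. reading a range that was fallocated but never written: the
 * buffer head stays unmapped, so the page cache supplies zeroes
 * instead of reading stale data from the unwritten blocks.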
40220031462bSMingming Cao */ 4023e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNWRITTEN; 40240031462bSMingming Cao goto out1; 40250031462bSMingming Cao } 40260031462bSMingming Cao 40270031462bSMingming Cao /* buffered write, writepage time, convert*/ 4028dfe50809STheodore Ts'o ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); 4029a4e5d88bSDmitry Monakhov if (ret >= 0) 4030b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 40310031462bSMingming Cao out: 40320031462bSMingming Cao if (ret <= 0) { 40330031462bSMingming Cao err = ret; 40340031462bSMingming Cao goto out2; 40350031462bSMingming Cao } else 40360031462bSMingming Cao allocated = ret; 4037e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 403816e08b14Szhangyi (F) if (allocated > map->m_len) 4039e35fd660STheodore Ts'o allocated = map->m_len; 40403a225670SZheng Liu map->m_len = allocated; 40415f634d06SAneesh Kumar K.V 40420031462bSMingming Cao map_out: 4043e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 40440031462bSMingming Cao out1: 4045e35fd660STheodore Ts'o if (allocated > map->m_len) 4046e35fd660STheodore Ts'o allocated = map->m_len; 40470031462bSMingming Cao ext4_ext_show_leaf(inode, path); 4048e35fd660STheodore Ts'o map->m_pblk = newblock; 4049e35fd660STheodore Ts'o map->m_len = allocated; 40500031462bSMingming Cao out2: 40510031462bSMingming Cao return err ? err : allocated; 40520031462bSMingming Cao } 405358590b06STheodore Ts'o 40540031462bSMingming Cao /* 40554d33b1efSTheodore Ts'o * get_implied_cluster_alloc - check to see if the requested 40564d33b1efSTheodore Ts'o * allocation (in the map structure) overlaps with a cluster already 40574d33b1efSTheodore Ts'o * allocated in an extent. 4058d8990240SAditya Kali * @sb The filesystem superblock structure 40594d33b1efSTheodore Ts'o * @map The requested lblk->pblk mapping 40604d33b1efSTheodore Ts'o * @ex The extent structure which might contain an implied 40614d33b1efSTheodore Ts'o * cluster allocation 40624d33b1efSTheodore Ts'o * 40634d33b1efSTheodore Ts'o * This function is called by ext4_ext_map_blocks() after we failed to 40644d33b1efSTheodore Ts'o * find blocks that were already in the inode's extent tree. Hence, 40654d33b1efSTheodore Ts'o * we know that the beginning of the requested region cannot overlap 40664d33b1efSTheodore Ts'o * the extent from the inode's extent tree. There are three cases we 40674d33b1efSTheodore Ts'o * want to catch. 
The first is this case: 40684d33b1efSTheodore Ts'o * 40694d33b1efSTheodore Ts'o * |--- cluster # N--| 40704d33b1efSTheodore Ts'o * |--- extent ---| |---- requested region ---| 40714d33b1efSTheodore Ts'o * |==========| 40724d33b1efSTheodore Ts'o * 40734d33b1efSTheodore Ts'o * The second case that we need to test for is this one: 40744d33b1efSTheodore Ts'o * 40754d33b1efSTheodore Ts'o * |--------- cluster # N ----------------| 40764d33b1efSTheodore Ts'o * |--- requested region --| |------- extent ----| 40774d33b1efSTheodore Ts'o * |=======================| 40784d33b1efSTheodore Ts'o * 40794d33b1efSTheodore Ts'o * The third case is when the requested region lies between two extents 40804d33b1efSTheodore Ts'o * within the same cluster: 40814d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 40824d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 40834d33b1efSTheodore Ts'o * |------ requested region ------| 40844d33b1efSTheodore Ts'o * |================| 40854d33b1efSTheodore Ts'o * 40864d33b1efSTheodore Ts'o * In each of the above cases, we need to set map->m_pblk and 40874d33b1efSTheodore Ts'o * map->m_len so they correspond to the extent labelled as 40884d33b1efSTheodore Ts'o * "|====|" from cluster #N, since it is already in use for data in 40894d33b1efSTheodore Ts'o * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 40904d33b1efSTheodore Ts'o * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 40914d33b1efSTheodore Ts'o * as a new "allocated" block region. Otherwise, we will return 0 and 40924d33b1efSTheodore Ts'o * ext4_ext_map_blocks() will then allocate one or more new clusters 40934d33b1efSTheodore Ts'o * by calling ext4_mb_new_blocks(). 40944d33b1efSTheodore Ts'o */ 4095d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb, 40964d33b1efSTheodore Ts'o struct ext4_map_blocks *map, 40974d33b1efSTheodore Ts'o struct ext4_extent *ex, 40984d33b1efSTheodore Ts'o struct ext4_ext_path *path) 40994d33b1efSTheodore Ts'o { 4100d8990240SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(sb); 4101f5a44db5STheodore Ts'o ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 41024d33b1efSTheodore Ts'o ext4_lblk_t ex_cluster_start, ex_cluster_end; 410314d7f3efSCurt Wohlgemuth ext4_lblk_t rr_cluster_start; 41044d33b1efSTheodore Ts'o ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 41054d33b1efSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 41064d33b1efSTheodore Ts'o unsigned short ee_len = ext4_ext_get_actual_len(ex); 41074d33b1efSTheodore Ts'o 41084d33b1efSTheodore Ts'o /* The extent passed in that we are trying to match */ 41094d33b1efSTheodore Ts'o ex_cluster_start = EXT4_B2C(sbi, ee_block); 41104d33b1efSTheodore Ts'o ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 41114d33b1efSTheodore Ts'o 41124d33b1efSTheodore Ts'o /* The requested region passed into ext4_map_blocks() */ 41134d33b1efSTheodore Ts'o rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 41144d33b1efSTheodore Ts'o 41154d33b1efSTheodore Ts'o if ((rr_cluster_start == ex_cluster_end) || 41164d33b1efSTheodore Ts'o (rr_cluster_start == ex_cluster_start)) { 41174d33b1efSTheodore Ts'o if (rr_cluster_start == ex_cluster_end) 41184d33b1efSTheodore Ts'o ee_start += ee_len - 1; 4119f5a44db5STheodore Ts'o map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; 41204d33b1efSTheodore Ts'o map->m_len = min(map->m_len, 41214d33b1efSTheodore Ts'o (unsigned) sbi->s_cluster_ratio - c_offset); 41224d33b1efSTheodore Ts'o /* 41234d33b1efSTheodore Ts'o * Check for and handle this case: 41244d33b1efSTheodore Ts'o * 41254d33b1efSTheodore Ts'o * |--------- cluster # N-------------| 41264d33b1efSTheodore Ts'o * |------- extent ----| 41274d33b1efSTheodore Ts'o * |--- requested region ---| 41284d33b1efSTheodore Ts'o * |===========| 41294d33b1efSTheodore Ts'o */ 41304d33b1efSTheodore Ts'o 41314d33b1efSTheodore Ts'o if (map->m_lblk < ee_block) 41324d33b1efSTheodore Ts'o map->m_len = min(map->m_len, ee_block - map->m_lblk); 41334d33b1efSTheodore Ts'o 41344d33b1efSTheodore Ts'o /* 41354d33b1efSTheodore Ts'o * Check for the case where there is already another allocated 41364d33b1efSTheodore Ts'o * block to the right of 'ex' but before the end of the cluster. 41374d33b1efSTheodore Ts'o * 41384d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 41394d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 41404d33b1efSTheodore Ts'o * |------ requested region ------| 41414d33b1efSTheodore Ts'o * |================| 41424d33b1efSTheodore Ts'o */ 41434d33b1efSTheodore Ts'o if (map->m_lblk > ee_block) { 41444d33b1efSTheodore Ts'o ext4_lblk_t next = ext4_ext_next_allocated_block(path); 41454d33b1efSTheodore Ts'o map->m_len = min(map->m_len, next - map->m_lblk); 41464d33b1efSTheodore Ts'o } 4147d8990240SAditya Kali 4148d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 41494d33b1efSTheodore Ts'o return 1; 41504d33b1efSTheodore Ts'o } 4151d8990240SAditya Kali 4152d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 41534d33b1efSTheodore Ts'o return 0; 41544d33b1efSTheodore Ts'o } 41554d33b1efSTheodore Ts'o 41564d33b1efSTheodore Ts'o
41574d33b1efSTheodore Ts'o /* 4158f5ab0d1fSMingming Cao * Block allocation/map/preallocation routine for extent-based files 4159f5ab0d1fSMingming Cao * 4160f5ab0d1fSMingming Cao * 4161c278bfecSAneesh Kumar K.V * Need to be called with 41620e855ac8SAneesh Kumar K.V * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 41630e855ac8SAneesh Kumar K.V * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 4164f5ab0d1fSMingming Cao * 4165f5ab0d1fSMingming Cao * return > 0, number of blocks already mapped/allocated 4166f5ab0d1fSMingming Cao * if create == 0 and these are pre-allocated blocks 4167f5ab0d1fSMingming Cao * buffer head is unmapped 4168f5ab0d1fSMingming Cao * otherwise blocks are mapped 4169f5ab0d1fSMingming Cao * 4170f5ab0d1fSMingming Cao * return = 0, if plain look up failed (blocks have not been allocated) 4171f5ab0d1fSMingming Cao * buffer head is unmapped 4172f5ab0d1fSMingming Cao * 4173f5ab0d1fSMingming Cao * return < 0, error case.
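 *
 * A minimal lookup sketch (illustrative only; error handling is elided,
 * the locking rule above still applies, and passing a NULL handle for a
 * pure lookup is an assumption of this sketch since no block is
 * allocated when flags == 0):
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	int ret = ext4_ext_map_blocks(NULL, inode, &map, 0);
 *	if (ret > 0)
 *		first_pblk = map.m_pblk;  // 'ret' blocks mapped from map.m_pblk
 *	else if (ret == 0)
 *		;                         // hole: nothing allocated at lblk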
4174c278bfecSAneesh Kumar K.V */ 4175e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4176e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 4177a86c6181SAlex Tomas { 4178a86c6181SAlex Tomas struct ext4_ext_path *path = NULL; 41794d33b1efSTheodore Ts'o struct ext4_extent newex, *ex, *ex2; 41804d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 41810562e0baSJiaying Zhang ext4_fsblk_t newblock = 0; 4182ce37c429SEric Whitney int free_on_err = 0, err = 0, depth, ret; 41834d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0; 418481fdbb4aSYongqiang Yang unsigned int allocated_clusters = 0; 4185c9de560dSAlex Tomas struct ext4_allocation_request ar; 41864d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset; 4187cbd7584eSJan Kara bool map_from_cluster = false; 4188a86c6181SAlex Tomas 418984fe3befSMingming ext_debug("blocks %u/%u requested for inode %lu\n", 4190e35fd660STheodore Ts'o map->m_lblk, map->m_len, inode->i_ino); 41910562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4192a86c6181SAlex Tomas 4193a86c6181SAlex Tomas /* find extent for this block */ 4194ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, NULL, 0); 4195a86c6181SAlex Tomas if (IS_ERR(path)) { 4196a86c6181SAlex Tomas err = PTR_ERR(path); 4197a86c6181SAlex Tomas path = NULL; 4198a86c6181SAlex Tomas goto out2; 4199a86c6181SAlex Tomas } 4200a86c6181SAlex Tomas 4201a86c6181SAlex Tomas depth = ext_depth(inode); 4202a86c6181SAlex Tomas 4203a86c6181SAlex Tomas /* 4204d0d856e8SRandy Dunlap * consistent leaf must not be empty; 4205d0d856e8SRandy Dunlap * this situation is possible, though, _during_ tree modification; 4206ed8a1a76STheodore Ts'o * this is why assert can't be put in ext4_find_extent() 4207a86c6181SAlex Tomas */ 4208273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4209273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address " 4210f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld", 4211f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth, 4212f70f362bSTheodore Ts'o path[depth].p_block); 42136a797d27SDarrick J. Wong err = -EFSCORRUPTED; 4214034fb4c9SSurbhi Palande goto out2; 4215034fb4c9SSurbhi Palande } 4216a86c6181SAlex Tomas 42177e028976SAvantika Mathur ex = path[depth].p_ext; 42187e028976SAvantika Mathur if (ex) { 4219725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4220bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4221a2df2a63SAmit Arora unsigned short ee_len; 4222471d4011SSuparna Bhattacharya 4223b8a86845SLukas Czerner 4224471d4011SSuparna Bhattacharya /* 4225556615dcSLukas Czerner * unwritten extents are treated as holes, except that 422656055d3aSAmit Arora * we split out initialized portions during a write. 
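 *
 * e.g. a lookup (no EXT4_GET_BLOCKS_CREATE) that lands in an unwritten
 * extent still reports the physical location, but with
 * EXT4_MAP_UNWRITTEN set instead of EXT4_MAP_MAPPED, so callers treat
 * the range as a hole and return zeroes.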
4227471d4011SSuparna Bhattacharya */ 4228a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex); 4229d8990240SAditya Kali 4230d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4231d8990240SAditya Kali 4232d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */ 4233e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) { 4234e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start; 4235d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 4236e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 4237e35fd660STheodore Ts'o ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 4238a86c6181SAlex Tomas ee_block, ee_len, newblock); 423956055d3aSAmit Arora 4240b8a86845SLukas Czerner /* 4241b8a86845SLukas Czerner * If the extent is initialized check whether the 4242b8a86845SLukas Czerner * caller wants to convert it to unwritten. 4243b8a86845SLukas Czerner */ 4244556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(ex)) && 4245b8a86845SLukas Czerner (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4246f064a9d6SEric Whitney err = convert_initialized_extent(handle, 4247f064a9d6SEric Whitney inode, map, &path, &allocated); 4248b8a86845SLukas Czerner goto out2; 4249f064a9d6SEric Whitney } else if (!ext4_ext_is_unwritten(ex)) { 4250a86c6181SAlex Tomas goto out; 4251f064a9d6SEric Whitney } 425269eb33dcSZheng Liu 4253556615dcSLukas Czerner ret = ext4_ext_handle_unwritten_extents( 4254dfe50809STheodore Ts'o handle, inode, map, &path, flags, 4255e861304bSAllison Henderson allocated, newblock); 4256ce37c429SEric Whitney if (ret < 0) 4257ce37c429SEric Whitney err = ret; 4258ce37c429SEric Whitney else 4259ce37c429SEric Whitney allocated = ret; 426031cf0f2cSEric Whitney goto out2; 426156055d3aSAmit Arora } 4262a86c6181SAlex Tomas } 4263a86c6181SAlex Tomas 4264a86c6181SAlex Tomas /* 4265d0d856e8SRandy Dunlap * requested block isn't allocated yet; 4266a86c6181SAlex Tomas * we couldn't try to create block if create flag is zero 4267a86c6181SAlex Tomas */ 4268c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4269140a5250SJan Kara ext4_lblk_t hole_start, hole_len; 4270140a5250SJan Kara 4271facab4d9SJan Kara hole_start = map->m_lblk; 4272facab4d9SJan Kara hole_len = ext4_ext_determine_hole(inode, path, &hole_start); 427356055d3aSAmit Arora /* 427456055d3aSAmit Arora * put just found gap into cache to speed up 427556055d3aSAmit Arora * subsequent requests 427656055d3aSAmit Arora */ 4277140a5250SJan Kara ext4_ext_put_gap_in_cache(inode, hole_start, hole_len); 4278facab4d9SJan Kara 4279facab4d9SJan Kara /* Update hole_len to reflect hole size after map->m_lblk */ 4280facab4d9SJan Kara if (hole_start != map->m_lblk) 4281facab4d9SJan Kara hole_len -= map->m_lblk - hole_start; 4282facab4d9SJan Kara map->m_pblk = 0; 4283facab4d9SJan Kara map->m_len = min_t(unsigned int, map->m_len, hole_len); 4284facab4d9SJan Kara 4285a86c6181SAlex Tomas goto out2; 4286a86c6181SAlex Tomas } 42874d33b1efSTheodore Ts'o 4288a86c6181SAlex Tomas /* 4289c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation. 4290a86c6181SAlex Tomas */ 42914d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk); 4292d0abafacSEric Whitney cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 42934d33b1efSTheodore Ts'o 42944d33b1efSTheodore Ts'o /* 42954d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned 4296ed8a1a76STheodore Ts'o * by ext4_find_extent() implies a cluster we can use. 
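 *
 * Bigalloc illustration (hypothetical numbers): with 16 blocks per
 * cluster, an extent ending at logical block 35 lies in cluster #2
 * (blocks 32..47); a request starting at block 36 maps to the same
 * cluster, so its tail can be reused without allocating a new cluster.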
42974d33b1efSTheodore Ts'o */ 42984d33b1efSTheodore Ts'o if (cluster_offset && ex && 4299d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 43004d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 43014d33b1efSTheodore Ts'o newblock = map->m_pblk; 4302cbd7584eSJan Kara map_from_cluster = true; 43034d33b1efSTheodore Ts'o goto got_allocated_blocks; 43044d33b1efSTheodore Ts'o } 4305a86c6181SAlex Tomas 4306c9de560dSAlex Tomas /* find neighbour allocated blocks */ 4307e35fd660STheodore Ts'o ar.lleft = map->m_lblk; 4308c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4309c9de560dSAlex Tomas if (err) 4310c9de560dSAlex Tomas goto out2; 4311e35fd660STheodore Ts'o ar.lright = map->m_lblk; 43124d33b1efSTheodore Ts'o ex2 = NULL; 43134d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4314c9de560dSAlex Tomas if (err) 4315c9de560dSAlex Tomas goto out2; 431625d14f98SAmit Arora 43174d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a 43184d33b1efSTheodore Ts'o * cluster we can use. */ 43194d33b1efSTheodore Ts'o if ((sbi->s_cluster_ratio > 1) && ex2 && 4320d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 43214d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 43224d33b1efSTheodore Ts'o newblock = map->m_pblk; 4323cbd7584eSJan Kara map_from_cluster = true; 43244d33b1efSTheodore Ts'o goto got_allocated_blocks; 43254d33b1efSTheodore Ts'o } 43264d33b1efSTheodore Ts'o 4327749269faSAmit Arora /* 4328749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in 4329749269faSAmit Arora * a single extent. For an initialized extent this limit is 4330556615dcSLukas Czerner * EXT_INIT_MAX_LEN and for an unwritten extent this limit is 4331556615dcSLukas Czerner * EXT_UNWRITTEN_MAX_LEN. 4332749269faSAmit Arora */ 4333e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN && 4334556615dcSLukas Czerner !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4335e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN; 4336556615dcSLukas Czerner else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && 4337556615dcSLukas Czerner (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4338556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN; 4339749269faSAmit Arora 4340e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4341e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len); 43424d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path); 434325d14f98SAmit Arora if (err) 4344b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 434525d14f98SAmit Arora else 4346e35fd660STheodore Ts'o allocated = map->m_len; 4347c9de560dSAlex Tomas 4348c9de560dSAlex Tomas /* allocate new block */ 4349c9de560dSAlex Tomas ar.inode = inode; 4350e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4351e35fd660STheodore Ts'o ar.logical = map->m_lblk; 43524d33b1efSTheodore Ts'o /* 43534d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster 43544d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a 43554d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the 43564d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is 43574d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc() 43584d33b1efSTheodore Ts'o * work correctly. 
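 *
 * Worked example (hypothetical numbers): with 16 blocks per cluster
 * and map->m_lblk = 36, offset = 36 & 15 = 4, so ar.goal and
 * ar.logical are pulled back by 4 blocks to the cluster boundary and
 * ar.len is rounded up to whole clusters with EXT4_NUM_B2C().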
43594d33b1efSTheodore Ts'o */ 4360f5a44db5STheodore Ts'o offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 43614d33b1efSTheodore Ts'o ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 43624d33b1efSTheodore Ts'o ar.goal -= offset; 43634d33b1efSTheodore Ts'o ar.logical -= offset; 4364c9de560dSAlex Tomas if (S_ISREG(inode->i_mode)) 4365c9de560dSAlex Tomas ar.flags = EXT4_MB_HINT_DATA; 4366c9de560dSAlex Tomas else 4367c9de560dSAlex Tomas /* disable in-core preallocation for non-regular files */ 4368c9de560dSAlex Tomas ar.flags = 0; 4369556b27abSVivek Haldar if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4370556b27abSVivek Haldar ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4371e3cf5d5dSTheodore Ts'o if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4372e3cf5d5dSTheodore Ts'o ar.flags |= EXT4_MB_DELALLOC_RESERVED; 4373c5e298aeSTheodore Ts'o if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 4374c5e298aeSTheodore Ts'o ar.flags |= EXT4_MB_USE_RESERVED; 4375c9de560dSAlex Tomas newblock = ext4_mb_new_blocks(handle, &ar, &err); 4376a86c6181SAlex Tomas if (!newblock) 4377a86c6181SAlex Tomas goto out2; 437884fe3befSMingming ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4379498e5f24STheodore Ts'o ar.goal, newblock, allocated); 43804d33b1efSTheodore Ts'o free_on_err = 1; 43817b415bf6SAditya Kali allocated_clusters = ar.len; 43824d33b1efSTheodore Ts'o ar.len = EXT4_C2B(sbi, ar.len) - offset; 43834d33b1efSTheodore Ts'o if (ar.len > allocated) 43844d33b1efSTheodore Ts'o ar.len = allocated; 4385a86c6181SAlex Tomas 43864d33b1efSTheodore Ts'o got_allocated_blocks: 4387a86c6181SAlex Tomas /* try to insert new extent into found leaf and return */ 43884d33b1efSTheodore Ts'o ext4_ext_store_pblock(&newex, newblock + offset); 4389c9de560dSAlex Tomas newex.ee_len = cpu_to_le16(ar.len); 4390556615dcSLukas Czerner /* Mark unwritten */ 4391556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){ 4392556615dcSLukas Czerner ext4_ext_mark_unwritten(&newex); 4393a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 43948d5d02e6SMingming Cao } 4395c8d46e41SJiaying Zhang 4396a4e5d88bSDmitry Monakhov err = 0; 43974337ecd1SEric Whitney err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); 439882e54229SDmitry Monakhov 43994d33b1efSTheodore Ts'o if (err && free_on_err) { 44007132de74SMaxim Patlasov int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? 
44017132de74SMaxim Patlasov EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; 4402315054f0SAlex Tomas /* free data blocks we just allocated */ 4403c9de560dSAlex Tomas /* not a good idea to call discard here directly, 4404c9de560dSAlex Tomas * but otherwise we'd need to call it every free() */ 4405c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 4406c8e15130STheodore Ts'o ext4_free_blocks(handle, inode, NULL, newblock, 4407c8e15130STheodore Ts'o EXT4_C2B(sbi, allocated_clusters), fb_flags); 4408a86c6181SAlex Tomas goto out2; 4409315054f0SAlex Tomas } 4410a86c6181SAlex Tomas 4411a86c6181SAlex Tomas /* previous routine could use block we allocated */ 4412bf89d16fSTheodore Ts'o newblock = ext4_ext_pblock(&newex); 4413b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 4414e35fd660STheodore Ts'o if (allocated > map->m_len) 4415e35fd660STheodore Ts'o allocated = map->m_len; 4416e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 4417a86c6181SAlex Tomas 4418b436b9beSJan Kara /* 4419b6bf9171SEric Whitney * Reduce the reserved cluster count to reflect successful deferred 4420b6bf9171SEric Whitney * allocation of delayed allocated clusters or direct allocation of 4421b6bf9171SEric Whitney * clusters discovered to be delayed allocated. Once allocated, a 4422b6bf9171SEric Whitney * cluster is not included in the reserved count. 44235f634d06SAneesh Kumar K.V */ 4424b6bf9171SEric Whitney if (test_opt(inode->i_sb, DELALLOC) && !map_from_cluster) { 44257b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 44267b415bf6SAditya Kali /* 4427b6bf9171SEric Whitney * When allocating delayed allocated clusters, simply 4428b6bf9171SEric Whitney * reduce the reserved cluster count and claim quota 4429232ec872SLukas Czerner */ 4430232ec872SLukas Czerner ext4_da_update_reserve_space(inode, allocated_clusters, 4431232ec872SLukas Czerner 1); 4432b6bf9171SEric Whitney } else { 4433b6bf9171SEric Whitney ext4_lblk_t lblk, len; 4434b6bf9171SEric Whitney unsigned int n; 4435b6bf9171SEric Whitney 4436b6bf9171SEric Whitney /* 4437b6bf9171SEric Whitney * When allocating non-delayed allocated clusters 4438b6bf9171SEric Whitney * (from fallocate, filemap, DIO, or clusters 4439b6bf9171SEric Whitney * allocated when delalloc has been disabled by 4440b6bf9171SEric Whitney * ext4_nonda_switch), reduce the reserved cluster 4441b6bf9171SEric Whitney * count by the number of allocated clusters that 4442b6bf9171SEric Whitney * have previously been delayed allocated. Quota 4443b6bf9171SEric Whitney * has been claimed by ext4_mb_new_blocks() above, 4444b6bf9171SEric Whitney * so release the quota reservations made for any 4445b6bf9171SEric Whitney * previously delayed allocated clusters. 4446b6bf9171SEric Whitney */ 4447b6bf9171SEric Whitney lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk); 4448b6bf9171SEric Whitney len = allocated_clusters << sbi->s_cluster_bits; 4449b6bf9171SEric Whitney n = ext4_es_delayed_clu(inode, lblk, len); 4450b6bf9171SEric Whitney if (n > 0) 4451b6bf9171SEric Whitney ext4_da_update_reserve_space(inode, (int) n, 0); 44527b415bf6SAditya Kali } 44537b415bf6SAditya Kali } 44545f634d06SAneesh Kumar K.V 44555f634d06SAneesh Kumar K.V /* 4456b436b9beSJan Kara * Cache the extent and update transaction to commit on fdatasync only 4457556615dcSLukas Czerner * when it is _not_ an unwritten extent. 
4458b436b9beSJan Kara */ 4459556615dcSLukas Czerner if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) 4460b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 446169eb33dcSZheng Liu else 4462b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0); 4463a86c6181SAlex Tomas out: 4464e35fd660STheodore Ts'o if (allocated > map->m_len) 4465e35fd660STheodore Ts'o allocated = map->m_len; 4466a86c6181SAlex Tomas ext4_ext_show_leaf(inode, path); 4467e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 4468e35fd660STheodore Ts'o map->m_pblk = newblock; 4469e35fd660STheodore Ts'o map->m_len = allocated; 4470a86c6181SAlex Tomas out2: 4471a86c6181SAlex Tomas ext4_ext_drop_refs(path); 4472a86c6181SAlex Tomas kfree(path); 4473e861304bSAllison Henderson 447463b99968STheodore Ts'o trace_ext4_ext_map_blocks_exit(inode, flags, map, 447563b99968STheodore Ts'o err ? err : allocated); 44767877191cSLukas Czerner return err ? err : allocated; 4477a86c6181SAlex Tomas } 4478a86c6181SAlex Tomas 4479d0abb36dSTheodore Ts'o int ext4_ext_truncate(handle_t *handle, struct inode *inode) 4480a86c6181SAlex Tomas { 4481a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 4482725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 4483a86c6181SAlex Tomas int err = 0; 4484a86c6181SAlex Tomas 4485a86c6181SAlex Tomas /* 4486d0d856e8SRandy Dunlap * TODO: optimization is possible here. 4487d0d856e8SRandy Dunlap * Probably we need not scan at all, 4488d0d856e8SRandy Dunlap * because page truncation is enough. 4489a86c6181SAlex Tomas */ 4490a86c6181SAlex Tomas 4491a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */ 4492a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size; 4493d0abb36dSTheodore Ts'o err = ext4_mark_inode_dirty(handle, inode); 4494d0abb36dSTheodore Ts'o if (err) 4495d0abb36dSTheodore Ts'o return err; 4496a86c6181SAlex Tomas 4497a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1) 4498a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb); 44998acd5e9bSTheodore Ts'o retry: 450051865fdaSZheng Liu err = ext4_es_remove_extent(inode, last_block, 450151865fdaSZheng Liu EXT_MAX_BLOCKS - last_block); 450294eec0fcSTheodore Ts'o if (err == -ENOMEM) { 45038acd5e9bSTheodore Ts'o cond_resched(); 45048acd5e9bSTheodore Ts'o congestion_wait(BLK_RW_ASYNC, HZ/50); 45058acd5e9bSTheodore Ts'o goto retry; 45068acd5e9bSTheodore Ts'o } 4507d0abb36dSTheodore Ts'o if (err) 4508d0abb36dSTheodore Ts'o return err; 4509d0abb36dSTheodore Ts'o return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4510a86c6181SAlex Tomas } 4511a86c6181SAlex Tomas 45120e8b6879SLukas Czerner static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4513c174e6d6SDmitry Monakhov ext4_lblk_t len, loff_t new_size, 451477a2e84dSTahsin Erdogan int flags) 4515a2df2a63SAmit Arora { 4516496ad9aaSAl Viro struct inode *inode = file_inode(file); 4517a2df2a63SAmit Arora handle_t *handle; 4518a2df2a63SAmit Arora int ret = 0; 4519a2df2a63SAmit Arora int ret2 = 0; 4520a2df2a63SAmit Arora int retries = 0; 45214134f5c8SLukas Czerner int depth = 0; 45222ed88685STheodore Ts'o struct ext4_map_blocks map; 45230e8b6879SLukas Czerner unsigned int credits; 4524c174e6d6SDmitry Monakhov loff_t epos; 4525a2df2a63SAmit Arora 4526c3fe493cSFabian Frederick BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); 45270e8b6879SLukas Czerner map.m_lblk = offset; 4528c174e6d6SDmitry Monakhov map.m_len = len; 45293c6fe770SGreg Harm /* 45303c6fe770SGreg Harm * Don't normalize the request if it can fit in 
one extent so 45313c6fe770SGreg Harm * that it doesn't get unnecessarily split into multiple 45323c6fe770SGreg Harm * extents. 45333c6fe770SGreg Harm */ 4534556615dcSLukas Czerner if (len <= EXT_UNWRITTEN_MAX_LEN) 45353c6fe770SGreg Harm flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 453660d4616fSDmitry Monakhov 45370e8b6879SLukas Czerner /* 45380e8b6879SLukas Czerner * credits to insert 1 extent into extent tree 45390e8b6879SLukas Czerner */ 45400e8b6879SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len); 45414134f5c8SLukas Czerner depth = ext_depth(inode); 45420e8b6879SLukas Czerner 4543a2df2a63SAmit Arora retry: 4544c174e6d6SDmitry Monakhov while (ret >= 0 && len) { 45454134f5c8SLukas Czerner /* 45464134f5c8SLukas Czerner * Recalculate credits when extent tree depth changes. 45474134f5c8SLukas Czerner */ 4548011c88e3SDan Carpenter if (depth != ext_depth(inode)) { 45494134f5c8SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len); 45504134f5c8SLukas Czerner depth = ext_depth(inode); 45514134f5c8SLukas Czerner } 45524134f5c8SLukas Czerner 45539924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 45549924a92aSTheodore Ts'o credits); 4555a2df2a63SAmit Arora if (IS_ERR(handle)) { 4556a2df2a63SAmit Arora ret = PTR_ERR(handle); 4557a2df2a63SAmit Arora break; 4558a2df2a63SAmit Arora } 4559a4e5d88bSDmitry Monakhov ret = ext4_map_blocks(handle, inode, &map, flags); 4560221879c9SAneesh Kumar K.V if (ret <= 0) { 4561f282ac19SLukas Czerner ext4_debug("inode #%lu: block %u: len %u: " 4562b06acd38SLukas Czerner "ext4_ext_map_blocks returned %d", 4563b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 4564b06acd38SLukas Czerner map.m_len, ret); 4565a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode); 4566a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4567a2df2a63SAmit Arora break; 4568a2df2a63SAmit Arora } 4569c174e6d6SDmitry Monakhov map.m_lblk += ret; 4570c174e6d6SDmitry Monakhov map.m_len = len = len - ret; 4571c174e6d6SDmitry Monakhov epos = (loff_t)map.m_lblk << inode->i_blkbits; 4572eeca7ea1SDeepa Dinamani inode->i_ctime = current_time(inode); 4573c174e6d6SDmitry Monakhov if (new_size) { 4574c174e6d6SDmitry Monakhov if (epos > new_size) 4575c174e6d6SDmitry Monakhov epos = new_size; 4576c174e6d6SDmitry Monakhov if (ext4_update_inode_size(inode, epos) & 0x1) 4577c174e6d6SDmitry Monakhov inode->i_mtime = inode->i_ctime; 4578c174e6d6SDmitry Monakhov } 4579c174e6d6SDmitry Monakhov ext4_mark_inode_dirty(handle, inode); 4580c894aa97SEryu Guan ext4_update_inode_fsync_trans(handle, inode, 1); 4581a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4582a2df2a63SAmit Arora if (ret2) 4583a2df2a63SAmit Arora break; 4584a2df2a63SAmit Arora } 4585fd28784aSAneesh Kumar K.V if (ret == -ENOSPC && 4586fd28784aSAneesh Kumar K.V ext4_should_retry_alloc(inode->i_sb, &retries)) { 4587fd28784aSAneesh Kumar K.V ret = 0; 4588a2df2a63SAmit Arora goto retry; 4589a2df2a63SAmit Arora } 4590f282ac19SLukas Czerner 45910e8b6879SLukas Czerner return ret > 0 ? 
ret2 : ret; 45920e8b6879SLukas Czerner } 45930e8b6879SLukas Czerner 459443f81677SEric Biggers static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len); 459543f81677SEric Biggers 459643f81677SEric Biggers static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len); 459743f81677SEric Biggers 4598b8a86845SLukas Czerner static long ext4_zero_range(struct file *file, loff_t offset, 4599b8a86845SLukas Czerner loff_t len, int mode) 4600b8a86845SLukas Czerner { 4601b8a86845SLukas Czerner struct inode *inode = file_inode(file); 4602b8a86845SLukas Czerner handle_t *handle = NULL; 4603b8a86845SLukas Czerner unsigned int max_blocks; 4604b8a86845SLukas Czerner loff_t new_size = 0; 4605b8a86845SLukas Czerner int ret = 0; 4606b8a86845SLukas Czerner int flags; 460769dc9536SDmitry Monakhov int credits; 4608c174e6d6SDmitry Monakhov int partial_begin, partial_end; 4609b8a86845SLukas Czerner loff_t start, end; 4610b8a86845SLukas Czerner ext4_lblk_t lblk; 4611b8a86845SLukas Czerner unsigned int blkbits = inode->i_blkbits; 4612b8a86845SLukas Czerner 4613b8a86845SLukas Czerner trace_ext4_zero_range(inode, offset, len, mode); 4614b8a86845SLukas Czerner 4615e1ee60fdSNamjae Jeon /* Call ext4_force_commit to flush all data in case of data=journal. */ 4616e1ee60fdSNamjae Jeon if (ext4_should_journal_data(inode)) { 4617e1ee60fdSNamjae Jeon ret = ext4_force_commit(inode->i_sb); 4618e1ee60fdSNamjae Jeon if (ret) 4619e1ee60fdSNamjae Jeon return ret; 4620e1ee60fdSNamjae Jeon } 4621e1ee60fdSNamjae Jeon 4622b8a86845SLukas Czerner /* 4623b8a86845SLukas Czerner * Round up offset. This is not fallocate, we need to zero out 4624b8a86845SLukas Czerner * blocks, so convert interior block aligned part of the range to 4625b8a86845SLukas Czerner * unwritten and possibly manually zero out unaligned parts of the 4626b8a86845SLukas Czerner * range.
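 *
 * Worked example (added for illustration; the numbers are hypothetical):
 * with 4KiB blocks (blkbits = 12), offset = 5000 and len = 10000 give
 *
 *   start         = round_up(5000, 4096)           = 8192
 *   end           = round_down(5000 + 10000, 4096) = 12288
 *   partial_begin = 5000  & 4095                   = 904
 *   partial_end   = 15000 & 4095                   = 2712
 *
 * so the single block covering bytes [8192, 12288) is converted to an
 * unwritten extent, while the unaligned head and tail are zeroed via
 * ext4_zero_partial_blocks() at the end of the function.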
4627b8a86845SLukas Czerner */ 4628b8a86845SLukas Czerner start = round_up(offset, 1 << blkbits); 4629b8a86845SLukas Czerner end = round_down((offset + len), 1 << blkbits); 4630b8a86845SLukas Czerner 4631b8a86845SLukas Czerner if (start < offset || end > offset + len) 4632b8a86845SLukas Czerner return -EINVAL; 4633c174e6d6SDmitry Monakhov partial_begin = offset & ((1 << blkbits) - 1); 4634c174e6d6SDmitry Monakhov partial_end = (offset + len) & ((1 << blkbits) - 1); 4635b8a86845SLukas Czerner 4636b8a86845SLukas Czerner lblk = start >> blkbits; 4637b8a86845SLukas Czerner max_blocks = (end >> blkbits); 4638b8a86845SLukas Czerner if (max_blocks < lblk) 4639b8a86845SLukas Czerner max_blocks = 0; 4640b8a86845SLukas Czerner else 4641b8a86845SLukas Czerner max_blocks -= lblk; 4642b8a86845SLukas Czerner 46435955102cSAl Viro inode_lock(inode); 4644b8a86845SLukas Czerner 4645b8a86845SLukas Czerner /* 4646b8a86845SLukas Czerner * Indirect files do not support unwritten extents 4647b8a86845SLukas Czerner */ 4648b8a86845SLukas Czerner if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4649b8a86845SLukas Czerner ret = -EOPNOTSUPP; 4650b8a86845SLukas Czerner goto out_mutex; 4651b8a86845SLukas Czerner } 4652b8a86845SLukas Czerner 4653b8a86845SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) && 46549b02e498SEric Biggers (offset + len > inode->i_size || 465551e3ae81STheodore Ts'o offset + len > EXT4_I(inode)->i_disksize)) { 4656b8a86845SLukas Czerner new_size = offset + len; 4657b8a86845SLukas Czerner ret = inode_newsize_ok(inode, new_size); 4658b8a86845SLukas Czerner if (ret) 4659b8a86845SLukas Czerner goto out_mutex; 4660b8a86845SLukas Czerner } 4661b8a86845SLukas Czerner 46620f2af21aSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 46630f2af21aSLukas Czerner if (mode & FALLOC_FL_KEEP_SIZE) 46640f2af21aSLukas Czerner flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 46650f2af21aSLukas Czerner 466617048e8aSJan Kara /* Wait for all existing dio workers; newcomers will block on i_mutex */ 466717048e8aSJan Kara inode_dio_wait(inode); 466817048e8aSJan Kara 46690f2af21aSLukas Czerner /* Preallocate the range including the unaligned edges */ 46700f2af21aSLukas Czerner if (partial_begin || partial_end) { 46710f2af21aSLukas Czerner ret = ext4_alloc_file_blocks(file, 46720f2af21aSLukas Czerner round_down(offset, 1 << blkbits) >> blkbits, 46730f2af21aSLukas Czerner (round_up((offset + len), 1 << blkbits) - 46740f2af21aSLukas Czerner round_down(offset, 1 << blkbits)) >> blkbits, 467577a2e84dSTahsin Erdogan new_size, flags); 46760f2af21aSLukas Czerner if (ret) 46771d39834fSNikolay Borisov goto out_mutex; 46780f2af21aSLukas Czerner 46790f2af21aSLukas Czerner } 46800f2af21aSLukas Czerner 46810f2af21aSLukas Czerner /* Zero range excluding the unaligned edges */ 4682b8a86845SLukas Czerner if (max_blocks > 0) { 46830f2af21aSLukas Czerner flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | 46840f2af21aSLukas Czerner EXT4_EX_NOCACHE); 4685b8a86845SLukas Czerner 4686ea3d7209SJan Kara /* 4687ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have 4688ea3d7209SJan Kara * released from page cache.
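 *
 * (Added note, hedged: page faults take i_mmap_sem for reading, so
 * taking it for writing here keeps racing faults from re-creating
 * pages between truncate_pagecache_range() below and the conversion
 * of the underlying blocks to unwritten.)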
4689ea3d7209SJan Kara */ 4690ea3d7209SJan Kara down_write(&EXT4_I(inode)->i_mmap_sem); 4691430657b6SRoss Zwisler 4692430657b6SRoss Zwisler ret = ext4_break_layouts(inode); 4693430657b6SRoss Zwisler if (ret) { 4694430657b6SRoss Zwisler up_write(&EXT4_I(inode)->i_mmap_sem); 4695430657b6SRoss Zwisler goto out_mutex; 4696430657b6SRoss Zwisler } 4697430657b6SRoss Zwisler 469801127848SJan Kara ret = ext4_update_disksize_before_punch(inode, offset, len); 469901127848SJan Kara if (ret) { 470001127848SJan Kara up_write(&EXT4_I(inode)->i_mmap_sem); 47011d39834fSNikolay Borisov goto out_mutex; 470201127848SJan Kara } 4703ea3d7209SJan Kara /* Now release the pages and zero block aligned part of pages */ 4704ea3d7209SJan Kara truncate_pagecache_range(inode, start, end - 1); 4705eeca7ea1SDeepa Dinamani inode->i_mtime = inode->i_ctime = current_time(inode); 4706ea3d7209SJan Kara 4707c174e6d6SDmitry Monakhov ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 470877a2e84dSTahsin Erdogan flags); 4709ea3d7209SJan Kara up_write(&EXT4_I(inode)->i_mmap_sem); 4710b8a86845SLukas Czerner if (ret) 47111d39834fSNikolay Borisov goto out_mutex; 4712b8a86845SLukas Czerner } 4713c174e6d6SDmitry Monakhov if (!partial_begin && !partial_end) 47141d39834fSNikolay Borisov goto out_mutex; 4715c174e6d6SDmitry Monakhov 471669dc9536SDmitry Monakhov /* 471769dc9536SDmitry Monakhov * In the worst case we have to write out two nonadjacent unwritten 471869dc9536SDmitry Monakhov * blocks and update the inode. 471969dc9536SDmitry Monakhov */ 472069dc9536SDmitry Monakhov credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; 472169dc9536SDmitry Monakhov if (ext4_should_journal_data(inode)) 472269dc9536SDmitry Monakhov credits += 2; 472369dc9536SDmitry Monakhov handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); 4724b8a86845SLukas Czerner if (IS_ERR(handle)) { 4725b8a86845SLukas Czerner ret = PTR_ERR(handle); 4726b8a86845SLukas Czerner ext4_std_error(inode->i_sb, ret); 47271d39834fSNikolay Borisov goto out_mutex; 4728b8a86845SLukas Czerner } 4729b8a86845SLukas Czerner 4730eeca7ea1SDeepa Dinamani inode->i_mtime = inode->i_ctime = current_time(inode); 47314337ecd1SEric Whitney if (new_size) 47324631dbf6SDmitry Monakhov ext4_update_inode_size(inode, new_size); 4733b8a86845SLukas Czerner ext4_mark_inode_dirty(handle, inode); 4734b8a86845SLukas Czerner 4735b8a86845SLukas Czerner /* Zero out partial block at the edges of the range */ 4736b8a86845SLukas Czerner ret = ext4_zero_partial_blocks(handle, inode, offset, len); 473767a7d5f5SJan Kara if (ret >= 0) 473867a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 4739b8a86845SLukas Czerner 4740b8a86845SLukas Czerner if (file->f_flags & O_SYNC) 4741b8a86845SLukas Czerner ext4_handle_sync(handle); 4742b8a86845SLukas Czerner 4743b8a86845SLukas Czerner ext4_journal_stop(handle); 4744b8a86845SLukas Czerner out_mutex: 47455955102cSAl Viro inode_unlock(inode); 4746b8a86845SLukas Czerner return ret; 4747b8a86845SLukas Czerner } 4748b8a86845SLukas Czerner 47490e8b6879SLukas Czerner /* 47500e8b6879SLukas Czerner * preallocate space for a file. This implements ext4's fallocate file 47510e8b6879SLukas Czerner * operation, which gets called from the sys_fallocate system call. 47520e8b6879SLukas Czerner * For block-mapped files, posix_fallocate should fall back to the method 47530e8b6879SLukas Czerner * of writing zeroes to the required new blocks (the same behavior that is 47540e8b6879SLukas Czerner * expected of file systems which do not support the fallocate() system call).
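 *
 * Usage sketch from user space (added for illustration; the path and
 * sizes are hypothetical, error handling omitted):
 *
 *   int fd = open("/some/file", O_RDWR);
 *   fallocate(fd, 0, 0, 16 << 20);             // preallocate 16 MiB
 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *             0, 1 << 20);                     // punch out the first MiB
 *
 * Requests this function cannot honour fail with EOPNOTSUPP, e.g.
 * collapse or insert range on an encrypted inode, as checked below.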
47550e8b6879SLukas Czerner */ 47560e8b6879SLukas Czerner long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 47570e8b6879SLukas Czerner { 47580e8b6879SLukas Czerner struct inode *inode = file_inode(file); 47590e8b6879SLukas Czerner loff_t new_size = 0; 47600e8b6879SLukas Czerner unsigned int max_blocks; 47610e8b6879SLukas Czerner int ret = 0; 47620e8b6879SLukas Czerner int flags; 47630e8b6879SLukas Czerner ext4_lblk_t lblk; 47640e8b6879SLukas Czerner unsigned int blkbits = inode->i_blkbits; 47650e8b6879SLukas Czerner 47662058f83aSMichael Halcrow /* 47672058f83aSMichael Halcrow * Encrypted inodes can't handle collapse range or insert 47682058f83aSMichael Halcrow * range since we would need to re-encrypt blocks with a 47692058f83aSMichael Halcrow * different IV or XTS tweak (which are based on the logical 47702058f83aSMichael Halcrow * block number). 47712058f83aSMichael Halcrow */ 4772592ddec7SChandan Rajendra if (IS_ENCRYPTED(inode) && 4773457b1e35SEric Biggers (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) 47742058f83aSMichael Halcrow return -EOPNOTSUPP; 47752058f83aSMichael Halcrow 47760e8b6879SLukas Czerner /* Return error if mode is not supported */ 47770e8b6879SLukas Czerner if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4778331573feSNamjae Jeon FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | 4779331573feSNamjae Jeon FALLOC_FL_INSERT_RANGE)) 47800e8b6879SLukas Czerner return -EOPNOTSUPP; 47810e8b6879SLukas Czerner 47820e8b6879SLukas Czerner if (mode & FALLOC_FL_PUNCH_HOLE) 47830e8b6879SLukas Czerner return ext4_punch_hole(inode, offset, len); 47840e8b6879SLukas Czerner 47850e8b6879SLukas Czerner ret = ext4_convert_inline_data(inode); 47860e8b6879SLukas Czerner if (ret) 47870e8b6879SLukas Czerner return ret; 47880e8b6879SLukas Czerner 478940c406c7STheodore Ts'o if (mode & FALLOC_FL_COLLAPSE_RANGE) 479040c406c7STheodore Ts'o return ext4_collapse_range(inode, offset, len); 479140c406c7STheodore Ts'o 4792331573feSNamjae Jeon if (mode & FALLOC_FL_INSERT_RANGE) 4793331573feSNamjae Jeon return ext4_insert_range(inode, offset, len); 4794331573feSNamjae Jeon 4795b8a86845SLukas Czerner if (mode & FALLOC_FL_ZERO_RANGE) 4796b8a86845SLukas Czerner return ext4_zero_range(file, offset, len, mode); 4797b8a86845SLukas Czerner 47980e8b6879SLukas Czerner trace_ext4_fallocate_enter(inode, offset, len, mode); 47990e8b6879SLukas Czerner lblk = offset >> blkbits; 48000e8b6879SLukas Czerner 4801518eaa63SFabian Frederick max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4802556615dcSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 48030e8b6879SLukas Czerner if (mode & FALLOC_FL_KEEP_SIZE) 48040e8b6879SLukas Czerner flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 48050e8b6879SLukas Czerner 48065955102cSAl Viro inode_lock(inode); 48070e8b6879SLukas Czerner 4808280227a7SDavide Italiano /* 4809280227a7SDavide Italiano * We only support preallocation for extent-based files 4810280227a7SDavide Italiano */ 4811280227a7SDavide Italiano if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4812280227a7SDavide Italiano ret = -EOPNOTSUPP; 4813280227a7SDavide Italiano goto out; 4814280227a7SDavide Italiano } 4815280227a7SDavide Italiano 48160e8b6879SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) && 48179b02e498SEric Biggers (offset + len > inode->i_size || 481851e3ae81STheodore Ts'o offset + len > EXT4_I(inode)->i_disksize)) { 48190e8b6879SLukas Czerner new_size = offset + len; 48200e8b6879SLukas Czerner ret = inode_newsize_ok(inode, new_size);
48210e8b6879SLukas Czerner if (ret) 48220e8b6879SLukas Czerner goto out; 48230e8b6879SLukas Czerner } 48240e8b6879SLukas Czerner 482517048e8aSJan Kara /* Wait for all existing dio workers; newcomers will block on i_mutex */ 482617048e8aSJan Kara inode_dio_wait(inode); 482717048e8aSJan Kara 482877a2e84dSTahsin Erdogan ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); 48290e8b6879SLukas Czerner if (ret) 48300e8b6879SLukas Czerner goto out; 48310e8b6879SLukas Czerner 4832c174e6d6SDmitry Monakhov if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { 4833c174e6d6SDmitry Monakhov ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, 4834c174e6d6SDmitry Monakhov EXT4_I(inode)->i_sync_tid); 4835f282ac19SLukas Czerner } 4836f282ac19SLukas Czerner out: 48375955102cSAl Viro inode_unlock(inode); 48380e8b6879SLukas Czerner trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 48390e8b6879SLukas Czerner return ret; 4840a2df2a63SAmit Arora } 48416873fa0dSEric Sandeen 48426873fa0dSEric Sandeen /* 48430031462bSMingming Cao * This function converts a range of blocks to written extents. 48440031462bSMingming Cao * The caller of this function will pass the start offset and the size. 48450031462bSMingming Cao * All unwritten extents within this range will be converted to 48460031462bSMingming Cao * written extents. 48470031462bSMingming Cao * 48480031462bSMingming Cao * This function is called from the direct IO end io callback 48490031462bSMingming Cao * function, to convert the fallocated extents after IO is completed. 4850109f5565SMingming * Returns 0 on success. 48510031462bSMingming Cao */ 48526b523df4SJan Kara int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 48536b523df4SJan Kara loff_t offset, ssize_t len) 48540031462bSMingming Cao { 48550031462bSMingming Cao unsigned int max_blocks; 48560031462bSMingming Cao int ret = 0; 48570031462bSMingming Cao int ret2 = 0; 48582ed88685STheodore Ts'o struct ext4_map_blocks map; 4859a00713eaSRitesh Harjani unsigned int blkbits = inode->i_blkbits; 4860a00713eaSRitesh Harjani unsigned int credits = 0; 48610031462bSMingming Cao 48622ed88685STheodore Ts'o map.m_lblk = offset >> blkbits; 4863518eaa63SFabian Frederick max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4864518eaa63SFabian Frederick 4865a00713eaSRitesh Harjani if (!handle) { 48666b523df4SJan Kara /* 48670031462bSMingming Cao * credits to insert 1 extent into extent tree 48680031462bSMingming Cao */ 48690031462bSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks); 48706b523df4SJan Kara } 48710031462bSMingming Cao while (ret >= 0 && ret < max_blocks) { 48722ed88685STheodore Ts'o map.m_lblk += ret; 48732ed88685STheodore Ts'o map.m_len = (max_blocks -= ret); 48746b523df4SJan Kara if (credits) { 48756b523df4SJan Kara handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 48766b523df4SJan Kara credits); 48770031462bSMingming Cao if (IS_ERR(handle)) { 48780031462bSMingming Cao ret = PTR_ERR(handle); 48790031462bSMingming Cao break; 48800031462bSMingming Cao } 48816b523df4SJan Kara } 48822ed88685STheodore Ts'o ret = ext4_map_blocks(handle, inode, &map, 4883c7064ef1SJiaying Zhang EXT4_GET_BLOCKS_IO_CONVERT_EXT); 4884b06acd38SLukas Czerner if (ret <= 0) 4885b06acd38SLukas Czerner ext4_warning(inode->i_sb, 4886b06acd38SLukas Czerner "inode #%lu: block %u: len %u: " 488792b97816STheodore Ts'o "ext4_ext_map_blocks returned %d", 4888b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 488992b97816STheodore Ts'o map.m_len, ret);
48900031462bSMingming Cao ext4_mark_inode_dirty(handle, inode); 48916b523df4SJan Kara if (credits) 48920031462bSMingming Cao ret2 = ext4_journal_stop(handle); 48930031462bSMingming Cao if (ret <= 0 || ret2) 48940031462bSMingming Cao break; 48950031462bSMingming Cao } 48960031462bSMingming Cao return ret > 0 ? ret2 : ret; 48970031462bSMingming Cao } 48986d9c85ebSYongqiang Yang 4899a00713eaSRitesh Harjani int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end) 4900a00713eaSRitesh Harjani { 4901a00713eaSRitesh Harjani int ret, err = 0; 4902c8cc8816SRitesh Harjani struct ext4_io_end_vec *io_end_vec; 4903a00713eaSRitesh Harjani 4904a00713eaSRitesh Harjani /* 4905a00713eaSRitesh Harjani * This is somewhat ugly but the idea is clear: When transaction is 4906a00713eaSRitesh Harjani * reserved, everything goes into it. Otherwise we rather start several 4907a00713eaSRitesh Harjani * smaller transactions for conversion of each extent separately. 4908a00713eaSRitesh Harjani */ 4909a00713eaSRitesh Harjani if (handle) { 4910a00713eaSRitesh Harjani handle = ext4_journal_start_reserved(handle, 4911a00713eaSRitesh Harjani EXT4_HT_EXT_CONVERT); 4912a00713eaSRitesh Harjani if (IS_ERR(handle)) 4913a00713eaSRitesh Harjani return PTR_ERR(handle); 4914a00713eaSRitesh Harjani } 4915a00713eaSRitesh Harjani 4916c8cc8816SRitesh Harjani list_for_each_entry(io_end_vec, &io_end->list_vec, list) { 4917a00713eaSRitesh Harjani ret = ext4_convert_unwritten_extents(handle, io_end->inode, 4918c8cc8816SRitesh Harjani io_end_vec->offset, 4919c8cc8816SRitesh Harjani io_end_vec->size); 4920c8cc8816SRitesh Harjani if (ret) 4921c8cc8816SRitesh Harjani break; 4922c8cc8816SRitesh Harjani } 4923c8cc8816SRitesh Harjani 4924a00713eaSRitesh Harjani if (handle) 4925a00713eaSRitesh Harjani err = ext4_journal_stop(handle); 4926a00713eaSRitesh Harjani 4927a00713eaSRitesh Harjani return ret < 0 ? ret : err; 4928a00713eaSRitesh Harjani } 4929a00713eaSRitesh Harjani 49300031462bSMingming Cao /* 493169eb33dcSZheng Liu * If newes is not an existing extent (newes->es_pblk equals zero), find 493269eb33dcSZheng Liu * a delayed extent at the start of newes, update newes accordingly and 493391dd8c11SLukas Czerner * return the start of the next delayed extent. 493491dd8c11SLukas Czerner * 493569eb33dcSZheng Liu * If newes is an existing extent (newes->es_pblk is not zero), return 493691dd8c11SLukas Czerner * the start of the next delayed extent or EXT_MAX_BLOCKS if no delayed 493769eb33dcSZheng Liu * extent is found. Leave newes unmodified. 49386873fa0dSEric Sandeen */ 493991dd8c11SLukas Czerner static int ext4_find_delayed_extent(struct inode *inode, 494069eb33dcSZheng Liu struct extent_status *newes) 49416873fa0dSEric Sandeen { 4942b3aff3e3SZheng Liu struct extent_status es; 4943be401363SZheng Liu ext4_lblk_t block, next_del; 49446873fa0dSEric Sandeen 494569eb33dcSZheng Liu if (newes->es_pblk == 0) { 4946ad431025SEric Whitney ext4_es_find_extent_range(inode, &ext4_es_is_delayed, 4947ad431025SEric Whitney newes->es_lblk, 4948ad431025SEric Whitney newes->es_lblk + newes->es_len - 1, 4949ad431025SEric Whitney &es); 4950e30b5dcaSYan, Zheng 49516d9c85ebSYongqiang Yang /* 495269eb33dcSZheng Liu * If no extent in the extent tree contains block @newes->es_pblk, 49536d9c85ebSYongqiang Yang * then the block may lie in 1) a hole or 2) a delayed extent. 49546d9c85ebSYongqiang Yang */ 495506b0c886SZheng Liu if (es.es_len == 0) 4956b3aff3e3SZheng Liu /* A hole found.
*/ 495791dd8c11SLukas Czerner return 0; 49586d9c85ebSYongqiang Yang 495969eb33dcSZheng Liu if (es.es_lblk > newes->es_lblk) { 4960b3aff3e3SZheng Liu /* A hole found. */ 496169eb33dcSZheng Liu newes->es_len = min(es.es_lblk - newes->es_lblk, 496269eb33dcSZheng Liu newes->es_len); 496391dd8c11SLukas Czerner return 0; 49646873fa0dSEric Sandeen } 49656d9c85ebSYongqiang Yang 496669eb33dcSZheng Liu newes->es_len = es.es_lblk + es.es_len - newes->es_lblk; 49676d9c85ebSYongqiang Yang } 49686873fa0dSEric Sandeen 496969eb33dcSZheng Liu block = newes->es_lblk + newes->es_len; 4970ad431025SEric Whitney ext4_es_find_extent_range(inode, &ext4_es_is_delayed, block, 4971ad431025SEric Whitney EXT_MAX_BLOCKS, &es); 4972be401363SZheng Liu if (es.es_len == 0) 4973be401363SZheng Liu next_del = EXT_MAX_BLOCKS; 4974be401363SZheng Liu else 4975be401363SZheng Liu next_del = es.es_lblk; 4976be401363SZheng Liu 497791dd8c11SLukas Czerner return next_del; 49786873fa0dSEric Sandeen } 49796873fa0dSEric Sandeen 49803a06d778SAneesh Kumar K.V static int ext4_xattr_fiemap(struct inode *inode, 49813a06d778SAneesh Kumar K.V struct fiemap_extent_info *fieinfo) 49826873fa0dSEric Sandeen { 49836873fa0dSEric Sandeen __u64 physical = 0; 49846873fa0dSEric Sandeen __u64 length; 49856873fa0dSEric Sandeen __u32 flags = FIEMAP_EXTENT_LAST; 49866873fa0dSEric Sandeen int blockbits = inode->i_sb->s_blocksize_bits; 49876873fa0dSEric Sandeen int error = 0; 49886873fa0dSEric Sandeen 49896873fa0dSEric Sandeen /* in-inode? */ 499019f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 49916873fa0dSEric Sandeen struct ext4_iloc iloc; 49926873fa0dSEric Sandeen int offset; /* offset of xattr in inode */ 49936873fa0dSEric Sandeen 49946873fa0dSEric Sandeen error = ext4_get_inode_loc(inode, &iloc); 49956873fa0dSEric Sandeen if (error) 49966873fa0dSEric Sandeen return error; 4997a60697f4SJan Kara physical = (__u64)iloc.bh->b_blocknr << blockbits; 49986873fa0dSEric Sandeen offset = EXT4_GOOD_OLD_INODE_SIZE + 49996873fa0dSEric Sandeen EXT4_I(inode)->i_extra_isize; 50006873fa0dSEric Sandeen physical += offset; 50016873fa0dSEric Sandeen length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 50026873fa0dSEric Sandeen flags |= FIEMAP_EXTENT_DATA_INLINE; 5003fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 50046873fa0dSEric Sandeen } else { /* external block */ 5005a60697f4SJan Kara physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 50066873fa0dSEric Sandeen length = inode->i_sb->s_blocksize; 50076873fa0dSEric Sandeen } 50086873fa0dSEric Sandeen 50096873fa0dSEric Sandeen if (physical) 50106873fa0dSEric Sandeen error = fiemap_fill_next_extent(fieinfo, 0, physical, 50116873fa0dSEric Sandeen length, flags); 50126873fa0dSEric Sandeen return (error < 0 ? 
error : 0); 50136873fa0dSEric Sandeen } 50146873fa0dSEric Sandeen 5015bb5835edSTheodore Ts'o static int _ext4_fiemap(struct inode *inode, 5016bb5835edSTheodore Ts'o struct fiemap_extent_info *fieinfo, 5017bb5835edSTheodore Ts'o __u64 start, __u64 len, 5018bb5835edSTheodore Ts'o int (*fill)(struct inode *, ext4_lblk_t, 5019bb5835edSTheodore Ts'o ext4_lblk_t, 5020bb5835edSTheodore Ts'o struct fiemap_extent_info *)) 50216873fa0dSEric Sandeen { 50226873fa0dSEric Sandeen ext4_lblk_t start_blk; 5023bb5835edSTheodore Ts'o u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR; 5024bb5835edSTheodore Ts'o 50256873fa0dSEric Sandeen int error = 0; 50266873fa0dSEric Sandeen 502794191985STao Ma if (ext4_has_inline_data(inode)) { 502894191985STao Ma int has_inline = 1; 502994191985STao Ma 5030d952d69eSDmitry Monakhov error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline, 5031d952d69eSDmitry Monakhov start, len); 503294191985STao Ma 503394191985STao Ma if (has_inline) 503494191985STao Ma return error; 503594191985STao Ma } 503694191985STao Ma 50377869a4a6STheodore Ts'o if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 50387869a4a6STheodore Ts'o error = ext4_ext_precache(inode); 50397869a4a6STheodore Ts'o if (error) 50407869a4a6STheodore Ts'o return error; 5041bb5835edSTheodore Ts'o fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 50427869a4a6STheodore Ts'o } 50437869a4a6STheodore Ts'o 50446873fa0dSEric Sandeen /* fallback to generic here if not in extents fmt */ 5045bb5835edSTheodore Ts'o if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && 5046bb5835edSTheodore Ts'o fill == ext4_fill_fiemap_extents) 5047ad7fefb1STheodore Ts'o return generic_block_fiemap(inode, fieinfo, start, len, 50486873fa0dSEric Sandeen ext4_get_block); 50496873fa0dSEric Sandeen 5050bb5835edSTheodore Ts'o if (fill == ext4_fill_es_cache_info) 5051bb5835edSTheodore Ts'o ext4_fiemap_flags &= FIEMAP_FLAG_XATTR; 5052bb5835edSTheodore Ts'o if (fiemap_check_flags(fieinfo, ext4_fiemap_flags)) 50536873fa0dSEric Sandeen return -EBADR; 50546873fa0dSEric Sandeen 50556873fa0dSEric Sandeen if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 50566873fa0dSEric Sandeen error = ext4_xattr_fiemap(inode, fieinfo); 50576873fa0dSEric Sandeen } else { 5058aca92ff6SLeonard Michlmayr ext4_lblk_t len_blks; 5059aca92ff6SLeonard Michlmayr __u64 last_blk; 5060aca92ff6SLeonard Michlmayr 50616873fa0dSEric Sandeen start_blk = start >> inode->i_sb->s_blocksize_bits; 5062aca92ff6SLeonard Michlmayr last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 5063f17722f9SLukas Czerner if (last_blk >= EXT_MAX_BLOCKS) 5064f17722f9SLukas Czerner last_blk = EXT_MAX_BLOCKS-1; 5065aca92ff6SLeonard Michlmayr len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 50666873fa0dSEric Sandeen 50676873fa0dSEric Sandeen /* 506891dd8c11SLukas Czerner * Walk the extent tree gathering extent information 506991dd8c11SLukas Czerner * and pushing extents back to the user. 
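 *
 * (Illustrative aside, added: user space typically reaches this path
 * through the FIEMAP ioctl, roughly:
 *
 *   struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                              N * sizeof(struct fiemap_extent));
 *   fm->fm_start = 0;
 *   fm->fm_length = FIEMAP_MAX_OFFSET;
 *   fm->fm_extent_count = N;
 *   ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * and each mapped range comes back as one struct fiemap_extent, filled
 * in via fiemap_fill_next_extent().)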
50706873fa0dSEric Sandeen */ 5071bb5835edSTheodore Ts'o error = fill(inode, start_blk, len_blks, fieinfo); 50726873fa0dSEric Sandeen } 50736873fa0dSEric Sandeen return error; 50746873fa0dSEric Sandeen } 50759eb79482SNamjae Jeon 5076bb5835edSTheodore Ts'o int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 5077bb5835edSTheodore Ts'o __u64 start, __u64 len) 5078bb5835edSTheodore Ts'o { 5079bb5835edSTheodore Ts'o return _ext4_fiemap(inode, fieinfo, start, len, 5080bb5835edSTheodore Ts'o ext4_fill_fiemap_extents); 5081bb5835edSTheodore Ts'o } 5082bb5835edSTheodore Ts'o 5083bb5835edSTheodore Ts'o int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, 5084bb5835edSTheodore Ts'o __u64 start, __u64 len) 5085bb5835edSTheodore Ts'o { 5086bb5835edSTheodore Ts'o if (ext4_has_inline_data(inode)) { 5087bb5835edSTheodore Ts'o int has_inline; 5088bb5835edSTheodore Ts'o 5089bb5835edSTheodore Ts'o down_read(&EXT4_I(inode)->xattr_sem); 5090bb5835edSTheodore Ts'o has_inline = ext4_has_inline_data(inode); 5091bb5835edSTheodore Ts'o up_read(&EXT4_I(inode)->xattr_sem); 5092bb5835edSTheodore Ts'o if (has_inline) 5093bb5835edSTheodore Ts'o return 0; 5094bb5835edSTheodore Ts'o } 5095bb5835edSTheodore Ts'o 5096bb5835edSTheodore Ts'o return _ext4_fiemap(inode, fieinfo, start, len, 5097bb5835edSTheodore Ts'o ext4_fill_es_cache_info); 5098bb5835edSTheodore Ts'o } 5099bb5835edSTheodore Ts'o 5100bb5835edSTheodore Ts'o 51019eb79482SNamjae Jeon /* 51029eb79482SNamjae Jeon * ext4_access_path: 51039eb79482SNamjae Jeon * Function to access the path buffer for marking it dirty. 51049eb79482SNamjae Jeon * It also checks if there are sufficient credits left in the journal handle 51059eb79482SNamjae Jeon * to update path. 51069eb79482SNamjae Jeon */ 51079eb79482SNamjae Jeon static int 51089eb79482SNamjae Jeon ext4_access_path(handle_t *handle, struct inode *inode, 51099eb79482SNamjae Jeon struct ext4_ext_path *path) 51109eb79482SNamjae Jeon { 51119eb79482SNamjae Jeon int credits, err; 51129eb79482SNamjae Jeon 51139eb79482SNamjae Jeon if (!ext4_handle_valid(handle)) 51149eb79482SNamjae Jeon return 0; 51159eb79482SNamjae Jeon 51169eb79482SNamjae Jeon /* 51179eb79482SNamjae Jeon * Check if we need to extend journal credits: 51189eb79482SNamjae Jeon * 3 for leaf, sb, and inode plus 2 (bmap and group 51199eb79482SNamjae Jeon * descriptor) for each block group; assume two block 51209eb79482SNamjae Jeon * groups 51219eb79482SNamjae Jeon */ 51229eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 512383448bdfSJan Kara err = ext4_datasem_ensure_credits(handle, inode, 7, credits, 0); 5124a4130367SJan Kara if (err < 0) 51259eb79482SNamjae Jeon return err; 51269eb79482SNamjae Jeon 51279eb79482SNamjae Jeon err = ext4_ext_get_access(handle, inode, path); 51289eb79482SNamjae Jeon return err; 51299eb79482SNamjae Jeon } 51309eb79482SNamjae Jeon 51319eb79482SNamjae Jeon /* 51329eb79482SNamjae Jeon * ext4_ext_shift_path_extents: 51339eb79482SNamjae Jeon * Shift the extents of a path structure lying between path[depth].p_ext 5134331573feSNamjae Jeon * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells 5135331573feSNamjae Jeon * whether it is a right or a left shift operation.
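 *
 * Worked sketch (added for illustration): for a left shift, each extent
 * in the leaf gets le32_add_cpu(&ex->ee_block, -shift) and the index
 * entries above it move by the same amount; with shift = 10, an extent
 * covering logical blocks [100, 120) ends up covering [90, 110) while
 * its physical blocks stay untouched.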
51369eb79482SNamjae Jeon */ 51379eb79482SNamjae Jeon static int 51389eb79482SNamjae Jeon ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 51399eb79482SNamjae Jeon struct inode *inode, handle_t *handle, 5140331573feSNamjae Jeon enum SHIFT_DIRECTION SHIFT) 51419eb79482SNamjae Jeon { 51429eb79482SNamjae Jeon int depth, err = 0; 51439eb79482SNamjae Jeon struct ext4_extent *ex_start, *ex_last; 51444756ee18Szhengbin bool update = false; 51459eb79482SNamjae Jeon depth = path->p_depth; 51469eb79482SNamjae Jeon 51479eb79482SNamjae Jeon while (depth >= 0) { 51489eb79482SNamjae Jeon if (depth == path->p_depth) { 51499eb79482SNamjae Jeon ex_start = path[depth].p_ext; 51509eb79482SNamjae Jeon if (!ex_start) 51516a797d27SDarrick J. Wong return -EFSCORRUPTED; 51529eb79482SNamjae Jeon 51539eb79482SNamjae Jeon ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 51549eb79482SNamjae Jeon 51559eb79482SNamjae Jeon err = ext4_access_path(handle, inode, path + depth); 51569eb79482SNamjae Jeon if (err) 51579eb79482SNamjae Jeon goto out; 51589eb79482SNamjae Jeon 51599eb79482SNamjae Jeon if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) 51604756ee18Szhengbin update = true; 51619eb79482SNamjae Jeon 51629eb79482SNamjae Jeon while (ex_start <= ex_last) { 5163331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) { 5164331573feSNamjae Jeon le32_add_cpu(&ex_start->ee_block, 5165331573feSNamjae Jeon -shift); 51666dd834efSLukas Czerner /* Try to merge to the left. */ 51676dd834efSLukas Czerner if ((ex_start > 5168331573feSNamjae Jeon EXT_FIRST_EXTENT(path[depth].p_hdr)) 5169331573feSNamjae Jeon && 51706dd834efSLukas Czerner ext4_ext_try_to_merge_right(inode, 51719eb79482SNamjae Jeon path, ex_start - 1)) 51729eb79482SNamjae Jeon ex_last--; 51736dd834efSLukas Czerner else 51749eb79482SNamjae Jeon ex_start++; 5175331573feSNamjae Jeon } else { 5176331573feSNamjae Jeon le32_add_cpu(&ex_last->ee_block, shift); 5177331573feSNamjae Jeon ext4_ext_try_to_merge_right(inode, path, 5178331573feSNamjae Jeon ex_last); 5179331573feSNamjae Jeon ex_last--; 5180331573feSNamjae Jeon } 51819eb79482SNamjae Jeon } 51829eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth); 51839eb79482SNamjae Jeon if (err) 51849eb79482SNamjae Jeon goto out; 51859eb79482SNamjae Jeon 51869eb79482SNamjae Jeon if (--depth < 0 || !update) 51879eb79482SNamjae Jeon break; 51889eb79482SNamjae Jeon } 51899eb79482SNamjae Jeon 51909eb79482SNamjae Jeon /* Update index too */ 51919eb79482SNamjae Jeon err = ext4_access_path(handle, inode, path + depth); 51929eb79482SNamjae Jeon if (err) 51939eb79482SNamjae Jeon goto out; 51949eb79482SNamjae Jeon 5195331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) 5196847c6c42SZheng Liu le32_add_cpu(&path[depth].p_idx->ei_block, -shift); 5197331573feSNamjae Jeon else 5198331573feSNamjae Jeon le32_add_cpu(&path[depth].p_idx->ei_block, shift); 51999eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth); 52009eb79482SNamjae Jeon if (err) 52019eb79482SNamjae Jeon goto out; 52029eb79482SNamjae Jeon 52039eb79482SNamjae Jeon /* we are done if current index is not a starting index */ 52049eb79482SNamjae Jeon if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) 52059eb79482SNamjae Jeon break; 52069eb79482SNamjae Jeon 52079eb79482SNamjae Jeon depth--; 52089eb79482SNamjae Jeon } 52099eb79482SNamjae Jeon 52109eb79482SNamjae Jeon out: 52119eb79482SNamjae Jeon return err; 52129eb79482SNamjae Jeon } 52139eb79482SNamjae Jeon 52149eb79482SNamjae Jeon /* 52159eb79482SNamjae Jeon * ext4_ext_shift_extents: 
5216331573feSNamjae Jeon * All the extents which lie in the range from @start to the last allocated 5217331573feSNamjae Jeon * block for the @inode are shifted either towards the left or the right 5218331573feSNamjae Jeon * (depending upon @SHIFT) by @shift blocks. 52199eb79482SNamjae Jeon * On success, 0 is returned, error otherwise. 52209eb79482SNamjae Jeon */ 52219eb79482SNamjae Jeon static int 52229eb79482SNamjae Jeon ext4_ext_shift_extents(struct inode *inode, handle_t *handle, 5223331573feSNamjae Jeon ext4_lblk_t start, ext4_lblk_t shift, 5224331573feSNamjae Jeon enum SHIFT_DIRECTION SHIFT) 52259eb79482SNamjae Jeon { 52269eb79482SNamjae Jeon struct ext4_ext_path *path; 52279eb79482SNamjae Jeon int ret = 0, depth; 52289eb79482SNamjae Jeon struct ext4_extent *extent; 5229331573feSNamjae Jeon ext4_lblk_t stop, *iterator, ex_start, ex_end; 52309eb79482SNamjae Jeon 52319eb79482SNamjae Jeon /* Let path point to the last extent */ 523203e916faSRoman Pen path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 523303e916faSRoman Pen EXT4_EX_NOCACHE); 52349eb79482SNamjae Jeon if (IS_ERR(path)) 52359eb79482SNamjae Jeon return PTR_ERR(path); 52369eb79482SNamjae Jeon 52379eb79482SNamjae Jeon depth = path->p_depth; 52389eb79482SNamjae Jeon extent = path[depth].p_ext; 5239ee4bd0d9STheodore Ts'o if (!extent) 5240ee4bd0d9STheodore Ts'o goto out; 52419eb79482SNamjae Jeon 52422a9b8cbaSRoman Pen stop = le32_to_cpu(extent->ee_block); 52439eb79482SNamjae Jeon 52449eb79482SNamjae Jeon /* 5245349fa7d6SEric Biggers * For left shifts, make sure the hole on the left is big enough to 5246349fa7d6SEric Biggers * accommodate the shift. For right shifts, make sure the last extent 5247349fa7d6SEric Biggers * won't be shifted beyond EXT_MAX_BLOCKS. 52489eb79482SNamjae Jeon */ 5249331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) { 525003e916faSRoman Pen path = ext4_find_extent(inode, start - 1, &path, 525103e916faSRoman Pen EXT4_EX_NOCACHE); 52528dc79ec4SDmitry Monakhov if (IS_ERR(path)) 52528dc79ec4SDmitry Monakhov return PTR_ERR(path); 52549eb79482SNamjae Jeon depth = path->p_depth; 52559eb79482SNamjae Jeon extent = path[depth].p_ext; 52568dc79ec4SDmitry Monakhov if (extent) { 5257847c6c42SZheng Liu ex_start = le32_to_cpu(extent->ee_block); 5258847c6c42SZheng Liu ex_end = le32_to_cpu(extent->ee_block) + 5259847c6c42SZheng Liu ext4_ext_get_actual_len(extent); 52608dc79ec4SDmitry Monakhov } else { 52618dc79ec4SDmitry Monakhov ex_start = 0; 52628dc79ec4SDmitry Monakhov ex_end = 0; 52638dc79ec4SDmitry Monakhov } 52649eb79482SNamjae Jeon 52659eb79482SNamjae Jeon if ((start == ex_start && shift > ex_start) || 5266331573feSNamjae Jeon (shift > start - ex_end)) { 5267349fa7d6SEric Biggers ret = -EINVAL; 5268349fa7d6SEric Biggers goto out; 5269349fa7d6SEric Biggers } 5270349fa7d6SEric Biggers } else { 5271349fa7d6SEric Biggers if (shift > EXT_MAX_BLOCKS - 5272349fa7d6SEric Biggers (stop + ext4_ext_get_actual_len(extent))) { 5273349fa7d6SEric Biggers ret = -EINVAL; 5274349fa7d6SEric Biggers goto out; 5275331573feSNamjae Jeon } 5276331573feSNamjae Jeon } 5277331573feSNamjae Jeon 5278331573feSNamjae Jeon /* 5279331573feSNamjae Jeon * In the case of a left shift, the iterator points to start and is 5280331573feSNamjae Jeon * increased till we reach stop. In the case of a right shift, the 5281331573feSNamjae Jeon * iterator points to stop and is decreased till we reach start.
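 *
 * Example (added for illustration): collapsing logical blocks [8, 12)
 * reaches this function with start = 12, shift = 4 and SHIFT_LEFT, so
 * the iterator walks upward from block 12 and every extent from there
 * on moves four blocks towards the start of the file.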
5282331573feSNamjae Jeon */ 5283331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) 5284331573feSNamjae Jeon iterator = &start; 5285331573feSNamjae Jeon else 5286331573feSNamjae Jeon iterator = &stop; 52879eb79482SNamjae Jeon 52882a9b8cbaSRoman Pen /* 52892a9b8cbaSRoman Pen * It's safe to start updating extents. Start and stop are unsigned, so 52902a9b8cbaSRoman Pen * in the case of a right shift, if the extent with block 0 is reached, 52912a9b8cbaSRoman Pen * the iterator becomes NULL to indicate the end of the loop. 52922a9b8cbaSRoman Pen */ 52932a9b8cbaSRoman Pen while (iterator && start <= stop) { 529403e916faSRoman Pen path = ext4_find_extent(inode, *iterator, &path, 529503e916faSRoman Pen EXT4_EX_NOCACHE); 52969eb79482SNamjae Jeon if (IS_ERR(path)) 52979eb79482SNamjae Jeon return PTR_ERR(path); 52989eb79482SNamjae Jeon depth = path->p_depth; 52999eb79482SNamjae Jeon extent = path[depth].p_ext; 5300a18ed359SDmitry Monakhov if (!extent) { 5301a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 5302331573feSNamjae Jeon (unsigned long) *iterator); 53036a797d27SDarrick J. Wong return -EFSCORRUPTED; 5304a18ed359SDmitry Monakhov } 5305331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT && *iterator > 5306331573feSNamjae Jeon le32_to_cpu(extent->ee_block)) { 53079eb79482SNamjae Jeon /* Hole, move to the next extent */ 5308f8fb4f41SDmitry Monakhov if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { 5309f8fb4f41SDmitry Monakhov path[depth].p_ext++; 5310f8fb4f41SDmitry Monakhov } else { 5311331573feSNamjae Jeon *iterator = ext4_ext_next_allocated_block(path); 5312f8fb4f41SDmitry Monakhov continue; 53139eb79482SNamjae Jeon } 53149eb79482SNamjae Jeon } 5315331573feSNamjae Jeon 5316331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) { 5317331573feSNamjae Jeon extent = EXT_LAST_EXTENT(path[depth].p_hdr); 5318331573feSNamjae Jeon *iterator = le32_to_cpu(extent->ee_block) + 5319331573feSNamjae Jeon ext4_ext_get_actual_len(extent); 5320331573feSNamjae Jeon } else { 5321331573feSNamjae Jeon extent = EXT_FIRST_EXTENT(path[depth].p_hdr); 53222a9b8cbaSRoman Pen if (le32_to_cpu(extent->ee_block) > 0) 53232a9b8cbaSRoman Pen *iterator = le32_to_cpu(extent->ee_block) - 1; 53242a9b8cbaSRoman Pen else 53252a9b8cbaSRoman Pen /* Beginning is reached, end of the loop */ 53262a9b8cbaSRoman Pen iterator = NULL; 5327331573feSNamjae Jeon /* Update path extent in case we need to stop */ 5328331573feSNamjae Jeon while (le32_to_cpu(extent->ee_block) < start) 5329331573feSNamjae Jeon extent++; 5330331573feSNamjae Jeon path[depth].p_ext = extent; 5331331573feSNamjae Jeon } 53329eb79482SNamjae Jeon ret = ext4_ext_shift_path_extents(path, shift, inode, 5333331573feSNamjae Jeon handle, SHIFT); 53349eb79482SNamjae Jeon if (ret) 53359eb79482SNamjae Jeon break; 53369eb79482SNamjae Jeon } 5337ee4bd0d9STheodore Ts'o out: 5338ee4bd0d9STheodore Ts'o ext4_ext_drop_refs(path); 5339ee4bd0d9STheodore Ts'o kfree(path); 53409eb79482SNamjae Jeon return ret; 53419eb79482SNamjae Jeon } 53429eb79482SNamjae Jeon 53439eb79482SNamjae Jeon /* 53449eb79482SNamjae Jeon * ext4_collapse_range: 53459eb79482SNamjae Jeon * This implements fallocate's collapse-range functionality for ext4. 53469eb79482SNamjae Jeon * Returns: 0 on success, non-zero on error.
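 *
 * Usage sketch (added for illustration):
 *
 *   fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
 *
 * Both offset and len must be aligned to the cluster size, and
 * offset + len must stay strictly below i_size; otherwise the checks
 * below return EINVAL.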
53479eb79482SNamjae Jeon */ 534843f81677SEric Biggers static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) 53499eb79482SNamjae Jeon { 53509eb79482SNamjae Jeon struct super_block *sb = inode->i_sb; 53519eb79482SNamjae Jeon ext4_lblk_t punch_start, punch_stop; 53529eb79482SNamjae Jeon handle_t *handle; 53539eb79482SNamjae Jeon unsigned int credits; 5354a8680e0dSNamjae Jeon loff_t new_size, ioffset; 53559eb79482SNamjae Jeon int ret; 53569eb79482SNamjae Jeon 5357b9576fc3STheodore Ts'o /* 5358b9576fc3STheodore Ts'o * We need to test this early because xfstests assumes that a 5359b9576fc3STheodore Ts'o * collapse range of (0, 1) will return EOPNOTSUPP if the file 5360b9576fc3STheodore Ts'o * system does not support collapse range. 5361b9576fc3STheodore Ts'o */ 5362b9576fc3STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5363b9576fc3STheodore Ts'o return -EOPNOTSUPP; 5364b9576fc3STheodore Ts'o 53659b02e498SEric Biggers /* Collapse range works only on fs cluster size aligned regions. */ 53669b02e498SEric Biggers if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 53679eb79482SNamjae Jeon return -EINVAL; 53689eb79482SNamjae Jeon 53699eb79482SNamjae Jeon trace_ext4_collapse_range(inode, offset, len); 53709eb79482SNamjae Jeon 53719eb79482SNamjae Jeon punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 53729eb79482SNamjae Jeon punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 53739eb79482SNamjae Jeon 53741ce01c4aSNamjae Jeon /* Call ext4_force_commit to flush all data in case of data=journal. */ 53751ce01c4aSNamjae Jeon if (ext4_should_journal_data(inode)) { 53761ce01c4aSNamjae Jeon ret = ext4_force_commit(inode->i_sb); 53771ce01c4aSNamjae Jeon if (ret) 53781ce01c4aSNamjae Jeon return ret; 53791ce01c4aSNamjae Jeon } 53801ce01c4aSNamjae Jeon 53815955102cSAl Viro inode_lock(inode); 538223fffa92SLukas Czerner /* 538323fffa92SLukas Czerner * The collapse range must not reach or overlap EOF; that would 538423fffa92SLukas Czerner * effectively be a truncate operation. 538523fffa92SLukas Czerner */ 53869b02e498SEric Biggers if (offset + len >= inode->i_size) { 538723fffa92SLukas Czerner ret = -EINVAL; 538823fffa92SLukas Czerner goto out_mutex; 538923fffa92SLukas Czerner } 539023fffa92SLukas Czerner 53919eb79482SNamjae Jeon /* Currently just for extent based files */ 53929eb79482SNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 53939eb79482SNamjae Jeon ret = -EOPNOTSUPP; 53949eb79482SNamjae Jeon goto out_mutex; 53959eb79482SNamjae Jeon } 53969eb79482SNamjae Jeon 53979eb79482SNamjae Jeon /* Wait for existing dio to complete */ 53989eb79482SNamjae Jeon inode_dio_wait(inode); 53999eb79482SNamjae Jeon 5400ea3d7209SJan Kara /* 5401ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have released from 5402ea3d7209SJan Kara * page cache. 5403ea3d7209SJan Kara */ 5404ea3d7209SJan Kara down_write(&EXT4_I(inode)->i_mmap_sem); 5405430657b6SRoss Zwisler 5406430657b6SRoss Zwisler ret = ext4_break_layouts(inode); 5407430657b6SRoss Zwisler if (ret) 5408430657b6SRoss Zwisler goto out_mmap; 5409430657b6SRoss Zwisler 541032ebffd3SJan Kara /* 541132ebffd3SJan Kara * We need to round the offset down to a page size boundary when 541232ebffd3SJan Kara * the page size is larger than the block size.
541332ebffd3SJan Kara */ 541432ebffd3SJan Kara ioffset = round_down(offset, PAGE_SIZE); 541532ebffd3SJan Kara /* 541632ebffd3SJan Kara * Write tail of the last page before removed range since it will get 541732ebffd3SJan Kara * removed from the page cache below. 541832ebffd3SJan Kara */ 541932ebffd3SJan Kara ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset); 542032ebffd3SJan Kara if (ret) 542132ebffd3SJan Kara goto out_mmap; 542232ebffd3SJan Kara /* 542332ebffd3SJan Kara * Write data that will be shifted to preserve them when discarding 542432ebffd3SJan Kara * page cache below. We are also protected from pages becoming dirty 542532ebffd3SJan Kara * by i_mmap_sem. 542632ebffd3SJan Kara */ 542732ebffd3SJan Kara ret = filemap_write_and_wait_range(inode->i_mapping, offset + len, 542832ebffd3SJan Kara LLONG_MAX); 542932ebffd3SJan Kara if (ret) 543032ebffd3SJan Kara goto out_mmap; 5431ea3d7209SJan Kara truncate_pagecache(inode, ioffset); 5432ea3d7209SJan Kara 54339eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 54349eb79482SNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 54359eb79482SNamjae Jeon if (IS_ERR(handle)) { 54369eb79482SNamjae Jeon ret = PTR_ERR(handle); 5437ea3d7209SJan Kara goto out_mmap; 54389eb79482SNamjae Jeon } 54399eb79482SNamjae Jeon 54409eb79482SNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem); 54419eb79482SNamjae Jeon ext4_discard_preallocations(inode); 54429eb79482SNamjae Jeon 54439eb79482SNamjae Jeon ret = ext4_es_remove_extent(inode, punch_start, 54442c1d2328SLukas Czerner EXT_MAX_BLOCKS - punch_start); 54459eb79482SNamjae Jeon if (ret) { 54469eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54479eb79482SNamjae Jeon goto out_stop; 54489eb79482SNamjae Jeon } 54499eb79482SNamjae Jeon 54509eb79482SNamjae Jeon ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); 54519eb79482SNamjae Jeon if (ret) { 54529eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54539eb79482SNamjae Jeon goto out_stop; 54549eb79482SNamjae Jeon } 5455ef24f6c2SLukas Czerner ext4_discard_preallocations(inode); 54569eb79482SNamjae Jeon 54579eb79482SNamjae Jeon ret = ext4_ext_shift_extents(inode, handle, punch_stop, 5458331573feSNamjae Jeon punch_stop - punch_start, SHIFT_LEFT); 54599eb79482SNamjae Jeon if (ret) { 54609eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54619eb79482SNamjae Jeon goto out_stop; 54629eb79482SNamjae Jeon } 54639eb79482SNamjae Jeon 54649b02e498SEric Biggers new_size = inode->i_size - len; 54659337d5d3SLukas Czerner i_size_write(inode, new_size); 54669eb79482SNamjae Jeon EXT4_I(inode)->i_disksize = new_size; 54679eb79482SNamjae Jeon 54689eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54699eb79482SNamjae Jeon if (IS_SYNC(inode)) 54709eb79482SNamjae Jeon ext4_handle_sync(handle); 5471eeca7ea1SDeepa Dinamani inode->i_mtime = inode->i_ctime = current_time(inode); 54729eb79482SNamjae Jeon ext4_mark_inode_dirty(handle, inode); 547367a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 54749eb79482SNamjae Jeon 54759eb79482SNamjae Jeon out_stop: 54769eb79482SNamjae Jeon ext4_journal_stop(handle); 5477ea3d7209SJan Kara out_mmap: 5478ea3d7209SJan Kara up_write(&EXT4_I(inode)->i_mmap_sem); 54799eb79482SNamjae Jeon out_mutex: 54805955102cSAl Viro inode_unlock(inode); 54819eb79482SNamjae Jeon return ret; 54829eb79482SNamjae Jeon } 5483fcf6b1b7SDmitry Monakhov 5484331573feSNamjae Jeon /* 5485331573feSNamjae Jeon * ext4_insert_range: 5486331573feSNamjae Jeon * This function 
implements the FALLOC_FL_INSERT_RANGE flag of fallocate. 5487331573feSNamjae Jeon * The data blocks starting from @offset to the EOF are shifted by @len 5488331573feSNamjae Jeon * towards the right to create a hole in the @inode. Inode size is 5489331573feSNamjae Jeon * increased by @len bytes. 5490331573feSNamjae Jeon * Returns 0 on success, error otherwise. 5491331573feSNamjae Jeon */ 549243f81677SEric Biggers static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) 5493331573feSNamjae Jeon { 5494331573feSNamjae Jeon struct super_block *sb = inode->i_sb; 5495331573feSNamjae Jeon handle_t *handle; 5496331573feSNamjae Jeon struct ext4_ext_path *path; 5497331573feSNamjae Jeon struct ext4_extent *extent; 5498331573feSNamjae Jeon ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; 5499331573feSNamjae Jeon unsigned int credits, ee_len; 5500331573feSNamjae Jeon int ret = 0, depth, split_flag = 0; 5501331573feSNamjae Jeon loff_t ioffset; 5502331573feSNamjae Jeon 5503331573feSNamjae Jeon /* 5504331573feSNamjae Jeon * We need to test this early because xfstests assumes that an 5505331573feSNamjae Jeon * insert range of (0, 1) will return EOPNOTSUPP if the file 5506331573feSNamjae Jeon * system does not support insert range. 5507331573feSNamjae Jeon */ 5508331573feSNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5509331573feSNamjae Jeon return -EOPNOTSUPP; 5510331573feSNamjae Jeon 55119b02e498SEric Biggers /* Insert range works only on fs cluster size aligned regions. */ 55129b02e498SEric Biggers if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 5513331573feSNamjae Jeon return -EINVAL; 5514331573feSNamjae Jeon 5515331573feSNamjae Jeon trace_ext4_insert_range(inode, offset, len); 5516331573feSNamjae Jeon 5517331573feSNamjae Jeon offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5518331573feSNamjae Jeon len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); 5519331573feSNamjae Jeon 5520331573feSNamjae Jeon /* Call ext4_force_commit to flush all data in case of data=journal */ 5521331573feSNamjae Jeon if (ext4_should_journal_data(inode)) { 5522331573feSNamjae Jeon ret = ext4_force_commit(inode->i_sb); 5523331573feSNamjae Jeon if (ret) 5524331573feSNamjae Jeon return ret; 5525331573feSNamjae Jeon } 5526331573feSNamjae Jeon 55275955102cSAl Viro inode_lock(inode); 5528331573feSNamjae Jeon /* Currently just for extent based files */ 5529331573feSNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5530331573feSNamjae Jeon ret = -EOPNOTSUPP; 5531331573feSNamjae Jeon goto out_mutex; 5532331573feSNamjae Jeon } 5533331573feSNamjae Jeon 55349b02e498SEric Biggers /* Check whether the maximum file size would be exceeded */ 55359b02e498SEric Biggers if (len > inode->i_sb->s_maxbytes - inode->i_size) { 5536331573feSNamjae Jeon ret = -EFBIG; 5537331573feSNamjae Jeon goto out_mutex; 5538331573feSNamjae Jeon } 5539331573feSNamjae Jeon 55409b02e498SEric Biggers /* Offset must be less than i_size */ 55419b02e498SEric Biggers if (offset >= inode->i_size) { 5542331573feSNamjae Jeon ret = -EINVAL; 5543331573feSNamjae Jeon goto out_mutex; 5544331573feSNamjae Jeon } 5545331573feSNamjae Jeon 5546331573feSNamjae Jeon /* Wait for existing dio to complete */ 5547331573feSNamjae Jeon inode_dio_wait(inode); 5548331573feSNamjae Jeon 5549ea3d7209SJan Kara /* 5550ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have released from 5551ea3d7209SJan Kara * page cache.
5552ea3d7209SJan Kara */ 5553ea3d7209SJan Kara down_write(&EXT4_I(inode)->i_mmap_sem); 5554430657b6SRoss Zwisler 5555430657b6SRoss Zwisler ret = ext4_break_layouts(inode); 5556430657b6SRoss Zwisler if (ret) 5557430657b6SRoss Zwisler goto out_mmap; 5558430657b6SRoss Zwisler 555932ebffd3SJan Kara /* 556032ebffd3SJan Kara * We need to round down to align the start offset to a page size 556132ebffd3SJan Kara * boundary when the page size is larger than the block size. 556232ebffd3SJan Kara */ 556332ebffd3SJan Kara ioffset = round_down(offset, PAGE_SIZE); 556432ebffd3SJan Kara /* Write out all dirty pages */ 556532ebffd3SJan Kara ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, 556632ebffd3SJan Kara LLONG_MAX); 556732ebffd3SJan Kara if (ret) 556832ebffd3SJan Kara goto out_mmap; 5569ea3d7209SJan Kara truncate_pagecache(inode, ioffset); 5570ea3d7209SJan Kara 5571331573feSNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 5572331573feSNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5573331573feSNamjae Jeon if (IS_ERR(handle)) { 5574331573feSNamjae Jeon ret = PTR_ERR(handle); 5575ea3d7209SJan Kara goto out_mmap; 5576331573feSNamjae Jeon } 5577331573feSNamjae Jeon 5578331573feSNamjae Jeon /* Expand the file to avoid data loss if there is an error while shifting */ 5579331573feSNamjae Jeon inode->i_size += len; 5580331573feSNamjae Jeon EXT4_I(inode)->i_disksize += len; 5581eeca7ea1SDeepa Dinamani inode->i_mtime = inode->i_ctime = current_time(inode); 5582331573feSNamjae Jeon ret = ext4_mark_inode_dirty(handle, inode); 5583331573feSNamjae Jeon if (ret) 5584331573feSNamjae Jeon goto out_stop; 5585331573feSNamjae Jeon 5586331573feSNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem); 5587331573feSNamjae Jeon ext4_discard_preallocations(inode); 5588331573feSNamjae Jeon 5589331573feSNamjae Jeon path = ext4_find_extent(inode, offset_lblk, NULL, 0); 5590331573feSNamjae Jeon if (IS_ERR(path)) { 5591331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5592331573feSNamjae Jeon goto out_stop; 5593331573feSNamjae Jeon } 5594331573feSNamjae Jeon 5595331573feSNamjae Jeon depth = ext_depth(inode); 5596331573feSNamjae Jeon extent = path[depth].p_ext; 5597331573feSNamjae Jeon if (extent) { 5598331573feSNamjae Jeon ee_start_lblk = le32_to_cpu(extent->ee_block); 5599331573feSNamjae Jeon ee_len = ext4_ext_get_actual_len(extent); 5600331573feSNamjae Jeon 5601331573feSNamjae Jeon /* 5602331573feSNamjae Jeon * If offset_lblk is not the starting block of the extent, split 5603331573feSNamjae Jeon * the extent at @offset_lblk 5604331573feSNamjae Jeon */ 5605331573feSNamjae Jeon if ((offset_lblk > ee_start_lblk) && 5606331573feSNamjae Jeon (offset_lblk < (ee_start_lblk + ee_len))) { 5607331573feSNamjae Jeon if (ext4_ext_is_unwritten(extent)) 5608331573feSNamjae Jeon split_flag = EXT4_EXT_MARK_UNWRIT1 | 5609331573feSNamjae Jeon EXT4_EXT_MARK_UNWRIT2; 5610331573feSNamjae Jeon ret = ext4_split_extent_at(handle, inode, &path, 5611331573feSNamjae Jeon offset_lblk, split_flag, 5612331573feSNamjae Jeon EXT4_EX_NOCACHE | 5613331573feSNamjae Jeon EXT4_GET_BLOCKS_PRE_IO | 5614331573feSNamjae Jeon EXT4_GET_BLOCKS_METADATA_NOFAIL); 5615331573feSNamjae Jeon } 5616331573feSNamjae Jeon 5617331573feSNamjae Jeon ext4_ext_drop_refs(path); 5618331573feSNamjae Jeon kfree(path); 5619331573feSNamjae Jeon if (ret < 0) { 5620331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5621331573feSNamjae Jeon goto out_stop; 5622331573feSNamjae Jeon } 5623edf15aa1SFabian Frederick } else { 5624edf15aa1SFabian Frederick ext4_ext_drop_refs(path);
5625edf15aa1SFabian Frederick kfree(path); 5626331573feSNamjae Jeon } 5627331573feSNamjae Jeon 5628331573feSNamjae Jeon ret = ext4_es_remove_extent(inode, offset_lblk, 5629331573feSNamjae Jeon EXT_MAX_BLOCKS - offset_lblk); 5630331573feSNamjae Jeon if (ret) { 5631331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5632331573feSNamjae Jeon goto out_stop; 5633331573feSNamjae Jeon } 5634331573feSNamjae Jeon 5635331573feSNamjae Jeon /* 5636331573feSNamjae Jeon * If offset_lblk lies in a hole at the start of the file, use 5637331573feSNamjae Jeon * ee_start_lblk to shift extents 5638331573feSNamjae Jeon */ 5639331573feSNamjae Jeon ret = ext4_ext_shift_extents(inode, handle, 5640331573feSNamjae Jeon ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk, 5641331573feSNamjae Jeon len_lblk, SHIFT_RIGHT); 5642331573feSNamjae Jeon 5643331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5644331573feSNamjae Jeon if (IS_SYNC(inode)) 5645331573feSNamjae Jeon ext4_handle_sync(handle); 564667a7d5f5SJan Kara if (ret >= 0) 564767a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 5648331573feSNamjae Jeon 5649331573feSNamjae Jeon out_stop: 5650331573feSNamjae Jeon ext4_journal_stop(handle); 5651ea3d7209SJan Kara out_mmap: 5652ea3d7209SJan Kara up_write(&EXT4_I(inode)->i_mmap_sem); 5653331573feSNamjae Jeon out_mutex: 56545955102cSAl Viro inode_unlock(inode); 5655331573feSNamjae Jeon return ret; 5656331573feSNamjae Jeon } 5657331573feSNamjae Jeon 5658fcf6b1b7SDmitry Monakhov /** 5659c60990b3STheodore Ts'o * ext4_swap_extents() - Swap extents between two inodes 5660c60990b3STheodore Ts'o * @handle: handle for this transaction 5661fcf6b1b7SDmitry Monakhov * @inode1: First inode 5662fcf6b1b7SDmitry Monakhov * @inode2: Second inode 5663fcf6b1b7SDmitry Monakhov * @lblk1: Start block for first inode 5664fcf6b1b7SDmitry Monakhov * @lblk2: Start block for second inode 5665fcf6b1b7SDmitry Monakhov * @count: Number of blocks to swap 5666dcae058aSzhenwei.pi * @unwritten: Mark second inode's extents as unwritten after swap 5667fcf6b1b7SDmitry Monakhov * @erp: Pointer to save error value 5668fcf6b1b7SDmitry Monakhov * 5669fcf6b1b7SDmitry Monakhov * This helper routine does exactly what it promises: swap extents. All other 5670fcf6b1b7SDmitry Monakhov * concerns, such as page-cache locking consistency, bh mapping consistency, or 5671fcf6b1b7SDmitry Monakhov * copying of the extents' data, must be handled by the caller.
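 *
 * (Added note, hedged: the expected caller is ext4's online defrag path,
 * reached through the EXT4_IOC_MOVE_EXT ioctl, which swaps donor and
 * original blocks using this helper; other callers are possible.)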
 */
int
ext4_swap_extents(handle_t *handle, struct inode *inode1,
		  struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
		  ext4_lblk_t count, int unwritten, int *erp)
{
	struct ext4_ext_path *path1 = NULL;
	struct ext4_ext_path *path2 = NULL;
	int replaced_count = 0;

	BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
	BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
	BUG_ON(!inode_is_locked(inode1));
	BUG_ON(!inode_is_locked(inode2));

	*erp = ext4_es_remove_extent(inode1, lblk1, count);
	if (unlikely(*erp))
		return 0;
	*erp = ext4_es_remove_extent(inode2, lblk2, count);
	if (unlikely(*erp))
		return 0;

	while (count) {
		struct ext4_extent *ex1, *ex2, tmp_ex;
		ext4_lblk_t e1_blk, e2_blk;
		int e1_len, e2_len, len;
		int split = 0;

		path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path1)) {
			*erp = PTR_ERR(path1);
			path1 = NULL;
		finish:
			count = 0;
			goto repeat;
		}
		path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
		if (IS_ERR(path2)) {
			*erp = PTR_ERR(path2);
			path2 = NULL;
			goto finish;
		}
		ex1 = path1[path1->p_depth].p_ext;
		ex2 = path2[path2->p_depth].p_ext;
		/* Do we have something to swap? */
		if (unlikely(!ex2 || !ex1))
			goto finish;

		e1_blk = le32_to_cpu(ex1->ee_block);
		e2_blk = le32_to_cpu(ex2->ee_block);
		e1_len = ext4_ext_get_actual_len(ex1);
		e2_len = ext4_ext_get_actual_len(ex2);

		/* Hole handling */
		if (!in_range(lblk1, e1_blk, e1_len) ||
		    !in_range(lblk2, e2_blk, e2_len)) {
			ext4_lblk_t next1, next2;

			/* if hole after extent, then go to next extent */
			next1 = ext4_ext_next_allocated_block(path1);
			next2 = ext4_ext_next_allocated_block(path2);
			/* If hole before extent, then shift to that extent */
			if (e1_blk > lblk1)
				next1 = e1_blk;
			if (e2_blk > lblk2)
				next2 = e2_blk;
			/* Do we have something to swap */
			if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
				goto finish;
			/* Move to the rightmost boundary */
			len = next1 - lblk1;
			if (len < next2 - lblk2)
				len = next2 - lblk2;
			if (len > count)
				len = count;
			lblk1 += len;
			lblk2 += len;
			count -= len;
			goto repeat;
		}
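
		/*
		 * Added illustration (not in the original source): if lblk1
		 * sits in a hole whose next allocated block is 10 blocks
		 * away while lblk2's is 30 blocks away, the block above
		 * advances both cursors by min(30, count), so the two
		 * ranges skip their holes in lockstep and stay aligned
		 * with each other.
		 */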

		/* Prepare left boundary */
		if (e1_blk < lblk1) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (e2_blk < lblk2) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2, 0);
			if (unlikely(*erp))
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			goto repeat;

		/* Prepare right boundary */
		len = count;
		if (len > e1_blk + e1_len - lblk1)
			len = e1_blk + e1_len - lblk1;
		if (len > e2_blk + e2_len - lblk2)
			len = e2_blk + e2_len - lblk2;
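		/*
		 * Added illustration (not in the original source): with
		 * count == 100, 40 blocks of ex1 left past lblk1 and 60
		 * blocks of ex2 left past lblk2, len is clamped to 40;
		 * the splits below then carve both extents so that two
		 * equal, len-sized pieces remain to be swapped.
		 */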

		if (len != e1_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode1,
						&path1, lblk1 + len, 0);
			if (unlikely(*erp))
				goto finish;
		}
		if (len != e2_len) {
			split = 1;
			*erp = ext4_force_split_extent_at(handle, inode2,
						&path2, lblk2 + len, 0);
			if (*erp)
				goto finish;
		}
		/* ext4_split_extent_at() may result in leaf extent split,
		 * path must be revalidated. */
		if (split)
			goto repeat;

		BUG_ON(e2_len != e1_len);
		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
		if (unlikely(*erp))
			goto finish;

		/* Both extents are fully inside boundaries. Swap them now */
		tmp_ex = *ex1;
		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
		ex1->ee_len = cpu_to_le16(e2_len);
		ex2->ee_len = cpu_to_le16(e1_len);
		if (unwritten)
			ext4_ext_mark_unwritten(ex2);
		if (ext4_ext_is_unwritten(&tmp_ex))
			ext4_ext_mark_unwritten(ex1);

		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
		*erp = ext4_ext_dirty(handle, inode2, path2 +
				      path2->p_depth);
		if (unlikely(*erp))
			goto finish;
		*erp = ext4_ext_dirty(handle, inode1, path1 +
				      path1->p_depth);
		/*
		 * Looks scary, huh? The second inode already points to the
		 * new blocks and was successfully dirtied, but luckily an
		 * error here can only be caused by a journal error, in which
		 * case the whole transaction will be aborted anyway.
		 */
		if (unlikely(*erp))
			goto finish;
		lblk1 += len;
		lblk2 += len;
		replaced_count += len;
		count -= len;

	repeat:
		ext4_ext_drop_refs(path1);
		kfree(path1);
		ext4_ext_drop_refs(path2);
		kfree(path2);
		path1 = path2 = NULL;
	}
	return replaced_count;
}
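
/*
 * Added note (not in the original source): ext4_swap_extents() reports
 * errors through *erp while still returning the number of blocks that
 * were swapped before the failure, so callers must check *erp rather
 * than relying on the return value alone.
 */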

/*
 * ext4_clu_mapped - determine whether any block in a logical cluster has
 *                   been mapped to a physical cluster
 *
 * @inode - file containing the logical cluster
 * @lclu - logical cluster of interest
 *
 * Returns 1 if any block in the logical cluster is mapped, signifying
 * that a physical cluster has been allocated for it.  Otherwise,
 * returns 0.  Can also return negative error codes.  Derived from
 * ext4_ext_map_blocks().
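 *
 * Added illustration (not in the original source): on a bigalloc file
 * system with 16 blocks per cluster, lclu 3 covers logical blocks
 * 48..63, so EXT4_C2B(sbi, 3) below starts the search at lblk 48 and
 * any extent overlapping blocks 48..63 makes the cluster "mapped".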
 */
int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_ext_path *path;
	int depth, mapped = 0, err = 0;
	struct ext4_extent *extent;
	ext4_lblk_t first_lblk, first_lclu, last_lclu;

	/* search for the extent closest to the first block in the cluster */
	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out;
	}

	depth = ext_depth(inode);

	/*
	 * A consistent leaf must not be empty.  This situation is possible,
	 * though, _during_ tree modification, and it's why an assert can't
	 * be put in ext4_find_extent().
	 */
	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
		EXT4_ERROR_INODE(inode,
		    "bad extent address - lblock: %lu, depth: %d, pblock: %lld",
				 (unsigned long) EXT4_C2B(sbi, lclu),
				 depth, path[depth].p_block);
		err = -EFSCORRUPTED;
		goto out;
	}

	extent = path[depth].p_ext;

	/* can't be mapped if the extent tree is empty */
	if (extent == NULL)
		goto out;

	first_lblk = le32_to_cpu(extent->ee_block);
	first_lclu = EXT4_B2C(sbi, first_lblk);

	/*
	 * Three possible outcomes at this point - found extent spanning
	 * the target cluster, to the left of the target cluster, or to the
	 * right of the target cluster.  The first two cases are handled here.
	 * The last case indicates the target cluster is not mapped.
	 */
	if (lclu >= first_lclu) {
		last_lclu = EXT4_B2C(sbi, first_lblk +
				     ext4_ext_get_actual_len(extent) - 1);
		if (lclu <= last_lclu) {
			mapped = 1;
		} else {
			first_lblk = ext4_ext_next_allocated_block(path);
			first_lclu = EXT4_B2C(sbi, first_lblk);
			if (lclu == first_lclu)
				mapped = 1;
		}
	}

out:
	ext4_ext_drop_refs(path);
	kfree(path);

	return err ? err : mapped;
}
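
/*
 * Added usage sketch (illustrative, not from the original source): a
 * bigalloc-aware caller, e.g. a punch-hole path that frees only part
 * of a cluster, might probe the cluster before releasing it:
 *
 *	int mapped = ext4_clu_mapped(inode, EXT4_B2C(sbi, lblk));
 *
 *	if (mapped < 0)
 *		return mapped;	(negative values are error codes)
 *	if (mapped)
 *		the physical cluster is still in use; keep it allocated
 */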