// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
#include <linux/backing-dev.h>
#include <linux/iomap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{
	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex. So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	ext4_discard_preallocations(inode);
	up_write(&EXT4_I(inode)->i_data_sem);
	*dropped = 1;
	return 0;
}

/*
 * Make sure 'handle' has at least 'check_cred' credits. If not, restart
 * transaction with 'restart_cred' credits. The function drops i_data_sem
 * when restarting transaction and gets it after transaction is restarted.
 *
 * The function returns 0 on success, 1 if transaction had to be restarted,
 * and < 0 in case of fatal error.
 */
int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
				int check_cred, int restart_cred,
				int revoke_cred)
{
	int ret;
	int dropped = 0;

	ret = ext4_journal_ensure_credits_fn(handle, check_cred, restart_cred,
		revoke_cred, ext4_ext_trunc_restart_fn(inode, &dropped));
	if (dropped)
		down_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}
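
/*
 * Illustrative call pattern (a sketch, not part of the original source):
 * a caller that walks the extent tree under i_data_sem extends the running
 * handle like
 *
 *	err = ext4_datasem_ensure_credits(handle, inode, needed_credits,
 *					  needed_credits, revoke_credits);
 *	if (err < 0)
 *		return err;
 *
 * A positive return means the transaction had to be restarted; in that case
 * i_data_sem was dropped and re-acquired internally.
 */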
/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object's sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
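
/*
 * Worked example for the helpers above (illustrative, assuming a 4KiB
 * block size and the 12-byte on-disk header/entry structures): an extent
 * tree block holds (4096 - 12) / 12 = 340 extents or indexes, while the
 * in-inode root, built from the 60 bytes of i_data, holds
 * (60 - 12) / 12 = 4 entries.  The AGGRESSIVE_TEST caps shrink these
 * numbers only in test builds.
 */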
static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
			(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
}

static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);

	/*
	 * We allow neither:
	 *  - zero length
	 *  - overflow/wrap-around
	 */
	if (lblock + len <= lblock)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
		ext4_fsblk_t pblock = 0;
		ext4_lblk_t lblock = 0;
		ext4_lblk_t prev = 0;
		int len = 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			len = ext4_ext_get_actual_len(ext);
			if ((lblock <= prev) && prev) {
				pblock = ext4_ext_pblock(ext);
				es->s_last_error_block = cpu_to_le64(pblock);
				return 0;
			}
			ext++;
			entries--;
			prev = lblock + len - 1;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk)
{
	const char *error_msg;
	int max = 0, err = -EFSCORRUPTED;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	if (unlikely(depth > 32)) {
		error_msg = "too large eh_depth";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		err = -EFSBADCRC;
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_set_errno(inode->i_sb, -err);
	ext4_error_inode(inode, function, line, 0,
			 "pblk %llu bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
			 (unsigned long long) pblk, error_msg,
			 le16_to_cpu(eh->eh_magic),
			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			 max, le16_to_cpu(eh->eh_depth), depth);
	return err;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static void ext4_cache_extents(struct inode *inode,
			       struct ext4_extent_header *eh)
{
	struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
	ext4_lblk_t prev = 0;
	int i;

	for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
		unsigned int status = EXTENT_STATUS_WRITTEN;
		ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
		int len = ext4_ext_get_actual_len(ex);

		if (prev && (prev != lblk))
			ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
					     EXTENT_STATUS_HOLE);

		if (ext4_ext_is_unwritten(ex))
			status = EXTENT_STATUS_UNWRITTEN;
		ext4_es_cache_extent(inode, lblk, len,
				     ext4_ext_pblock(ex), status);
		prev = lblk + len;
	}
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, ext4_fsblk_t pblk, int depth,
			 int flags)
{
	struct buffer_head		*bh;
	int				err;

	bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = bh_submit_read(bh);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	if (!ext4_has_feature_journal(inode->i_sb) ||
	    (inode->i_ino !=
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
		err = __ext4_ext_check(function, line, inode,
				       ext_block_hdr(bh), depth, pblk);
		if (err)
			goto errout;
	}
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		ext4_cache_extents(inode, eh);
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);

}

#define read_extent_tree_block(inode, pblk, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
				 (depth), (flags))
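
/*
 * Usage note (summary added here, not in the original source): callers of
 * read_extent_tree_block() get back either a verified buffer_head or an
 * ERR_PTR().  A buffer that is already buffer_verified() skips the checksum
 * and structure checks unless EXT4_EX_FORCE_CACHE is passed, and the entries
 * of leaf blocks are loaded into the extent status cache unless
 * EXT4_EX_NOCACHE is set.
 */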
/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	/* Don't cache anything if there are no external extent blocks */
	if (!depth) {
		up_read(&ei->i_data_sem);
		return ret;
	}

	path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode,
					    ext4_idx_pblock(path[i].p_idx++),
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_unwritten(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_unwritten(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_unwritten(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++) {
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
	}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 && le32_to_cpu(ix->ei_block) <=
			    le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_unwritten(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
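
/*
 * Layout recap (illustrative summary, not in the original source): the root
 * of the extent tree lives in the inode's i_data and consists of an
 * ext4_extent_header followed either by ext4_extent entries (depth 0) or by
 * ext4_extent_idx entries pointing at index/leaf blocks.  Every on-disk
 * block starts with its own header, so the binary-search helpers above work
 * unchanged at any level of the tree.
 */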
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	return 0;
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
		EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
				 depth);
		ret = -EFSCORRUPTED;
		goto err;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
				GFP_NOFS);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
		ext4_cache_extents(inode, eh);
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
					    flags);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EFSCORRUPTED;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EFSCORRUPTED;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EFSCORRUPTED;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EFSCORRUPTED;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;
	size_t ext_size = 0;
	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EFSCORRUPTED;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	/* zero out unused area in the extent block */
	ext_size = sizeof(struct ext4_extent_header) +
		sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
	memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EFSCORRUPTED;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EFSCORRUPTED;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		/* zero out unused area in the extent block */
		ext_size = sizeof(struct ext4_extent_header) +
		   (sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
		memset(bh->b_data + ext_size, 0,
			inode->i_sb->s_blocksize - ext_size);
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}
Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 1253be5cd90dSDmitry Monakhov unsigned int flags) 1254a86c6181SAlex Tomas { 1255a86c6181SAlex Tomas struct ext4_extent_header *neh; 1256a86c6181SAlex Tomas struct buffer_head *bh; 1257be5cd90dSDmitry Monakhov ext4_fsblk_t newblock, goal = 0; 1258be5cd90dSDmitry Monakhov struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 1259a86c6181SAlex Tomas int err = 0; 1260592acbf1SSriram Rajagopalan size_t ext_size = 0; 1261a86c6181SAlex Tomas 1262be5cd90dSDmitry Monakhov /* Try to prepend new index to old one */ 1263be5cd90dSDmitry Monakhov if (ext_depth(inode)) 1264be5cd90dSDmitry Monakhov goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode))); 1265be5cd90dSDmitry Monakhov if (goal > le32_to_cpu(es->s_first_data_block)) { 1266be5cd90dSDmitry Monakhov flags |= EXT4_MB_HINT_TRY_GOAL; 1267be5cd90dSDmitry Monakhov goal--; 1268be5cd90dSDmitry Monakhov } else 1269be5cd90dSDmitry Monakhov goal = ext4_inode_to_goal_block(inode); 1270be5cd90dSDmitry Monakhov newblock = ext4_new_meta_blocks(handle, inode, goal, flags, 1271be5cd90dSDmitry Monakhov NULL, &err); 1272a86c6181SAlex Tomas if (newblock == 0) 1273a86c6181SAlex Tomas return err; 1274a86c6181SAlex Tomas 1275c45653c3SNikolay Borisov bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS); 1276aebf0243SWang Shilong if (unlikely(!bh)) 1277860d21e2STheodore Ts'o return -ENOMEM; 1278a86c6181SAlex Tomas lock_buffer(bh); 1279a86c6181SAlex Tomas 12807e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 12817e028976SAvantika Mathur if (err) { 1282a86c6181SAlex Tomas unlock_buffer(bh); 1283a86c6181SAlex Tomas goto out; 1284a86c6181SAlex Tomas } 1285a86c6181SAlex Tomas 1286592acbf1SSriram Rajagopalan ext_size = sizeof(EXT4_I(inode)->i_data); 1287a86c6181SAlex Tomas /* move top-level index/leaf into new block */ 1288592acbf1SSriram Rajagopalan memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size); 1289592acbf1SSriram Rajagopalan /* zero out unused area in the extent block */ 1290592acbf1SSriram Rajagopalan memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size); 1291a86c6181SAlex Tomas 1292a86c6181SAlex Tomas /* set size of new block */ 1293a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1294a86c6181SAlex Tomas /* old root could have indexes or leaves 1295a86c6181SAlex Tomas * so calculate e_max right way */ 1296a86c6181SAlex Tomas if (ext_depth(inode)) 129755ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1298a86c6181SAlex Tomas else 129955ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1300a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 13017ac5990dSDarrick J. 
Wong ext4_extent_block_csum_set(inode, neh); 1302a86c6181SAlex Tomas set_buffer_uptodate(bh); 1303a86c6181SAlex Tomas unlock_buffer(bh); 1304a86c6181SAlex Tomas 13050390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 13067e028976SAvantika Mathur if (err) 1307a86c6181SAlex Tomas goto out; 1308a86c6181SAlex Tomas 13091939dd84SDmitry Monakhov /* Update top-level index: num,max,pointer */ 1310a86c6181SAlex Tomas neh = ext_inode_hdr(inode); 13111939dd84SDmitry Monakhov neh->eh_entries = cpu_to_le16(1); 13121939dd84SDmitry Monakhov ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); 13131939dd84SDmitry Monakhov if (neh->eh_depth == 0) { 13141939dd84SDmitry Monakhov /* Root extent block becomes index block */ 13151939dd84SDmitry Monakhov neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); 13161939dd84SDmitry Monakhov EXT_FIRST_INDEX(neh)->ei_block = 13171939dd84SDmitry Monakhov EXT_FIRST_EXTENT(neh)->ee_block; 13181939dd84SDmitry Monakhov } 13192ae02107SMingming Cao ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", 1320a86c6181SAlex Tomas le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 13215a0790c2SAndi Kleen le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1322bf89d16fSTheodore Ts'o ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1323a86c6181SAlex Tomas 1324ba39ebb6SWei Yongjun le16_add_cpu(&neh->eh_depth, 1); 13251939dd84SDmitry Monakhov ext4_mark_inode_dirty(handle, inode); 1326a86c6181SAlex Tomas out: 1327a86c6181SAlex Tomas brelse(bh); 1328a86c6181SAlex Tomas 1329a86c6181SAlex Tomas return err; 1330a86c6181SAlex Tomas } 1331a86c6181SAlex Tomas 1332a86c6181SAlex Tomas /* 1333d0d856e8SRandy Dunlap * ext4_ext_create_new_leaf: 1334d0d856e8SRandy Dunlap * finds empty index and adds new leaf. 1335d0d856e8SRandy Dunlap * if no free index is found, then it requests in-depth growing. 
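 * ("in-depth growing" means calling ext4_ext_grow_indepth() and re-reading
 * the path with ext4_find_extent(); the split is then retried if the grown
 * tree is still full).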
1336a86c6181SAlex Tomas */ 1337a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 1338107a7bd3STheodore Ts'o unsigned int mb_flags, 1339107a7bd3STheodore Ts'o unsigned int gb_flags, 1340dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 1341a86c6181SAlex Tomas struct ext4_extent *newext) 1342a86c6181SAlex Tomas { 1343dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 1344a86c6181SAlex Tomas struct ext4_ext_path *curp; 1345a86c6181SAlex Tomas int depth, i, err = 0; 1346a86c6181SAlex Tomas 1347a86c6181SAlex Tomas repeat: 1348a86c6181SAlex Tomas i = depth = ext_depth(inode); 1349a86c6181SAlex Tomas 1350a86c6181SAlex Tomas /* walk up to the tree and look for free index entry */ 1351a86c6181SAlex Tomas curp = path + depth; 1352a86c6181SAlex Tomas while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1353a86c6181SAlex Tomas i--; 1354a86c6181SAlex Tomas curp--; 1355a86c6181SAlex Tomas } 1356a86c6181SAlex Tomas 1357d0d856e8SRandy Dunlap /* we use already allocated block for index block, 1358d0d856e8SRandy Dunlap * so subsequent data blocks should be contiguous */ 1359a86c6181SAlex Tomas if (EXT_HAS_FREE_INDEX(curp)) { 1360a86c6181SAlex Tomas /* if we found index with free entry, then use that 1361a86c6181SAlex Tomas * entry: create all needed subtree and add new leaf */ 1362107a7bd3STheodore Ts'o err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); 1363787e0981SShen Feng if (err) 1364787e0981SShen Feng goto out; 1365a86c6181SAlex Tomas 1366a86c6181SAlex Tomas /* refill path */ 1367ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, 1368725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1369dfe50809STheodore Ts'o ppath, gb_flags); 1370a86c6181SAlex Tomas if (IS_ERR(path)) 1371a86c6181SAlex Tomas err = PTR_ERR(path); 1372a86c6181SAlex Tomas } else { 1373a86c6181SAlex Tomas /* tree is full, time to grow in depth */ 1374be5cd90dSDmitry Monakhov err = ext4_ext_grow_indepth(handle, inode, mb_flags); 1375a86c6181SAlex Tomas if (err) 1376a86c6181SAlex Tomas goto out; 1377a86c6181SAlex Tomas 1378a86c6181SAlex Tomas /* refill path */ 1379ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, 1380725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1381dfe50809STheodore Ts'o ppath, gb_flags); 1382a86c6181SAlex Tomas if (IS_ERR(path)) { 1383a86c6181SAlex Tomas err = PTR_ERR(path); 1384a86c6181SAlex Tomas goto out; 1385a86c6181SAlex Tomas } 1386a86c6181SAlex Tomas 1387a86c6181SAlex Tomas /* 1388d0d856e8SRandy Dunlap * only first (depth 0 -> 1) produces free space; 1389d0d856e8SRandy Dunlap * in all other cases we have to split the grown tree 1390a86c6181SAlex Tomas */ 1391a86c6181SAlex Tomas depth = ext_depth(inode); 1392a86c6181SAlex Tomas if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1393d0d856e8SRandy Dunlap /* now we need to split */ 1394a86c6181SAlex Tomas goto repeat; 1395a86c6181SAlex Tomas } 1396a86c6181SAlex Tomas } 1397a86c6181SAlex Tomas 1398a86c6181SAlex Tomas out: 1399a86c6181SAlex Tomas return err; 1400a86c6181SAlex Tomas } 1401a86c6181SAlex Tomas 1402a86c6181SAlex Tomas /* 14031988b51eSAlex Tomas * search the closest allocated block to the left for *logical 14041988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 14051988b51eSAlex Tomas * if *logical is the smallest allocated block, the function 14061988b51eSAlex Tomas * returns 0 at @phys 14071988b51eSAlex Tomas * return value contains 0 (success) or error code 14081988b51eSAlex Tomas */ 
14091f109d5aSTheodore Ts'o static int ext4_ext_search_left(struct inode *inode, 14101f109d5aSTheodore Ts'o struct ext4_ext_path *path, 14111988b51eSAlex Tomas ext4_lblk_t *logical, ext4_fsblk_t *phys) 14121988b51eSAlex Tomas { 14131988b51eSAlex Tomas struct ext4_extent_idx *ix; 14141988b51eSAlex Tomas struct ext4_extent *ex; 1415b939e376SAneesh Kumar K.V int depth, ee_len; 14161988b51eSAlex Tomas 1417273df556SFrank Mayhar if (unlikely(path == NULL)) { 1418273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 14196a797d27SDarrick J. Wong return -EFSCORRUPTED; 1420273df556SFrank Mayhar } 14211988b51eSAlex Tomas depth = path->p_depth; 14221988b51eSAlex Tomas *phys = 0; 14231988b51eSAlex Tomas 14241988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 14251988b51eSAlex Tomas return 0; 14261988b51eSAlex Tomas 14271988b51eSAlex Tomas /* usually extent in the path covers blocks smaller 14281988b51eSAlex Tomas * then *logical, but it can be that extent is the 14291988b51eSAlex Tomas * first one in the file */ 14301988b51eSAlex Tomas 14311988b51eSAlex Tomas ex = path[depth].p_ext; 1432b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 14331988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1434273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1435273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1436273df556SFrank Mayhar "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", 1437273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block)); 14386a797d27SDarrick J. Wong return -EFSCORRUPTED; 1439273df556SFrank Mayhar } 14401988b51eSAlex Tomas while (--depth >= 0) { 14411988b51eSAlex Tomas ix = path[depth].p_idx; 1442273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1443273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1444273df556SFrank Mayhar "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", 14456ee3b212STao Ma ix != NULL ? le32_to_cpu(ix->ei_block) : 0, 1446273df556SFrank Mayhar EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? 14476ee3b212STao Ma le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, 1448273df556SFrank Mayhar depth); 14496a797d27SDarrick J. Wong return -EFSCORRUPTED; 1450273df556SFrank Mayhar } 14511988b51eSAlex Tomas } 14521988b51eSAlex Tomas return 0; 14531988b51eSAlex Tomas } 14541988b51eSAlex Tomas 1455273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1456273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1457273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1458273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 14596a797d27SDarrick J. 
Wong return -EFSCORRUPTED; 1460273df556SFrank Mayhar } 14611988b51eSAlex Tomas 1462b939e376SAneesh Kumar K.V *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1463bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex) + ee_len - 1; 14641988b51eSAlex Tomas return 0; 14651988b51eSAlex Tomas } 14661988b51eSAlex Tomas 14671988b51eSAlex Tomas /* 14681988b51eSAlex Tomas * search the closest allocated block to the right for *logical 14691988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 1470df3ab170STao Ma * if *logical is the largest allocated block, the function 14711988b51eSAlex Tomas * returns 0 at @phys 14721988b51eSAlex Tomas * return value contains 0 (success) or error code 14731988b51eSAlex Tomas */ 14741f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode, 14751f109d5aSTheodore Ts'o struct ext4_ext_path *path, 14764d33b1efSTheodore Ts'o ext4_lblk_t *logical, ext4_fsblk_t *phys, 14774d33b1efSTheodore Ts'o struct ext4_extent **ret_ex) 14781988b51eSAlex Tomas { 14791988b51eSAlex Tomas struct buffer_head *bh = NULL; 14801988b51eSAlex Tomas struct ext4_extent_header *eh; 14811988b51eSAlex Tomas struct ext4_extent_idx *ix; 14821988b51eSAlex Tomas struct ext4_extent *ex; 14831988b51eSAlex Tomas ext4_fsblk_t block; 1484395a87bfSEric Sandeen int depth; /* Note, NOT eh_depth; depth from top of tree */ 1485395a87bfSEric Sandeen int ee_len; 14861988b51eSAlex Tomas 1487273df556SFrank Mayhar if (unlikely(path == NULL)) { 1488273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 14896a797d27SDarrick J. Wong return -EFSCORRUPTED; 1490273df556SFrank Mayhar } 14911988b51eSAlex Tomas depth = path->p_depth; 14921988b51eSAlex Tomas *phys = 0; 14931988b51eSAlex Tomas 14941988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 14951988b51eSAlex Tomas return 0; 14961988b51eSAlex Tomas 14971988b51eSAlex Tomas /* usually extent in the path covers blocks smaller 14981988b51eSAlex Tomas * then *logical, but it can be that extent is the 14991988b51eSAlex Tomas * first one in the file */ 15001988b51eSAlex Tomas 15011988b51eSAlex Tomas ex = path[depth].p_ext; 1502b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 15031988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1504273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1505273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1506273df556SFrank Mayhar "first_extent(path[%d].p_hdr) != ex", 1507273df556SFrank Mayhar depth); 15086a797d27SDarrick J. Wong return -EFSCORRUPTED; 1509273df556SFrank Mayhar } 15101988b51eSAlex Tomas while (--depth >= 0) { 15111988b51eSAlex Tomas ix = path[depth].p_idx; 1512273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1513273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1514273df556SFrank Mayhar "ix != EXT_FIRST_INDEX *logical %d!", 1515273df556SFrank Mayhar *logical); 15166a797d27SDarrick J. Wong return -EFSCORRUPTED; 1517273df556SFrank Mayhar } 15181988b51eSAlex Tomas } 15194d33b1efSTheodore Ts'o goto found_extent; 15201988b51eSAlex Tomas } 15211988b51eSAlex Tomas 1522273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1523273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1524273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1525273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 15266a797d27SDarrick J. 
Wong return -EFSCORRUPTED; 1527273df556SFrank Mayhar } 15281988b51eSAlex Tomas 15291988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 15301988b51eSAlex Tomas /* next allocated block in this leaf */ 15311988b51eSAlex Tomas ex++; 15324d33b1efSTheodore Ts'o goto found_extent; 15331988b51eSAlex Tomas } 15341988b51eSAlex Tomas 15351988b51eSAlex Tomas /* go up and search for index to the right */ 15361988b51eSAlex Tomas while (--depth >= 0) { 15371988b51eSAlex Tomas ix = path[depth].p_idx; 15381988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 153925f1ee3aSWu Fengguang goto got_index; 15401988b51eSAlex Tomas } 15411988b51eSAlex Tomas 154225f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */ 15431988b51eSAlex Tomas return 0; 15441988b51eSAlex Tomas 154525f1ee3aSWu Fengguang got_index: 15461988b51eSAlex Tomas /* we've found index to the right, let's 15471988b51eSAlex Tomas * follow it and find the closest allocated 15481988b51eSAlex Tomas * block to the right */ 15491988b51eSAlex Tomas ix++; 1550bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 15511988b51eSAlex Tomas while (++depth < path->p_depth) { 1552395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */ 15537d7ea89eSTheodore Ts'o bh = read_extent_tree_block(inode, block, 1554107a7bd3STheodore Ts'o path->p_depth - depth, 0); 15557d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 15567d7ea89eSTheodore Ts'o return PTR_ERR(bh); 15577d7ea89eSTheodore Ts'o eh = ext_block_hdr(bh); 15581988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh); 1559bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 15601988b51eSAlex Tomas put_bh(bh); 15611988b51eSAlex Tomas } 15621988b51eSAlex Tomas 1563107a7bd3STheodore Ts'o bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); 15647d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 15657d7ea89eSTheodore Ts'o return PTR_ERR(bh); 15661988b51eSAlex Tomas eh = ext_block_hdr(bh); 15671988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh); 15684d33b1efSTheodore Ts'o found_extent: 15691988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block); 1570bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex); 15714d33b1efSTheodore Ts'o *ret_ex = ex; 15724d33b1efSTheodore Ts'o if (bh) 15731988b51eSAlex Tomas put_bh(bh); 15741988b51eSAlex Tomas return 0; 15751988b51eSAlex Tomas } 15761988b51eSAlex Tomas 15771988b51eSAlex Tomas /* 1578d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block: 1579f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1580d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as 1581d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent 1582d0d856e8SRandy Dunlap * with leaves. 
1583a86c6181SAlex Tomas */ 1584fcf6b1b7SDmitry Monakhov ext4_lblk_t 1585a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1586a86c6181SAlex Tomas { 1587a86c6181SAlex Tomas int depth; 1588a86c6181SAlex Tomas 1589a86c6181SAlex Tomas BUG_ON(path == NULL); 1590a86c6181SAlex Tomas depth = path->p_depth; 1591a86c6181SAlex Tomas 1592a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL) 1593f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1594a86c6181SAlex Tomas 1595a86c6181SAlex Tomas while (depth >= 0) { 15966e89bbb7SEric Biggers struct ext4_ext_path *p = &path[depth]; 15976e89bbb7SEric Biggers 1598a86c6181SAlex Tomas if (depth == path->p_depth) { 1599a86c6181SAlex Tomas /* leaf */ 16006e89bbb7SEric Biggers if (p->p_ext && p->p_ext != EXT_LAST_EXTENT(p->p_hdr)) 16016e89bbb7SEric Biggers return le32_to_cpu(p->p_ext[1].ee_block); 1602a86c6181SAlex Tomas } else { 1603a86c6181SAlex Tomas /* index */ 16046e89bbb7SEric Biggers if (p->p_idx != EXT_LAST_INDEX(p->p_hdr)) 16056e89bbb7SEric Biggers return le32_to_cpu(p->p_idx[1].ei_block); 1606a86c6181SAlex Tomas } 1607a86c6181SAlex Tomas depth--; 1608a86c6181SAlex Tomas } 1609a86c6181SAlex Tomas 1610f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1611a86c6181SAlex Tomas } 1612a86c6181SAlex Tomas 1613a86c6181SAlex Tomas /* 1614d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block: 1615f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1616a86c6181SAlex Tomas */ 16175718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1618a86c6181SAlex Tomas { 1619a86c6181SAlex Tomas int depth; 1620a86c6181SAlex Tomas 1621a86c6181SAlex Tomas BUG_ON(path == NULL); 1622a86c6181SAlex Tomas depth = path->p_depth; 1623a86c6181SAlex Tomas 1624a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */ 1625a86c6181SAlex Tomas if (depth == 0) 1626f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1627a86c6181SAlex Tomas 1628a86c6181SAlex Tomas /* go to index block */ 1629a86c6181SAlex Tomas depth--; 1630a86c6181SAlex Tomas 1631a86c6181SAlex Tomas while (depth >= 0) { 1632a86c6181SAlex Tomas if (path[depth].p_idx != 1633a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1634725d26d3SAneesh Kumar K.V return (ext4_lblk_t) 1635725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block); 1636a86c6181SAlex Tomas depth--; 1637a86c6181SAlex Tomas } 1638a86c6181SAlex Tomas 1639f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1640a86c6181SAlex Tomas } 1641a86c6181SAlex Tomas 1642a86c6181SAlex Tomas /* 1643d0d856e8SRandy Dunlap * ext4_ext_correct_indexes: 1644d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf, 1645d0d856e8SRandy Dunlap * then we have to correct all indexes above. 1646a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases? 
1647a86c6181SAlex Tomas */ 16481d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1649a86c6181SAlex Tomas struct ext4_ext_path *path) 1650a86c6181SAlex Tomas { 1651a86c6181SAlex Tomas struct ext4_extent_header *eh; 1652a86c6181SAlex Tomas int depth = ext_depth(inode); 1653a86c6181SAlex Tomas struct ext4_extent *ex; 1654a86c6181SAlex Tomas __le32 border; 1655a86c6181SAlex Tomas int k, err = 0; 1656a86c6181SAlex Tomas 1657a86c6181SAlex Tomas eh = path[depth].p_hdr; 1658a86c6181SAlex Tomas ex = path[depth].p_ext; 1659273df556SFrank Mayhar 1660273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) { 1661273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1662273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh); 16636a797d27SDarrick J. Wong return -EFSCORRUPTED; 1664273df556SFrank Mayhar } 1665a86c6181SAlex Tomas 1666a86c6181SAlex Tomas if (depth == 0) { 1667a86c6181SAlex Tomas /* there is no tree at all */ 1668a86c6181SAlex Tomas return 0; 1669a86c6181SAlex Tomas } 1670a86c6181SAlex Tomas 1671a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) { 1672a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */ 1673a86c6181SAlex Tomas return 0; 1674a86c6181SAlex Tomas } 1675a86c6181SAlex Tomas 1676a86c6181SAlex Tomas /* 1677d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one 1678a86c6181SAlex Tomas */ 1679a86c6181SAlex Tomas k = depth - 1; 1680a86c6181SAlex Tomas border = path[depth].p_ext->ee_block; 16817e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 16827e028976SAvantika Mathur if (err) 1683a86c6181SAlex Tomas return err; 1684a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 16857e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 16867e028976SAvantika Mathur if (err) 1687a86c6181SAlex Tomas return err; 1688a86c6181SAlex Tomas 1689a86c6181SAlex Tomas while (k--) { 1690a86c6181SAlex Tomas /* change all left-side indexes */ 1691a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1692a86c6181SAlex Tomas break; 16937e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 16947e028976SAvantika Mathur if (err) 1695a86c6181SAlex Tomas break; 1696a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 16977e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 16987e028976SAvantika Mathur if (err) 1699a86c6181SAlex Tomas break; 1700a86c6181SAlex Tomas } 1701a86c6181SAlex Tomas 1702a86c6181SAlex Tomas return err; 1703a86c6181SAlex Tomas } 1704a86c6181SAlex Tomas 170543f81677SEric Biggers static int ext4_can_extents_be_merged(struct inode *inode, 170643f81677SEric Biggers struct ext4_extent *ex1, 1707a86c6181SAlex Tomas struct ext4_extent *ex2) 1708a86c6181SAlex Tomas { 1709da0169b3SEric Sandeen unsigned short ext1_ee_len, ext2_ee_len; 1710a2df2a63SAmit Arora 1711556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2)) 1712a2df2a63SAmit Arora return 0; 1713a2df2a63SAmit Arora 1714a2df2a63SAmit Arora ext1_ee_len = ext4_ext_get_actual_len(ex1); 1715a2df2a63SAmit Arora ext2_ee_len = ext4_ext_get_actual_len(ex2); 1716a2df2a63SAmit Arora 1717a2df2a63SAmit Arora if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 171863f57933SAndrew Morton le32_to_cpu(ex2->ee_block)) 1719a86c6181SAlex Tomas return 0; 1720a86c6181SAlex Tomas 1721da0169b3SEric Sandeen if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) 1722471d4011SSuparna Bhattacharya return 0; 1723378f32baSMatthew 
Bobrowski 1724556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex1) && 1725378f32baSMatthew Bobrowski ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN) 1726a9b82415SDarrick J. Wong return 0; 1727bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 1728b939e376SAneesh Kumar K.V if (ext1_ee_len >= 4) 1729a86c6181SAlex Tomas return 0; 1730a86c6181SAlex Tomas #endif 1731a86c6181SAlex Tomas 1732bf89d16fSTheodore Ts'o if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1733a86c6181SAlex Tomas return 1; 1734a86c6181SAlex Tomas return 0; 1735a86c6181SAlex Tomas } 1736a86c6181SAlex Tomas 1737a86c6181SAlex Tomas /* 173856055d3aSAmit Arora * This function tries to merge the "ex" extent to the next extent in the tree. 173956055d3aSAmit Arora * It always tries to merge towards right. If you want to merge towards 174056055d3aSAmit Arora * left, pass "ex - 1" as argument instead of "ex". 174156055d3aSAmit Arora * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 174256055d3aSAmit Arora * 1 if they got merged. 174356055d3aSAmit Arora */ 1744197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode, 174556055d3aSAmit Arora struct ext4_ext_path *path, 174656055d3aSAmit Arora struct ext4_extent *ex) 174756055d3aSAmit Arora { 174856055d3aSAmit Arora struct ext4_extent_header *eh; 174956055d3aSAmit Arora unsigned int depth, len; 1750556615dcSLukas Czerner int merge_done = 0, unwritten; 175156055d3aSAmit Arora 175256055d3aSAmit Arora depth = ext_depth(inode); 175356055d3aSAmit Arora BUG_ON(path[depth].p_hdr == NULL); 175456055d3aSAmit Arora eh = path[depth].p_hdr; 175556055d3aSAmit Arora 175656055d3aSAmit Arora while (ex < EXT_LAST_EXTENT(eh)) { 175756055d3aSAmit Arora if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 175856055d3aSAmit Arora break; 175956055d3aSAmit Arora /* merge with next extent! */ 1760556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 176156055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 176256055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1)); 1763556615dcSLukas Czerner if (unwritten) 1764556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 176556055d3aSAmit Arora 176656055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) { 176756055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1) 176856055d3aSAmit Arora * sizeof(struct ext4_extent); 176956055d3aSAmit Arora memmove(ex + 1, ex + 2, len); 177056055d3aSAmit Arora } 1771e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1); 177256055d3aSAmit Arora merge_done = 1; 177356055d3aSAmit Arora WARN_ON(eh->eh_entries == 0); 177456055d3aSAmit Arora if (!eh->eh_entries) 177524676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 177656055d3aSAmit Arora } 177756055d3aSAmit Arora 177856055d3aSAmit Arora return merge_done; 177956055d3aSAmit Arora } 178056055d3aSAmit Arora 178156055d3aSAmit Arora /* 1782ecb94f5fSTheodore Ts'o * This function does a very simple check to see if we can collapse 1783ecb94f5fSTheodore Ts'o * an extent tree with a single extent tree leaf block into the inode. 
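 * Collapsing is only attempted when the tree has depth 1, the root holds a
 * single index entry, and every extent in the lone leaf block fits into the
 * space available in the inode's i_data.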
1784ecb94f5fSTheodore Ts'o */ 1785ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle, 1786ecb94f5fSTheodore Ts'o struct inode *inode, 1787ecb94f5fSTheodore Ts'o struct ext4_ext_path *path) 1788ecb94f5fSTheodore Ts'o { 1789ecb94f5fSTheodore Ts'o size_t s; 1790ecb94f5fSTheodore Ts'o unsigned max_root = ext4_ext_space_root(inode, 0); 1791ecb94f5fSTheodore Ts'o ext4_fsblk_t blk; 1792ecb94f5fSTheodore Ts'o 1793ecb94f5fSTheodore Ts'o if ((path[0].p_depth != 1) || 1794ecb94f5fSTheodore Ts'o (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || 1795ecb94f5fSTheodore Ts'o (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) 1796ecb94f5fSTheodore Ts'o return; 1797ecb94f5fSTheodore Ts'o 1798ecb94f5fSTheodore Ts'o /* 1799ecb94f5fSTheodore Ts'o * We need to modify the block allocation bitmap and the block 1800ecb94f5fSTheodore Ts'o * group descriptor to release the extent tree block. If we 1801ecb94f5fSTheodore Ts'o * can't get the journal credits, give up. 1802ecb94f5fSTheodore Ts'o */ 180383448bdfSJan Kara if (ext4_journal_extend(handle, 2, 180483448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, 1))) 1805ecb94f5fSTheodore Ts'o return; 1806ecb94f5fSTheodore Ts'o 1807ecb94f5fSTheodore Ts'o /* 1808ecb94f5fSTheodore Ts'o * Copy the extent data up to the inode 1809ecb94f5fSTheodore Ts'o */ 1810ecb94f5fSTheodore Ts'o blk = ext4_idx_pblock(path[0].p_idx); 1811ecb94f5fSTheodore Ts'o s = le16_to_cpu(path[1].p_hdr->eh_entries) * 1812ecb94f5fSTheodore Ts'o sizeof(struct ext4_extent_idx); 1813ecb94f5fSTheodore Ts'o s += sizeof(struct ext4_extent_header); 1814ecb94f5fSTheodore Ts'o 181510809df8STheodore Ts'o path[1].p_maxdepth = path[0].p_maxdepth; 1816ecb94f5fSTheodore Ts'o memcpy(path[0].p_hdr, path[1].p_hdr, s); 1817ecb94f5fSTheodore Ts'o path[0].p_depth = 0; 1818ecb94f5fSTheodore Ts'o path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + 1819ecb94f5fSTheodore Ts'o (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); 1820ecb94f5fSTheodore Ts'o path[0].p_hdr->eh_max = cpu_to_le16(max_root); 1821ecb94f5fSTheodore Ts'o 1822ecb94f5fSTheodore Ts'o brelse(path[1].p_bh); 1823ecb94f5fSTheodore Ts'o ext4_free_blocks(handle, inode, NULL, blk, 1, 182471d4f7d0STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 1825ecb94f5fSTheodore Ts'o } 1826ecb94f5fSTheodore Ts'o 1827ecb94f5fSTheodore Ts'o /* 1828adde81cfSEric Biggers * This function tries to merge the @ex extent to neighbours in the tree, then 1829adde81cfSEric Biggers * tries to collapse the extent tree into the inode. 
1830197217a5SYongqiang Yang */ 1831ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle, 1832ecb94f5fSTheodore Ts'o struct inode *inode, 1833197217a5SYongqiang Yang struct ext4_ext_path *path, 1834adde81cfSEric Biggers struct ext4_extent *ex) 1835adde81cfSEric Biggers { 1836197217a5SYongqiang Yang struct ext4_extent_header *eh; 1837197217a5SYongqiang Yang unsigned int depth; 1838197217a5SYongqiang Yang int merge_done = 0; 1839197217a5SYongqiang Yang 1840197217a5SYongqiang Yang depth = ext_depth(inode); 1841197217a5SYongqiang Yang BUG_ON(path[depth].p_hdr == NULL); 1842197217a5SYongqiang Yang eh = path[depth].p_hdr; 1843197217a5SYongqiang Yang 1844197217a5SYongqiang Yang if (ex > EXT_FIRST_EXTENT(eh)) 1845197217a5SYongqiang Yang merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1846197217a5SYongqiang Yang 1847197217a5SYongqiang Yang if (!merge_done) 1848ecb94f5fSTheodore Ts'o (void) ext4_ext_try_to_merge_right(inode, path, ex); 1849197217a5SYongqiang Yang 1850ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge_up(handle, inode, path); 1851197217a5SYongqiang Yang } 1852197217a5SYongqiang Yang 1853197217a5SYongqiang Yang /* 185425d14f98SAmit Arora * check if a portion of the "newext" extent overlaps with an 185525d14f98SAmit Arora * existing extent. 185625d14f98SAmit Arora * 185725d14f98SAmit Arora * If there is an overlap discovered, it updates the length of the newext 185825d14f98SAmit Arora * such that there will be no overlap, and then returns 1. 185925d14f98SAmit Arora * If there is no overlap found, it returns 0. 186025d14f98SAmit Arora */ 18614d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 18624d33b1efSTheodore Ts'o struct inode *inode, 186325d14f98SAmit Arora struct ext4_extent *newext, 186425d14f98SAmit Arora struct ext4_ext_path *path) 186525d14f98SAmit Arora { 1866725d26d3SAneesh Kumar K.V ext4_lblk_t b1, b2; 186725d14f98SAmit Arora unsigned int depth, len1; 186825d14f98SAmit Arora unsigned int ret = 0; 186925d14f98SAmit Arora 187025d14f98SAmit Arora b1 = le32_to_cpu(newext->ee_block); 1871a2df2a63SAmit Arora len1 = ext4_ext_get_actual_len(newext); 187225d14f98SAmit Arora depth = ext_depth(inode); 187325d14f98SAmit Arora if (!path[depth].p_ext) 187425d14f98SAmit Arora goto out; 1875f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); 187625d14f98SAmit Arora 187725d14f98SAmit Arora /* 187825d14f98SAmit Arora * get the next allocated block if the extent in the path 187925d14f98SAmit Arora * is before the requested block(s) 188025d14f98SAmit Arora */ 188125d14f98SAmit Arora if (b2 < b1) { 188225d14f98SAmit Arora b2 = ext4_ext_next_allocated_block(path); 1883f17722f9SLukas Czerner if (b2 == EXT_MAX_BLOCKS) 188425d14f98SAmit Arora goto out; 1885f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, b2); 188625d14f98SAmit Arora } 188725d14f98SAmit Arora 1888725d26d3SAneesh Kumar K.V /* check for wrap through zero on extent logical start block*/ 188925d14f98SAmit Arora if (b1 + len1 < b1) { 1890f17722f9SLukas Czerner len1 = EXT_MAX_BLOCKS - b1; 189125d14f98SAmit Arora newext->ee_len = cpu_to_le16(len1); 189225d14f98SAmit Arora ret = 1; 189325d14f98SAmit Arora } 189425d14f98SAmit Arora 189525d14f98SAmit Arora /* check for overlap */ 189625d14f98SAmit Arora if (b1 + len1 > b2) { 189725d14f98SAmit Arora newext->ee_len = cpu_to_le16(b2 - b1); 189825d14f98SAmit Arora ret = 1; 189925d14f98SAmit Arora } 190025d14f98SAmit Arora out: 190125d14f98SAmit Arora return ret; 190225d14f98SAmit Arora } 
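/*
 * Documentation-only sketch (not part of the original source, and compiled
 * out below): the trimming done by ext4_ext_check_overlap() boils down to
 * two interval checks on logical block numbers.  The helper name, parameters
 * and plain unsigned types are made up purely for illustration; it assumes
 * b2 has already been advanced to the first allocated block at or after b1,
 * as the function above does before these checks.  A return of 1 means the
 * caller must use the shortened length.
 */
#if 0
static unsigned int ext_overlap_sketch(unsigned int b1, unsigned int *len1,
				       unsigned int b2, unsigned int max_blocks)
{
	unsigned int ret = 0;

	/* b1 + len wraps through zero: clamp the length at max_blocks */
	if (b1 + *len1 < b1) {
		*len1 = max_blocks - b1;
		ret = 1;
	}
	/* request runs into the next allocated block: shorten it to fit */
	if (b1 + *len1 > b2) {
		*len1 = b2 - b1;
		ret = 1;
	}
	return ret;	/* 1 => the requested extent was shortened */
}
#endif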
190325d14f98SAmit Arora 190425d14f98SAmit Arora /* 1905d0d856e8SRandy Dunlap * ext4_ext_insert_extent: 1906d0d856e8SRandy Dunlap * tries to merge requsted extent into the existing extent or 1907d0d856e8SRandy Dunlap * inserts requested extent as new one into the tree, 1908d0d856e8SRandy Dunlap * creating new leaf in the no-space case. 1909a86c6181SAlex Tomas */ 1910a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1911dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 1912107a7bd3STheodore Ts'o struct ext4_extent *newext, int gb_flags) 1913a86c6181SAlex Tomas { 1914dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 1915a86c6181SAlex Tomas struct ext4_extent_header *eh; 1916a86c6181SAlex Tomas struct ext4_extent *ex, *fex; 1917a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */ 1918a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL; 1919725d26d3SAneesh Kumar K.V int depth, len, err; 1920725d26d3SAneesh Kumar K.V ext4_lblk_t next; 1921556615dcSLukas Czerner int mb_flags = 0, unwritten; 1922a86c6181SAlex Tomas 1923e3cf5d5dSTheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 1924e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_DELALLOC_RESERVED; 1925273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1926273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 19276a797d27SDarrick J. Wong return -EFSCORRUPTED; 1928273df556SFrank Mayhar } 1929a86c6181SAlex Tomas depth = ext_depth(inode); 1930a86c6181SAlex Tomas ex = path[depth].p_ext; 1931be8981beSLukas Czerner eh = path[depth].p_hdr; 1932273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1933273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 19346a797d27SDarrick J. Wong return -EFSCORRUPTED; 1935273df556SFrank Mayhar } 1936a86c6181SAlex Tomas 1937a86c6181SAlex Tomas /* try to insert block into found extent and return */ 1938107a7bd3STheodore Ts'o if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { 1939be8981beSLukas Czerner 1940be8981beSLukas Czerner /* 1941be8981beSLukas Czerner * Try to see whether we should rather test the extent on 1942be8981beSLukas Czerner * right from ex, or from the left of ex. This is because 1943ed8a1a76STheodore Ts'o * ext4_find_extent() can return either extent on the 1944be8981beSLukas Czerner * left, or on the right from the searched position. This 1945be8981beSLukas Czerner * will make merging more effective. 
1946be8981beSLukas Czerner */ 1947be8981beSLukas Czerner if (ex < EXT_LAST_EXTENT(eh) && 1948be8981beSLukas Czerner (le32_to_cpu(ex->ee_block) + 1949be8981beSLukas Czerner ext4_ext_get_actual_len(ex) < 1950be8981beSLukas Czerner le32_to_cpu(newext->ee_block))) { 1951be8981beSLukas Czerner ex += 1; 1952be8981beSLukas Czerner goto prepend; 1953be8981beSLukas Czerner } else if ((ex > EXT_FIRST_EXTENT(eh)) && 1954be8981beSLukas Czerner (le32_to_cpu(newext->ee_block) + 1955be8981beSLukas Czerner ext4_ext_get_actual_len(newext) < 1956be8981beSLukas Czerner le32_to_cpu(ex->ee_block))) 1957be8981beSLukas Czerner ex -= 1; 1958be8981beSLukas Czerner 1959be8981beSLukas Czerner /* Try to append newex to the ex */ 1960be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, ex, newext)) { 1961be8981beSLukas Czerner ext_debug("append [%d]%d block to %u:[%d]%d" 1962be8981beSLukas Czerner "(from %llu)\n", 1963556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 1964a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 1965a86c6181SAlex Tomas le32_to_cpu(ex->ee_block), 1966556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 1967bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), 1968bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 1969be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 1970be8981beSLukas Czerner path + depth); 19717e028976SAvantika Mathur if (err) 1972a86c6181SAlex Tomas return err; 1973556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 1974a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1975a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext)); 1976556615dcSLukas Czerner if (unwritten) 1977556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 1978a86c6181SAlex Tomas eh = path[depth].p_hdr; 1979a86c6181SAlex Tomas nearex = ex; 1980a86c6181SAlex Tomas goto merge; 1981a86c6181SAlex Tomas } 1982a86c6181SAlex Tomas 1983be8981beSLukas Czerner prepend: 1984be8981beSLukas Czerner /* Try to prepend newex to the ex */ 1985be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, newext, ex)) { 1986be8981beSLukas Czerner ext_debug("prepend %u[%d]%d block to %u:[%d]%d" 1987be8981beSLukas Czerner "(from %llu)\n", 1988be8981beSLukas Czerner le32_to_cpu(newext->ee_block), 1989556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 1990be8981beSLukas Czerner ext4_ext_get_actual_len(newext), 1991be8981beSLukas Czerner le32_to_cpu(ex->ee_block), 1992556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 1993be8981beSLukas Czerner ext4_ext_get_actual_len(ex), 1994be8981beSLukas Czerner ext4_ext_pblock(ex)); 1995be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 1996be8981beSLukas Czerner path + depth); 1997be8981beSLukas Czerner if (err) 1998be8981beSLukas Czerner return err; 1999be8981beSLukas Czerner 2000556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 2001be8981beSLukas Czerner ex->ee_block = newext->ee_block; 2002be8981beSLukas Czerner ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 2003be8981beSLukas Czerner ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2004be8981beSLukas Czerner + ext4_ext_get_actual_len(newext)); 2005556615dcSLukas Czerner if (unwritten) 2006556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 2007be8981beSLukas Czerner eh = path[depth].p_hdr; 2008be8981beSLukas Czerner nearex = ex; 2009be8981beSLukas Czerner goto merge; 2010be8981beSLukas Czerner } 2011be8981beSLukas Czerner } 2012be8981beSLukas Czerner 2013a86c6181SAlex Tomas depth = ext_depth(inode); 2014a86c6181SAlex Tomas eh = path[depth].p_hdr; 
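	/*
	 * The new extent could not be merged into a neighbouring one (or
	 * merging was skipped for pre-IO allocations), so it has to be
	 * inserted as a separate entry: use this leaf if it still has room,
	 * otherwise try the next leaf, and as a last resort create a new
	 * leaf by splitting the tree or growing it in depth.
	 */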
2015a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 2016a86c6181SAlex Tomas goto has_space; 2017a86c6181SAlex Tomas 2018a86c6181SAlex Tomas /* probably next leaf has space for us? */ 2019a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh); 2020598dbdf2SRobin Dong next = EXT_MAX_BLOCKS; 2021598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 20225718789dSRobin Dong next = ext4_ext_next_leaf_block(path); 2023598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) { 202432de6756SYongqiang Yang ext_debug("next leaf block - %u\n", next); 2025a86c6181SAlex Tomas BUG_ON(npath != NULL); 2026ed8a1a76STheodore Ts'o npath = ext4_find_extent(inode, next, NULL, 0); 2027a86c6181SAlex Tomas if (IS_ERR(npath)) 2028a86c6181SAlex Tomas return PTR_ERR(npath); 2029a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth); 2030a86c6181SAlex Tomas eh = npath[depth].p_hdr; 2031a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 203225985edcSLucas De Marchi ext_debug("next leaf isn't full(%d)\n", 2033a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries)); 2034a86c6181SAlex Tomas path = npath; 2035ffb505ffSRobin Dong goto has_space; 2036a86c6181SAlex Tomas } 2037a86c6181SAlex Tomas ext_debug("next leaf has no free space(%d,%d)\n", 2038a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2039a86c6181SAlex Tomas } 2040a86c6181SAlex Tomas 2041a86c6181SAlex Tomas /* 2042d0d856e8SRandy Dunlap * There is no free space in the found leaf. 2043d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree. 2044a86c6181SAlex Tomas */ 2045107a7bd3STheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2046e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_USE_RESERVED; 2047107a7bd3STheodore Ts'o err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2048dfe50809STheodore Ts'o ppath, newext); 2049a86c6181SAlex Tomas if (err) 2050a86c6181SAlex Tomas goto cleanup; 2051a86c6181SAlex Tomas depth = ext_depth(inode); 2052a86c6181SAlex Tomas eh = path[depth].p_hdr; 2053a86c6181SAlex Tomas 2054a86c6181SAlex Tomas has_space: 2055a86c6181SAlex Tomas nearex = path[depth].p_ext; 2056a86c6181SAlex Tomas 20577e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 20587e028976SAvantika Mathur if (err) 2059a86c6181SAlex Tomas goto cleanup; 2060a86c6181SAlex Tomas 2061a86c6181SAlex Tomas if (!nearex) { 2062a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */ 206332de6756SYongqiang Yang ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", 2064a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2065bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2066556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2067a2df2a63SAmit Arora ext4_ext_get_actual_len(newext)); 206880e675f9SEric Gouriou nearex = EXT_FIRST_EXTENT(eh); 2069a86c6181SAlex Tomas } else { 207080e675f9SEric Gouriou if (le32_to_cpu(newext->ee_block) 207180e675f9SEric Gouriou > le32_to_cpu(nearex->ee_block)) { 207280e675f9SEric Gouriou /* Insert after */ 207332de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d before: " 207432de6756SYongqiang Yang "nearest %p\n", 2075a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2076bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2077556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2078a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 207980e675f9SEric Gouriou nearex); 208080e675f9SEric Gouriou nearex++; 208180e675f9SEric Gouriou } else { 208280e675f9SEric Gouriou /* Insert before */ 
208380e675f9SEric Gouriou BUG_ON(newext->ee_block == nearex->ee_block); 208432de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d after: " 208532de6756SYongqiang Yang "nearest %p\n", 208680e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 208780e675f9SEric Gouriou ext4_ext_pblock(newext), 2088556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 208980e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 209080e675f9SEric Gouriou nearex); 209180e675f9SEric Gouriou } 209280e675f9SEric Gouriou len = EXT_LAST_EXTENT(eh) - nearex + 1; 209380e675f9SEric Gouriou if (len > 0) { 209432de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d: " 209580e675f9SEric Gouriou "move %d extents from 0x%p to 0x%p\n", 209680e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 209780e675f9SEric Gouriou ext4_ext_pblock(newext), 2098556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 209980e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 210080e675f9SEric Gouriou len, nearex, nearex + 1); 210180e675f9SEric Gouriou memmove(nearex + 1, nearex, 210280e675f9SEric Gouriou len * sizeof(struct ext4_extent)); 210380e675f9SEric Gouriou } 2104a86c6181SAlex Tomas } 2105a86c6181SAlex Tomas 2106e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1); 210780e675f9SEric Gouriou path[depth].p_ext = nearex; 2108a86c6181SAlex Tomas nearex->ee_block = newext->ee_block; 2109bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2110a86c6181SAlex Tomas nearex->ee_len = newext->ee_len; 2111a86c6181SAlex Tomas 2112a86c6181SAlex Tomas merge: 2113e7bcf823SHaiboLiu /* try to merge extents */ 2114107a7bd3STheodore Ts'o if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) 2115ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, nearex); 2116a86c6181SAlex Tomas 2117a86c6181SAlex Tomas 2118a86c6181SAlex Tomas /* time to correct all indexes above */ 2119a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2120a86c6181SAlex Tomas if (err) 2121a86c6181SAlex Tomas goto cleanup; 2122a86c6181SAlex Tomas 2123ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2124a86c6181SAlex Tomas 2125a86c6181SAlex Tomas cleanup: 2126a86c6181SAlex Tomas ext4_ext_drop_refs(npath); 2127a86c6181SAlex Tomas kfree(npath); 2128a86c6181SAlex Tomas return err; 2129a86c6181SAlex Tomas } 2130a86c6181SAlex Tomas 2131bb5835edSTheodore Ts'o static int ext4_fill_es_cache_info(struct inode *inode, 2132bb5835edSTheodore Ts'o ext4_lblk_t block, ext4_lblk_t num, 2133bb5835edSTheodore Ts'o struct fiemap_extent_info *fieinfo) 2134bb5835edSTheodore Ts'o { 2135bb5835edSTheodore Ts'o ext4_lblk_t next, end = block + num - 1; 2136bb5835edSTheodore Ts'o struct extent_status es; 2137bb5835edSTheodore Ts'o unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 2138bb5835edSTheodore Ts'o unsigned int flags; 2139bb5835edSTheodore Ts'o int err; 2140bb5835edSTheodore Ts'o 2141bb5835edSTheodore Ts'o while (block <= end) { 2142bb5835edSTheodore Ts'o next = 0; 2143bb5835edSTheodore Ts'o flags = 0; 2144bb5835edSTheodore Ts'o if (!ext4_es_lookup_extent(inode, block, &next, &es)) 2145bb5835edSTheodore Ts'o break; 2146bb5835edSTheodore Ts'o if (ext4_es_is_unwritten(&es)) 2147bb5835edSTheodore Ts'o flags |= FIEMAP_EXTENT_UNWRITTEN; 2148bb5835edSTheodore Ts'o if (ext4_es_is_delayed(&es)) 2149bb5835edSTheodore Ts'o flags |= (FIEMAP_EXTENT_DELALLOC | 2150bb5835edSTheodore Ts'o FIEMAP_EXTENT_UNKNOWN); 2151bb5835edSTheodore Ts'o if (ext4_es_is_hole(&es)) 2152bb5835edSTheodore Ts'o flags |= EXT4_FIEMAP_EXTENT_HOLE; 
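		/*
		 * A zero 'next' means the extent status tree holds nothing
		 * beyond this extent, so it is reported as the last one;
		 * holes and delalloc ranges carry no physical block.
		 */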
2153bb5835edSTheodore Ts'o if (next == 0) 2154bb5835edSTheodore Ts'o flags |= FIEMAP_EXTENT_LAST; 2155bb5835edSTheodore Ts'o if (flags & (FIEMAP_EXTENT_DELALLOC| 2156bb5835edSTheodore Ts'o EXT4_FIEMAP_EXTENT_HOLE)) 2157bb5835edSTheodore Ts'o es.es_pblk = 0; 2158bb5835edSTheodore Ts'o else 2159bb5835edSTheodore Ts'o es.es_pblk = ext4_es_pblock(&es); 2160bb5835edSTheodore Ts'o err = fiemap_fill_next_extent(fieinfo, 2161bb5835edSTheodore Ts'o (__u64)es.es_lblk << blksize_bits, 2162bb5835edSTheodore Ts'o (__u64)es.es_pblk << blksize_bits, 2163bb5835edSTheodore Ts'o (__u64)es.es_len << blksize_bits, 2164bb5835edSTheodore Ts'o flags); 2165bb5835edSTheodore Ts'o if (next == 0) 2166bb5835edSTheodore Ts'o break; 2167bb5835edSTheodore Ts'o block = next; 2168bb5835edSTheodore Ts'o if (err < 0) 2169bb5835edSTheodore Ts'o return err; 2170bb5835edSTheodore Ts'o if (err == 1) 2171bb5835edSTheodore Ts'o return 0; 2172bb5835edSTheodore Ts'o } 2173bb5835edSTheodore Ts'o return 0; 2174bb5835edSTheodore Ts'o } 2175bb5835edSTheodore Ts'o 2176bb5835edSTheodore Ts'o 2177a86c6181SAlex Tomas /* 2178140a5250SJan Kara * ext4_ext_determine_hole - determine hole around given block 2179140a5250SJan Kara * @inode: inode we lookup in 2180140a5250SJan Kara * @path: path in extent tree to @lblk 2181140a5250SJan Kara * @lblk: pointer to logical block around which we want to determine hole 2182140a5250SJan Kara * 2183140a5250SJan Kara * Determine hole length (and start if easily possible) around given logical 2184140a5250SJan Kara * block. We don't try too hard to find the beginning of the hole but @path 2185140a5250SJan Kara * actually points to extent before @lblk, we provide it. 2186140a5250SJan Kara * 2187140a5250SJan Kara * The function returns the length of a hole starting at @lblk. We update @lblk 2188140a5250SJan Kara * to the beginning of the hole if we managed to find it. 
2189140a5250SJan Kara */ 2190140a5250SJan Kara static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode, 2191140a5250SJan Kara struct ext4_ext_path *path, 2192140a5250SJan Kara ext4_lblk_t *lblk) 2193140a5250SJan Kara { 2194140a5250SJan Kara int depth = ext_depth(inode); 2195140a5250SJan Kara struct ext4_extent *ex; 2196140a5250SJan Kara ext4_lblk_t len; 2197140a5250SJan Kara 2198140a5250SJan Kara ex = path[depth].p_ext; 2199140a5250SJan Kara if (ex == NULL) { 2200140a5250SJan Kara /* there is no extent yet, so gap is [0;-] */ 2201140a5250SJan Kara *lblk = 0; 2202140a5250SJan Kara len = EXT_MAX_BLOCKS; 2203140a5250SJan Kara } else if (*lblk < le32_to_cpu(ex->ee_block)) { 2204140a5250SJan Kara len = le32_to_cpu(ex->ee_block) - *lblk; 2205140a5250SJan Kara } else if (*lblk >= le32_to_cpu(ex->ee_block) 2206140a5250SJan Kara + ext4_ext_get_actual_len(ex)) { 2207140a5250SJan Kara ext4_lblk_t next; 2208140a5250SJan Kara 2209140a5250SJan Kara *lblk = le32_to_cpu(ex->ee_block) + ext4_ext_get_actual_len(ex); 2210140a5250SJan Kara next = ext4_ext_next_allocated_block(path); 2211140a5250SJan Kara BUG_ON(next == *lblk); 2212140a5250SJan Kara len = next - *lblk; 2213140a5250SJan Kara } else { 2214140a5250SJan Kara BUG(); 2215140a5250SJan Kara } 2216140a5250SJan Kara return len; 2217140a5250SJan Kara } 2218140a5250SJan Kara 2219140a5250SJan Kara /* 2220d0d856e8SRandy Dunlap * ext4_ext_put_gap_in_cache: 2221d0d856e8SRandy Dunlap * calculate boundaries of the gap that the requested block fits into 2222a86c6181SAlex Tomas * and cache this gap 2223a86c6181SAlex Tomas */ 222409b88252SAvantika Mathur static void 2225140a5250SJan Kara ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start, 2226140a5250SJan Kara ext4_lblk_t hole_len) 2227a86c6181SAlex Tomas { 22282f8e0a7cSZheng Liu struct extent_status es; 2229a86c6181SAlex Tomas 2230ad431025SEric Whitney ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start, 2231140a5250SJan Kara hole_start + hole_len - 1, &es); 22322f8e0a7cSZheng Liu if (es.es_len) { 22332f8e0a7cSZheng Liu /* There's delayed extent containing lblock? */ 2234140a5250SJan Kara if (es.es_lblk <= hole_start) 22352f8e0a7cSZheng Liu return; 2236140a5250SJan Kara hole_len = min(es.es_lblk - hole_start, hole_len); 22372f8e0a7cSZheng Liu } 2238140a5250SJan Kara ext_debug(" -> %u:%u\n", hole_start, hole_len); 2239140a5250SJan Kara ext4_es_insert_extent(inode, hole_start, hole_len, ~0, 2240140a5250SJan Kara EXTENT_STATUS_HOLE); 2241a86c6181SAlex Tomas } 2242a86c6181SAlex Tomas 2243a86c6181SAlex Tomas /* 2244d0d856e8SRandy Dunlap * ext4_ext_rm_idx: 2245d0d856e8SRandy Dunlap * removes index from the index block. 2246a86c6181SAlex Tomas */ 22471d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2248c36575e6SForrest Liu struct ext4_ext_path *path, int depth) 2249a86c6181SAlex Tomas { 2250a86c6181SAlex Tomas int err; 2251f65e6fbaSAlex Tomas ext4_fsblk_t leaf; 2252a86c6181SAlex Tomas 2253a86c6181SAlex Tomas /* free index block */ 2254c36575e6SForrest Liu depth--; 2255c36575e6SForrest Liu path = path + depth; 2256bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx); 2257273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) { 2258273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 22596a797d27SDarrick J. 
Wong return -EFSCORRUPTED; 2260273df556SFrank Mayhar } 22617e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path); 22627e028976SAvantika Mathur if (err) 2263a86c6181SAlex Tomas return err; 22640e1147b0SRobin Dong 22650e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 22660e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 22670e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx); 22680e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len); 22690e1147b0SRobin Dong } 22700e1147b0SRobin Dong 2271e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1); 22727e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path); 22737e028976SAvantika Mathur if (err) 2274a86c6181SAlex Tomas return err; 22752ae02107SMingming Cao ext_debug("index is empty, remove it, free block %llu\n", leaf); 2276d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf); 2277d8990240SAditya Kali 22787dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1, 2279e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2280c36575e6SForrest Liu 2281c36575e6SForrest Liu while (--depth >= 0) { 2282c36575e6SForrest Liu if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2283c36575e6SForrest Liu break; 2284c36575e6SForrest Liu path--; 2285c36575e6SForrest Liu err = ext4_ext_get_access(handle, inode, path); 2286c36575e6SForrest Liu if (err) 2287c36575e6SForrest Liu break; 2288c36575e6SForrest Liu path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2289c36575e6SForrest Liu err = ext4_ext_dirty(handle, inode, path); 2290c36575e6SForrest Liu if (err) 2291c36575e6SForrest Liu break; 2292c36575e6SForrest Liu } 2293a86c6181SAlex Tomas return err; 2294a86c6181SAlex Tomas } 2295a86c6181SAlex Tomas 2296a86c6181SAlex Tomas /* 2297ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent: 2298ee12b630SMingming Cao * This routine returns max. credits that needed to insert an extent 2299ee12b630SMingming Cao * to the extent tree. 2300ee12b630SMingming Cao * When pass the actual path, the caller should calculate credits 2301ee12b630SMingming Cao * under i_data_sem. 2302a86c6181SAlex Tomas */ 2303525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2304a86c6181SAlex Tomas struct ext4_ext_path *path) 2305a86c6181SAlex Tomas { 2306a86c6181SAlex Tomas if (path) { 2307ee12b630SMingming Cao int depth = ext_depth(inode); 2308f3bd1f3fSMingming Cao int ret = 0; 2309ee12b630SMingming Cao 2310a86c6181SAlex Tomas /* probably there is space in leaf? */ 2311a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2312ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2313ee12b630SMingming Cao 2314ee12b630SMingming Cao /* 2315ee12b630SMingming Cao * There are some space in the leaf tree, no 2316ee12b630SMingming Cao * need to account for leaf block credit 2317ee12b630SMingming Cao * 2318ee12b630SMingming Cao * bitmaps and block group descriptor blocks 2319df3ab170STao Ma * and other metadata blocks still need to be 2320ee12b630SMingming Cao * accounted. 
2321ee12b630SMingming Cao */ 2322525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */ 2323ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 23245887e98bSAneesh Kumar K.V return ret; 2325ee12b630SMingming Cao } 2326ee12b630SMingming Cao } 2327ee12b630SMingming Cao 2328525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks); 2329a86c6181SAlex Tomas } 2330a86c6181SAlex Tomas 2331a86c6181SAlex Tomas /* 2332fffb2739SJan Kara * How many index/leaf blocks need to change/allocate to add @extents extents? 2333ee12b630SMingming Cao * 2334fffb2739SJan Kara * If we add a single extent, then in the worst case each tree level's 2335fffb2739SJan Kara * index/leaf needs to be changed in case the tree splits. 2336ee12b630SMingming Cao * 2337fffb2739SJan Kara * If more extents are inserted, they could cause the whole tree to split more 2338fffb2739SJan Kara * than once, but this is really rare. 2339a86c6181SAlex Tomas */ 2340fffb2739SJan Kara int ext4_ext_index_trans_blocks(struct inode *inode, int extents) 2341ee12b630SMingming Cao { 2342ee12b630SMingming Cao int index; 2343f19d5870STao Ma int depth; 2344f19d5870STao Ma 2345f19d5870STao Ma /* If we are converting the inline data, only one is needed here. */ 2346f19d5870STao Ma if (ext4_has_inline_data(inode)) 2347f19d5870STao Ma return 1; 2348f19d5870STao Ma 2349f19d5870STao Ma depth = ext_depth(inode); 2350a86c6181SAlex Tomas 2351fffb2739SJan Kara if (extents <= 1) 2352ee12b630SMingming Cao index = depth * 2; 2353ee12b630SMingming Cao else 2354ee12b630SMingming Cao index = depth * 3; 2355a86c6181SAlex Tomas 2356ee12b630SMingming Cao return index; 2357a86c6181SAlex Tomas } 2358a86c6181SAlex Tomas 2359981250caSTheodore Ts'o static inline int get_default_free_blocks_flags(struct inode *inode) 2360981250caSTheodore Ts'o { 2361ddfa17e4STahsin Erdogan if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) || 2362ddfa17e4STahsin Erdogan ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE)) 2363981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 2364981250caSTheodore Ts'o else if (ext4_should_journal_data(inode)) 2365981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_FORGET; 2366981250caSTheodore Ts'o return 0; 2367981250caSTheodore Ts'o } 2368981250caSTheodore Ts'o 23699fe67149SEric Whitney /* 23709fe67149SEric Whitney * ext4_rereserve_cluster - increment the reserved cluster count when 23719fe67149SEric Whitney * freeing a cluster with a pending reservation 23729fe67149SEric Whitney * 23739fe67149SEric Whitney * @inode - file containing the cluster 23749fe67149SEric Whitney * @lblk - logical block in cluster to be reserved 23759fe67149SEric Whitney * 23769fe67149SEric Whitney * Increments the reserved cluster count and adjusts quota in a bigalloc 23779fe67149SEric Whitney * file system when freeing a partial cluster containing at least one 23789fe67149SEric Whitney * delayed and unwritten block. A partial cluster meeting that 23799fe67149SEric Whitney * requirement will have a pending reservation. If so, the 23809fe67149SEric Whitney * RERESERVE_CLUSTER flag is used when calling ext4_free_blocks() to 23819fe67149SEric Whitney * defer reserved and allocated space accounting to a subsequent call 23829fe67149SEric Whitney * to this function.
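* * Roughly, the body below reverses a delayed-allocation claim: one cluster of quota is moved back from used to reserved via dquot_reclaim_block(), i_reserved_data_blocks and s_dirtyclusters_counter are incremented, the freed cluster is returned to s_freeclusters_counter, and the pending reservation record for @lblk is removed.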
23839fe67149SEric Whitney */ 23849fe67149SEric Whitney static void ext4_rereserve_cluster(struct inode *inode, ext4_lblk_t lblk) 23859fe67149SEric Whitney { 23869fe67149SEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 23879fe67149SEric Whitney struct ext4_inode_info *ei = EXT4_I(inode); 23889fe67149SEric Whitney 23899fe67149SEric Whitney dquot_reclaim_block(inode, EXT4_C2B(sbi, 1)); 23909fe67149SEric Whitney 23919fe67149SEric Whitney spin_lock(&ei->i_block_reservation_lock); 23929fe67149SEric Whitney ei->i_reserved_data_blocks++; 23939fe67149SEric Whitney percpu_counter_add(&sbi->s_dirtyclusters_counter, 1); 23949fe67149SEric Whitney spin_unlock(&ei->i_block_reservation_lock); 23959fe67149SEric Whitney 23969fe67149SEric Whitney percpu_counter_add(&sbi->s_freeclusters_counter, 1); 23979fe67149SEric Whitney ext4_remove_pending(inode, lblk); 23989fe67149SEric Whitney } 23999fe67149SEric Whitney 2400a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2401a86c6181SAlex Tomas struct ext4_extent *ex, 24029fe67149SEric Whitney struct partial_cluster *partial, 2403725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to) 2404a86c6181SAlex Tomas { 24050aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2406a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex); 24079fe67149SEric Whitney ext4_fsblk_t last_pblk, pblk; 24089fe67149SEric Whitney ext4_lblk_t num; 24099fe67149SEric Whitney int flags; 241018888cf0SAndrey Sidorov 24119fe67149SEric Whitney /* only extent tail removal is allowed */ 24129fe67149SEric Whitney if (from < le32_to_cpu(ex->ee_block) || 24139fe67149SEric Whitney to != le32_to_cpu(ex->ee_block) + ee_len - 1) { 24149fe67149SEric Whitney ext4_error(sbi->s_sb, 24159fe67149SEric Whitney "strange request: removal(2) %u-%u from %u:%u", 24169fe67149SEric Whitney from, to, le32_to_cpu(ex->ee_block), ee_len); 24179fe67149SEric Whitney return 0; 24180aa06000STheodore Ts'o } 24190aa06000STheodore Ts'o 2420a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2421a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock); 2422a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len; 2423a86c6181SAlex Tomas sbi->s_ext_extents++; 2424a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min) 2425a86c6181SAlex Tomas sbi->s_ext_min = ee_len; 2426a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max) 2427a86c6181SAlex Tomas sbi->s_ext_max = ee_len; 2428a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max) 2429a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode); 2430a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock); 2431a86c6181SAlex Tomas #endif 24329fe67149SEric Whitney 24339fe67149SEric Whitney trace_ext4_remove_blocks(inode, ex, from, to, partial); 24349fe67149SEric Whitney 24359fe67149SEric Whitney /* 24369fe67149SEric Whitney * if we have a partial cluster, and it's different from the 24379fe67149SEric Whitney * cluster of the last block in the extent, we free it 24389fe67149SEric Whitney */ 24399fe67149SEric Whitney last_pblk = ext4_ext_pblock(ex) + ee_len - 1; 24409fe67149SEric Whitney 24419fe67149SEric Whitney if (partial->state != initial && 24429fe67149SEric Whitney partial->pclu != EXT4_B2C(sbi, last_pblk)) { 24439fe67149SEric Whitney if (partial->state == tofree) { 24449fe67149SEric Whitney flags = get_default_free_blocks_flags(inode); 24459fe67149SEric Whitney if (ext4_is_pending(inode, partial->lblk)) 24469fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 24479fe67149SEric Whitney ext4_free_blocks(handle, 
inode, NULL, 24489fe67149SEric Whitney EXT4_C2B(sbi, partial->pclu), 24499fe67149SEric Whitney sbi->s_cluster_ratio, flags); 24509fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 24519fe67149SEric Whitney ext4_rereserve_cluster(inode, partial->lblk); 24529fe67149SEric Whitney } 24539fe67149SEric Whitney partial->state = initial; 24549fe67149SEric Whitney } 2455725d26d3SAneesh Kumar K.V 2456a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from; 24570aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num; 24589fe67149SEric Whitney 2459d23142c6SLukas Czerner /* 24609fe67149SEric Whitney * We free the partial cluster at the end of the extent (if any), 24619fe67149SEric Whitney * unless the cluster is used by another extent (partial_cluster 24629fe67149SEric Whitney * state is nofree). If a partial cluster exists here, it must be 24639fe67149SEric Whitney * shared with the last block in the extent. 2464d23142c6SLukas Czerner */ 24659fe67149SEric Whitney flags = get_default_free_blocks_flags(inode); 24669fe67149SEric Whitney 24679fe67149SEric Whitney /* partial, left end cluster aligned, right end unaligned */ 24689fe67149SEric Whitney if ((EXT4_LBLK_COFF(sbi, to) != sbi->s_cluster_ratio - 1) && 24699fe67149SEric Whitney (EXT4_LBLK_CMASK(sbi, to) >= from) && 24709fe67149SEric Whitney (partial->state != nofree)) { 24719fe67149SEric Whitney if (ext4_is_pending(inode, to)) 24729fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 24739fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL, 24749fe67149SEric Whitney EXT4_PBLK_CMASK(sbi, last_pblk), 24759fe67149SEric Whitney sbi->s_cluster_ratio, flags); 24769fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 24779fe67149SEric Whitney ext4_rereserve_cluster(inode, to); 24789fe67149SEric Whitney partial->state = initial; 24799fe67149SEric Whitney flags = get_default_free_blocks_flags(inode); 24809fe67149SEric Whitney } 24819fe67149SEric Whitney 2482d23142c6SLukas Czerner flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2483d23142c6SLukas Czerner 24840aa06000STheodore Ts'o /* 24859fe67149SEric Whitney * For bigalloc file systems, we never free a partial cluster 24869fe67149SEric Whitney * at the beginning of the extent. Instead, we check to see if we 24879fe67149SEric Whitney * need to free it on a subsequent call to ext4_remove_blocks, 24889fe67149SEric Whitney * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. 24890aa06000STheodore Ts'o */ 24909fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 24919fe67149SEric Whitney ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 24929fe67149SEric Whitney 24939fe67149SEric Whitney /* reset the partial cluster if we've freed past it */ 24949fe67149SEric Whitney if (partial->state != initial && partial->pclu != EXT4_B2C(sbi, pblk)) 24959fe67149SEric Whitney partial->state = initial; 24969fe67149SEric Whitney 24979fe67149SEric Whitney /* 24989fe67149SEric Whitney * If we've freed the entire extent but the beginning is not left 24999fe67149SEric Whitney * cluster aligned and is not marked as ineligible for freeing we 25009fe67149SEric Whitney * record the partial cluster at the beginning of the extent. It 25019fe67149SEric Whitney * wasn't freed by the preceding ext4_free_blocks() call, and we 25029fe67149SEric Whitney * need to look farther to the left to determine if it's to be freed 25039fe67149SEric Whitney * (not shared with another extent). 
Else, reset the partial 25049fe67149SEric Whitney * cluster - we're either done freeing or the beginning of the 25059fe67149SEric Whitney * extent is left cluster aligned. 25069fe67149SEric Whitney */ 25079fe67149SEric Whitney if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) { 25089fe67149SEric Whitney if (partial->state == initial) { 25099fe67149SEric Whitney partial->pclu = EXT4_B2C(sbi, pblk); 25109fe67149SEric Whitney partial->lblk = from; 25119fe67149SEric Whitney partial->state = tofree; 2512345ee947SEric Whitney } 25139fe67149SEric Whitney } else { 25149fe67149SEric Whitney partial->state = initial; 2515a86c6181SAlex Tomas } 2516a86c6181SAlex Tomas 25179fe67149SEric Whitney return 0; 25189fe67149SEric Whitney } 2519d583fb87SAllison Henderson 2520d583fb87SAllison Henderson /* 2521d583fb87SAllison Henderson * ext4_ext_rm_leaf() removes the extents associated with the 25225bf43760SEric Whitney * blocks appearing between "start" and "end". Both "start" 25235bf43760SEric Whitney * and "end" must appear in the same extent or EIO is returned. 2524d583fb87SAllison Henderson * 2525d583fb87SAllison Henderson * @handle: The journal handle 2526d583fb87SAllison Henderson * @inode: The file's inode 2527d583fb87SAllison Henderson * @path: The path to the leaf 2528d23142c6SLukas Czerner * @partial_cluster: The cluster which we'll have to free if all extents 25295bf43760SEric Whitney * have been released from it. However, if this value is 25305bf43760SEric Whitney * negative, it's a cluster just to the right of the 25315bf43760SEric Whitney * punched region and it must not be freed. 2532d583fb87SAllison Henderson * @start: The first block to remove 2533d583fb87SAllison Henderson * @end: The last block to remove 2534d583fb87SAllison Henderson */ 2535a86c6181SAlex Tomas static int 2536a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2537d23142c6SLukas Czerner struct ext4_ext_path *path, 25389fe67149SEric Whitney struct partial_cluster *partial, 25390aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end) 2540a86c6181SAlex Tomas { 25410aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2542a86c6181SAlex Tomas int err = 0, correct_index = 0; 254383448bdfSJan Kara int depth = ext_depth(inode), credits, revoke_credits; 2544a86c6181SAlex Tomas struct ext4_extent_header *eh; 2545750c9c47SDmitry Monakhov ext4_lblk_t a, b; 2546725d26d3SAneesh Kumar K.V unsigned num; 2547725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block; 2548a86c6181SAlex Tomas unsigned short ex_ee_len; 2549556615dcSLukas Czerner unsigned unwritten = 0; 2550a86c6181SAlex Tomas struct ext4_extent *ex; 2551d23142c6SLukas Czerner ext4_fsblk_t pblk; 2552a86c6181SAlex Tomas 2553c29c0ae7SAlex Tomas /* the header must be checked already in ext4_ext_remove_space() */ 25545f95d21fSLukas Czerner ext_debug("truncate since %u in leaf to %u\n", start, end); 2555a86c6181SAlex Tomas if (!path[depth].p_hdr) 2556a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2557a86c6181SAlex Tomas eh = path[depth].p_hdr; 2558273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 2559273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 25606a797d27SDarrick J.
Wong return -EFSCORRUPTED; 2561273df556SFrank Mayhar } 2562a86c6181SAlex Tomas /* find where to start removing */ 25636ae06ff5SAshish Sangwan ex = path[depth].p_ext; 25646ae06ff5SAshish Sangwan if (!ex) 2565a86c6181SAlex Tomas ex = EXT_LAST_EXTENT(eh); 2566a86c6181SAlex Tomas 2567a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2568a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2569a86c6181SAlex Tomas 25709fe67149SEric Whitney trace_ext4_ext_rm_leaf(inode, start, ex, partial); 2571d8990240SAditya Kali 2572a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) && 2573a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) { 2574a41f2071SAneesh Kumar K.V 2575556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex)) 2576556615dcSLukas Czerner unwritten = 1; 2577a41f2071SAneesh Kumar K.V else 2578556615dcSLukas Czerner unwritten = 0; 2579a41f2071SAneesh Kumar K.V 2580553f9008SMingming ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2581556615dcSLukas Czerner unwritten, ex_ee_len); 2582a86c6181SAlex Tomas path[depth].p_ext = ex; 2583a86c6181SAlex Tomas 2584a86c6181SAlex Tomas a = ex_ee_block > start ? ex_ee_block : start; 2585d583fb87SAllison Henderson b = ex_ee_block+ex_ee_len - 1 < end ? 2586d583fb87SAllison Henderson ex_ee_block+ex_ee_len - 1 : end; 2587a86c6181SAlex Tomas 2588a86c6181SAlex Tomas ext_debug(" border %u:%u\n", a, b); 2589a86c6181SAlex Tomas 2590d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */ 25915f95d21fSLukas Czerner if (end < ex_ee_block) { 2592d23142c6SLukas Czerner /* 2593d23142c6SLukas Czerner * We're going to skip this extent and move to another, 2594f4226d9eSEric Whitney * so note that its first cluster is in use to avoid 2595f4226d9eSEric Whitney * freeing it when removing blocks. Eventually, the 2596f4226d9eSEric Whitney * right edge of the truncated/punched region will 2597f4226d9eSEric Whitney * be just to the left. 2598d23142c6SLukas Czerner */ 2599f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) { 2600d23142c6SLukas Czerner pblk = ext4_ext_pblock(ex); 26019fe67149SEric Whitney partial->pclu = EXT4_B2C(sbi, pblk); 26029fe67149SEric Whitney partial->state = nofree; 2603f4226d9eSEric Whitney } 2604d583fb87SAllison Henderson ex--; 2605d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block); 2606d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex); 2607d583fb87SAllison Henderson continue; 2608750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) { 2609dc1841d6SLukas Czerner EXT4_ERROR_INODE(inode, 2610dc1841d6SLukas Czerner "can not handle truncate %u:%u " 2611dc1841d6SLukas Czerner "on extent %u:%u", 2612dc1841d6SLukas Czerner start, end, ex_ee_block, 2613dc1841d6SLukas Czerner ex_ee_block + ex_ee_len - 1); 26146a797d27SDarrick J. Wong err = -EFSCORRUPTED; 2615d583fb87SAllison Henderson goto out; 2616a86c6181SAlex Tomas } else if (a != ex_ee_block) { 2617a86c6181SAlex Tomas /* remove tail of the extent */ 2618750c9c47SDmitry Monakhov num = a - ex_ee_block; 2619a86c6181SAlex Tomas } else { 2620a86c6181SAlex Tomas /* remove whole extent: excellent! 
*/ 2621a86c6181SAlex Tomas num = 0; 2622d583fb87SAllison Henderson } 262334071da7STheodore Ts'o /* 262434071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group 262534071da7STheodore Ts'o * descriptor) for each block group; assume two block 262634071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for 262734071da7STheodore Ts'o * the worst case 262834071da7STheodore Ts'o */ 262934071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2630a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) { 2631a86c6181SAlex Tomas correct_index = 1; 2632a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1; 2633a86c6181SAlex Tomas } 26345aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 263583448bdfSJan Kara /* 263683448bdfSJan Kara * We may end up freeing some index blocks and data from the 263783448bdfSJan Kara * punched range. Note that partial clusters are accounted for 263883448bdfSJan Kara * by ext4_free_data_revoke_credits(). 263983448bdfSJan Kara */ 264083448bdfSJan Kara revoke_credits = 264183448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, 264283448bdfSJan Kara ext_depth(inode)) + 264383448bdfSJan Kara ext4_free_data_revoke_credits(inode, b - a + 1); 2644a86c6181SAlex Tomas 2645a4130367SJan Kara err = ext4_datasem_ensure_credits(handle, inode, credits, 264683448bdfSJan Kara credits, revoke_credits); 2647a4130367SJan Kara if (err) { 2648a4130367SJan Kara if (err > 0) 2649a4130367SJan Kara err = -EAGAIN; 2650a86c6181SAlex Tomas goto out; 2651a4130367SJan Kara } 2652a86c6181SAlex Tomas 2653a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth); 2654a86c6181SAlex Tomas if (err) 2655a86c6181SAlex Tomas goto out; 2656a86c6181SAlex Tomas 26579fe67149SEric Whitney err = ext4_remove_blocks(handle, inode, ex, partial, a, b); 2658a86c6181SAlex Tomas if (err) 2659a86c6181SAlex Tomas goto out; 2660a86c6181SAlex Tomas 2661750c9c47SDmitry Monakhov if (num == 0) 2662d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */ 2663f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0); 2664a86c6181SAlex Tomas 2665a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num); 2666749269faSAmit Arora /* 2667556615dcSLukas Czerner * Do not mark unwritten if all the blocks in the 2668749269faSAmit Arora * extent have been removed. 
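* (In that case num == 0, and the now-empty slot is deleted from the leaf just below.)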
2669749269faSAmit Arora */ 2670556615dcSLukas Czerner if (unwritten && num) 2671556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 2672d583fb87SAllison Henderson /* 2673d583fb87SAllison Henderson * If the extent was completely released, 2674d583fb87SAllison Henderson * we need to remove it from the leaf 2675d583fb87SAllison Henderson */ 2676d583fb87SAllison Henderson if (num == 0) { 2677f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) { 2678d583fb87SAllison Henderson /* 2679d583fb87SAllison Henderson * For hole punching, we need to scoot all the 2680d583fb87SAllison Henderson * extents up when an extent is removed so that 2681d583fb87SAllison Henderson * we don't have blank extents in the middle 2682d583fb87SAllison Henderson */ 2683d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2684d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2685d583fb87SAllison Henderson 2686d583fb87SAllison Henderson /* Now get rid of the one at the end */ 2687d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0, 2688d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2689d583fb87SAllison Henderson } 2690d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1); 26915bf43760SEric Whitney } 2692d583fb87SAllison Henderson 2693750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth); 2694750c9c47SDmitry Monakhov if (err) 2695750c9c47SDmitry Monakhov goto out; 2696750c9c47SDmitry Monakhov 2697bf52c6f7SYongqiang Yang ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2698bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 2699a86c6181SAlex Tomas ex--; 2700a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2701a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2702a86c6181SAlex Tomas } 2703a86c6181SAlex Tomas 2704a86c6181SAlex Tomas if (correct_index && eh->eh_entries) 2705a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2706a86c6181SAlex Tomas 27070aa06000STheodore Ts'o /* 2708ad6599abSEric Whitney * If there's a partial cluster and at least one extent remains in 2709ad6599abSEric Whitney * the leaf, free the partial cluster if it isn't shared with the 27105bf43760SEric Whitney * current extent. If it is shared with the current extent, 27119fe67149SEric Whitney * we reset the partial cluster because we've reached the start of the 27125bf43760SEric Whitney * truncated/punched region and we're done removing blocks.
27130aa06000STheodore Ts'o */ 27149fe67149SEric Whitney if (partial->state == tofree && ex >= EXT_FIRST_EXTENT(eh)) { 27155bf43760SEric Whitney pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 27169fe67149SEric Whitney if (partial->pclu != EXT4_B2C(sbi, pblk)) { 27179fe67149SEric Whitney int flags = get_default_free_blocks_flags(inode); 27189fe67149SEric Whitney 27199fe67149SEric Whitney if (ext4_is_pending(inode, partial->lblk)) 27209fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 27210aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 27229fe67149SEric Whitney EXT4_C2B(sbi, partial->pclu), 27239fe67149SEric Whitney sbi->s_cluster_ratio, flags); 27249fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 27259fe67149SEric Whitney ext4_rereserve_cluster(inode, partial->lblk); 27265bf43760SEric Whitney } 27279fe67149SEric Whitney partial->state = initial; 27280aa06000STheodore Ts'o } 27290aa06000STheodore Ts'o 2730a86c6181SAlex Tomas /* if this leaf is free, then we should 2731a86c6181SAlex Tomas * remove it from index block above */ 2732a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2733c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, depth); 2734a86c6181SAlex Tomas 2735a86c6181SAlex Tomas out: 2736a86c6181SAlex Tomas return err; 2737a86c6181SAlex Tomas } 2738a86c6181SAlex Tomas 2739a86c6181SAlex Tomas /* 2740d0d856e8SRandy Dunlap * ext4_ext_more_to_rm: 2741d0d856e8SRandy Dunlap * returns 1 if current index has to be freed (even partial) 2742a86c6181SAlex Tomas */ 274309b88252SAvantika Mathur static int 2744a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path) 2745a86c6181SAlex Tomas { 2746a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL); 2747a86c6181SAlex Tomas 2748a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2749a86c6181SAlex Tomas return 0; 2750a86c6181SAlex Tomas 2751a86c6181SAlex Tomas /* 2752d0d856e8SRandy Dunlap * if truncate on deeper level happened, it wasn't partial, 2753a86c6181SAlex Tomas * so we have to consider current index for truncation 2754a86c6181SAlex Tomas */ 2755a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2756a86c6181SAlex Tomas return 0; 2757a86c6181SAlex Tomas return 1; 2758a86c6181SAlex Tomas } 2759a86c6181SAlex Tomas 276026a4c0c6STheodore Ts'o int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 27615f95d21fSLukas Czerner ext4_lblk_t end) 2762a86c6181SAlex Tomas { 2763f4226d9eSEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2764a86c6181SAlex Tomas int depth = ext_depth(inode); 2765968dee77SAshish Sangwan struct ext4_ext_path *path = NULL; 27669fe67149SEric Whitney struct partial_cluster partial; 2767a86c6181SAlex Tomas handle_t *handle; 27686f2080e6SDmitry Monakhov int i = 0, err = 0; 2769a86c6181SAlex Tomas 27709fe67149SEric Whitney partial.pclu = 0; 27719fe67149SEric Whitney partial.lblk = 0; 27729fe67149SEric Whitney partial.state = initial; 27739fe67149SEric Whitney 27745f95d21fSLukas Czerner ext_debug("truncate since %u to %u\n", start, end); 2775a86c6181SAlex Tomas 2776a86c6181SAlex Tomas /* probably first extent we're gonna free will be last in block */ 277783448bdfSJan Kara handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE, 277883448bdfSJan Kara depth + 1, 277983448bdfSJan Kara ext4_free_metadata_revoke_credits(inode->i_sb, depth)); 2780a86c6181SAlex Tomas if (IS_ERR(handle)) 2781a86c6181SAlex Tomas return PTR_ERR(handle); 2782a86c6181SAlex Tomas 
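/* * The removal below may need to restart: if ext4_ext_rm_leaf() cannot extend the transaction, it fails with -EAGAIN and we jump back to the "again" label with any partial cluster state preserved, so that an earlier tofree/nofree decision is not lost. */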
27830617b83fSDmitry Monakhov again: 278461801325SLukas Czerner trace_ext4_ext_remove_space(inode, start, end, depth); 2785d8990240SAditya Kali 2786a86c6181SAlex Tomas /* 27875f95d21fSLukas Czerner * Check if we are removing extents inside the extent tree. If that 27885f95d21fSLukas Czerner * is the case, we are going to punch a hole inside the extent tree 27895f95d21fSLukas Czerner * so we have to check whether we need to split the extent covering 27905f95d21fSLukas Czerner * the last block to remove so we can easily remove the part of it 27915f95d21fSLukas Czerner * in ext4_ext_rm_leaf(). 27925f95d21fSLukas Czerner */ 27935f95d21fSLukas Czerner if (end < EXT_MAX_BLOCKS - 1) { 27945f95d21fSLukas Czerner struct ext4_extent *ex; 2795f4226d9eSEric Whitney ext4_lblk_t ee_block, ex_end, lblk; 2796f4226d9eSEric Whitney ext4_fsblk_t pblk; 27975f95d21fSLukas Czerner 2798f4226d9eSEric Whitney /* find extent for or closest extent to this block */ 2799ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); 28005f95d21fSLukas Czerner if (IS_ERR(path)) { 28015f95d21fSLukas Czerner ext4_journal_stop(handle); 28025f95d21fSLukas Czerner return PTR_ERR(path); 28035f95d21fSLukas Czerner } 28045f95d21fSLukas Czerner depth = ext_depth(inode); 28056f2080e6SDmitry Monakhov /* A leaf may not exist only if the inode has no blocks at all */ 28065f95d21fSLukas Czerner ex = path[depth].p_ext; 2807968dee77SAshish Sangwan if (!ex) { 28086f2080e6SDmitry Monakhov if (depth) { 28096f2080e6SDmitry Monakhov EXT4_ERROR_INODE(inode, 28106f2080e6SDmitry Monakhov "path[%d].p_hdr == NULL", 28116f2080e6SDmitry Monakhov depth); 28126a797d27SDarrick J. Wong err = -EFSCORRUPTED; 28136f2080e6SDmitry Monakhov } 28146f2080e6SDmitry Monakhov goto out; 2815968dee77SAshish Sangwan } 28165f95d21fSLukas Czerner 28175f95d21fSLukas Czerner ee_block = le32_to_cpu(ex->ee_block); 2818f4226d9eSEric Whitney ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; 28195f95d21fSLukas Czerner 28205f95d21fSLukas Czerner /* 28215f95d21fSLukas Czerner * See if the last block is inside the extent, if so split 28225f95d21fSLukas Czerner * the extent at 'end' block so we can easily remove the 28235f95d21fSLukas Czerner * tail of the first part of the split extent in 28245f95d21fSLukas Czerner * ext4_ext_rm_leaf(). 28255f95d21fSLukas Czerner */ 2826f4226d9eSEric Whitney if (end >= ee_block && end < ex_end) { 2827f4226d9eSEric Whitney 2828f4226d9eSEric Whitney /* 2829f4226d9eSEric Whitney * If we're going to split the extent, note that 2830f4226d9eSEric Whitney * the cluster containing the block after 'end' is 2831f4226d9eSEric Whitney * in use to avoid freeing it when removing blocks. 2832f4226d9eSEric Whitney */ 2833f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) { 2834f4226d9eSEric Whitney pblk = ext4_ext_pblock(ex) + end - ee_block + 2; 28359fe67149SEric Whitney partial.pclu = EXT4_B2C(sbi, pblk); 28369fe67149SEric Whitney partial.state = nofree; 2837f4226d9eSEric Whitney } 2838f4226d9eSEric Whitney 28395f95d21fSLukas Czerner /* 28405f95d21fSLukas Czerner * Split the extent in two so that 'end' is the last 284127dd4385SLukas Czerner * block in the first new extent. Also, we should not 284227dd4385SLukas Czerner * fail removing space due to ENOSPC, so try to use a 284327dd4385SLukas Czerner * reserved block if that happens.
28445f95d21fSLukas Czerner */ 2845dfe50809STheodore Ts'o err = ext4_force_split_extent_at(handle, inode, &path, 2846fcf6b1b7SDmitry Monakhov end + 1, 1); 28475f95d21fSLukas Czerner if (err < 0) 28485f95d21fSLukas Czerner goto out; 2849f4226d9eSEric Whitney 28507bd75230SEric Whitney } else if (sbi->s_cluster_ratio > 1 && end >= ex_end && 28517bd75230SEric Whitney partial.state == initial) { 2852f4226d9eSEric Whitney /* 28537bd75230SEric Whitney * If we're punching, there's an extent to the right. 28547bd75230SEric Whitney * If the partial cluster hasn't been set, set it to 28557bd75230SEric Whitney * that extent's first cluster and its state to nofree 28567bd75230SEric Whitney * so it won't be freed should it contain blocks to be 28577bd75230SEric Whitney * removed. If it's already set (tofree/nofree), we're 28587bd75230SEric Whitney * retrying and keep the original partial cluster info 28597bd75230SEric Whitney * so a cluster marked tofree as a result of earlier 28607bd75230SEric Whitney * extent removal is not lost. 2861f4226d9eSEric Whitney */ 2862f4226d9eSEric Whitney lblk = ex_end + 1; 2863f4226d9eSEric Whitney err = ext4_ext_search_right(inode, path, &lblk, &pblk, 2864f4226d9eSEric Whitney &ex); 2865f4226d9eSEric Whitney if (err) 2866f4226d9eSEric Whitney goto out; 28679fe67149SEric Whitney if (pblk) { 28689fe67149SEric Whitney partial.pclu = EXT4_B2C(sbi, pblk); 28699fe67149SEric Whitney partial.state = nofree; 28709fe67149SEric Whitney } 28715f95d21fSLukas Czerner } 28725f95d21fSLukas Czerner } 28735f95d21fSLukas Czerner /* 2874d0d856e8SRandy Dunlap * We start scanning from right side, freeing all the blocks 2875d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise. 2876a86c6181SAlex Tomas */ 28770617b83fSDmitry Monakhov depth = ext_depth(inode); 2878968dee77SAshish Sangwan if (path) { 2879968dee77SAshish Sangwan int k = i = depth; 2880968dee77SAshish Sangwan while (--k > 0) 2881968dee77SAshish Sangwan path[k].p_block = 2882968dee77SAshish Sangwan le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2883968dee77SAshish Sangwan } else { 28846396bb22SKees Cook path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), 2885968dee77SAshish Sangwan GFP_NOFS); 2886a86c6181SAlex Tomas if (path == NULL) { 2887a86c6181SAlex Tomas ext4_journal_stop(handle); 2888a86c6181SAlex Tomas return -ENOMEM; 2889a86c6181SAlex Tomas } 289010809df8STheodore Ts'o path[0].p_maxdepth = path[0].p_depth = depth; 2891a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode); 289289a4e48fSTheodore Ts'o i = 0; 28935f95d21fSLukas Czerner 2894c349179bSTheodore Ts'o if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { 28956a797d27SDarrick J. 
Wong err = -EFSCORRUPTED; 2896a86c6181SAlex Tomas goto out; 2897a86c6181SAlex Tomas } 2898968dee77SAshish Sangwan } 2899968dee77SAshish Sangwan err = 0; 2900a86c6181SAlex Tomas 2901a86c6181SAlex Tomas while (i >= 0 && err == 0) { 2902a86c6181SAlex Tomas if (i == depth) { 2903a86c6181SAlex Tomas /* this is leaf block */ 2904d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 29059fe67149SEric Whitney &partial, start, end); 2906d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2907a86c6181SAlex Tomas brelse(path[i].p_bh); 2908a86c6181SAlex Tomas path[i].p_bh = NULL; 2909a86c6181SAlex Tomas i--; 2910a86c6181SAlex Tomas continue; 2911a86c6181SAlex Tomas } 2912a86c6181SAlex Tomas 2913a86c6181SAlex Tomas /* this is index block */ 2914a86c6181SAlex Tomas if (!path[i].p_hdr) { 2915a86c6181SAlex Tomas ext_debug("initialize header\n"); 2916a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2917a86c6181SAlex Tomas } 2918a86c6181SAlex Tomas 2919a86c6181SAlex Tomas if (!path[i].p_idx) { 2920d0d856e8SRandy Dunlap /* this level hasn't been touched yet */ 2921a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2922a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2923a86c6181SAlex Tomas ext_debug("init index ptr: hdr 0x%p, num %d\n", 2924a86c6181SAlex Tomas path[i].p_hdr, 2925a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries)); 2926a86c6181SAlex Tomas } else { 2927d0d856e8SRandy Dunlap /* we were already here, see at next index */ 2928a86c6181SAlex Tomas path[i].p_idx--; 2929a86c6181SAlex Tomas } 2930a86c6181SAlex Tomas 2931a86c6181SAlex Tomas ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2932a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr), 2933a86c6181SAlex Tomas path[i].p_idx); 2934a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) { 2935c29c0ae7SAlex Tomas struct buffer_head *bh; 2936a86c6181SAlex Tomas /* go to the next level */ 29372ae02107SMingming Cao ext_debug("move to level %d (block %llu)\n", 2938bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx)); 2939a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path)); 29407d7ea89eSTheodore Ts'o bh = read_extent_tree_block(inode, 2941107a7bd3STheodore Ts'o ext4_idx_pblock(path[i].p_idx), depth - i - 1, 2942107a7bd3STheodore Ts'o EXT4_EX_NOCACHE); 29437d7ea89eSTheodore Ts'o if (IS_ERR(bh)) { 2944a86c6181SAlex Tomas /* should we reset i_size? */ 29457d7ea89eSTheodore Ts'o err = PTR_ERR(bh); 2946a86c6181SAlex Tomas break; 2947a86c6181SAlex Tomas } 294876828c88STheodore Ts'o /* Yield here to deal with large extent trees. 294976828c88STheodore Ts'o * Should be a no-op if we did IO above. */ 295076828c88STheodore Ts'o cond_resched(); 2951c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) { 29526a797d27SDarrick J. 
Wong err = -EFSCORRUPTED; 2953c29c0ae7SAlex Tomas break; 2954c29c0ae7SAlex Tomas } 2955c29c0ae7SAlex Tomas path[i + 1].p_bh = bh; 2956a86c6181SAlex Tomas 2957d0d856e8SRandy Dunlap /* save actual number of indexes since this 2958d0d856e8SRandy Dunlap * number is changed at the next iteration */ 2959a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2960a86c6181SAlex Tomas i++; 2961a86c6181SAlex Tomas } else { 2962d0d856e8SRandy Dunlap /* we finished processing this index, go up */ 2963a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2964d0d856e8SRandy Dunlap /* index is empty, remove it; 2965a86c6181SAlex Tomas * handle must be already prepared by the 2966a86c6181SAlex Tomas * leaf truncation code */ 2967c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, i); 2968a86c6181SAlex Tomas } 2969d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2970a86c6181SAlex Tomas brelse(path[i].p_bh); 2971a86c6181SAlex Tomas path[i].p_bh = NULL; 2972a86c6181SAlex Tomas i--; 2973a86c6181SAlex Tomas ext_debug("return to level %d\n", i); 2974a86c6181SAlex Tomas } 2975a86c6181SAlex Tomas } 2976a86c6181SAlex Tomas 29779fe67149SEric Whitney trace_ext4_ext_remove_space_done(inode, start, end, depth, &partial, 29789fe67149SEric Whitney path->p_hdr->eh_entries); 2979d8990240SAditya Kali 29800756b908SEric Whitney /* 29819fe67149SEric Whitney * if there's a partial cluster and we have removed the first extent 29829fe67149SEric Whitney * in the file, then we also free the partial cluster, if any 29830756b908SEric Whitney */ 29849fe67149SEric Whitney if (partial.state == tofree && err == 0) { 29859fe67149SEric Whitney int flags = get_default_free_blocks_flags(inode); 29869fe67149SEric Whitney 29879fe67149SEric Whitney if (ext4_is_pending(inode, partial.lblk)) 29889fe67149SEric Whitney flags |= EXT4_FREE_BLOCKS_RERESERVE_CLUSTER; 29897b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL, 29909fe67149SEric Whitney EXT4_C2B(sbi, partial.pclu), 29919fe67149SEric Whitney sbi->s_cluster_ratio, flags); 29929fe67149SEric Whitney if (flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER) 29939fe67149SEric Whitney ext4_rereserve_cluster(inode, partial.lblk); 29949fe67149SEric Whitney partial.state = initial; 29957b415bf6SAditya Kali } 29967b415bf6SAditya Kali 2997a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */ 2998a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) { 2999a86c6181SAlex Tomas /* 3000d0d856e8SRandy Dunlap * truncating to zero freed the whole tree, 3001d0d856e8SRandy Dunlap * so we need to correct eh_depth 3002a86c6181SAlex Tomas */ 3003a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path); 3004a86c6181SAlex Tomas if (err == 0) { 3005a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0; 3006a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max = 300755ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0)); 3008a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path); 3009a86c6181SAlex Tomas } 3010a86c6181SAlex Tomas } 3011a86c6181SAlex Tomas out: 3012a86c6181SAlex Tomas ext4_ext_drop_refs(path); 3013a86c6181SAlex Tomas kfree(path); 3014968dee77SAshish Sangwan path = NULL; 3015dfe50809STheodore Ts'o if (err == -EAGAIN) 3016dfe50809STheodore Ts'o goto again; 3017a86c6181SAlex Tomas ext4_journal_stop(handle); 3018a86c6181SAlex Tomas 3019a86c6181SAlex Tomas return err; 3020a86c6181SAlex Tomas } 3021a86c6181SAlex Tomas 3022a86c6181SAlex Tomas /* 3023a86c6181SAlex Tomas * called at mount time 3024a86c6181SAlex
Tomas */ 3025a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb) 3026a86c6181SAlex Tomas { 3027a86c6181SAlex Tomas /* 3028a86c6181SAlex Tomas * possible initialization would be here 3029a86c6181SAlex Tomas */ 3030a86c6181SAlex Tomas 3031e2b911c5SDarrick J. Wong if (ext4_has_feature_extents(sb)) { 303290576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 303392b97816STheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled" 3034bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 303592b97816STheodore Ts'o ", aggressive tests" 3036a86c6181SAlex Tomas #endif 3037a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 303892b97816STheodore Ts'o ", check binsearch" 3039a86c6181SAlex Tomas #endif 3040a86c6181SAlex Tomas #ifdef EXTENTS_STATS 304192b97816STheodore Ts'o ", stats" 3042a86c6181SAlex Tomas #endif 304392b97816STheodore Ts'o "\n"); 304490576c0bSTheodore Ts'o #endif 3045a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3046a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3047a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30; 3048a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0; 3049a86c6181SAlex Tomas #endif 3050a86c6181SAlex Tomas } 3051a86c6181SAlex Tomas } 3052a86c6181SAlex Tomas 3053a86c6181SAlex Tomas /* 3054a86c6181SAlex Tomas * called at umount time 3055a86c6181SAlex Tomas */ 3056a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb) 3057a86c6181SAlex Tomas { 3058e2b911c5SDarrick J. Wong if (!ext4_has_feature_extents(sb)) 3059a86c6181SAlex Tomas return; 3060a86c6181SAlex Tomas 3061a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3062a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3063a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 3064a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3065a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents, 3066a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents); 3067a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 3068a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 3069a86c6181SAlex Tomas } 3070a86c6181SAlex Tomas #endif 3071a86c6181SAlex Tomas } 3072a86c6181SAlex Tomas 3073d7b2a00cSZheng Liu static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) 3074d7b2a00cSZheng Liu { 3075d7b2a00cSZheng Liu ext4_lblk_t ee_block; 3076d7b2a00cSZheng Liu ext4_fsblk_t ee_pblock; 3077d7b2a00cSZheng Liu unsigned int ee_len; 3078d7b2a00cSZheng Liu 3079d7b2a00cSZheng Liu ee_block = le32_to_cpu(ex->ee_block); 3080d7b2a00cSZheng Liu ee_len = ext4_ext_get_actual_len(ex); 3081d7b2a00cSZheng Liu ee_pblock = ext4_ext_pblock(ex); 3082d7b2a00cSZheng Liu 3083d7b2a00cSZheng Liu if (ee_len == 0) 3084d7b2a00cSZheng Liu return 0; 3085d7b2a00cSZheng Liu 3086d7b2a00cSZheng Liu return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, 3087d7b2a00cSZheng Liu EXTENT_STATUS_WRITTEN); 3088d7b2a00cSZheng Liu } 3089d7b2a00cSZheng Liu 3090093a088bSAneesh Kumar K.V /* FIXME!! 
we need to try to merge to left or right after zero-out */ 3091093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 3092093a088bSAneesh Kumar K.V { 30932407518dSLukas Czerner ext4_fsblk_t ee_pblock; 30942407518dSLukas Czerner unsigned int ee_len; 3095093a088bSAneesh Kumar K.V 3096093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 3097bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex); 309853085facSJan Kara return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock, 309953085facSJan Kara ee_len); 3100093a088bSAneesh Kumar K.V } 3101093a088bSAneesh Kumar K.V 310247ea3bb5SYongqiang Yang /* 310347ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at given block. 310447ea3bb5SYongqiang Yang * 310547ea3bb5SYongqiang Yang * @handle: the journal handle 310647ea3bb5SYongqiang Yang * @inode: the file inode 310747ea3bb5SYongqiang Yang * @path: the path to the extent 310847ea3bb5SYongqiang Yang * @split: the logical block where the extent is split. 310947ea3bb5SYongqiang Yang * @split_flags: indicates if the extent could be zeroed out if the split fails, and 3110556615dcSLukas Czerner * the states (init or unwritten) of the new extents. 311147ea3bb5SYongqiang Yang * @flags: flags used to insert the new extent into the extent tree. 311247ea3bb5SYongqiang Yang * 311347ea3bb5SYongqiang Yang * 311447ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], states 311547ea3bb5SYongqiang Yang * of which are determined by split_flag. 311647ea3bb5SYongqiang Yang * 311747ea3bb5SYongqiang Yang * There are two cases: 311847ea3bb5SYongqiang Yang * a> the extent is split into two extents. 311947ea3bb5SYongqiang Yang * b> no split is needed, and we just mark the extent. 312047ea3bb5SYongqiang Yang * 312147ea3bb5SYongqiang Yang * return 0 on success.
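* * For example, splitting an extent covering logical blocks [100, 200) at @split == 150 shrinks the original to [100, 150) and inserts a new extent for [150, 200) at the matching physical offset; if @split == 100, no new extent is needed and only the state of the existing one changes (case b).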
312247ea3bb5SYongqiang Yang */ 312347ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle, 312447ea3bb5SYongqiang Yang struct inode *inode, 3125dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 312647ea3bb5SYongqiang Yang ext4_lblk_t split, 312747ea3bb5SYongqiang Yang int split_flag, 312847ea3bb5SYongqiang Yang int flags) 312947ea3bb5SYongqiang Yang { 3130dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 313147ea3bb5SYongqiang Yang ext4_fsblk_t newblock; 313247ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 3133adb23551SZheng Liu struct ext4_extent *ex, newex, orig_ex, zero_ex; 313447ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL; 313547ea3bb5SYongqiang Yang unsigned int ee_len, depth; 313647ea3bb5SYongqiang Yang int err = 0; 313747ea3bb5SYongqiang Yang 3138dee1f973SDmitry Monakhov BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 3139dee1f973SDmitry Monakhov (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 3140dee1f973SDmitry Monakhov 314147ea3bb5SYongqiang Yang ext_debug("ext4_split_extents_at: inode %lu, logical" 314247ea3bb5SYongqiang Yang "block %llu\n", inode->i_ino, (unsigned long long)split); 314347ea3bb5SYongqiang Yang 314447ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 314547ea3bb5SYongqiang Yang 314647ea3bb5SYongqiang Yang depth = ext_depth(inode); 314747ea3bb5SYongqiang Yang ex = path[depth].p_ext; 314847ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 314947ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 315047ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex); 315147ea3bb5SYongqiang Yang 315247ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3153556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex) && 3154357b66fdSDmitry Monakhov split_flag & (EXT4_EXT_MAY_ZEROOUT | 3155556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT1 | 3156556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2)); 315747ea3bb5SYongqiang Yang 315847ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth); 315947ea3bb5SYongqiang Yang if (err) 316047ea3bb5SYongqiang Yang goto out; 316147ea3bb5SYongqiang Yang 316247ea3bb5SYongqiang Yang if (split == ee_block) { 316347ea3bb5SYongqiang Yang /* 316447ea3bb5SYongqiang Yang * case b: block @split is the block that the extent begins with 316547ea3bb5SYongqiang Yang * then we just change the state of the extent, and splitting 316647ea3bb5SYongqiang Yang * is not needed. 
316747ea3bb5SYongqiang Yang */ 3168556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3169556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 317047ea3bb5SYongqiang Yang else 317147ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex); 317247ea3bb5SYongqiang Yang 317347ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3174ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 317547ea3bb5SYongqiang Yang 3176ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 317747ea3bb5SYongqiang Yang goto out; 317847ea3bb5SYongqiang Yang } 317947ea3bb5SYongqiang Yang 318047ea3bb5SYongqiang Yang /* case a */ 318147ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex)); 318247ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block); 3183556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT1) 3184556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 318547ea3bb5SYongqiang Yang 318647ea3bb5SYongqiang Yang /* 318747ea3bb5SYongqiang Yang * path may lead to new leaf, not to original leaf any more 318847ea3bb5SYongqiang Yang * after ext4_ext_insert_extent() returns, 318947ea3bb5SYongqiang Yang */ 319047ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 319147ea3bb5SYongqiang Yang if (err) 319247ea3bb5SYongqiang Yang goto fix_extent_len; 319347ea3bb5SYongqiang Yang 319447ea3bb5SYongqiang Yang ex2 = &newex; 319547ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split); 319647ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 319747ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock); 3198556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3199556615dcSLukas Czerner ext4_ext_mark_unwritten(ex2); 320047ea3bb5SYongqiang Yang 3201dfe50809STheodore Ts'o err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); 320247ea3bb5SYongqiang Yang if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3203dee1f973SDmitry Monakhov if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3204adb23551SZheng Liu if (split_flag & EXT4_EXT_DATA_VALID1) { 3205dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex2); 3206adb23551SZheng Liu zero_ex.ee_block = ex2->ee_block; 32078cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32088cde7ad1SZheng Liu ext4_ext_get_actual_len(ex2)); 3209adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3210adb23551SZheng Liu ext4_ext_pblock(ex2)); 3211adb23551SZheng Liu } else { 3212dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex); 3213adb23551SZheng Liu zero_ex.ee_block = ex->ee_block; 32148cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32158cde7ad1SZheng Liu ext4_ext_get_actual_len(ex)); 3216adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3217adb23551SZheng Liu ext4_ext_pblock(ex)); 3218adb23551SZheng Liu } 3219adb23551SZheng Liu } else { 322047ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex); 3221adb23551SZheng Liu zero_ex.ee_block = orig_ex.ee_block; 32228cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32238cde7ad1SZheng Liu ext4_ext_get_actual_len(&orig_ex)); 3224adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3225adb23551SZheng Liu ext4_ext_pblock(&orig_ex)); 3226adb23551SZheng Liu } 3227dee1f973SDmitry Monakhov 322847ea3bb5SYongqiang Yang if (err) 322947ea3bb5SYongqiang Yang goto fix_extent_len; 323047ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */ 3231af1584f5SAl Viro ex->ee_len = cpu_to_le16(ee_len); 3232ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, 
path, ex); 3233ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3234adb23551SZheng Liu if (err) 3235adb23551SZheng Liu goto fix_extent_len; 3236adb23551SZheng Liu 3237adb23551SZheng Liu /* update extent status tree */ 3238d7b2a00cSZheng Liu err = ext4_zeroout_es(inode, &zero_ex); 3239adb23551SZheng Liu 324047ea3bb5SYongqiang Yang goto out; 324147ea3bb5SYongqiang Yang } else if (err) 324247ea3bb5SYongqiang Yang goto fix_extent_len; 324347ea3bb5SYongqiang Yang 324447ea3bb5SYongqiang Yang out: 324547ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 324647ea3bb5SYongqiang Yang return err; 324747ea3bb5SYongqiang Yang 324847ea3bb5SYongqiang Yang fix_extent_len: 324947ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len; 325029faed16SDmitry Monakhov ext4_ext_dirty(handle, inode, path + path->p_depth); 325147ea3bb5SYongqiang Yang return err; 325247ea3bb5SYongqiang Yang } 325347ea3bb5SYongqiang Yang 325447ea3bb5SYongqiang Yang /* 325547ea3bb5SYongqiang Yang * ext4_split_extent() splits an extent and marks the extent which is covered 325647ea3bb5SYongqiang Yang * by @map as split_flag indicates 325747ea3bb5SYongqiang Yang * 325847ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (up to three) 325947ea3bb5SYongqiang Yang * There are three possibilities: 326047ea3bb5SYongqiang Yang * a> There is no split required 326147ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the extent 326247ea3bb5SYongqiang Yang * c> Splits in three extents: Someone is splitting in the middle of the extent 326347ea3bb5SYongqiang Yang * 326447ea3bb5SYongqiang Yang */ 326547ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle, 326647ea3bb5SYongqiang Yang struct inode *inode, 3267dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 326847ea3bb5SYongqiang Yang struct ext4_map_blocks *map, 326947ea3bb5SYongqiang Yang int split_flag, 327047ea3bb5SYongqiang Yang int flags) 327147ea3bb5SYongqiang Yang { 3272dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 327347ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 327447ea3bb5SYongqiang Yang struct ext4_extent *ex; 327547ea3bb5SYongqiang Yang unsigned int ee_len, depth; 327647ea3bb5SYongqiang Yang int err = 0; 3277556615dcSLukas Czerner int unwritten; 327847ea3bb5SYongqiang Yang int split_flag1, flags1; 32793a225670SZheng Liu int allocated = map->m_len; 328047ea3bb5SYongqiang Yang 328147ea3bb5SYongqiang Yang depth = ext_depth(inode); 328247ea3bb5SYongqiang Yang ex = path[depth].p_ext; 328347ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 328447ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 3285556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 328647ea3bb5SYongqiang Yang 328747ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) { 3288dee1f973SDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 328947ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3290556615dcSLukas Czerner if (unwritten) 3291556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1 | 3292556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2; 3293dee1f973SDmitry Monakhov if (split_flag & EXT4_EXT_DATA_VALID2) 3294dee1f973SDmitry Monakhov split_flag1 |= EXT4_EXT_DATA_VALID1; 3295dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath, 329647ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1); 329793917411SYongqiang Yang if (err) 329893917411SYongqiang Yang goto out; 32993a225670SZheng Liu } else {
33003a225670SZheng Liu allocated = ee_len - (map->m_lblk - ee_block); 330147ea3bb5SYongqiang Yang } 3302357b66fdSDmitry Monakhov /* 3303357b66fdSDmitry Monakhov * Updating the path is required because the previous ext4_split_extent_at() may 3304357b66fdSDmitry Monakhov * have resulted in a split of the original leaf or an extent zeroout. 3305357b66fdSDmitry Monakhov */ 3306ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 330747ea3bb5SYongqiang Yang if (IS_ERR(path)) 330847ea3bb5SYongqiang Yang return PTR_ERR(path); 3309357b66fdSDmitry Monakhov depth = ext_depth(inode); 3310357b66fdSDmitry Monakhov ex = path[depth].p_ext; 3311a18ed359SDmitry Monakhov if (!ex) { 3312a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3313a18ed359SDmitry Monakhov (unsigned long) map->m_lblk); 33146a797d27SDarrick J. Wong return -EFSCORRUPTED; 3315a18ed359SDmitry Monakhov } 3316556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 3317357b66fdSDmitry Monakhov split_flag1 = 0; 331847ea3bb5SYongqiang Yang 331947ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) { 3320357b66fdSDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3321556615dcSLukas Czerner if (unwritten) { 3322556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1; 3323357b66fdSDmitry Monakhov split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3324556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2); 3325357b66fdSDmitry Monakhov } 3326dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath, 332747ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags); 332847ea3bb5SYongqiang Yang if (err) 332947ea3bb5SYongqiang Yang goto out; 333047ea3bb5SYongqiang Yang } 333147ea3bb5SYongqiang Yang 333247ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 333347ea3bb5SYongqiang Yang out: 33343a225670SZheng Liu return err ? err : allocated; 333547ea3bb5SYongqiang Yang } 333647ea3bb5SYongqiang Yang 333756055d3aSAmit Arora /* 3338e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write 3339556615dcSLukas Czerner * to an unwritten extent. It may result in splitting the unwritten 334056055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two 3341556615dcSLukas Czerner * unwritten). 334256055d3aSAmit Arora * There are three possibilities: 334356055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized 334456055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent 334556055d3aSAmit Arora * c> Splits in three extents: Someone is writing in the middle of the extent 33466f91bc5fSEric Gouriou * 33476f91bc5fSEric Gouriou * Pre-conditions: 3348556615dcSLukas Czerner * - The extent pointed to by 'path' is unwritten. 33496f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset 33506f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 33516f91bc5fSEric Gouriou * 33526f91bc5fSEric Gouriou * Post-conditions on success: 33536f91bc5fSEric Gouriou * - the returned value is the number of blocks beyond map->m_lblk 33546f91bc5fSEric Gouriou * that are allocated and initialized. 33556f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len.
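* For example, if an unwritten extent covers blocks [8, 16) and the map requests [10, 14), converting the whole extent (case a) returns 6: blocks 10..15 beyond map->m_lblk are now initialized, which is >= the requested map->m_len of 4.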
335656055d3aSAmit Arora */ 3357725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle, 3358725d26d3SAneesh Kumar K.V struct inode *inode, 3359e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3360dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 336127dd4385SLukas Czerner int flags) 336256055d3aSAmit Arora { 3363dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 336467a5da56SZheng Liu struct ext4_sb_info *sbi; 33656f91bc5fSEric Gouriou struct ext4_extent_header *eh; 3366667eff35SYongqiang Yang struct ext4_map_blocks split_map; 33674f8caa60SJan Kara struct ext4_extent zero_ex1, zero_ex2; 3368bc2d9db4SLukas Czerner struct ext4_extent *ex, *abut_ex; 336921ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block; 3370bc2d9db4SLukas Czerner unsigned int ee_len, depth, map_len = map->m_len; 3371bc2d9db4SLukas Czerner int allocated = 0, max_zeroout = 0; 337256055d3aSAmit Arora int err = 0; 33734f8caa60SJan Kara int split_flag = EXT4_EXT_DATA_VALID2; 337421ca087aSDmitry Monakhov 337521ca087aSDmitry Monakhov ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 337621ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3377bc2d9db4SLukas Czerner (unsigned long long)map->m_lblk, map_len); 337821ca087aSDmitry Monakhov 337967a5da56SZheng Liu sbi = EXT4_SB(inode->i_sb); 338021ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 338121ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3382bc2d9db4SLukas Czerner if (eof_block < map->m_lblk + map_len) 3383bc2d9db4SLukas Czerner eof_block = map->m_lblk + map_len; 338456055d3aSAmit Arora 338556055d3aSAmit Arora depth = ext_depth(inode); 33866f91bc5fSEric Gouriou eh = path[depth].p_hdr; 338756055d3aSAmit Arora ex = path[depth].p_ext; 338856055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block); 338956055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex); 33904f8caa60SJan Kara zero_ex1.ee_len = 0; 33914f8caa60SJan Kara zero_ex2.ee_len = 0; 339221ca087aSDmitry Monakhov 33936f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 33946f91bc5fSEric Gouriou 33956f91bc5fSEric Gouriou /* Pre-conditions */ 3396556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex)); 33976f91bc5fSEric Gouriou BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 33986f91bc5fSEric Gouriou 33996f91bc5fSEric Gouriou /* 34006f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently 3401556615dcSLukas Czerner * unwritten extent to its neighbor. This is much cheaper 34026f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly 3403bc2d9db4SLukas Czerner * memmove() calls. Transferring to the left is the common case in 3404bc2d9db4SLukas Czerner * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3405bc2d9db4SLukas Czerner * followed by append writes. 34066f91bc5fSEric Gouriou * 34076f91bc5fSEric Gouriou * Limitations of the current logic: 3408bc2d9db4SLukas Czerner * - L1: we do not deal with writes covering the whole extent. 34096f91bc5fSEric Gouriou * This would require removing the extent if the transfer 34106f91bc5fSEric Gouriou * is possible. 3411bc2d9db4SLukas Czerner * - L2: we only attempt to merge with an extent stored in the 34126f91bc5fSEric Gouriou * same extent tree node. 
34136f91bc5fSEric Gouriou */ 3414bc2d9db4SLukas Czerner if ((map->m_lblk == ee_block) && 3415bc2d9db4SLukas Czerner /* See if we can merge left */ 3416bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3417bc2d9db4SLukas Czerner (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 34186f91bc5fSEric Gouriou ext4_lblk_t prev_lblk; 34196f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk; 3420bc2d9db4SLukas Czerner unsigned int prev_len; 34216f91bc5fSEric Gouriou 3422bc2d9db4SLukas Czerner abut_ex = ex - 1; 3423bc2d9db4SLukas Czerner prev_lblk = le32_to_cpu(abut_ex->ee_block); 3424bc2d9db4SLukas Czerner prev_len = ext4_ext_get_actual_len(abut_ex); 3425bc2d9db4SLukas Czerner prev_pblk = ext4_ext_pblock(abut_ex); 34266f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex); 34276f91bc5fSEric Gouriou 34286f91bc5fSEric Gouriou /* 3429bc2d9db4SLukas Czerner * A transfer of blocks from 'ex' to 'abut_ex' is allowed 34306f91bc5fSEric Gouriou * upon those conditions: 3431bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3432bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3433bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3434bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 34356f91bc5fSEric Gouriou * overflowing the (initialized) length limit. 34366f91bc5fSEric Gouriou */ 3437556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 34386f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/ 34396f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3440bc2d9db4SLukas Czerner (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 34416f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth); 34426f91bc5fSEric Gouriou if (err) 34436f91bc5fSEric Gouriou goto out; 34446f91bc5fSEric Gouriou 34456f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode, 3446bc2d9db4SLukas Czerner map, ex, abut_ex); 34476f91bc5fSEric Gouriou 3448bc2d9db4SLukas Czerner /* Shift the start of ex by 'map_len' blocks */ 3449bc2d9db4SLukas Czerner ex->ee_block = cpu_to_le32(ee_block + map_len); 3450bc2d9db4SLukas Czerner ext4_ext_store_pblock(ex, ee_pblk + map_len); 3451bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3452556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */ 34536f91bc5fSEric Gouriou 3454bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3455bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 34566f91bc5fSEric Gouriou 3457bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3458bc2d9db4SLukas Czerner allocated = map_len; 3459bc2d9db4SLukas Czerner } 3460bc2d9db4SLukas Czerner } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3461bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3462bc2d9db4SLukas Czerner ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3463bc2d9db4SLukas Czerner /* See if we can merge right */ 3464bc2d9db4SLukas Czerner ext4_lblk_t next_lblk; 3465bc2d9db4SLukas Czerner ext4_fsblk_t next_pblk, ee_pblk; 3466bc2d9db4SLukas Czerner unsigned int next_len; 3467bc2d9db4SLukas Czerner 3468bc2d9db4SLukas Czerner abut_ex = ex + 1; 3469bc2d9db4SLukas Czerner next_lblk = le32_to_cpu(abut_ex->ee_block); 3470bc2d9db4SLukas Czerner next_len = ext4_ext_get_actual_len(abut_ex); 3471bc2d9db4SLukas Czerner next_pblk = ext4_ext_pblock(abut_ex); 3472bc2d9db4SLukas Czerner ee_pblk = ext4_ext_pblock(ex); 3473bc2d9db4SLukas Czerner 3474bc2d9db4SLukas Czerner /* 3475bc2d9db4SLukas Czerner * A 
transfer of blocks from 'ex' to 'abut_ex' is allowed 3476bc2d9db4SLukas Czerner * upon those conditions: 3477bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3478bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3479bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3480bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 3481bc2d9db4SLukas Czerner * overflowing the (initialized) length limit. 3482bc2d9db4SLukas Czerner */ 3483556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3484bc2d9db4SLukas Czerner ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3485bc2d9db4SLukas Czerner ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3486bc2d9db4SLukas Czerner (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3487bc2d9db4SLukas Czerner err = ext4_ext_get_access(handle, inode, path + depth); 3488bc2d9db4SLukas Czerner if (err) 3489bc2d9db4SLukas Czerner goto out; 3490bc2d9db4SLukas Czerner 3491bc2d9db4SLukas Czerner trace_ext4_ext_convert_to_initialized_fastpath(inode, 3492bc2d9db4SLukas Czerner map, ex, abut_ex); 3493bc2d9db4SLukas Czerner 3494bc2d9db4SLukas Czerner /* Shift the start of abut_ex by 'map_len' blocks */ 3495bc2d9db4SLukas Czerner abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3496bc2d9db4SLukas Czerner ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3497bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3498556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3499bc2d9db4SLukas Czerner 3500bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3501bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3502bc2d9db4SLukas Czerner 3503bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3504bc2d9db4SLukas Czerner allocated = map_len; 3505bc2d9db4SLukas Czerner } 3506bc2d9db4SLukas Czerner } 3507bc2d9db4SLukas Czerner if (allocated) { 35086f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */ 35096f91bc5fSEric Gouriou ext4_ext_dirty(handle, inode, path + depth); 35106f91bc5fSEric Gouriou 35116f91bc5fSEric Gouriou /* Update path to point to the right extent */ 3512bc2d9db4SLukas Czerner path[depth].p_ext = abut_ex; 35136f91bc5fSEric Gouriou goto out; 3514bc2d9db4SLukas Czerner } else 3515bc2d9db4SLukas Czerner allocated = ee_len - (map->m_lblk - ee_block); 35166f91bc5fSEric Gouriou 3517667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block); 351821ca087aSDmitry Monakhov /* 351921ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 35209e740568SYongqiang Yang * zeroout only if extent is fully inside i_size or new_size. 352121ca087aSDmitry Monakhov */ 3522667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 352321ca087aSDmitry Monakhov 352467a5da56SZheng Liu if (EXT4_EXT_MAY_ZEROOUT & split_flag) 352567a5da56SZheng Liu max_zeroout = sbi->s_extent_max_zeroout_kb >> 35264f42f80aSLukas Czerner (inode->i_sb->s_blocksize_bits - 10); 352767a5da56SZheng Liu 3528667eff35SYongqiang Yang /* 35294f8caa60SJan Kara * five cases: 3530667eff35SYongqiang Yang * 1. split the extent into three extents. 35314f8caa60SJan Kara * 2. split the extent into two extents, zeroout the head of the first 35324f8caa60SJan Kara * extent. 35334f8caa60SJan Kara * 3. split the extent into two extents, zeroout the tail of the second 35344f8caa60SJan Kara * extent. 3535667eff35SYongqiang Yang * 4. split the extent into two extents with out zeroout. 
35364f8caa60SJan Kara * 5. no splitting needed, just possibly zeroout the head and / or the 35374f8caa60SJan Kara * tail of the extent. 3538667eff35SYongqiang Yang */ 3539667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3540667eff35SYongqiang Yang split_map.m_len = map->m_len; 3541667eff35SYongqiang Yang 35424f8caa60SJan Kara if (max_zeroout && (allocated > split_map.m_len)) { 354367a5da56SZheng Liu if (allocated <= max_zeroout) { 35444f8caa60SJan Kara /* case 3 or 5 */ 35454f8caa60SJan Kara zero_ex1.ee_block = 35464f8caa60SJan Kara cpu_to_le32(split_map.m_lblk + 35474f8caa60SJan Kara split_map.m_len); 35484f8caa60SJan Kara zero_ex1.ee_len = 35494f8caa60SJan Kara cpu_to_le16(allocated - split_map.m_len); 35504f8caa60SJan Kara ext4_ext_store_pblock(&zero_ex1, 35514f8caa60SJan Kara ext4_ext_pblock(ex) + split_map.m_lblk + 35524f8caa60SJan Kara split_map.m_len - ee_block); 35534f8caa60SJan Kara err = ext4_ext_zeroout(inode, &zero_ex1); 3554667eff35SYongqiang Yang if (err) 3555667eff35SYongqiang Yang goto out; 3556667eff35SYongqiang Yang split_map.m_len = allocated; 35574f8caa60SJan Kara } 35584f8caa60SJan Kara if (split_map.m_lblk - ee_block + split_map.m_len < 35594f8caa60SJan Kara max_zeroout) { 35604f8caa60SJan Kara /* case 2 or 5 */ 35614f8caa60SJan Kara if (split_map.m_lblk != ee_block) { 35624f8caa60SJan Kara zero_ex2.ee_block = ex->ee_block; 35634f8caa60SJan Kara zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk - 3564667eff35SYongqiang Yang ee_block); 35654f8caa60SJan Kara ext4_ext_store_pblock(&zero_ex2, 3566667eff35SYongqiang Yang ext4_ext_pblock(ex)); 35674f8caa60SJan Kara err = ext4_ext_zeroout(inode, &zero_ex2); 3568667eff35SYongqiang Yang if (err) 3569667eff35SYongqiang Yang goto out; 3570667eff35SYongqiang Yang } 3571667eff35SYongqiang Yang 35724f8caa60SJan Kara split_map.m_len += split_map.m_lblk - ee_block; 3573667eff35SYongqiang Yang split_map.m_lblk = ee_block; 35749b940f8eSAllison Henderson allocated = map->m_len; 3575667eff35SYongqiang Yang } 3576667eff35SYongqiang Yang } 3577667eff35SYongqiang Yang 3578ae9e9c6aSJan Kara err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag, 3579ae9e9c6aSJan Kara flags); 3580ae9e9c6aSJan Kara if (err > 0) 3581ae9e9c6aSJan Kara err = 0; 3582667eff35SYongqiang Yang out: 3583adb23551SZheng Liu /* If we have gotten a failure, don't zero out status tree */ 35844f8caa60SJan Kara if (!err) { 35854f8caa60SJan Kara err = ext4_zeroout_es(inode, &zero_ex1); 3586adb23551SZheng Liu if (!err) 35874f8caa60SJan Kara err = ext4_zeroout_es(inode, &zero_ex2); 35884f8caa60SJan Kara } 3589667eff35SYongqiang Yang return err ? err : allocated; 359056055d3aSAmit Arora } 359156055d3aSAmit Arora 3592c278bfecSAneesh Kumar K.V /* 3593e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() from 35940031462bSMingming Cao * ext4_get_blocks_dio_write() when DIO to write 3595556615dcSLukas Czerner * to an unwritten extent. 
35960031462bSMingming Cao  *
3597556615dcSLukas Czerner  * Writing to an unwritten extent may result in splitting the unwritten
3598556615dcSLukas Czerner  * extent into multiple initialized/unwritten extents (up to three).
35990031462bSMingming Cao  * There are three possibilities:
3600556615dcSLukas Czerner  *   a> There is no split required: Entire extent should be unwritten
36010031462bSMingming Cao  *   b> Splits in two extents: Write is happening at either end of the extent
36020031462bSMingming Cao  *   c> Splits in three extents: Someone is writing in the middle of the extent
36030031462bSMingming Cao  *
3604b8a86845SLukas Czerner  * This works the same way in the case of initialized -> unwritten conversion.
3605b8a86845SLukas Czerner  *
36060031462bSMingming Cao  * One or more index blocks may be needed if the extent tree grows after
3607556615dcSLukas Czerner  * the unwritten extent is split. To prevent ENOSPC from occurring at IO
3608556615dcSLukas Czerner  * completion, we need to split the unwritten extent before the DIO is
3609556615dcSLukas Czerner  * submitted. The unwritten extent is split here into (at most) three
3610556615dcSLukas Czerner  * unwritten extents. After the IO completes, the part
36110031462bSMingming Cao  * being filled is converted to initialized by the end_io callback function
36120031462bSMingming Cao  * via ext4_convert_unwritten_extents().
3613ba230c3fSMingming  *
3614556615dcSLukas Czerner  * Returns the size of the unwritten extent to be written on success.
36150031462bSMingming Cao  */
3616b8a86845SLukas Czerner static int ext4_split_convert_extents(handle_t *handle,
36170031462bSMingming Cao 					struct inode *inode,
3618e35fd660STheodore Ts'o 					struct ext4_map_blocks *map,
3619dfe50809STheodore Ts'o 					struct ext4_ext_path **ppath,
36200031462bSMingming Cao 					int flags)
36210031462bSMingming Cao {
3622dfe50809STheodore Ts'o 	struct ext4_ext_path *path = *ppath;
3623667eff35SYongqiang Yang 	ext4_lblk_t eof_block;
3624667eff35SYongqiang Yang 	ext4_lblk_t ee_block;
3625667eff35SYongqiang Yang 	struct ext4_extent *ex;
3626667eff35SYongqiang Yang 	unsigned int ee_len;
3627667eff35SYongqiang Yang 	int split_flag = 0, depth;
36280031462bSMingming Cao 
3629b8a86845SLukas Czerner 	ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
3630b8a86845SLukas Czerner 		  __func__, inode->i_ino,
3631e35fd660STheodore Ts'o 		  (unsigned long long)map->m_lblk, map->m_len);
363221ca087aSDmitry Monakhov 
363321ca087aSDmitry Monakhov 	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
363421ca087aSDmitry Monakhov 			inode->i_sb->s_blocksize_bits;
3635e35fd660STheodore Ts'o 	if (eof_block < map->m_lblk + map->m_len)
3636e35fd660STheodore Ts'o 		eof_block = map->m_lblk + map->m_len;
36370031462bSMingming Cao 	/*
363821ca087aSDmitry Monakhov 	 * It is safe to convert an extent to initialized via explicit
363921ca087aSDmitry Monakhov 	 * zeroout only if the extent is fully inside i_size or new_size.
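	 *
	 * For example, with 4KiB blocks (s_blocksize_bits == 12) and
	 * i_size == 10000, eof_block starts as (10000 + 4095) >> 12 == 3;
	 * a request covering blocks 2..5 then raises it to 6, and only an
	 * extent lying entirely below eof_block is considered safe to zero
	 * out if the split fails.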
364021ca087aSDmitry Monakhov */ 3641667eff35SYongqiang Yang depth = ext_depth(inode); 36420031462bSMingming Cao ex = path[depth].p_ext; 3643667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 3644667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 36450031462bSMingming Cao 3646b8a86845SLukas Czerner /* Convert to unwritten */ 3647b8a86845SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) { 3648b8a86845SLukas Czerner split_flag |= EXT4_EXT_DATA_VALID1; 3649b8a86845SLukas Czerner /* Convert to initialized */ 3650b8a86845SLukas Czerner } else if (flags & EXT4_GET_BLOCKS_CONVERT) { 3651b8a86845SLukas Czerner split_flag |= ee_block + ee_len <= eof_block ? 3652b8a86845SLukas Czerner EXT4_EXT_MAY_ZEROOUT : 0; 3653556615dcSLukas Czerner split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); 3654b8a86845SLukas Czerner } 3655667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO; 3656dfe50809STheodore Ts'o return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); 36570031462bSMingming Cao } 3658197217a5SYongqiang Yang 3659c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle, 36600031462bSMingming Cao struct inode *inode, 3661dee1f973SDmitry Monakhov struct ext4_map_blocks *map, 3662dfe50809STheodore Ts'o struct ext4_ext_path **ppath) 36630031462bSMingming Cao { 3664dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 36650031462bSMingming Cao struct ext4_extent *ex; 3666dee1f973SDmitry Monakhov ext4_lblk_t ee_block; 3667dee1f973SDmitry Monakhov unsigned int ee_len; 36680031462bSMingming Cao int depth; 36690031462bSMingming Cao int err = 0; 36700031462bSMingming Cao 36710031462bSMingming Cao depth = ext_depth(inode); 36720031462bSMingming Cao ex = path[depth].p_ext; 3673dee1f973SDmitry Monakhov ee_block = le32_to_cpu(ex->ee_block); 3674dee1f973SDmitry Monakhov ee_len = ext4_ext_get_actual_len(ex); 36750031462bSMingming Cao 3676197217a5SYongqiang Yang ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3677197217a5SYongqiang Yang "block %llu, max_blocks %u\n", inode->i_ino, 3678dee1f973SDmitry Monakhov (unsigned long long)ee_block, ee_len); 3679dee1f973SDmitry Monakhov 3680ff95ec22SDmitry Monakhov /* If extent is larger than requested it is a clear sign that we still 3681ff95ec22SDmitry Monakhov * have some extent state machine issues left. So extent_split is still 3682ff95ec22SDmitry Monakhov * required. 3683ff95ec22SDmitry Monakhov * TODO: Once all related issues will be fixed this situation should be 3684ff95ec22SDmitry Monakhov * illegal. 
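 *
 * Example (illustrative): the end_io path may ask to convert blocks
 * 200..207 (map->m_len == 8) while the on-disk extent still covers
 * 200..215; in that case ext4_split_convert_extents() is called first so
 * that only the eight blocks that were actually written get marked
 * initialized.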
3685ff95ec22SDmitry Monakhov */ 3686dee1f973SDmitry Monakhov if (ee_block != map->m_lblk || ee_len > map->m_len) { 3687e3d550c2SRakesh Pandit #ifdef CONFIG_EXT4_DEBUG 3688e3d550c2SRakesh Pandit ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu," 36898d2ae1cbSJakub Wilk " len %u; IO logical block %llu, len %u", 3690ff95ec22SDmitry Monakhov inode->i_ino, (unsigned long long)ee_block, ee_len, 3691ff95ec22SDmitry Monakhov (unsigned long long)map->m_lblk, map->m_len); 3692ff95ec22SDmitry Monakhov #endif 3693dfe50809STheodore Ts'o err = ext4_split_convert_extents(handle, inode, map, ppath, 3694dee1f973SDmitry Monakhov EXT4_GET_BLOCKS_CONVERT); 3695dee1f973SDmitry Monakhov if (err < 0) 3696dfe50809STheodore Ts'o return err; 3697ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3698dfe50809STheodore Ts'o if (IS_ERR(path)) 3699dfe50809STheodore Ts'o return PTR_ERR(path); 3700dee1f973SDmitry Monakhov depth = ext_depth(inode); 3701dee1f973SDmitry Monakhov ex = path[depth].p_ext; 3702dee1f973SDmitry Monakhov } 3703197217a5SYongqiang Yang 37040031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth); 37050031462bSMingming Cao if (err) 37060031462bSMingming Cao goto out; 37070031462bSMingming Cao /* first mark the extent as initialized */ 37080031462bSMingming Cao ext4_ext_mark_initialized(ex); 37090031462bSMingming Cao 3710197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because 3711197217a5SYongqiang Yang * borders are not changed 37120031462bSMingming Cao */ 3713ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3714197217a5SYongqiang Yang 37150031462bSMingming Cao /* Mark modified extent as dirty */ 3716ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 37170031462bSMingming Cao out: 37180031462bSMingming Cao ext4_ext_show_leaf(inode, path); 37190031462bSMingming Cao return err; 37200031462bSMingming Cao } 37210031462bSMingming Cao 37220031462bSMingming Cao static int 3723e8b83d93STheodore Ts'o convert_initialized_extent(handle_t *handle, struct inode *inode, 3724b8a86845SLukas Czerner struct ext4_map_blocks *map, 372529c6eaffSEric Whitney struct ext4_ext_path **ppath, 3726f064a9d6SEric Whitney unsigned int *allocated) 3727b8a86845SLukas Czerner { 37284f224b8bSTheodore Ts'o struct ext4_ext_path *path = *ppath; 3729e8b83d93STheodore Ts'o struct ext4_extent *ex; 3730e8b83d93STheodore Ts'o ext4_lblk_t ee_block; 3731e8b83d93STheodore Ts'o unsigned int ee_len; 3732e8b83d93STheodore Ts'o int depth; 3733b8a86845SLukas Czerner int err = 0; 3734b8a86845SLukas Czerner 3735b8a86845SLukas Czerner /* 3736b8a86845SLukas Czerner * Make sure that the extent is no bigger than we support with 3737556615dcSLukas Czerner * unwritten extent 3738b8a86845SLukas Czerner */ 3739556615dcSLukas Czerner if (map->m_len > EXT_UNWRITTEN_MAX_LEN) 3740556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; 3741b8a86845SLukas Czerner 3742e8b83d93STheodore Ts'o depth = ext_depth(inode); 3743e8b83d93STheodore Ts'o ex = path[depth].p_ext; 3744e8b83d93STheodore Ts'o ee_block = le32_to_cpu(ex->ee_block); 3745e8b83d93STheodore Ts'o ee_len = ext4_ext_get_actual_len(ex); 3746e8b83d93STheodore Ts'o 3747e8b83d93STheodore Ts'o ext_debug("%s: inode %lu, logical" 3748e8b83d93STheodore Ts'o "block %llu, max_blocks %u\n", __func__, inode->i_ino, 3749e8b83d93STheodore Ts'o (unsigned long long)ee_block, ee_len); 3750e8b83d93STheodore Ts'o 3751e8b83d93STheodore Ts'o if (ee_block != map->m_lblk 
|| ee_len > map->m_len) { 3752dfe50809STheodore Ts'o err = ext4_split_convert_extents(handle, inode, map, ppath, 3753e8b83d93STheodore Ts'o EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); 3754e8b83d93STheodore Ts'o if (err < 0) 3755e8b83d93STheodore Ts'o return err; 3756ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3757e8b83d93STheodore Ts'o if (IS_ERR(path)) 3758e8b83d93STheodore Ts'o return PTR_ERR(path); 3759e8b83d93STheodore Ts'o depth = ext_depth(inode); 3760e8b83d93STheodore Ts'o ex = path[depth].p_ext; 3761e8b83d93STheodore Ts'o if (!ex) { 3762e8b83d93STheodore Ts'o EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3763e8b83d93STheodore Ts'o (unsigned long) map->m_lblk); 37646a797d27SDarrick J. Wong return -EFSCORRUPTED; 3765e8b83d93STheodore Ts'o } 3766e8b83d93STheodore Ts'o } 3767e8b83d93STheodore Ts'o 3768e8b83d93STheodore Ts'o err = ext4_ext_get_access(handle, inode, path + depth); 3769e8b83d93STheodore Ts'o if (err) 3770e8b83d93STheodore Ts'o return err; 3771e8b83d93STheodore Ts'o /* first mark the extent as unwritten */ 3772e8b83d93STheodore Ts'o ext4_ext_mark_unwritten(ex); 3773e8b83d93STheodore Ts'o 3774e8b83d93STheodore Ts'o /* note: ext4_ext_correct_indexes() isn't needed here because 3775e8b83d93STheodore Ts'o * borders are not changed 3776e8b83d93STheodore Ts'o */ 3777e8b83d93STheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3778e8b83d93STheodore Ts'o 3779e8b83d93STheodore Ts'o /* Mark modified extent as dirty */ 3780e8b83d93STheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3781e8b83d93STheodore Ts'o if (err) 3782e8b83d93STheodore Ts'o return err; 3783e8b83d93STheodore Ts'o ext4_ext_show_leaf(inode, path); 3784e8b83d93STheodore Ts'o 3785b8a86845SLukas Czerner ext4_update_inode_fsync_trans(handle, inode, 1); 37864337ecd1SEric Whitney 3787b8a86845SLukas Czerner map->m_flags |= EXT4_MAP_UNWRITTEN; 3788f064a9d6SEric Whitney if (*allocated > map->m_len) 3789f064a9d6SEric Whitney *allocated = map->m_len; 3790f064a9d6SEric Whitney map->m_len = *allocated; 3791f064a9d6SEric Whitney return 0; 3792b8a86845SLukas Czerner } 3793b8a86845SLukas Czerner 3794b8a86845SLukas Czerner static int 3795556615dcSLukas Czerner ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, 3796e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3797dfe50809STheodore Ts'o struct ext4_ext_path **ppath, int flags, 3798e35fd660STheodore Ts'o unsigned int allocated, ext4_fsblk_t newblock) 37990031462bSMingming Cao { 38004337ecd1SEric Whitney #ifdef EXT_DEBUG 3801dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 38024337ecd1SEric Whitney #endif 38030031462bSMingming Cao int ret = 0; 38040031462bSMingming Cao int err = 0; 38050031462bSMingming Cao 3806556615dcSLukas Czerner ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical " 380788635ca2SZheng Liu "block %llu, max_blocks %u, flags %x, allocated %u\n", 3808e35fd660STheodore Ts'o inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 38090031462bSMingming Cao flags, allocated); 38100031462bSMingming Cao ext4_ext_show_leaf(inode, path); 38110031462bSMingming Cao 381227dd4385SLukas Czerner /* 3813556615dcSLukas Czerner * When writing into unwritten space, we should not fail to 381427dd4385SLukas Czerner * allocate metadata blocks for the new extent block if needed. 
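 *
 * For example, splitting the extent here may require a new index block
 * for the tree; EXT4_GET_BLOCKS_METADATA_NOFAIL lets the allocator use
 * reserved space (EXT4_MB_USE_RESERVED) rather than fail with ENOSPC
 * after the data write is already under way.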
381527dd4385SLukas Czerner */ 381627dd4385SLukas Czerner flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 381727dd4385SLukas Czerner 3818556615dcSLukas Czerner trace_ext4_ext_handle_unwritten_extents(inode, map, flags, 3819b5645534SZheng Liu allocated, newblock); 3820d8990240SAditya Kali 3821c7064ef1SJiaying Zhang /* get_block() before submit the IO, split the extent */ 3822c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_PRE_IO) { 3823dfe50809STheodore Ts'o ret = ext4_split_convert_extents(handle, inode, map, ppath, 3824dfe50809STheodore Ts'o flags | EXT4_GET_BLOCKS_CONVERT); 382582e54229SDmitry Monakhov if (ret <= 0) 382682e54229SDmitry Monakhov goto out; 3827a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 38280031462bSMingming Cao goto out; 38290031462bSMingming Cao } 3830c7064ef1SJiaying Zhang /* IO end_io complete, convert the filled extent to written */ 3831c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT) { 3832c86d8db3SJan Kara if (flags & EXT4_GET_BLOCKS_ZERO) { 3833c86d8db3SJan Kara if (allocated > map->m_len) 3834c86d8db3SJan Kara allocated = map->m_len; 3835c86d8db3SJan Kara err = ext4_issue_zeroout(inode, map->m_lblk, newblock, 3836c86d8db3SJan Kara allocated); 3837c86d8db3SJan Kara if (err < 0) 3838c86d8db3SJan Kara goto out2; 3839c86d8db3SJan Kara } 3840dee1f973SDmitry Monakhov ret = ext4_convert_unwritten_extents_endio(handle, inode, map, 3841dfe50809STheodore Ts'o ppath); 38424337ecd1SEric Whitney if (ret >= 0) 3843b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 38444337ecd1SEric Whitney else 384558590b06STheodore Ts'o err = ret; 3846cdee7843SZheng Liu map->m_flags |= EXT4_MAP_MAPPED; 384715cc1767SEric Whitney map->m_pblk = newblock; 3848cdee7843SZheng Liu if (allocated > map->m_len) 3849cdee7843SZheng Liu allocated = map->m_len; 3850cdee7843SZheng Liu map->m_len = allocated; 38510031462bSMingming Cao goto out2; 38520031462bSMingming Cao } 38530031462bSMingming Cao /* buffered IO case */ 38540031462bSMingming Cao /* 38550031462bSMingming Cao * repeat fallocate creation request 38560031462bSMingming Cao * we already have an unwritten extent 38570031462bSMingming Cao */ 3858556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 3859a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 38600031462bSMingming Cao goto map_out; 3861a25a4e1aSZheng Liu } 38620031462bSMingming Cao 38630031462bSMingming Cao /* buffered READ or buffered write_begin() lookup */ 38640031462bSMingming Cao if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 38650031462bSMingming Cao /* 38660031462bSMingming Cao * We have blocks reserved already. We 38670031462bSMingming Cao * return allocated blocks so that delalloc 38680031462bSMingming Cao * won't do block reservation for us. But 38690031462bSMingming Cao * the buffer head will be unmapped so that 38700031462bSMingming Cao * a read from the block returns 0s. 
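 *
 * Example (illustrative): reading back a region that was fallocated but
 * never written takes this path; the extent is reported as
 * EXT4_MAP_UNWRITTEN rather than EXT4_MAP_MAPPED, so the reader sees
 * zeroes instead of whatever happens to be on disk.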
38710031462bSMingming Cao */ 3872e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNWRITTEN; 38730031462bSMingming Cao goto out1; 38740031462bSMingming Cao } 38750031462bSMingming Cao 38760031462bSMingming Cao /* buffered write, writepage time, convert*/ 3877dfe50809STheodore Ts'o ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); 3878a4e5d88bSDmitry Monakhov if (ret >= 0) 3879b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 38800031462bSMingming Cao out: 38810031462bSMingming Cao if (ret <= 0) { 38820031462bSMingming Cao err = ret; 38830031462bSMingming Cao goto out2; 38840031462bSMingming Cao } else 38850031462bSMingming Cao allocated = ret; 3886e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 388716e08b14Szhangyi (F) if (allocated > map->m_len) 3888e35fd660STheodore Ts'o allocated = map->m_len; 38893a225670SZheng Liu map->m_len = allocated; 38905f634d06SAneesh Kumar K.V 38910031462bSMingming Cao map_out: 3892e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 38930031462bSMingming Cao out1: 3894e35fd660STheodore Ts'o if (allocated > map->m_len) 3895e35fd660STheodore Ts'o allocated = map->m_len; 38960031462bSMingming Cao ext4_ext_show_leaf(inode, path); 3897e35fd660STheodore Ts'o map->m_pblk = newblock; 3898e35fd660STheodore Ts'o map->m_len = allocated; 38990031462bSMingming Cao out2: 39000031462bSMingming Cao return err ? err : allocated; 39010031462bSMingming Cao } 390258590b06STheodore Ts'o 39030031462bSMingming Cao /* 39044d33b1efSTheodore Ts'o * get_implied_cluster_alloc - check to see if the requested 39054d33b1efSTheodore Ts'o * allocation (in the map structure) overlaps with a cluster already 39064d33b1efSTheodore Ts'o * allocated in an extent. 3907d8990240SAditya Kali * @sb The filesystem superblock structure 39084d33b1efSTheodore Ts'o * @map The requested lblk->pblk mapping 39094d33b1efSTheodore Ts'o * @ex The extent structure which might contain an implied 39104d33b1efSTheodore Ts'o * cluster allocation 39114d33b1efSTheodore Ts'o * 39124d33b1efSTheodore Ts'o * This function is called by ext4_ext_map_blocks() after we failed to 39134d33b1efSTheodore Ts'o * find blocks that were already in the inode's extent tree. Hence, 39144d33b1efSTheodore Ts'o * we know that the beginning of the requested region cannot overlap 39154d33b1efSTheodore Ts'o * the extent from the inode's extent tree. There are three cases we 39164d33b1efSTheodore Ts'o * want to catch. 
The first is this case: 39174d33b1efSTheodore Ts'o * 39184d33b1efSTheodore Ts'o * |--- cluster # N--| 39194d33b1efSTheodore Ts'o * |--- extent ---| |---- requested region ---| 39204d33b1efSTheodore Ts'o * |==========| 39214d33b1efSTheodore Ts'o * 39224d33b1efSTheodore Ts'o * The second case that we need to test for is this one: 39234d33b1efSTheodore Ts'o * 39244d33b1efSTheodore Ts'o * |--------- cluster # N ----------------| 39254d33b1efSTheodore Ts'o * |--- requested region --| |------- extent ----| 39264d33b1efSTheodore Ts'o * |=======================| 39274d33b1efSTheodore Ts'o * 39284d33b1efSTheodore Ts'o * The third case is when the requested region lies between two extents 39294d33b1efSTheodore Ts'o * within the same cluster: 39304d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 39314d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 39324d33b1efSTheodore Ts'o * |------ requested region ------| 39334d33b1efSTheodore Ts'o * |================| 39344d33b1efSTheodore Ts'o * 39354d33b1efSTheodore Ts'o * In each of the above cases, we need to set the map->m_pblk and 39364d33b1efSTheodore Ts'o * map->m_len so it corresponds to the return the extent labelled as 39374d33b1efSTheodore Ts'o * "|====|" from cluster #N, since it is already in use for data in 39384d33b1efSTheodore Ts'o * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 39394d33b1efSTheodore Ts'o * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 39404d33b1efSTheodore Ts'o * as a new "allocated" block region. Otherwise, we will return 0 and 39414d33b1efSTheodore Ts'o * ext4_ext_map_blocks() will then allocate one or more new clusters 39424d33b1efSTheodore Ts'o * by calling ext4_mb_new_blocks(). 39434d33b1efSTheodore Ts'o */ 3944d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb, 39454d33b1efSTheodore Ts'o struct ext4_map_blocks *map, 39464d33b1efSTheodore Ts'o struct ext4_extent *ex, 39474d33b1efSTheodore Ts'o struct ext4_ext_path *path) 39484d33b1efSTheodore Ts'o { 3949d8990240SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(sb); 3950f5a44db5STheodore Ts'o ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 39514d33b1efSTheodore Ts'o ext4_lblk_t ex_cluster_start, ex_cluster_end; 395214d7f3efSCurt Wohlgemuth ext4_lblk_t rr_cluster_start; 39534d33b1efSTheodore Ts'o ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 39544d33b1efSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 39554d33b1efSTheodore Ts'o unsigned short ee_len = ext4_ext_get_actual_len(ex); 39564d33b1efSTheodore Ts'o 39574d33b1efSTheodore Ts'o /* The extent passed in that we are trying to match */ 39584d33b1efSTheodore Ts'o ex_cluster_start = EXT4_B2C(sbi, ee_block); 39594d33b1efSTheodore Ts'o ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 39604d33b1efSTheodore Ts'o 39614d33b1efSTheodore Ts'o /* The requested region passed into ext4_map_blocks() */ 39624d33b1efSTheodore Ts'o rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 39634d33b1efSTheodore Ts'o 39644d33b1efSTheodore Ts'o if ((rr_cluster_start == ex_cluster_end) || 39654d33b1efSTheodore Ts'o (rr_cluster_start == ex_cluster_start)) { 39664d33b1efSTheodore Ts'o if (rr_cluster_start == ex_cluster_end) 39674d33b1efSTheodore Ts'o ee_start += ee_len - 1; 3968f5a44db5STheodore Ts'o map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; 39694d33b1efSTheodore Ts'o map->m_len = min(map->m_len, 39704d33b1efSTheodore Ts'o (unsigned) sbi->s_cluster_ratio - c_offset); 39714d33b1efSTheodore Ts'o /* 39724d33b1efSTheodore 
Ts'o * Check for and handle this case: 39734d33b1efSTheodore Ts'o * 39744d33b1efSTheodore Ts'o * |--------- cluster # N-------------| 39754d33b1efSTheodore Ts'o * |------- extent ----| 39764d33b1efSTheodore Ts'o * |--- requested region ---| 39774d33b1efSTheodore Ts'o * |===========| 39784d33b1efSTheodore Ts'o */ 39794d33b1efSTheodore Ts'o 39804d33b1efSTheodore Ts'o if (map->m_lblk < ee_block) 39814d33b1efSTheodore Ts'o map->m_len = min(map->m_len, ee_block - map->m_lblk); 39824d33b1efSTheodore Ts'o 39834d33b1efSTheodore Ts'o /* 39844d33b1efSTheodore Ts'o * Check for the case where there is already another allocated 39854d33b1efSTheodore Ts'o * block to the right of 'ex' but before the end of the cluster. 39864d33b1efSTheodore Ts'o * 39874d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 39884d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 39894d33b1efSTheodore Ts'o * |------ requested region ------| 39904d33b1efSTheodore Ts'o * |================| 39914d33b1efSTheodore Ts'o */ 39924d33b1efSTheodore Ts'o if (map->m_lblk > ee_block) { 39934d33b1efSTheodore Ts'o ext4_lblk_t next = ext4_ext_next_allocated_block(path); 39944d33b1efSTheodore Ts'o map->m_len = min(map->m_len, next - map->m_lblk); 39954d33b1efSTheodore Ts'o } 3996d8990240SAditya Kali 3997d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 39984d33b1efSTheodore Ts'o return 1; 39994d33b1efSTheodore Ts'o } 4000d8990240SAditya Kali 4001d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 40024d33b1efSTheodore Ts'o return 0; 40034d33b1efSTheodore Ts'o } 40044d33b1efSTheodore Ts'o 40054d33b1efSTheodore Ts'o 40064d33b1efSTheodore Ts'o /* 4007f5ab0d1fSMingming Cao * Block allocation/map/preallocation routine for extents based files 4008f5ab0d1fSMingming Cao * 4009f5ab0d1fSMingming Cao * 4010c278bfecSAneesh Kumar K.V * Need to be called with 40110e855ac8SAneesh Kumar K.V * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 40120e855ac8SAneesh Kumar K.V * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 4013f5ab0d1fSMingming Cao * 4014f5ab0d1fSMingming Cao * return > 0, number of of blocks already mapped/allocated 4015f5ab0d1fSMingming Cao * if create == 0 and these are pre-allocated blocks 4016f5ab0d1fSMingming Cao * buffer head is unmapped 4017f5ab0d1fSMingming Cao * otherwise blocks are mapped 4018f5ab0d1fSMingming Cao * 4019f5ab0d1fSMingming Cao * return = 0, if plain look up failed (blocks have not been allocated) 4020f5ab0d1fSMingming Cao * buffer head is unmapped 4021f5ab0d1fSMingming Cao * 4022f5ab0d1fSMingming Cao * return < 0, error case. 
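 *
 * Example (illustrative) of how a caller interprets the result:
 *
 *	map.m_lblk = 100; map.m_len = 16;
 *	ret = ext4_ext_map_blocks(handle, inode, &map, 0);
 *
 * ret == 0 means blocks starting at 100 are a hole (map.m_len then
 * reflects how much of the request falls in the hole); ret == 8 means
 * only 8 contiguous blocks are mapped and the caller must repeat the
 * call for the remainder; ret < 0 is an error such as -EFSCORRUPTED.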
4023c278bfecSAneesh Kumar K.V */ 4024e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4025e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 4026a86c6181SAlex Tomas { 4027a86c6181SAlex Tomas struct ext4_ext_path *path = NULL; 40284d33b1efSTheodore Ts'o struct ext4_extent newex, *ex, *ex2; 40294d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 40300562e0baSJiaying Zhang ext4_fsblk_t newblock = 0; 403134990461SEric Whitney int err = 0, depth, ret; 40324d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0; 403381fdbb4aSYongqiang Yang unsigned int allocated_clusters = 0; 4034c9de560dSAlex Tomas struct ext4_allocation_request ar; 40354d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset; 4036a86c6181SAlex Tomas 403784fe3befSMingming ext_debug("blocks %u/%u requested for inode %lu\n", 4038e35fd660STheodore Ts'o map->m_lblk, map->m_len, inode->i_ino); 40390562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4040a86c6181SAlex Tomas 4041a86c6181SAlex Tomas /* find extent for this block */ 4042ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, NULL, 0); 4043a86c6181SAlex Tomas if (IS_ERR(path)) { 4044a86c6181SAlex Tomas err = PTR_ERR(path); 4045a86c6181SAlex Tomas path = NULL; 4046a86c6181SAlex Tomas goto out2; 4047a86c6181SAlex Tomas } 4048a86c6181SAlex Tomas 4049a86c6181SAlex Tomas depth = ext_depth(inode); 4050a86c6181SAlex Tomas 4051a86c6181SAlex Tomas /* 4052d0d856e8SRandy Dunlap * consistent leaf must not be empty; 4053d0d856e8SRandy Dunlap * this situation is possible, though, _during_ tree modification; 4054ed8a1a76STheodore Ts'o * this is why assert can't be put in ext4_find_extent() 4055a86c6181SAlex Tomas */ 4056273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4057273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address " 4058f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld", 4059f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth, 4060f70f362bSTheodore Ts'o path[depth].p_block); 40616a797d27SDarrick J. Wong err = -EFSCORRUPTED; 4062034fb4c9SSurbhi Palande goto out2; 4063034fb4c9SSurbhi Palande } 4064a86c6181SAlex Tomas 40657e028976SAvantika Mathur ex = path[depth].p_ext; 40667e028976SAvantika Mathur if (ex) { 4067725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4068bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4069a2df2a63SAmit Arora unsigned short ee_len; 4070471d4011SSuparna Bhattacharya 4071b8a86845SLukas Czerner 4072471d4011SSuparna Bhattacharya /* 4073556615dcSLukas Czerner * unwritten extents are treated as holes, except that 407456055d3aSAmit Arora * we split out initialized portions during a write. 
4075471d4011SSuparna Bhattacharya */ 4076a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex); 4077d8990240SAditya Kali 4078d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4079d8990240SAditya Kali 4080d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */ 4081e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) { 4082e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start; 4083d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 4084e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 4085e35fd660STheodore Ts'o ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 4086a86c6181SAlex Tomas ee_block, ee_len, newblock); 408756055d3aSAmit Arora 4088b8a86845SLukas Czerner /* 4089b8a86845SLukas Czerner * If the extent is initialized check whether the 4090b8a86845SLukas Czerner * caller wants to convert it to unwritten. 4091b8a86845SLukas Czerner */ 4092556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(ex)) && 4093b8a86845SLukas Czerner (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4094f064a9d6SEric Whitney err = convert_initialized_extent(handle, 4095f064a9d6SEric Whitney inode, map, &path, &allocated); 4096b8a86845SLukas Czerner goto out2; 4097f064a9d6SEric Whitney } else if (!ext4_ext_is_unwritten(ex)) { 4098a86c6181SAlex Tomas goto out; 4099f064a9d6SEric Whitney } 410069eb33dcSZheng Liu 4101556615dcSLukas Czerner ret = ext4_ext_handle_unwritten_extents( 4102dfe50809STheodore Ts'o handle, inode, map, &path, flags, 4103e861304bSAllison Henderson allocated, newblock); 4104ce37c429SEric Whitney if (ret < 0) 4105ce37c429SEric Whitney err = ret; 4106ce37c429SEric Whitney else 4107ce37c429SEric Whitney allocated = ret; 410831cf0f2cSEric Whitney goto out2; 410956055d3aSAmit Arora } 4110a86c6181SAlex Tomas } 4111a86c6181SAlex Tomas 4112a86c6181SAlex Tomas /* 4113d0d856e8SRandy Dunlap * requested block isn't allocated yet; 4114a86c6181SAlex Tomas * we couldn't try to create block if create flag is zero 4115a86c6181SAlex Tomas */ 4116c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 4117140a5250SJan Kara ext4_lblk_t hole_start, hole_len; 4118140a5250SJan Kara 4119facab4d9SJan Kara hole_start = map->m_lblk; 4120facab4d9SJan Kara hole_len = ext4_ext_determine_hole(inode, path, &hole_start); 412156055d3aSAmit Arora /* 412256055d3aSAmit Arora * put just found gap into cache to speed up 412356055d3aSAmit Arora * subsequent requests 412456055d3aSAmit Arora */ 4125140a5250SJan Kara ext4_ext_put_gap_in_cache(inode, hole_start, hole_len); 4126facab4d9SJan Kara 4127facab4d9SJan Kara /* Update hole_len to reflect hole size after map->m_lblk */ 4128facab4d9SJan Kara if (hole_start != map->m_lblk) 4129facab4d9SJan Kara hole_len -= map->m_lblk - hole_start; 4130facab4d9SJan Kara map->m_pblk = 0; 4131facab4d9SJan Kara map->m_len = min_t(unsigned int, map->m_len, hole_len); 4132facab4d9SJan Kara 4133a86c6181SAlex Tomas goto out2; 4134a86c6181SAlex Tomas } 41354d33b1efSTheodore Ts'o 4136a86c6181SAlex Tomas /* 4137c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation. 4138a86c6181SAlex Tomas */ 41394d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk); 4140d0abafacSEric Whitney cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 41414d33b1efSTheodore Ts'o 41424d33b1efSTheodore Ts'o /* 41434d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned 4144ed8a1a76STheodore Ts'o * by ext4_find_extent() implies a cluster we can use. 
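 *
 * For example, with a bigalloc cluster ratio of 16, a request at logical
 * block 35 has cluster_offset == 3 inside cluster #2; if the extent found
 * above already pins cluster #2 to a physical cluster,
 * get_implied_cluster_alloc() maps the request into that cluster instead
 * of allocating a new one.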
41454d33b1efSTheodore Ts'o */ 41464d33b1efSTheodore Ts'o if (cluster_offset && ex && 4147d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 41484d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 41494d33b1efSTheodore Ts'o newblock = map->m_pblk; 41504d33b1efSTheodore Ts'o goto got_allocated_blocks; 41514d33b1efSTheodore Ts'o } 4152a86c6181SAlex Tomas 4153c9de560dSAlex Tomas /* find neighbour allocated blocks */ 4154e35fd660STheodore Ts'o ar.lleft = map->m_lblk; 4155c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4156c9de560dSAlex Tomas if (err) 4157c9de560dSAlex Tomas goto out2; 4158e35fd660STheodore Ts'o ar.lright = map->m_lblk; 41594d33b1efSTheodore Ts'o ex2 = NULL; 41604d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4161c9de560dSAlex Tomas if (err) 4162c9de560dSAlex Tomas goto out2; 416325d14f98SAmit Arora 41644d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a 41654d33b1efSTheodore Ts'o * cluster we can use. */ 41664d33b1efSTheodore Ts'o if ((sbi->s_cluster_ratio > 1) && ex2 && 4167d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 41684d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 41694d33b1efSTheodore Ts'o newblock = map->m_pblk; 41704d33b1efSTheodore Ts'o goto got_allocated_blocks; 41714d33b1efSTheodore Ts'o } 41724d33b1efSTheodore Ts'o 4173749269faSAmit Arora /* 4174749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in 4175749269faSAmit Arora * a single extent. For an initialized extent this limit is 4176556615dcSLukas Czerner * EXT_INIT_MAX_LEN and for an unwritten extent this limit is 4177556615dcSLukas Czerner * EXT_UNWRITTEN_MAX_LEN. 4178749269faSAmit Arora */ 4179e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN && 4180556615dcSLukas Czerner !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4181e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN; 4182556615dcSLukas Czerner else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && 4183556615dcSLukas Czerner (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4184556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN; 4185749269faSAmit Arora 4186e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4187e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len); 41884d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path); 418925d14f98SAmit Arora if (err) 4190b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 419125d14f98SAmit Arora else 4192e35fd660STheodore Ts'o allocated = map->m_len; 4193c9de560dSAlex Tomas 4194c9de560dSAlex Tomas /* allocate new block */ 4195c9de560dSAlex Tomas ar.inode = inode; 4196e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4197e35fd660STheodore Ts'o ar.logical = map->m_lblk; 41984d33b1efSTheodore Ts'o /* 41994d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster 42004d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a 42014d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the 42024d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is 42034d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc() 42044d33b1efSTheodore Ts'o * work correctly. 
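 *
 * For example, with a cluster ratio of 16 and map->m_lblk == 35, offset
 * is 3; asking for 20 blocks makes ar.len = EXT4_NUM_B2C(sbi, 23) == 2
 * clusters, and ar.goal/ar.logical are pulled back by 3 so the physical
 * allocation starts on a cluster boundary.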
42054d33b1efSTheodore Ts'o */ 4206f5a44db5STheodore Ts'o offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 42074d33b1efSTheodore Ts'o ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 42084d33b1efSTheodore Ts'o ar.goal -= offset; 42094d33b1efSTheodore Ts'o ar.logical -= offset; 4210c9de560dSAlex Tomas if (S_ISREG(inode->i_mode)) 4211c9de560dSAlex Tomas ar.flags = EXT4_MB_HINT_DATA; 4212c9de560dSAlex Tomas else 4213c9de560dSAlex Tomas /* disable in-core preallocation for non-regular files */ 4214c9de560dSAlex Tomas ar.flags = 0; 4215556b27abSVivek Haldar if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4216556b27abSVivek Haldar ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4217e3cf5d5dSTheodore Ts'o if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4218e3cf5d5dSTheodore Ts'o ar.flags |= EXT4_MB_DELALLOC_RESERVED; 4219c5e298aeSTheodore Ts'o if (flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 4220c5e298aeSTheodore Ts'o ar.flags |= EXT4_MB_USE_RESERVED; 4221c9de560dSAlex Tomas newblock = ext4_mb_new_blocks(handle, &ar, &err); 4222a86c6181SAlex Tomas if (!newblock) 4223a86c6181SAlex Tomas goto out2; 422484fe3befSMingming ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4225498e5f24STheodore Ts'o ar.goal, newblock, allocated); 42267b415bf6SAditya Kali allocated_clusters = ar.len; 42274d33b1efSTheodore Ts'o ar.len = EXT4_C2B(sbi, ar.len) - offset; 42284d33b1efSTheodore Ts'o if (ar.len > allocated) 42294d33b1efSTheodore Ts'o ar.len = allocated; 4230a86c6181SAlex Tomas 42314d33b1efSTheodore Ts'o got_allocated_blocks: 4232a86c6181SAlex Tomas /* try to insert new extent into found leaf and return */ 42334d33b1efSTheodore Ts'o ext4_ext_store_pblock(&newex, newblock + offset); 4234c9de560dSAlex Tomas newex.ee_len = cpu_to_le16(ar.len); 4235556615dcSLukas Czerner /* Mark unwritten */ 4236556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) { 4237556615dcSLukas Czerner ext4_ext_mark_unwritten(&newex); 4238a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 42398d5d02e6SMingming Cao } 4240c8d46e41SJiaying Zhang 42414337ecd1SEric Whitney err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags); 424234990461SEric Whitney if (err) { 424334990461SEric Whitney if (allocated_clusters) { 424434990461SEric Whitney int fb_flags = 0; 424582e54229SDmitry Monakhov 424634990461SEric Whitney /* 424734990461SEric Whitney * free data blocks we just allocated. 424834990461SEric Whitney * not a good idea to call discard here directly, 424934990461SEric Whitney * but otherwise we'd need to call it every free(). 
425034990461SEric Whitney */ 4251c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 425234990461SEric Whitney if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 425334990461SEric Whitney fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; 4254c8e15130STheodore Ts'o ext4_free_blocks(handle, inode, NULL, newblock, 425534990461SEric Whitney EXT4_C2B(sbi, allocated_clusters), 425634990461SEric Whitney fb_flags); 425734990461SEric Whitney } 4258a86c6181SAlex Tomas goto out2; 4259315054f0SAlex Tomas } 4260a86c6181SAlex Tomas 4261a86c6181SAlex Tomas /* previous routine could use block we allocated */ 4262bf89d16fSTheodore Ts'o newblock = ext4_ext_pblock(&newex); 4263b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 4264e35fd660STheodore Ts'o if (allocated > map->m_len) 4265e35fd660STheodore Ts'o allocated = map->m_len; 4266e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 4267a86c6181SAlex Tomas 4268b436b9beSJan Kara /* 4269b6bf9171SEric Whitney * Reduce the reserved cluster count to reflect successful deferred 4270b6bf9171SEric Whitney * allocation of delayed allocated clusters or direct allocation of 4271b6bf9171SEric Whitney * clusters discovered to be delayed allocated. Once allocated, a 4272b6bf9171SEric Whitney * cluster is not included in the reserved count. 42735f634d06SAneesh Kumar K.V */ 4274*2971148dSEric Whitney if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) { 42757b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 42767b415bf6SAditya Kali /* 4277b6bf9171SEric Whitney * When allocating delayed allocated clusters, simply 4278b6bf9171SEric Whitney * reduce the reserved cluster count and claim quota 4279232ec872SLukas Czerner */ 4280232ec872SLukas Czerner ext4_da_update_reserve_space(inode, allocated_clusters, 4281232ec872SLukas Czerner 1); 4282b6bf9171SEric Whitney } else { 4283b6bf9171SEric Whitney ext4_lblk_t lblk, len; 4284b6bf9171SEric Whitney unsigned int n; 4285b6bf9171SEric Whitney 4286b6bf9171SEric Whitney /* 4287b6bf9171SEric Whitney * When allocating non-delayed allocated clusters 4288b6bf9171SEric Whitney * (from fallocate, filemap, DIO, or clusters 4289b6bf9171SEric Whitney * allocated when delalloc has been disabled by 4290b6bf9171SEric Whitney * ext4_nonda_switch), reduce the reserved cluster 4291b6bf9171SEric Whitney * count by the number of allocated clusters that 4292b6bf9171SEric Whitney * have previously been delayed allocated. Quota 4293b6bf9171SEric Whitney * has been claimed by ext4_mb_new_blocks() above, 4294b6bf9171SEric Whitney * so release the quota reservations made for any 4295b6bf9171SEric Whitney * previously delayed allocated clusters. 4296b6bf9171SEric Whitney */ 4297b6bf9171SEric Whitney lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk); 4298b6bf9171SEric Whitney len = allocated_clusters << sbi->s_cluster_bits; 4299b6bf9171SEric Whitney n = ext4_es_delayed_clu(inode, lblk, len); 4300b6bf9171SEric Whitney if (n > 0) 4301b6bf9171SEric Whitney ext4_da_update_reserve_space(inode, (int) n, 0); 43027b415bf6SAditya Kali } 43037b415bf6SAditya Kali } 43045f634d06SAneesh Kumar K.V 43055f634d06SAneesh Kumar K.V /* 4306b436b9beSJan Kara * Cache the extent and update transaction to commit on fdatasync only 4307556615dcSLukas Czerner * when it is _not_ an unwritten extent. 
4308b436b9beSJan Kara */ 4309556615dcSLukas Czerner if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) 4310b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 431169eb33dcSZheng Liu else 4312b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0); 4313a86c6181SAlex Tomas out: 4314e35fd660STheodore Ts'o if (allocated > map->m_len) 4315e35fd660STheodore Ts'o allocated = map->m_len; 4316a86c6181SAlex Tomas ext4_ext_show_leaf(inode, path); 4317e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 4318e35fd660STheodore Ts'o map->m_pblk = newblock; 4319e35fd660STheodore Ts'o map->m_len = allocated; 4320a86c6181SAlex Tomas out2: 4321a86c6181SAlex Tomas ext4_ext_drop_refs(path); 4322a86c6181SAlex Tomas kfree(path); 4323e861304bSAllison Henderson 432463b99968STheodore Ts'o trace_ext4_ext_map_blocks_exit(inode, flags, map, 432563b99968STheodore Ts'o err ? err : allocated); 43267877191cSLukas Czerner return err ? err : allocated; 4327a86c6181SAlex Tomas } 4328a86c6181SAlex Tomas 4329d0abb36dSTheodore Ts'o int ext4_ext_truncate(handle_t *handle, struct inode *inode) 4330a86c6181SAlex Tomas { 4331a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 4332725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 4333a86c6181SAlex Tomas int err = 0; 4334a86c6181SAlex Tomas 4335a86c6181SAlex Tomas /* 4336d0d856e8SRandy Dunlap * TODO: optimization is possible here. 4337d0d856e8SRandy Dunlap * Probably we need not scan at all, 4338d0d856e8SRandy Dunlap * because page truncation is enough. 4339a86c6181SAlex Tomas */ 4340a86c6181SAlex Tomas 4341a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */ 4342a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size; 4343d0abb36dSTheodore Ts'o err = ext4_mark_inode_dirty(handle, inode); 4344d0abb36dSTheodore Ts'o if (err) 4345d0abb36dSTheodore Ts'o return err; 4346a86c6181SAlex Tomas 4347a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1) 4348a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb); 43498acd5e9bSTheodore Ts'o retry: 435051865fdaSZheng Liu err = ext4_es_remove_extent(inode, last_block, 435151865fdaSZheng Liu EXT_MAX_BLOCKS - last_block); 435294eec0fcSTheodore Ts'o if (err == -ENOMEM) { 43538acd5e9bSTheodore Ts'o cond_resched(); 43548acd5e9bSTheodore Ts'o congestion_wait(BLK_RW_ASYNC, HZ/50); 43558acd5e9bSTheodore Ts'o goto retry; 43568acd5e9bSTheodore Ts'o } 4357d0abb36dSTheodore Ts'o if (err) 4358d0abb36dSTheodore Ts'o return err; 4359d0abb36dSTheodore Ts'o return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4360a86c6181SAlex Tomas } 4361a86c6181SAlex Tomas 43620e8b6879SLukas Czerner static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4363c174e6d6SDmitry Monakhov ext4_lblk_t len, loff_t new_size, 436477a2e84dSTahsin Erdogan int flags) 4365a2df2a63SAmit Arora { 4366496ad9aaSAl Viro struct inode *inode = file_inode(file); 4367a2df2a63SAmit Arora handle_t *handle; 4368a2df2a63SAmit Arora int ret = 0; 4369a2df2a63SAmit Arora int ret2 = 0; 4370a2df2a63SAmit Arora int retries = 0; 43714134f5c8SLukas Czerner int depth = 0; 43722ed88685STheodore Ts'o struct ext4_map_blocks map; 43730e8b6879SLukas Czerner unsigned int credits; 4374c174e6d6SDmitry Monakhov loff_t epos; 4375a2df2a63SAmit Arora 4376c3fe493cSFabian Frederick BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)); 43770e8b6879SLukas Czerner map.m_lblk = offset; 4378c174e6d6SDmitry Monakhov map.m_len = len; 43793c6fe770SGreg Harm /* 43803c6fe770SGreg Harm * Don't normalize the request if it can fit in 
one extent so 43813c6fe770SGreg Harm * that it doesn't get unnecessarily split into multiple 43823c6fe770SGreg Harm * extents. 43833c6fe770SGreg Harm */ 4384556615dcSLukas Czerner if (len <= EXT_UNWRITTEN_MAX_LEN) 43853c6fe770SGreg Harm flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 438660d4616fSDmitry Monakhov 43870e8b6879SLukas Czerner /* 43880e8b6879SLukas Czerner * credits to insert 1 extent into extent tree 43890e8b6879SLukas Czerner */ 43900e8b6879SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len); 43914134f5c8SLukas Czerner depth = ext_depth(inode); 43920e8b6879SLukas Czerner 4393a2df2a63SAmit Arora retry: 4394c174e6d6SDmitry Monakhov while (ret >= 0 && len) { 43954134f5c8SLukas Czerner /* 43964134f5c8SLukas Czerner * Recalculate credits when extent tree depth changes. 43974134f5c8SLukas Czerner */ 4398011c88e3SDan Carpenter if (depth != ext_depth(inode)) { 43994134f5c8SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len); 44004134f5c8SLukas Czerner depth = ext_depth(inode); 44014134f5c8SLukas Czerner } 44024134f5c8SLukas Czerner 44039924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 44049924a92aSTheodore Ts'o credits); 4405a2df2a63SAmit Arora if (IS_ERR(handle)) { 4406a2df2a63SAmit Arora ret = PTR_ERR(handle); 4407a2df2a63SAmit Arora break; 4408a2df2a63SAmit Arora } 4409a4e5d88bSDmitry Monakhov ret = ext4_map_blocks(handle, inode, &map, flags); 4410221879c9SAneesh Kumar K.V if (ret <= 0) { 4411f282ac19SLukas Czerner ext4_debug("inode #%lu: block %u: len %u: " 4412b06acd38SLukas Czerner "ext4_ext_map_blocks returned %d", 4413b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 4414b06acd38SLukas Czerner map.m_len, ret); 4415a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode); 4416a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4417a2df2a63SAmit Arora break; 4418a2df2a63SAmit Arora } 4419c174e6d6SDmitry Monakhov map.m_lblk += ret; 4420c174e6d6SDmitry Monakhov map.m_len = len = len - ret; 4421c174e6d6SDmitry Monakhov epos = (loff_t)map.m_lblk << inode->i_blkbits; 4422eeca7ea1SDeepa Dinamani inode->i_ctime = current_time(inode); 4423c174e6d6SDmitry Monakhov if (new_size) { 4424c174e6d6SDmitry Monakhov if (epos > new_size) 4425c174e6d6SDmitry Monakhov epos = new_size; 4426c174e6d6SDmitry Monakhov if (ext4_update_inode_size(inode, epos) & 0x1) 4427c174e6d6SDmitry Monakhov inode->i_mtime = inode->i_ctime; 4428c174e6d6SDmitry Monakhov } 4429c174e6d6SDmitry Monakhov ext4_mark_inode_dirty(handle, inode); 4430c894aa97SEryu Guan ext4_update_inode_fsync_trans(handle, inode, 1); 4431a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4432a2df2a63SAmit Arora if (ret2) 4433a2df2a63SAmit Arora break; 4434a2df2a63SAmit Arora } 4435fd28784aSAneesh Kumar K.V if (ret == -ENOSPC && 4436fd28784aSAneesh Kumar K.V ext4_should_retry_alloc(inode->i_sb, &retries)) { 4437fd28784aSAneesh Kumar K.V ret = 0; 4438a2df2a63SAmit Arora goto retry; 4439a2df2a63SAmit Arora } 4440f282ac19SLukas Czerner 44410e8b6879SLukas Czerner return ret > 0 ? 
ret2 : ret; 44420e8b6879SLukas Czerner } 44430e8b6879SLukas Czerner 444443f81677SEric Biggers static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len); 444543f81677SEric Biggers 444643f81677SEric Biggers static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len); 444743f81677SEric Biggers 4448b8a86845SLukas Czerner static long ext4_zero_range(struct file *file, loff_t offset, 4449b8a86845SLukas Czerner loff_t len, int mode) 4450b8a86845SLukas Czerner { 4451b8a86845SLukas Czerner struct inode *inode = file_inode(file); 4452b8a86845SLukas Czerner handle_t *handle = NULL; 4453b8a86845SLukas Czerner unsigned int max_blocks; 4454b8a86845SLukas Czerner loff_t new_size = 0; 4455b8a86845SLukas Czerner int ret = 0; 4456b8a86845SLukas Czerner int flags; 445769dc9536SDmitry Monakhov int credits; 4458c174e6d6SDmitry Monakhov int partial_begin, partial_end; 4459b8a86845SLukas Czerner loff_t start, end; 4460b8a86845SLukas Czerner ext4_lblk_t lblk; 4461b8a86845SLukas Czerner unsigned int blkbits = inode->i_blkbits; 4462b8a86845SLukas Czerner 4463b8a86845SLukas Czerner trace_ext4_zero_range(inode, offset, len, mode); 4464b8a86845SLukas Czerner 4465e1ee60fdSNamjae Jeon /* Call ext4_force_commit to flush all data in case of data=journal. */ 4466e1ee60fdSNamjae Jeon if (ext4_should_journal_data(inode)) { 4467e1ee60fdSNamjae Jeon ret = ext4_force_commit(inode->i_sb); 4468e1ee60fdSNamjae Jeon if (ret) 4469e1ee60fdSNamjae Jeon return ret; 4470e1ee60fdSNamjae Jeon } 4471e1ee60fdSNamjae Jeon 4472b8a86845SLukas Czerner /* 4473b8a86845SLukas Czerner * Round up offset. This is not fallocate, we neet to zero out 4474b8a86845SLukas Czerner * blocks, so convert interior block aligned part of the range to 4475b8a86845SLukas Czerner * unwritten and possibly manually zero out unaligned parts of the 4476b8a86845SLukas Czerner * range. 
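 *
 * For example, with 4KiB blocks, offset == 5000 and len == 20000:
 * start = round_up(5000, 4096) == 8192 and end = round_down(25000, 4096)
 * == 24576, so blocks 2..5 are converted to unwritten extents, while the
 * unaligned head (bytes 5000..8191) and tail (bytes 24576..24999) are
 * zeroed via ext4_zero_partial_blocks() at the end (partial_begin == 904,
 * partial_end == 424, both non-zero).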
4477b8a86845SLukas Czerner */ 4478b8a86845SLukas Czerner start = round_up(offset, 1 << blkbits); 4479b8a86845SLukas Czerner end = round_down((offset + len), 1 << blkbits); 4480b8a86845SLukas Czerner 4481b8a86845SLukas Czerner if (start < offset || end > offset + len) 4482b8a86845SLukas Czerner return -EINVAL; 4483c174e6d6SDmitry Monakhov partial_begin = offset & ((1 << blkbits) - 1); 4484c174e6d6SDmitry Monakhov partial_end = (offset + len) & ((1 << blkbits) - 1); 4485b8a86845SLukas Czerner 4486b8a86845SLukas Czerner lblk = start >> blkbits; 4487b8a86845SLukas Czerner max_blocks = (end >> blkbits); 4488b8a86845SLukas Czerner if (max_blocks < lblk) 4489b8a86845SLukas Czerner max_blocks = 0; 4490b8a86845SLukas Czerner else 4491b8a86845SLukas Czerner max_blocks -= lblk; 4492b8a86845SLukas Czerner 44935955102cSAl Viro inode_lock(inode); 4494b8a86845SLukas Czerner 4495b8a86845SLukas Czerner /* 4496b8a86845SLukas Czerner * Indirect files do not support unwritten extnets 4497b8a86845SLukas Czerner */ 4498b8a86845SLukas Czerner if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4499b8a86845SLukas Czerner ret = -EOPNOTSUPP; 4500b8a86845SLukas Czerner goto out_mutex; 4501b8a86845SLukas Czerner } 4502b8a86845SLukas Czerner 4503b8a86845SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) && 45049b02e498SEric Biggers (offset + len > inode->i_size || 450551e3ae81STheodore Ts'o offset + len > EXT4_I(inode)->i_disksize)) { 4506b8a86845SLukas Czerner new_size = offset + len; 4507b8a86845SLukas Czerner ret = inode_newsize_ok(inode, new_size); 4508b8a86845SLukas Czerner if (ret) 4509b8a86845SLukas Czerner goto out_mutex; 4510b8a86845SLukas Czerner } 4511b8a86845SLukas Czerner 45120f2af21aSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 45130f2af21aSLukas Czerner if (mode & FALLOC_FL_KEEP_SIZE) 45140f2af21aSLukas Czerner flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 45150f2af21aSLukas Czerner 451617048e8aSJan Kara /* Wait all existing dio workers, newcomers will block on i_mutex */ 451717048e8aSJan Kara inode_dio_wait(inode); 451817048e8aSJan Kara 45190f2af21aSLukas Czerner /* Preallocate the range including the unaligned edges */ 45200f2af21aSLukas Czerner if (partial_begin || partial_end) { 45210f2af21aSLukas Czerner ret = ext4_alloc_file_blocks(file, 45220f2af21aSLukas Czerner round_down(offset, 1 << blkbits) >> blkbits, 45230f2af21aSLukas Czerner (round_up((offset + len), 1 << blkbits) - 45240f2af21aSLukas Czerner round_down(offset, 1 << blkbits)) >> blkbits, 452577a2e84dSTahsin Erdogan new_size, flags); 45260f2af21aSLukas Czerner if (ret) 45271d39834fSNikolay Borisov goto out_mutex; 45280f2af21aSLukas Czerner 45290f2af21aSLukas Czerner } 45300f2af21aSLukas Czerner 45310f2af21aSLukas Czerner /* Zero range excluding the unaligned edges */ 4532b8a86845SLukas Czerner if (max_blocks > 0) { 45330f2af21aSLukas Czerner flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | 45340f2af21aSLukas Czerner EXT4_EX_NOCACHE); 4535b8a86845SLukas Czerner 4536ea3d7209SJan Kara /* 4537ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have 4538ea3d7209SJan Kara * released from page cache. 
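 * Once the block-aligned part of the range is converted back to
 * unwritten extents it reads as zeroes, so stale pages in the page
 * cache could otherwise expose the old data or be written back over
 * the range; hence faults are blocked via i_mmap_sem and the cached
 * pages are truncated below before the conversion takes place.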
4539ea3d7209SJan Kara */ 4540ea3d7209SJan Kara down_write(&EXT4_I(inode)->i_mmap_sem); 4541430657b6SRoss Zwisler 4542430657b6SRoss Zwisler ret = ext4_break_layouts(inode); 4543430657b6SRoss Zwisler if (ret) { 4544430657b6SRoss Zwisler up_write(&EXT4_I(inode)->i_mmap_sem); 4545430657b6SRoss Zwisler goto out_mutex; 4546430657b6SRoss Zwisler } 4547430657b6SRoss Zwisler 454801127848SJan Kara ret = ext4_update_disksize_before_punch(inode, offset, len); 454901127848SJan Kara if (ret) { 455001127848SJan Kara up_write(&EXT4_I(inode)->i_mmap_sem); 45511d39834fSNikolay Borisov goto out_mutex; 455201127848SJan Kara } 4553ea3d7209SJan Kara /* Now release the pages and zero block aligned part of pages */ 4554ea3d7209SJan Kara truncate_pagecache_range(inode, start, end - 1); 4555eeca7ea1SDeepa Dinamani inode->i_mtime = inode->i_ctime = current_time(inode); 4556ea3d7209SJan Kara 4557c174e6d6SDmitry Monakhov ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 455877a2e84dSTahsin Erdogan flags); 4559ea3d7209SJan Kara up_write(&EXT4_I(inode)->i_mmap_sem); 4560b8a86845SLukas Czerner if (ret) 45611d39834fSNikolay Borisov goto out_mutex; 4562b8a86845SLukas Czerner } 4563c174e6d6SDmitry Monakhov if (!partial_begin && !partial_end) 45641d39834fSNikolay Borisov goto out_mutex; 4565c174e6d6SDmitry Monakhov 456669dc9536SDmitry Monakhov /* 456769dc9536SDmitry Monakhov * In worst case we have to writeout two nonadjacent unwritten 456869dc9536SDmitry Monakhov * blocks and update the inode 456969dc9536SDmitry Monakhov */ 457069dc9536SDmitry Monakhov credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; 457169dc9536SDmitry Monakhov if (ext4_should_journal_data(inode)) 457269dc9536SDmitry Monakhov credits += 2; 457369dc9536SDmitry Monakhov handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); 4574b8a86845SLukas Czerner if (IS_ERR(handle)) { 4575b8a86845SLukas Czerner ret = PTR_ERR(handle); 4576b8a86845SLukas Czerner ext4_std_error(inode->i_sb, ret); 45771d39834fSNikolay Borisov goto out_mutex; 4578b8a86845SLukas Czerner } 4579b8a86845SLukas Czerner 4580eeca7ea1SDeepa Dinamani inode->i_mtime = inode->i_ctime = current_time(inode); 45814337ecd1SEric Whitney if (new_size) 45824631dbf6SDmitry Monakhov ext4_update_inode_size(inode, new_size); 4583b8a86845SLukas Czerner ext4_mark_inode_dirty(handle, inode); 4584b8a86845SLukas Czerner 4585b8a86845SLukas Czerner /* Zero out partial block at the edges of the range */ 4586b8a86845SLukas Czerner ret = ext4_zero_partial_blocks(handle, inode, offset, len); 458767a7d5f5SJan Kara if (ret >= 0) 458867a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 4589b8a86845SLukas Czerner 4590b8a86845SLukas Czerner if (file->f_flags & O_SYNC) 4591b8a86845SLukas Czerner ext4_handle_sync(handle); 4592b8a86845SLukas Czerner 4593b8a86845SLukas Czerner ext4_journal_stop(handle); 4594b8a86845SLukas Czerner out_mutex: 45955955102cSAl Viro inode_unlock(inode); 4596b8a86845SLukas Czerner return ret; 4597b8a86845SLukas Czerner } 4598b8a86845SLukas Czerner 45990e8b6879SLukas Czerner /* 46000e8b6879SLukas Czerner * preallocate space for a file. This implements ext4's fallocate file 46010e8b6879SLukas Czerner * operation, which gets called from sys_fallocate system call. 46020e8b6879SLukas Czerner * For block-mapped files, posix_fallocate should fall back to the method 46030e8b6879SLukas Czerner * of writing zeroes to the required new blocks (the same behavior which is 46040e8b6879SLukas Czerner * expected for file systems which do not support fallocate() system call). 
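 * For example, fallocate(fd, 0, 0, 16 << 20) from userspace
 * preallocates 16 MiB as unwritten extents and extends i_size, while
 * adding FALLOC_FL_KEEP_SIZE to the mode preallocates the blocks but
 * leaves the file size unchanged.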
46050e8b6879SLukas Czerner */ 46060e8b6879SLukas Czerner long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 46070e8b6879SLukas Czerner { 46080e8b6879SLukas Czerner struct inode *inode = file_inode(file); 46090e8b6879SLukas Czerner loff_t new_size = 0; 46100e8b6879SLukas Czerner unsigned int max_blocks; 46110e8b6879SLukas Czerner int ret = 0; 46120e8b6879SLukas Czerner int flags; 46130e8b6879SLukas Czerner ext4_lblk_t lblk; 46140e8b6879SLukas Czerner unsigned int blkbits = inode->i_blkbits; 46150e8b6879SLukas Czerner 46162058f83aSMichael Halcrow /* 46172058f83aSMichael Halcrow * Encrypted inodes can't handle collapse range or insert 46182058f83aSMichael Halcrow * range since we would need to re-encrypt blocks with a 46192058f83aSMichael Halcrow * different IV or XTS tweak (which are based on the logical 46202058f83aSMichael Halcrow * block number). 46212058f83aSMichael Halcrow */ 4622592ddec7SChandan Rajendra if (IS_ENCRYPTED(inode) && 4623457b1e35SEric Biggers (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE))) 46242058f83aSMichael Halcrow return -EOPNOTSUPP; 46252058f83aSMichael Halcrow 46260e8b6879SLukas Czerner /* Return error if mode is not supported */ 46270e8b6879SLukas Czerner if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4628331573feSNamjae Jeon FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | 4629331573feSNamjae Jeon FALLOC_FL_INSERT_RANGE)) 46300e8b6879SLukas Czerner return -EOPNOTSUPP; 46310e8b6879SLukas Czerner 46320e8b6879SLukas Czerner if (mode & FALLOC_FL_PUNCH_HOLE) 46330e8b6879SLukas Czerner return ext4_punch_hole(inode, offset, len); 46340e8b6879SLukas Czerner 46350e8b6879SLukas Czerner ret = ext4_convert_inline_data(inode); 46360e8b6879SLukas Czerner if (ret) 46370e8b6879SLukas Czerner return ret; 46380e8b6879SLukas Czerner 463940c406c7STheodore Ts'o if (mode & FALLOC_FL_COLLAPSE_RANGE) 464040c406c7STheodore Ts'o return ext4_collapse_range(inode, offset, len); 464140c406c7STheodore Ts'o 4642331573feSNamjae Jeon if (mode & FALLOC_FL_INSERT_RANGE) 4643331573feSNamjae Jeon return ext4_insert_range(inode, offset, len); 4644331573feSNamjae Jeon 4645b8a86845SLukas Czerner if (mode & FALLOC_FL_ZERO_RANGE) 4646b8a86845SLukas Czerner return ext4_zero_range(file, offset, len, mode); 4647b8a86845SLukas Czerner 46480e8b6879SLukas Czerner trace_ext4_fallocate_enter(inode, offset, len, mode); 46490e8b6879SLukas Czerner lblk = offset >> blkbits; 46500e8b6879SLukas Czerner 4651518eaa63SFabian Frederick max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4652556615dcSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 46530e8b6879SLukas Czerner if (mode & FALLOC_FL_KEEP_SIZE) 46540e8b6879SLukas Czerner flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 46550e8b6879SLukas Czerner 46565955102cSAl Viro inode_lock(inode); 46570e8b6879SLukas Czerner 4658280227a7SDavide Italiano /* 4659280227a7SDavide Italiano * We only support preallocation for extent-based files only 4660280227a7SDavide Italiano */ 4661280227a7SDavide Italiano if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4662280227a7SDavide Italiano ret = -EOPNOTSUPP; 4663280227a7SDavide Italiano goto out; 4664280227a7SDavide Italiano } 4665280227a7SDavide Italiano 46660e8b6879SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) && 46679b02e498SEric Biggers (offset + len > inode->i_size || 466851e3ae81STheodore Ts'o offset + len > EXT4_I(inode)->i_disksize)) { 46690e8b6879SLukas Czerner new_size = offset + len; 46700e8b6879SLukas Czerner ret = inode_newsize_ok(inode, new_size); 
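/*
 * inode_newsize_ok() is the generic VFS check; it should reject sizes
 * beyond RLIMIT_FSIZE or the superblock's s_maxbytes before any blocks
 * are allocated.
 */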
46710e8b6879SLukas Czerner if (ret) 46720e8b6879SLukas Czerner goto out; 46730e8b6879SLukas Czerner } 46740e8b6879SLukas Czerner 467517048e8aSJan Kara /* Wait all existing dio workers, newcomers will block on i_mutex */ 467617048e8aSJan Kara inode_dio_wait(inode); 467717048e8aSJan Kara 467877a2e84dSTahsin Erdogan ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags); 46790e8b6879SLukas Czerner if (ret) 46800e8b6879SLukas Czerner goto out; 46810e8b6879SLukas Czerner 4682c174e6d6SDmitry Monakhov if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { 4683c174e6d6SDmitry Monakhov ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, 4684c174e6d6SDmitry Monakhov EXT4_I(inode)->i_sync_tid); 4685f282ac19SLukas Czerner } 4686f282ac19SLukas Czerner out: 46875955102cSAl Viro inode_unlock(inode); 46880e8b6879SLukas Czerner trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 46890e8b6879SLukas Czerner return ret; 4690a2df2a63SAmit Arora } 46916873fa0dSEric Sandeen 46926873fa0dSEric Sandeen /* 46930031462bSMingming Cao * This function convert a range of blocks to written extents 46940031462bSMingming Cao * The caller of this function will pass the start offset and the size. 46950031462bSMingming Cao * all unwritten extents within this range will be converted to 46960031462bSMingming Cao * written extents. 46970031462bSMingming Cao * 46980031462bSMingming Cao * This function is called from the direct IO end io call back 46990031462bSMingming Cao * function, to convert the fallocated extents after IO is completed. 4700109f5565SMingming * Returns 0 on success. 47010031462bSMingming Cao */ 47026b523df4SJan Kara int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 47036b523df4SJan Kara loff_t offset, ssize_t len) 47040031462bSMingming Cao { 47050031462bSMingming Cao unsigned int max_blocks; 47060031462bSMingming Cao int ret = 0; 47070031462bSMingming Cao int ret2 = 0; 47082ed88685STheodore Ts'o struct ext4_map_blocks map; 4709a00713eaSRitesh Harjani unsigned int blkbits = inode->i_blkbits; 4710a00713eaSRitesh Harjani unsigned int credits = 0; 47110031462bSMingming Cao 47122ed88685STheodore Ts'o map.m_lblk = offset >> blkbits; 4713518eaa63SFabian Frederick max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits); 4714518eaa63SFabian Frederick 4715a00713eaSRitesh Harjani if (!handle) { 47166b523df4SJan Kara /* 47170031462bSMingming Cao * credits to insert 1 extent into extent tree 47180031462bSMingming Cao */ 47190031462bSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks); 47206b523df4SJan Kara } 47210031462bSMingming Cao while (ret >= 0 && ret < max_blocks) { 47222ed88685STheodore Ts'o map.m_lblk += ret; 47232ed88685STheodore Ts'o map.m_len = (max_blocks -= ret); 47246b523df4SJan Kara if (credits) { 47256b523df4SJan Kara handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 47266b523df4SJan Kara credits); 47270031462bSMingming Cao if (IS_ERR(handle)) { 47280031462bSMingming Cao ret = PTR_ERR(handle); 47290031462bSMingming Cao break; 47300031462bSMingming Cao } 47316b523df4SJan Kara } 47322ed88685STheodore Ts'o ret = ext4_map_blocks(handle, inode, &map, 4733c7064ef1SJiaying Zhang EXT4_GET_BLOCKS_IO_CONVERT_EXT); 4734b06acd38SLukas Czerner if (ret <= 0) 4735b06acd38SLukas Czerner ext4_warning(inode->i_sb, 4736b06acd38SLukas Czerner "inode #%lu: block %u: len %u: " 473792b97816STheodore Ts'o "ext4_ext_map_blocks returned %d", 4738b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 473992b97816STheodore Ts'o map.m_len, ret); 
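/*
 * Note that the inode is marked dirty and the handle is stopped below
 * even when the conversion failed; whatever earlier iterations already
 * converted still goes through the journal, and the error then breaks
 * out of the loop.
 */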
47400031462bSMingming Cao ext4_mark_inode_dirty(handle, inode); 47416b523df4SJan Kara if (credits) 47420031462bSMingming Cao ret2 = ext4_journal_stop(handle); 47430031462bSMingming Cao if (ret <= 0 || ret2) 47440031462bSMingming Cao break; 47450031462bSMingming Cao } 47460031462bSMingming Cao return ret > 0 ? ret2 : ret; 47470031462bSMingming Cao } 47486d9c85ebSYongqiang Yang 4749a00713eaSRitesh Harjani int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end) 4750a00713eaSRitesh Harjani { 4751a00713eaSRitesh Harjani int ret, err = 0; 4752c8cc8816SRitesh Harjani struct ext4_io_end_vec *io_end_vec; 4753a00713eaSRitesh Harjani 4754a00713eaSRitesh Harjani /* 4755a00713eaSRitesh Harjani * This is somewhat ugly but the idea is clear: When transaction is 4756a00713eaSRitesh Harjani * reserved, everything goes into it. Otherwise we rather start several 4757a00713eaSRitesh Harjani * smaller transactions for conversion of each extent separately. 4758a00713eaSRitesh Harjani */ 4759a00713eaSRitesh Harjani if (handle) { 4760a00713eaSRitesh Harjani handle = ext4_journal_start_reserved(handle, 4761a00713eaSRitesh Harjani EXT4_HT_EXT_CONVERT); 4762a00713eaSRitesh Harjani if (IS_ERR(handle)) 4763a00713eaSRitesh Harjani return PTR_ERR(handle); 4764a00713eaSRitesh Harjani } 4765a00713eaSRitesh Harjani 4766c8cc8816SRitesh Harjani list_for_each_entry(io_end_vec, &io_end->list_vec, list) { 4767a00713eaSRitesh Harjani ret = ext4_convert_unwritten_extents(handle, io_end->inode, 4768c8cc8816SRitesh Harjani io_end_vec->offset, 4769c8cc8816SRitesh Harjani io_end_vec->size); 4770c8cc8816SRitesh Harjani if (ret) 4771c8cc8816SRitesh Harjani break; 4772c8cc8816SRitesh Harjani } 4773c8cc8816SRitesh Harjani 4774a00713eaSRitesh Harjani if (handle) 4775a00713eaSRitesh Harjani err = ext4_journal_stop(handle); 4776a00713eaSRitesh Harjani 4777a00713eaSRitesh Harjani return ret < 0 ? ret : err; 4778a00713eaSRitesh Harjani } 4779a00713eaSRitesh Harjani 4780d3b6f23fSRitesh Harjani static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap) 47816873fa0dSEric Sandeen { 47826873fa0dSEric Sandeen __u64 physical = 0; 4783d3b6f23fSRitesh Harjani __u64 length = 0; 47846873fa0dSEric Sandeen int blockbits = inode->i_sb->s_blocksize_bits; 47856873fa0dSEric Sandeen int error = 0; 4786d3b6f23fSRitesh Harjani u16 iomap_type; 47876873fa0dSEric Sandeen 47886873fa0dSEric Sandeen /* in-inode? 
*/ 478919f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 47906873fa0dSEric Sandeen struct ext4_iloc iloc; 47916873fa0dSEric Sandeen int offset; /* offset of xattr in inode */ 47926873fa0dSEric Sandeen 47936873fa0dSEric Sandeen error = ext4_get_inode_loc(inode, &iloc); 47946873fa0dSEric Sandeen if (error) 47956873fa0dSEric Sandeen return error; 4796a60697f4SJan Kara physical = (__u64)iloc.bh->b_blocknr << blockbits; 47976873fa0dSEric Sandeen offset = EXT4_GOOD_OLD_INODE_SIZE + 47986873fa0dSEric Sandeen EXT4_I(inode)->i_extra_isize; 47996873fa0dSEric Sandeen physical += offset; 48006873fa0dSEric Sandeen length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 4801fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 4802d3b6f23fSRitesh Harjani iomap_type = IOMAP_INLINE; 4803d3b6f23fSRitesh Harjani } else if (EXT4_I(inode)->i_file_acl) { /* external block */ 4804a60697f4SJan Kara physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 48056873fa0dSEric Sandeen length = inode->i_sb->s_blocksize; 4806d3b6f23fSRitesh Harjani iomap_type = IOMAP_MAPPED; 4807d3b6f23fSRitesh Harjani } else { 4808d3b6f23fSRitesh Harjani /* no in-inode or external block for xattr, so return -ENOENT */ 4809d3b6f23fSRitesh Harjani error = -ENOENT; 4810d3b6f23fSRitesh Harjani goto out; 48116873fa0dSEric Sandeen } 48126873fa0dSEric Sandeen 4813d3b6f23fSRitesh Harjani iomap->addr = physical; 4814d3b6f23fSRitesh Harjani iomap->offset = 0; 4815d3b6f23fSRitesh Harjani iomap->length = length; 4816d3b6f23fSRitesh Harjani iomap->type = iomap_type; 4817d3b6f23fSRitesh Harjani iomap->flags = 0; 4818d3b6f23fSRitesh Harjani out: 4819d3b6f23fSRitesh Harjani return error; 48206873fa0dSEric Sandeen } 48216873fa0dSEric Sandeen 4822d3b6f23fSRitesh Harjani static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset, 4823d3b6f23fSRitesh Harjani loff_t length, unsigned flags, 4824d3b6f23fSRitesh Harjani struct iomap *iomap, struct iomap *srcmap) 4825d3b6f23fSRitesh Harjani { 4826d3b6f23fSRitesh Harjani int error; 4827d3b6f23fSRitesh Harjani 4828d3b6f23fSRitesh Harjani error = ext4_iomap_xattr_fiemap(inode, iomap); 4829d3b6f23fSRitesh Harjani if (error == 0 && (offset >= iomap->length)) 4830d3b6f23fSRitesh Harjani error = -ENOENT; 4831d3b6f23fSRitesh Harjani return error; 4832d3b6f23fSRitesh Harjani } 4833d3b6f23fSRitesh Harjani 4834d3b6f23fSRitesh Harjani static const struct iomap_ops ext4_iomap_xattr_ops = { 4835d3b6f23fSRitesh Harjani .iomap_begin = ext4_iomap_xattr_begin, 4836d3b6f23fSRitesh Harjani }; 4837d3b6f23fSRitesh Harjani 4838d3b6f23fSRitesh Harjani static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4839d3b6f23fSRitesh Harjani __u64 start, __u64 len, bool from_es_cache) 48406873fa0dSEric Sandeen { 48416873fa0dSEric Sandeen ext4_lblk_t start_blk; 4842bb5835edSTheodore Ts'o u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR; 48436873fa0dSEric Sandeen int error = 0; 48446873fa0dSEric Sandeen 48457869a4a6STheodore Ts'o if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 48467869a4a6STheodore Ts'o error = ext4_ext_precache(inode); 48477869a4a6STheodore Ts'o if (error) 48487869a4a6STheodore Ts'o return error; 4849bb5835edSTheodore Ts'o fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; 48507869a4a6STheodore Ts'o } 48517869a4a6STheodore Ts'o 4852d3b6f23fSRitesh Harjani if (from_es_cache) 4853bb5835edSTheodore Ts'o ext4_fiemap_flags &= FIEMAP_FLAG_XATTR; 4854d3b6f23fSRitesh Harjani 4855bb5835edSTheodore Ts'o if (fiemap_check_flags(fieinfo, ext4_fiemap_flags)) 48566873fa0dSEric Sandeen return 
-EBADR; 48576873fa0dSEric Sandeen 48586873fa0dSEric Sandeen if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 4859d3b6f23fSRitesh Harjani fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; 4860d3b6f23fSRitesh Harjani error = iomap_fiemap(inode, fieinfo, start, len, 4861d3b6f23fSRitesh Harjani &ext4_iomap_xattr_ops); 4862d3b6f23fSRitesh Harjani } else if (!from_es_cache) { 4863d3b6f23fSRitesh Harjani error = iomap_fiemap(inode, fieinfo, start, len, 4864d3b6f23fSRitesh Harjani &ext4_iomap_report_ops); 48656873fa0dSEric Sandeen } else { 4866aca92ff6SLeonard Michlmayr ext4_lblk_t len_blks; 4867aca92ff6SLeonard Michlmayr __u64 last_blk; 4868aca92ff6SLeonard Michlmayr 48696873fa0dSEric Sandeen start_blk = start >> inode->i_sb->s_blocksize_bits; 4870aca92ff6SLeonard Michlmayr last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 4871f17722f9SLukas Czerner if (last_blk >= EXT_MAX_BLOCKS) 4872f17722f9SLukas Czerner last_blk = EXT_MAX_BLOCKS-1; 4873aca92ff6SLeonard Michlmayr len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 48746873fa0dSEric Sandeen 48756873fa0dSEric Sandeen /* 487691dd8c11SLukas Czerner * Walk the extent tree gathering extent information 487791dd8c11SLukas Czerner * and pushing extents back to the user. 48786873fa0dSEric Sandeen */ 4879d3b6f23fSRitesh Harjani error = ext4_fill_es_cache_info(inode, start_blk, len_blks, 4880d3b6f23fSRitesh Harjani fieinfo); 48816873fa0dSEric Sandeen } 48826873fa0dSEric Sandeen return error; 48836873fa0dSEric Sandeen } 48849eb79482SNamjae Jeon 4885bb5835edSTheodore Ts'o int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4886bb5835edSTheodore Ts'o __u64 start, __u64 len) 4887bb5835edSTheodore Ts'o { 4888d3b6f23fSRitesh Harjani return _ext4_fiemap(inode, fieinfo, start, len, false); 4889bb5835edSTheodore Ts'o } 4890bb5835edSTheodore Ts'o 4891bb5835edSTheodore Ts'o int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, 4892bb5835edSTheodore Ts'o __u64 start, __u64 len) 4893bb5835edSTheodore Ts'o { 4894bb5835edSTheodore Ts'o if (ext4_has_inline_data(inode)) { 4895bb5835edSTheodore Ts'o int has_inline; 4896bb5835edSTheodore Ts'o 4897bb5835edSTheodore Ts'o down_read(&EXT4_I(inode)->xattr_sem); 4898bb5835edSTheodore Ts'o has_inline = ext4_has_inline_data(inode); 4899bb5835edSTheodore Ts'o up_read(&EXT4_I(inode)->xattr_sem); 4900bb5835edSTheodore Ts'o if (has_inline) 4901bb5835edSTheodore Ts'o return 0; 4902bb5835edSTheodore Ts'o } 4903bb5835edSTheodore Ts'o 4904d3b6f23fSRitesh Harjani return _ext4_fiemap(inode, fieinfo, start, len, true); 4905bb5835edSTheodore Ts'o } 4906bb5835edSTheodore Ts'o 4907bb5835edSTheodore Ts'o 49089eb79482SNamjae Jeon /* 49099eb79482SNamjae Jeon * ext4_access_path: 49109eb79482SNamjae Jeon * Function to access the path buffer for marking it dirty. 49119eb79482SNamjae Jeon * It also checks if there are sufficient credits left in the journal handle 49129eb79482SNamjae Jeon * to update path. 
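 * The fixed estimate used below works out to 3 blocks (leaf,
 * superblock, inode) plus 2 for each of the two assumed block groups,
 * i.e. 3 + 2 * 2 = 7, which is the value passed to
 * ext4_datasem_ensure_credits().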
49139eb79482SNamjae Jeon */ 49149eb79482SNamjae Jeon static int 49159eb79482SNamjae Jeon ext4_access_path(handle_t *handle, struct inode *inode, 49169eb79482SNamjae Jeon struct ext4_ext_path *path) 49179eb79482SNamjae Jeon { 49189eb79482SNamjae Jeon int credits, err; 49199eb79482SNamjae Jeon 49209eb79482SNamjae Jeon if (!ext4_handle_valid(handle)) 49219eb79482SNamjae Jeon return 0; 49229eb79482SNamjae Jeon 49239eb79482SNamjae Jeon /* 49249eb79482SNamjae Jeon * Check if need to extend journal credits 49259eb79482SNamjae Jeon * 3 for leaf, sb, and inode plus 2 (bmap and group 49269eb79482SNamjae Jeon * descriptor) for each block group; assume two block 49279eb79482SNamjae Jeon * groups 49289eb79482SNamjae Jeon */ 49299eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 493083448bdfSJan Kara err = ext4_datasem_ensure_credits(handle, inode, 7, credits, 0); 4931a4130367SJan Kara if (err < 0) 49329eb79482SNamjae Jeon return err; 49339eb79482SNamjae Jeon 49349eb79482SNamjae Jeon err = ext4_ext_get_access(handle, inode, path); 49359eb79482SNamjae Jeon return err; 49369eb79482SNamjae Jeon } 49379eb79482SNamjae Jeon 49389eb79482SNamjae Jeon /* 49399eb79482SNamjae Jeon * ext4_ext_shift_path_extents: 49409eb79482SNamjae Jeon * Shift the extents of a path structure lying between path[depth].p_ext 4941331573feSNamjae Jeon * and EXT_LAST_EXTENT(path[depth].p_hdr), by @shift blocks. @SHIFT tells 4942331573feSNamjae Jeon * if it is right shift or left shift operation. 49439eb79482SNamjae Jeon */ 49449eb79482SNamjae Jeon static int 49459eb79482SNamjae Jeon ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 49469eb79482SNamjae Jeon struct inode *inode, handle_t *handle, 4947331573feSNamjae Jeon enum SHIFT_DIRECTION SHIFT) 49489eb79482SNamjae Jeon { 49499eb79482SNamjae Jeon int depth, err = 0; 49509eb79482SNamjae Jeon struct ext4_extent *ex_start, *ex_last; 49514756ee18Szhengbin bool update = false; 49529eb79482SNamjae Jeon depth = path->p_depth; 49539eb79482SNamjae Jeon 49549eb79482SNamjae Jeon while (depth >= 0) { 49559eb79482SNamjae Jeon if (depth == path->p_depth) { 49569eb79482SNamjae Jeon ex_start = path[depth].p_ext; 49579eb79482SNamjae Jeon if (!ex_start) 49586a797d27SDarrick J. Wong return -EFSCORRUPTED; 49599eb79482SNamjae Jeon 49609eb79482SNamjae Jeon ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 49619eb79482SNamjae Jeon 49629eb79482SNamjae Jeon err = ext4_access_path(handle, inode, path + depth); 49639eb79482SNamjae Jeon if (err) 49649eb79482SNamjae Jeon goto out; 49659eb79482SNamjae Jeon 49669eb79482SNamjae Jeon if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) 49674756ee18Szhengbin update = true; 49689eb79482SNamjae Jeon 49699eb79482SNamjae Jeon while (ex_start <= ex_last) { 4970331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) { 4971331573feSNamjae Jeon le32_add_cpu(&ex_start->ee_block, 4972331573feSNamjae Jeon -shift); 49736dd834efSLukas Czerner /* Try to merge to the left. 
*/ 49746dd834efSLukas Czerner if ((ex_start > 4975331573feSNamjae Jeon EXT_FIRST_EXTENT(path[depth].p_hdr)) 4976331573feSNamjae Jeon && 49776dd834efSLukas Czerner ext4_ext_try_to_merge_right(inode, 49789eb79482SNamjae Jeon path, ex_start - 1)) 49799eb79482SNamjae Jeon ex_last--; 49806dd834efSLukas Czerner else 49819eb79482SNamjae Jeon ex_start++; 4982331573feSNamjae Jeon } else { 4983331573feSNamjae Jeon le32_add_cpu(&ex_last->ee_block, shift); 4984331573feSNamjae Jeon ext4_ext_try_to_merge_right(inode, path, 4985331573feSNamjae Jeon ex_last); 4986331573feSNamjae Jeon ex_last--; 4987331573feSNamjae Jeon } 49889eb79482SNamjae Jeon } 49899eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth); 49909eb79482SNamjae Jeon if (err) 49919eb79482SNamjae Jeon goto out; 49929eb79482SNamjae Jeon 49939eb79482SNamjae Jeon if (--depth < 0 || !update) 49949eb79482SNamjae Jeon break; 49959eb79482SNamjae Jeon } 49969eb79482SNamjae Jeon 49979eb79482SNamjae Jeon /* Update index too */ 49989eb79482SNamjae Jeon err = ext4_access_path(handle, inode, path + depth); 49999eb79482SNamjae Jeon if (err) 50009eb79482SNamjae Jeon goto out; 50019eb79482SNamjae Jeon 5002331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) 5003847c6c42SZheng Liu le32_add_cpu(&path[depth].p_idx->ei_block, -shift); 5004331573feSNamjae Jeon else 5005331573feSNamjae Jeon le32_add_cpu(&path[depth].p_idx->ei_block, shift); 50069eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth); 50079eb79482SNamjae Jeon if (err) 50089eb79482SNamjae Jeon goto out; 50099eb79482SNamjae Jeon 50109eb79482SNamjae Jeon /* we are done if current index is not a starting index */ 50119eb79482SNamjae Jeon if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) 50129eb79482SNamjae Jeon break; 50139eb79482SNamjae Jeon 50149eb79482SNamjae Jeon depth--; 50159eb79482SNamjae Jeon } 50169eb79482SNamjae Jeon 50179eb79482SNamjae Jeon out: 50189eb79482SNamjae Jeon return err; 50199eb79482SNamjae Jeon } 50209eb79482SNamjae Jeon 50219eb79482SNamjae Jeon /* 50229eb79482SNamjae Jeon * ext4_ext_shift_extents: 5023331573feSNamjae Jeon * All the extents which lies in the range from @start to the last allocated 5024331573feSNamjae Jeon * block for the @inode are shifted either towards left or right (depending 5025331573feSNamjae Jeon * upon @SHIFT) by @shift blocks. 50269eb79482SNamjae Jeon * On success, 0 is returned, error otherwise. 
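 * For SHIFT_LEFT the extents are walked from @start forward to the
 * last allocated block; for SHIFT_RIGHT the walk starts at the last
 * allocated block and proceeds backwards towards @start (see the
 * iterator set-up below).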
50279eb79482SNamjae Jeon */ 50289eb79482SNamjae Jeon static int 50299eb79482SNamjae Jeon ext4_ext_shift_extents(struct inode *inode, handle_t *handle, 5030331573feSNamjae Jeon ext4_lblk_t start, ext4_lblk_t shift, 5031331573feSNamjae Jeon enum SHIFT_DIRECTION SHIFT) 50329eb79482SNamjae Jeon { 50339eb79482SNamjae Jeon struct ext4_ext_path *path; 50349eb79482SNamjae Jeon int ret = 0, depth; 50359eb79482SNamjae Jeon struct ext4_extent *extent; 5036331573feSNamjae Jeon ext4_lblk_t stop, *iterator, ex_start, ex_end; 50379eb79482SNamjae Jeon 50389eb79482SNamjae Jeon /* Let path point to the last extent */ 503903e916faSRoman Pen path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 504003e916faSRoman Pen EXT4_EX_NOCACHE); 50419eb79482SNamjae Jeon if (IS_ERR(path)) 50429eb79482SNamjae Jeon return PTR_ERR(path); 50439eb79482SNamjae Jeon 50449eb79482SNamjae Jeon depth = path->p_depth; 50459eb79482SNamjae Jeon extent = path[depth].p_ext; 5046ee4bd0d9STheodore Ts'o if (!extent) 5047ee4bd0d9STheodore Ts'o goto out; 50489eb79482SNamjae Jeon 50492a9b8cbaSRoman Pen stop = le32_to_cpu(extent->ee_block); 50509eb79482SNamjae Jeon 50519eb79482SNamjae Jeon /* 5052349fa7d6SEric Biggers * For left shifts, make sure the hole on the left is big enough to 5053349fa7d6SEric Biggers * accommodate the shift. For right shifts, make sure the last extent 5054349fa7d6SEric Biggers * won't be shifted beyond EXT_MAX_BLOCKS. 50559eb79482SNamjae Jeon */ 5056331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) { 505703e916faSRoman Pen path = ext4_find_extent(inode, start - 1, &path, 505803e916faSRoman Pen EXT4_EX_NOCACHE); 50598dc79ec4SDmitry Monakhov if (IS_ERR(path)) 50608dc79ec4SDmitry Monakhov return PTR_ERR(path); 50619eb79482SNamjae Jeon depth = path->p_depth; 50629eb79482SNamjae Jeon extent = path[depth].p_ext; 50638dc79ec4SDmitry Monakhov if (extent) { 5064847c6c42SZheng Liu ex_start = le32_to_cpu(extent->ee_block); 5065847c6c42SZheng Liu ex_end = le32_to_cpu(extent->ee_block) + 5066847c6c42SZheng Liu ext4_ext_get_actual_len(extent); 50678dc79ec4SDmitry Monakhov } else { 50688dc79ec4SDmitry Monakhov ex_start = 0; 50698dc79ec4SDmitry Monakhov ex_end = 0; 50708dc79ec4SDmitry Monakhov } 50719eb79482SNamjae Jeon 50729eb79482SNamjae Jeon if ((start == ex_start && shift > ex_start) || 5073331573feSNamjae Jeon (shift > start - ex_end)) { 5074349fa7d6SEric Biggers ret = -EINVAL; 5075349fa7d6SEric Biggers goto out; 5076349fa7d6SEric Biggers } 5077349fa7d6SEric Biggers } else { 5078349fa7d6SEric Biggers if (shift > EXT_MAX_BLOCKS - 5079349fa7d6SEric Biggers (stop + ext4_ext_get_actual_len(extent))) { 5080349fa7d6SEric Biggers ret = -EINVAL; 5081349fa7d6SEric Biggers goto out; 5082331573feSNamjae Jeon } 5083331573feSNamjae Jeon } 5084331573feSNamjae Jeon 5085331573feSNamjae Jeon /* 5086331573feSNamjae Jeon * In case of left shift, iterator points to start and it is increased 5087331573feSNamjae Jeon * till we reach stop. In case of right shift, iterator points to stop 5088331573feSNamjae Jeon * and it is decreased till we reach start. 5089331573feSNamjae Jeon */ 5090331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) 5091331573feSNamjae Jeon iterator = &start; 5092331573feSNamjae Jeon else 5093331573feSNamjae Jeon iterator = &stop; 50949eb79482SNamjae Jeon 50952a9b8cbaSRoman Pen /* 50962a9b8cbaSRoman Pen * Its safe to start updating extents. Start and stop are unsigned, so 50972a9b8cbaSRoman Pen * in case of right shift if extent with 0 block is reached, iterator 50982a9b8cbaSRoman Pen * becomes NULL to indicate the end of the loop. 
50992a9b8cbaSRoman Pen */ 51002a9b8cbaSRoman Pen while (iterator && start <= stop) { 510103e916faSRoman Pen path = ext4_find_extent(inode, *iterator, &path, 510203e916faSRoman Pen EXT4_EX_NOCACHE); 51039eb79482SNamjae Jeon if (IS_ERR(path)) 51049eb79482SNamjae Jeon return PTR_ERR(path); 51059eb79482SNamjae Jeon depth = path->p_depth; 51069eb79482SNamjae Jeon extent = path[depth].p_ext; 5107a18ed359SDmitry Monakhov if (!extent) { 5108a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 5109331573feSNamjae Jeon (unsigned long) *iterator); 51106a797d27SDarrick J. Wong return -EFSCORRUPTED; 5111a18ed359SDmitry Monakhov } 5112331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT && *iterator > 5113331573feSNamjae Jeon le32_to_cpu(extent->ee_block)) { 51149eb79482SNamjae Jeon /* Hole, move to the next extent */ 5115f8fb4f41SDmitry Monakhov if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { 5116f8fb4f41SDmitry Monakhov path[depth].p_ext++; 5117f8fb4f41SDmitry Monakhov } else { 5118331573feSNamjae Jeon *iterator = ext4_ext_next_allocated_block(path); 5119f8fb4f41SDmitry Monakhov continue; 51209eb79482SNamjae Jeon } 51219eb79482SNamjae Jeon } 5122331573feSNamjae Jeon 5123331573feSNamjae Jeon if (SHIFT == SHIFT_LEFT) { 5124331573feSNamjae Jeon extent = EXT_LAST_EXTENT(path[depth].p_hdr); 5125331573feSNamjae Jeon *iterator = le32_to_cpu(extent->ee_block) + 5126331573feSNamjae Jeon ext4_ext_get_actual_len(extent); 5127331573feSNamjae Jeon } else { 5128331573feSNamjae Jeon extent = EXT_FIRST_EXTENT(path[depth].p_hdr); 51292a9b8cbaSRoman Pen if (le32_to_cpu(extent->ee_block) > 0) 51302a9b8cbaSRoman Pen *iterator = le32_to_cpu(extent->ee_block) - 1; 51312a9b8cbaSRoman Pen else 51322a9b8cbaSRoman Pen /* Beginning is reached, end of the loop */ 51332a9b8cbaSRoman Pen iterator = NULL; 5134331573feSNamjae Jeon /* Update path extent in case we need to stop */ 5135331573feSNamjae Jeon while (le32_to_cpu(extent->ee_block) < start) 5136331573feSNamjae Jeon extent++; 5137331573feSNamjae Jeon path[depth].p_ext = extent; 5138331573feSNamjae Jeon } 51399eb79482SNamjae Jeon ret = ext4_ext_shift_path_extents(path, shift, inode, 5140331573feSNamjae Jeon handle, SHIFT); 51419eb79482SNamjae Jeon if (ret) 51429eb79482SNamjae Jeon break; 51439eb79482SNamjae Jeon } 5144ee4bd0d9STheodore Ts'o out: 5145ee4bd0d9STheodore Ts'o ext4_ext_drop_refs(path); 5146ee4bd0d9STheodore Ts'o kfree(path); 51479eb79482SNamjae Jeon return ret; 51489eb79482SNamjae Jeon } 51499eb79482SNamjae Jeon 51509eb79482SNamjae Jeon /* 51519eb79482SNamjae Jeon * ext4_collapse_range: 51529eb79482SNamjae Jeon * This implements the fallocate's collapse range functionality for ext4 51539eb79482SNamjae Jeon * Returns: 0 and non-zero on error. 51549eb79482SNamjae Jeon */ 515543f81677SEric Biggers static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) 51569eb79482SNamjae Jeon { 51579eb79482SNamjae Jeon struct super_block *sb = inode->i_sb; 51589eb79482SNamjae Jeon ext4_lblk_t punch_start, punch_stop; 51599eb79482SNamjae Jeon handle_t *handle; 51609eb79482SNamjae Jeon unsigned int credits; 5161a8680e0dSNamjae Jeon loff_t new_size, ioffset; 51629eb79482SNamjae Jeon int ret; 51639eb79482SNamjae Jeon 5164b9576fc3STheodore Ts'o /* 5165b9576fc3STheodore Ts'o * We need to test this early because xfstests assumes that a 5166b9576fc3STheodore Ts'o * collapse range of (0, 1) will return EOPNOTSUPP if the file 5167b9576fc3STheodore Ts'o * system does not support collapse range. 
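 * In other words, the extents-support check must come before the
 * cluster-alignment check below: an unaligned (0, 1) request against
 * an indirect-mapped file is expected to fail with -EOPNOTSUPP rather
 * than -EINVAL.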
5168b9576fc3STheodore Ts'o */ 5169b9576fc3STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5170b9576fc3STheodore Ts'o return -EOPNOTSUPP; 5171b9576fc3STheodore Ts'o 51729b02e498SEric Biggers /* Collapse range works only on fs cluster size aligned regions. */ 51739b02e498SEric Biggers if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 51749eb79482SNamjae Jeon return -EINVAL; 51759eb79482SNamjae Jeon 51769eb79482SNamjae Jeon trace_ext4_collapse_range(inode, offset, len); 51779eb79482SNamjae Jeon 51789eb79482SNamjae Jeon punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 51799eb79482SNamjae Jeon punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 51809eb79482SNamjae Jeon 51811ce01c4aSNamjae Jeon /* Call ext4_force_commit to flush all data in case of data=journal. */ 51821ce01c4aSNamjae Jeon if (ext4_should_journal_data(inode)) { 51831ce01c4aSNamjae Jeon ret = ext4_force_commit(inode->i_sb); 51841ce01c4aSNamjae Jeon if (ret) 51851ce01c4aSNamjae Jeon return ret; 51861ce01c4aSNamjae Jeon } 51871ce01c4aSNamjae Jeon 51885955102cSAl Viro inode_lock(inode); 518923fffa92SLukas Czerner /* 519023fffa92SLukas Czerner * There is no need to overlap collapse range with EOF, in which case 519123fffa92SLukas Czerner * it is effectively a truncate operation 519223fffa92SLukas Czerner */ 51939b02e498SEric Biggers if (offset + len >= inode->i_size) { 519423fffa92SLukas Czerner ret = -EINVAL; 519523fffa92SLukas Czerner goto out_mutex; 519623fffa92SLukas Czerner } 519723fffa92SLukas Czerner 51989eb79482SNamjae Jeon /* Currently just for extent based files */ 51999eb79482SNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 52009eb79482SNamjae Jeon ret = -EOPNOTSUPP; 52019eb79482SNamjae Jeon goto out_mutex; 52029eb79482SNamjae Jeon } 52039eb79482SNamjae Jeon 52049eb79482SNamjae Jeon /* Wait for existing dio to complete */ 52059eb79482SNamjae Jeon inode_dio_wait(inode); 52069eb79482SNamjae Jeon 5207ea3d7209SJan Kara /* 5208ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have released from 5209ea3d7209SJan Kara * page cache. 5210ea3d7209SJan Kara */ 5211ea3d7209SJan Kara down_write(&EXT4_I(inode)->i_mmap_sem); 5212430657b6SRoss Zwisler 5213430657b6SRoss Zwisler ret = ext4_break_layouts(inode); 5214430657b6SRoss Zwisler if (ret) 5215430657b6SRoss Zwisler goto out_mmap; 5216430657b6SRoss Zwisler 521732ebffd3SJan Kara /* 521832ebffd3SJan Kara * Need to round down offset to be aligned with page size boundary 521932ebffd3SJan Kara * for page size > block size. 522032ebffd3SJan Kara */ 522132ebffd3SJan Kara ioffset = round_down(offset, PAGE_SIZE); 522232ebffd3SJan Kara /* 522332ebffd3SJan Kara * Write tail of the last page before removed range since it will get 522432ebffd3SJan Kara * removed from the page cache below. 522532ebffd3SJan Kara */ 522632ebffd3SJan Kara ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset); 522732ebffd3SJan Kara if (ret) 522832ebffd3SJan Kara goto out_mmap; 522932ebffd3SJan Kara /* 523032ebffd3SJan Kara * Write data that will be shifted to preserve them when discarding 523132ebffd3SJan Kara * page cache below. We are also protected from pages becoming dirty 523232ebffd3SJan Kara * by i_mmap_sem. 
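 * (The call above already flushed [ioffset, offset); this one flushes
 * everything from offset + len to EOF, i.e. the data that is about to
 * be shifted towards the start of the file.)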
523332ebffd3SJan Kara */ 523432ebffd3SJan Kara ret = filemap_write_and_wait_range(inode->i_mapping, offset + len, 523532ebffd3SJan Kara LLONG_MAX); 523632ebffd3SJan Kara if (ret) 523732ebffd3SJan Kara goto out_mmap; 5238ea3d7209SJan Kara truncate_pagecache(inode, ioffset); 5239ea3d7209SJan Kara 52409eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 52419eb79482SNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 52429eb79482SNamjae Jeon if (IS_ERR(handle)) { 52439eb79482SNamjae Jeon ret = PTR_ERR(handle); 5244ea3d7209SJan Kara goto out_mmap; 52459eb79482SNamjae Jeon } 52469eb79482SNamjae Jeon 52479eb79482SNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem); 52489eb79482SNamjae Jeon ext4_discard_preallocations(inode); 52499eb79482SNamjae Jeon 52509eb79482SNamjae Jeon ret = ext4_es_remove_extent(inode, punch_start, 52512c1d2328SLukas Czerner EXT_MAX_BLOCKS - punch_start); 52529eb79482SNamjae Jeon if (ret) { 52539eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 52549eb79482SNamjae Jeon goto out_stop; 52559eb79482SNamjae Jeon } 52569eb79482SNamjae Jeon 52579eb79482SNamjae Jeon ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); 52589eb79482SNamjae Jeon if (ret) { 52599eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 52609eb79482SNamjae Jeon goto out_stop; 52619eb79482SNamjae Jeon } 5262ef24f6c2SLukas Czerner ext4_discard_preallocations(inode); 52639eb79482SNamjae Jeon 52649eb79482SNamjae Jeon ret = ext4_ext_shift_extents(inode, handle, punch_stop, 5265331573feSNamjae Jeon punch_stop - punch_start, SHIFT_LEFT); 52669eb79482SNamjae Jeon if (ret) { 52679eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 52689eb79482SNamjae Jeon goto out_stop; 52699eb79482SNamjae Jeon } 52709eb79482SNamjae Jeon 52719b02e498SEric Biggers new_size = inode->i_size - len; 52729337d5d3SLukas Czerner i_size_write(inode, new_size); 52739eb79482SNamjae Jeon EXT4_I(inode)->i_disksize = new_size; 52749eb79482SNamjae Jeon 52759eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 52769eb79482SNamjae Jeon if (IS_SYNC(inode)) 52779eb79482SNamjae Jeon ext4_handle_sync(handle); 5278eeca7ea1SDeepa Dinamani inode->i_mtime = inode->i_ctime = current_time(inode); 52799eb79482SNamjae Jeon ext4_mark_inode_dirty(handle, inode); 528067a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 52819eb79482SNamjae Jeon 52829eb79482SNamjae Jeon out_stop: 52839eb79482SNamjae Jeon ext4_journal_stop(handle); 5284ea3d7209SJan Kara out_mmap: 5285ea3d7209SJan Kara up_write(&EXT4_I(inode)->i_mmap_sem); 52869eb79482SNamjae Jeon out_mutex: 52875955102cSAl Viro inode_unlock(inode); 52889eb79482SNamjae Jeon return ret; 52899eb79482SNamjae Jeon } 5290fcf6b1b7SDmitry Monakhov 5291331573feSNamjae Jeon /* 5292331573feSNamjae Jeon * ext4_insert_range: 5293331573feSNamjae Jeon * This function implements the FALLOC_FL_INSERT_RANGE flag of fallocate. 5294331573feSNamjae Jeon * The data blocks starting from @offset to the EOF are shifted by @len 5295331573feSNamjae Jeon * towards right to create a hole in the @inode. Inode size is increased 5296331573feSNamjae Jeon * by len bytes. 5297331573feSNamjae Jeon * Returns 0 on success, error otherwise. 
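 * For example, on a filesystem with 4k clusters, an insert of
 * (offset = 8192, len = 4096) shifts everything at or beyond byte 8192
 * towards higher offsets by one block and grows i_size by 4096.
 * Requests that are not cluster aligned or whose offset is at or past
 * EOF fail with -EINVAL below, and requests that would push the file
 * past s_maxbytes fail with -EFBIG.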
5298331573feSNamjae Jeon */ 529943f81677SEric Biggers static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) 5300331573feSNamjae Jeon { 5301331573feSNamjae Jeon struct super_block *sb = inode->i_sb; 5302331573feSNamjae Jeon handle_t *handle; 5303331573feSNamjae Jeon struct ext4_ext_path *path; 5304331573feSNamjae Jeon struct ext4_extent *extent; 5305331573feSNamjae Jeon ext4_lblk_t offset_lblk, len_lblk, ee_start_lblk = 0; 5306331573feSNamjae Jeon unsigned int credits, ee_len; 5307331573feSNamjae Jeon int ret = 0, depth, split_flag = 0; 5308331573feSNamjae Jeon loff_t ioffset; 5309331573feSNamjae Jeon 5310331573feSNamjae Jeon /* 5311331573feSNamjae Jeon * We need to test this early because xfstests assumes that an 5312331573feSNamjae Jeon * insert range of (0, 1) will return EOPNOTSUPP if the file 5313331573feSNamjae Jeon * system does not support insert range. 5314331573feSNamjae Jeon */ 5315331573feSNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5316331573feSNamjae Jeon return -EOPNOTSUPP; 5317331573feSNamjae Jeon 53189b02e498SEric Biggers /* Insert range works only on fs cluster size aligned regions. */ 53199b02e498SEric Biggers if (!IS_ALIGNED(offset | len, EXT4_CLUSTER_SIZE(sb))) 5320331573feSNamjae Jeon return -EINVAL; 5321331573feSNamjae Jeon 5322331573feSNamjae Jeon trace_ext4_insert_range(inode, offset, len); 5323331573feSNamjae Jeon 5324331573feSNamjae Jeon offset_lblk = offset >> EXT4_BLOCK_SIZE_BITS(sb); 5325331573feSNamjae Jeon len_lblk = len >> EXT4_BLOCK_SIZE_BITS(sb); 5326331573feSNamjae Jeon 5327331573feSNamjae Jeon /* Call ext4_force_commit to flush all data in case of data=journal */ 5328331573feSNamjae Jeon if (ext4_should_journal_data(inode)) { 5329331573feSNamjae Jeon ret = ext4_force_commit(inode->i_sb); 5330331573feSNamjae Jeon if (ret) 5331331573feSNamjae Jeon return ret; 5332331573feSNamjae Jeon } 5333331573feSNamjae Jeon 53345955102cSAl Viro inode_lock(inode); 5335331573feSNamjae Jeon /* Currently just for extent based files */ 5336331573feSNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 5337331573feSNamjae Jeon ret = -EOPNOTSUPP; 5338331573feSNamjae Jeon goto out_mutex; 5339331573feSNamjae Jeon } 5340331573feSNamjae Jeon 53419b02e498SEric Biggers /* Check whether the maximum file size would be exceeded */ 53429b02e498SEric Biggers if (len > inode->i_sb->s_maxbytes - inode->i_size) { 5343331573feSNamjae Jeon ret = -EFBIG; 5344331573feSNamjae Jeon goto out_mutex; 5345331573feSNamjae Jeon } 5346331573feSNamjae Jeon 53479b02e498SEric Biggers /* Offset must be less than i_size */ 53489b02e498SEric Biggers if (offset >= inode->i_size) { 5349331573feSNamjae Jeon ret = -EINVAL; 5350331573feSNamjae Jeon goto out_mutex; 5351331573feSNamjae Jeon } 5352331573feSNamjae Jeon 5353331573feSNamjae Jeon /* Wait for existing dio to complete */ 5354331573feSNamjae Jeon inode_dio_wait(inode); 5355331573feSNamjae Jeon 5356ea3d7209SJan Kara /* 5357ea3d7209SJan Kara * Prevent page faults from reinstantiating pages we have released from 5358ea3d7209SJan Kara * page cache. 5359ea3d7209SJan Kara */ 5360ea3d7209SJan Kara down_write(&EXT4_I(inode)->i_mmap_sem); 5361430657b6SRoss Zwisler 5362430657b6SRoss Zwisler ret = ext4_break_layouts(inode); 5363430657b6SRoss Zwisler if (ret) 5364430657b6SRoss Zwisler goto out_mmap; 5365430657b6SRoss Zwisler 536632ebffd3SJan Kara /* 536732ebffd3SJan Kara * Need to round down to align start offset to page size boundary 536832ebffd3SJan Kara * for page size > block size. 
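 * (With 4k pages and a 4k block size the cluster-aligned offset is
 * already page aligned, so ioffset == offset; the rounding only
 * matters when the page size exceeds the block size, e.g. 64k pages
 * with 4k blocks.)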
536932ebffd3SJan Kara */ 537032ebffd3SJan Kara ioffset = round_down(offset, PAGE_SIZE); 537132ebffd3SJan Kara /* Write out all dirty pages */ 537232ebffd3SJan Kara ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, 537332ebffd3SJan Kara LLONG_MAX); 537432ebffd3SJan Kara if (ret) 537532ebffd3SJan Kara goto out_mmap; 5376ea3d7209SJan Kara truncate_pagecache(inode, ioffset); 5377ea3d7209SJan Kara 5378331573feSNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 5379331573feSNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 5380331573feSNamjae Jeon if (IS_ERR(handle)) { 5381331573feSNamjae Jeon ret = PTR_ERR(handle); 5382ea3d7209SJan Kara goto out_mmap; 5383331573feSNamjae Jeon } 5384331573feSNamjae Jeon 5385331573feSNamjae Jeon /* Expand file to avoid data loss if there is error while shifting */ 5386331573feSNamjae Jeon inode->i_size += len; 5387331573feSNamjae Jeon EXT4_I(inode)->i_disksize += len; 5388eeca7ea1SDeepa Dinamani inode->i_mtime = inode->i_ctime = current_time(inode); 5389331573feSNamjae Jeon ret = ext4_mark_inode_dirty(handle, inode); 5390331573feSNamjae Jeon if (ret) 5391331573feSNamjae Jeon goto out_stop; 5392331573feSNamjae Jeon 5393331573feSNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem); 5394331573feSNamjae Jeon ext4_discard_preallocations(inode); 5395331573feSNamjae Jeon 5396331573feSNamjae Jeon path = ext4_find_extent(inode, offset_lblk, NULL, 0); 5397331573feSNamjae Jeon if (IS_ERR(path)) { 5398331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5399331573feSNamjae Jeon goto out_stop; 5400331573feSNamjae Jeon } 5401331573feSNamjae Jeon 5402331573feSNamjae Jeon depth = ext_depth(inode); 5403331573feSNamjae Jeon extent = path[depth].p_ext; 5404331573feSNamjae Jeon if (extent) { 5405331573feSNamjae Jeon ee_start_lblk = le32_to_cpu(extent->ee_block); 5406331573feSNamjae Jeon ee_len = ext4_ext_get_actual_len(extent); 5407331573feSNamjae Jeon 5408331573feSNamjae Jeon /* 5409331573feSNamjae Jeon * If offset_lblk is not the starting block of extent, split 5410331573feSNamjae Jeon * the extent @offset_lblk 5411331573feSNamjae Jeon */ 5412331573feSNamjae Jeon if ((offset_lblk > ee_start_lblk) && 5413331573feSNamjae Jeon (offset_lblk < (ee_start_lblk + ee_len))) { 5414331573feSNamjae Jeon if (ext4_ext_is_unwritten(extent)) 5415331573feSNamjae Jeon split_flag = EXT4_EXT_MARK_UNWRIT1 | 5416331573feSNamjae Jeon EXT4_EXT_MARK_UNWRIT2; 5417331573feSNamjae Jeon ret = ext4_split_extent_at(handle, inode, &path, 5418331573feSNamjae Jeon offset_lblk, split_flag, 5419331573feSNamjae Jeon EXT4_EX_NOCACHE | 5420331573feSNamjae Jeon EXT4_GET_BLOCKS_PRE_IO | 5421331573feSNamjae Jeon EXT4_GET_BLOCKS_METADATA_NOFAIL); 5422331573feSNamjae Jeon } 5423331573feSNamjae Jeon 5424331573feSNamjae Jeon ext4_ext_drop_refs(path); 5425331573feSNamjae Jeon kfree(path); 5426331573feSNamjae Jeon if (ret < 0) { 5427331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5428331573feSNamjae Jeon goto out_stop; 5429331573feSNamjae Jeon } 5430edf15aa1SFabian Frederick } else { 5431edf15aa1SFabian Frederick ext4_ext_drop_refs(path); 5432edf15aa1SFabian Frederick kfree(path); 5433331573feSNamjae Jeon } 5434331573feSNamjae Jeon 5435331573feSNamjae Jeon ret = ext4_es_remove_extent(inode, offset_lblk, 5436331573feSNamjae Jeon EXT_MAX_BLOCKS - offset_lblk); 5437331573feSNamjae Jeon if (ret) { 5438331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 5439331573feSNamjae Jeon goto out_stop; 5440331573feSNamjae Jeon } 5441331573feSNamjae Jeon 5442331573feSNamjae Jeon 
/*
5443331573feSNamjae Jeon * If offset_lblk lies in a hole at the start of the file, use
5444331573feSNamjae Jeon * ee_start_lblk to shift extents
5445331573feSNamjae Jeon */
5446331573feSNamjae Jeon ret = ext4_ext_shift_extents(inode, handle,
5447331573feSNamjae Jeon ee_start_lblk > offset_lblk ? ee_start_lblk : offset_lblk,
5448331573feSNamjae Jeon len_lblk, SHIFT_RIGHT);
5449331573feSNamjae Jeon
5450331573feSNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem);
5451331573feSNamjae Jeon if (IS_SYNC(inode))
5452331573feSNamjae Jeon ext4_handle_sync(handle);
545367a7d5f5SJan Kara if (ret >= 0)
545467a7d5f5SJan Kara ext4_update_inode_fsync_trans(handle, inode, 1);
5455331573feSNamjae Jeon
5456331573feSNamjae Jeon out_stop:
5457331573feSNamjae Jeon ext4_journal_stop(handle);
5458ea3d7209SJan Kara out_mmap:
5459ea3d7209SJan Kara up_write(&EXT4_I(inode)->i_mmap_sem);
5460331573feSNamjae Jeon out_mutex:
54615955102cSAl Viro inode_unlock(inode);
5462331573feSNamjae Jeon return ret;
5463331573feSNamjae Jeon }
5464331573feSNamjae Jeon
5465fcf6b1b7SDmitry Monakhov /**
5466c60990b3STheodore Ts'o * ext4_swap_extents() - Swap extents between two inodes
5467c60990b3STheodore Ts'o * @handle: handle for this transaction
5468fcf6b1b7SDmitry Monakhov * @inode1: First inode
5469fcf6b1b7SDmitry Monakhov * @inode2: Second inode
5470fcf6b1b7SDmitry Monakhov * @lblk1: Start block for first inode
5471fcf6b1b7SDmitry Monakhov * @lblk2: Start block for second inode
5472fcf6b1b7SDmitry Monakhov * @count: Number of blocks to swap
5473dcae058aSzhenwei.pi * @unwritten: Mark second inode's extents as unwritten after swap
5474fcf6b1b7SDmitry Monakhov * @erp: Pointer to save error value
5475fcf6b1b7SDmitry Monakhov *
5476fcf6b1b7SDmitry Monakhov * This helper routine does exactly what its name promises: it swaps
5477fcf6b1b7SDmitry Monakhov * extents. All other concerns such as page-cache locking consistency,
5478fcf6b1b7SDmitry Monakhov * bh mapping consistency or extent data copying must be handled by the caller.
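 * The return value is the number of blocks that were actually swapped,
 * which may be less than @count if an error occurs part way through;
 * errors are reported through @erp rather than the return value.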
5479fcf6b1b7SDmitry Monakhov * Locking: 5480fcf6b1b7SDmitry Monakhov * i_mutex is held for both inodes 5481fcf6b1b7SDmitry Monakhov * i_data_sem is locked for write for both inodes 5482fcf6b1b7SDmitry Monakhov * Assumptions: 5483fcf6b1b7SDmitry Monakhov * All pages from requested range are locked for both inodes 5484fcf6b1b7SDmitry Monakhov */ 5485fcf6b1b7SDmitry Monakhov int 5486fcf6b1b7SDmitry Monakhov ext4_swap_extents(handle_t *handle, struct inode *inode1, 5487fcf6b1b7SDmitry Monakhov struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2, 5488fcf6b1b7SDmitry Monakhov ext4_lblk_t count, int unwritten, int *erp) 5489fcf6b1b7SDmitry Monakhov { 5490fcf6b1b7SDmitry Monakhov struct ext4_ext_path *path1 = NULL; 5491fcf6b1b7SDmitry Monakhov struct ext4_ext_path *path2 = NULL; 5492fcf6b1b7SDmitry Monakhov int replaced_count = 0; 5493fcf6b1b7SDmitry Monakhov 5494fcf6b1b7SDmitry Monakhov BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem)); 5495fcf6b1b7SDmitry Monakhov BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem)); 54965955102cSAl Viro BUG_ON(!inode_is_locked(inode1)); 54975955102cSAl Viro BUG_ON(!inode_is_locked(inode2)); 5498fcf6b1b7SDmitry Monakhov 5499fcf6b1b7SDmitry Monakhov *erp = ext4_es_remove_extent(inode1, lblk1, count); 550019008f6dSTheodore Ts'o if (unlikely(*erp)) 5501fcf6b1b7SDmitry Monakhov return 0; 5502fcf6b1b7SDmitry Monakhov *erp = ext4_es_remove_extent(inode2, lblk2, count); 550319008f6dSTheodore Ts'o if (unlikely(*erp)) 5504fcf6b1b7SDmitry Monakhov return 0; 5505fcf6b1b7SDmitry Monakhov 5506fcf6b1b7SDmitry Monakhov while (count) { 5507fcf6b1b7SDmitry Monakhov struct ext4_extent *ex1, *ex2, tmp_ex; 5508fcf6b1b7SDmitry Monakhov ext4_lblk_t e1_blk, e2_blk; 5509fcf6b1b7SDmitry Monakhov int e1_len, e2_len, len; 5510fcf6b1b7SDmitry Monakhov int split = 0; 5511fcf6b1b7SDmitry Monakhov 5512ed8a1a76STheodore Ts'o path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE); 5513a1c83681SViresh Kumar if (IS_ERR(path1)) { 5514fcf6b1b7SDmitry Monakhov *erp = PTR_ERR(path1); 551519008f6dSTheodore Ts'o path1 = NULL; 551619008f6dSTheodore Ts'o finish: 551719008f6dSTheodore Ts'o count = 0; 551819008f6dSTheodore Ts'o goto repeat; 5519fcf6b1b7SDmitry Monakhov } 5520ed8a1a76STheodore Ts'o path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE); 5521a1c83681SViresh Kumar if (IS_ERR(path2)) { 5522fcf6b1b7SDmitry Monakhov *erp = PTR_ERR(path2); 552319008f6dSTheodore Ts'o path2 = NULL; 552419008f6dSTheodore Ts'o goto finish; 5525fcf6b1b7SDmitry Monakhov } 5526fcf6b1b7SDmitry Monakhov ex1 = path1[path1->p_depth].p_ext; 5527fcf6b1b7SDmitry Monakhov ex2 = path2[path2->p_depth].p_ext; 5528fcf6b1b7SDmitry Monakhov /* Do we have somthing to swap ? 
*/ 5529fcf6b1b7SDmitry Monakhov if (unlikely(!ex2 || !ex1)) 553019008f6dSTheodore Ts'o goto finish; 5531fcf6b1b7SDmitry Monakhov 5532fcf6b1b7SDmitry Monakhov e1_blk = le32_to_cpu(ex1->ee_block); 5533fcf6b1b7SDmitry Monakhov e2_blk = le32_to_cpu(ex2->ee_block); 5534fcf6b1b7SDmitry Monakhov e1_len = ext4_ext_get_actual_len(ex1); 5535fcf6b1b7SDmitry Monakhov e2_len = ext4_ext_get_actual_len(ex2); 5536fcf6b1b7SDmitry Monakhov 5537fcf6b1b7SDmitry Monakhov /* Hole handling */ 5538fcf6b1b7SDmitry Monakhov if (!in_range(lblk1, e1_blk, e1_len) || 5539fcf6b1b7SDmitry Monakhov !in_range(lblk2, e2_blk, e2_len)) { 5540fcf6b1b7SDmitry Monakhov ext4_lblk_t next1, next2; 5541fcf6b1b7SDmitry Monakhov 5542fcf6b1b7SDmitry Monakhov /* if hole after extent, then go to next extent */ 5543fcf6b1b7SDmitry Monakhov next1 = ext4_ext_next_allocated_block(path1); 5544fcf6b1b7SDmitry Monakhov next2 = ext4_ext_next_allocated_block(path2); 5545fcf6b1b7SDmitry Monakhov /* If hole before extent, then shift to that extent */ 5546fcf6b1b7SDmitry Monakhov if (e1_blk > lblk1) 5547fcf6b1b7SDmitry Monakhov next1 = e1_blk; 5548fcf6b1b7SDmitry Monakhov if (e2_blk > lblk2) 55494e562013SManinder Singh next2 = e2_blk; 5550fcf6b1b7SDmitry Monakhov /* Do we have something to swap */ 5551fcf6b1b7SDmitry Monakhov if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS) 555219008f6dSTheodore Ts'o goto finish; 5553fcf6b1b7SDmitry Monakhov /* Move to the rightest boundary */ 5554fcf6b1b7SDmitry Monakhov len = next1 - lblk1; 5555fcf6b1b7SDmitry Monakhov if (len < next2 - lblk2) 5556fcf6b1b7SDmitry Monakhov len = next2 - lblk2; 5557fcf6b1b7SDmitry Monakhov if (len > count) 5558fcf6b1b7SDmitry Monakhov len = count; 5559fcf6b1b7SDmitry Monakhov lblk1 += len; 5560fcf6b1b7SDmitry Monakhov lblk2 += len; 5561fcf6b1b7SDmitry Monakhov count -= len; 5562fcf6b1b7SDmitry Monakhov goto repeat; 5563fcf6b1b7SDmitry Monakhov } 5564fcf6b1b7SDmitry Monakhov 5565fcf6b1b7SDmitry Monakhov /* Prepare left boundary */ 5566fcf6b1b7SDmitry Monakhov if (e1_blk < lblk1) { 5567fcf6b1b7SDmitry Monakhov split = 1; 5568fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode1, 5569dfe50809STheodore Ts'o &path1, lblk1, 0); 557019008f6dSTheodore Ts'o if (unlikely(*erp)) 557119008f6dSTheodore Ts'o goto finish; 5572fcf6b1b7SDmitry Monakhov } 5573fcf6b1b7SDmitry Monakhov if (e2_blk < lblk2) { 5574fcf6b1b7SDmitry Monakhov split = 1; 5575fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode2, 5576dfe50809STheodore Ts'o &path2, lblk2, 0); 557719008f6dSTheodore Ts'o if (unlikely(*erp)) 557819008f6dSTheodore Ts'o goto finish; 5579fcf6b1b7SDmitry Monakhov } 5580dfe50809STheodore Ts'o /* ext4_split_extent_at() may result in leaf extent split, 5581fcf6b1b7SDmitry Monakhov * path must to be revalidated. 
*/
5582fcf6b1b7SDmitry Monakhov if (split)
5583fcf6b1b7SDmitry Monakhov goto repeat;
5584fcf6b1b7SDmitry Monakhov
5585fcf6b1b7SDmitry Monakhov /* Prepare right boundary */
5586fcf6b1b7SDmitry Monakhov len = count;
5587fcf6b1b7SDmitry Monakhov if (len > e1_blk + e1_len - lblk1)
5588fcf6b1b7SDmitry Monakhov len = e1_blk + e1_len - lblk1;
5589fcf6b1b7SDmitry Monakhov if (len > e2_blk + e2_len - lblk2)
5590fcf6b1b7SDmitry Monakhov len = e2_blk + e2_len - lblk2;
5591fcf6b1b7SDmitry Monakhov
5592fcf6b1b7SDmitry Monakhov if (len != e1_len) {
5593fcf6b1b7SDmitry Monakhov split = 1;
5594fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode1,
5595dfe50809STheodore Ts'o &path1, lblk1 + len, 0);
559619008f6dSTheodore Ts'o if (unlikely(*erp))
559719008f6dSTheodore Ts'o goto finish;
5598fcf6b1b7SDmitry Monakhov }
5599fcf6b1b7SDmitry Monakhov if (len != e2_len) {
5600fcf6b1b7SDmitry Monakhov split = 1;
5601fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode2,
5602dfe50809STheodore Ts'o &path2, lblk2 + len, 0);
5603fcf6b1b7SDmitry Monakhov if (*erp)
560419008f6dSTheodore Ts'o goto finish;
5605fcf6b1b7SDmitry Monakhov }
5606dfe50809STheodore Ts'o /* ext4_split_extent_at() may result in a leaf extent split,
5607fcf6b1b7SDmitry Monakhov * so the path must be revalidated. */
5608fcf6b1b7SDmitry Monakhov if (split)
5609fcf6b1b7SDmitry Monakhov goto repeat;
5610fcf6b1b7SDmitry Monakhov
5611fcf6b1b7SDmitry Monakhov BUG_ON(e2_len != e1_len);
5612fcf6b1b7SDmitry Monakhov *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
561319008f6dSTheodore Ts'o if (unlikely(*erp))
561419008f6dSTheodore Ts'o goto finish;
5615fcf6b1b7SDmitry Monakhov *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
561619008f6dSTheodore Ts'o if (unlikely(*erp))
561719008f6dSTheodore Ts'o goto finish;
5618fcf6b1b7SDmitry Monakhov
5619fcf6b1b7SDmitry Monakhov /* Both extents are fully inside boundaries. Swap them now */
5620fcf6b1b7SDmitry Monakhov tmp_ex = *ex1;
5621fcf6b1b7SDmitry Monakhov ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5622fcf6b1b7SDmitry Monakhov ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5623fcf6b1b7SDmitry Monakhov ex1->ee_len = cpu_to_le16(e2_len);
5624fcf6b1b7SDmitry Monakhov ex2->ee_len = cpu_to_le16(e1_len);
5625fcf6b1b7SDmitry Monakhov if (unwritten)
5626fcf6b1b7SDmitry Monakhov ext4_ext_mark_unwritten(ex2);
5627fcf6b1b7SDmitry Monakhov if (ext4_ext_is_unwritten(&tmp_ex))
5628fcf6b1b7SDmitry Monakhov ext4_ext_mark_unwritten(ex1);
5629fcf6b1b7SDmitry Monakhov
5630fcf6b1b7SDmitry Monakhov ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5631fcf6b1b7SDmitry Monakhov ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5632fcf6b1b7SDmitry Monakhov *erp = ext4_ext_dirty(handle, inode2, path2 +
5633fcf6b1b7SDmitry Monakhov path2->p_depth);
563419008f6dSTheodore Ts'o if (unlikely(*erp))
563519008f6dSTheodore Ts'o goto finish;
5636fcf6b1b7SDmitry Monakhov *erp = ext4_ext_dirty(handle, inode1, path1 +
5637fcf6b1b7SDmitry Monakhov path1->p_depth);
5638fcf6b1b7SDmitry Monakhov /*
5639fcf6b1b7SDmitry Monakhov * Looks scary, huh? The second inode already points to the new
5640fcf6b1b7SDmitry Monakhov * blocks, and it was successfully dirtied. Luckily, an error here
5641fcf6b1b7SDmitry Monakhov * can only be caused by a journal error, in which case the whole
5642fcf6b1b7SDmitry Monakhov * transaction will be aborted anyway.
5646fcf6b1b7SDmitry Monakhov	lblk1 += len;
5647fcf6b1b7SDmitry Monakhov	lblk2 += len;
5648fcf6b1b7SDmitry Monakhov	replaced_count += len;
5649fcf6b1b7SDmitry Monakhov	count -= len;
5650fcf6b1b7SDmitry Monakhov
5651fcf6b1b7SDmitry Monakhov	repeat:
5652fcf6b1b7SDmitry Monakhov		ext4_ext_drop_refs(path1);
5653fcf6b1b7SDmitry Monakhov		kfree(path1);
5654fcf6b1b7SDmitry Monakhov		ext4_ext_drop_refs(path2);
5655fcf6b1b7SDmitry Monakhov		kfree(path2);
5656b7ea89adSTheodore Ts'o		path1 = path2 = NULL;
5657fcf6b1b7SDmitry Monakhov	}
5658fcf6b1b7SDmitry Monakhov	return replaced_count;
5659fcf6b1b7SDmitry Monakhov}
56600b02f4c0SEric Whitney
56610b02f4c0SEric Whitney /*
56620b02f4c0SEric Whitney  * ext4_clu_mapped - determine whether any block in a logical cluster has
56630b02f4c0SEric Whitney  *                   been mapped to a physical cluster
56640b02f4c0SEric Whitney  *
56650b02f4c0SEric Whitney  * @inode - file containing the logical cluster
56660b02f4c0SEric Whitney  * @lclu - logical cluster of interest
56670b02f4c0SEric Whitney  *
56680b02f4c0SEric Whitney  * Returns 1 if any block in the logical cluster is mapped, signifying
56690b02f4c0SEric Whitney  * that a physical cluster has been allocated for it. Otherwise,
56700b02f4c0SEric Whitney  * returns 0. Can also return negative error codes. Derived from
56710b02f4c0SEric Whitney  * ext4_ext_map_blocks().
56720b02f4c0SEric Whitney  */
56730b02f4c0SEric Whitney int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
56740b02f4c0SEric Whitney {
56750b02f4c0SEric Whitney	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
56760b02f4c0SEric Whitney	struct ext4_ext_path *path;
56770b02f4c0SEric Whitney	int depth, mapped = 0, err = 0;
56780b02f4c0SEric Whitney	struct ext4_extent *extent;
56790b02f4c0SEric Whitney	ext4_lblk_t first_lblk, first_lclu, last_lclu;
56800b02f4c0SEric Whitney
56810b02f4c0SEric Whitney	/* search for the extent closest to the first block in the cluster */
56820b02f4c0SEric Whitney	path = ext4_find_extent(inode, EXT4_C2B(sbi, lclu), NULL, 0);
56830b02f4c0SEric Whitney	if (IS_ERR(path)) {
56840b02f4c0SEric Whitney		err = PTR_ERR(path);
56850b02f4c0SEric Whitney		path = NULL;
56860b02f4c0SEric Whitney		goto out;
56870b02f4c0SEric Whitney	}
56880b02f4c0SEric Whitney
56890b02f4c0SEric Whitney	depth = ext_depth(inode);
56900b02f4c0SEric Whitney
56910b02f4c0SEric Whitney	/*
56920b02f4c0SEric Whitney	 * A consistent leaf must not be empty. This situation is possible,
56930b02f4c0SEric Whitney	 * though, _during_ tree modification, and it's why an assert can't
56940b02f4c0SEric Whitney	 * be put in ext4_find_extent().
56950b02f4c0SEric Whitney	 */
56960b02f4c0SEric Whitney	if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
56970b02f4c0SEric Whitney		EXT4_ERROR_INODE(inode,
56980b02f4c0SEric Whitney			"bad extent address - lblock: %lu, depth: %d, pblock: %lld",
56990b02f4c0SEric Whitney			(unsigned long) EXT4_C2B(sbi, lclu),
57000b02f4c0SEric Whitney			depth, path[depth].p_block);
57010b02f4c0SEric Whitney		err = -EFSCORRUPTED;
57020b02f4c0SEric Whitney		goto out;
57030b02f4c0SEric Whitney	}
57040b02f4c0SEric Whitney
57050b02f4c0SEric Whitney	extent = path[depth].p_ext;
57060b02f4c0SEric Whitney
57070b02f4c0SEric Whitney	/* can't be mapped if the extent tree is empty */
57080b02f4c0SEric Whitney	if (extent == NULL)
57090b02f4c0SEric Whitney		goto out;
57100b02f4c0SEric Whitney
57110b02f4c0SEric Whitney	first_lblk = le32_to_cpu(extent->ee_block);
57120b02f4c0SEric Whitney	first_lclu = EXT4_B2C(sbi, first_lblk);
57130b02f4c0SEric Whitney
57140b02f4c0SEric Whitney	/*
57150b02f4c0SEric Whitney	 * Three possible outcomes at this point - found extent spanning
57160b02f4c0SEric Whitney	 * the target cluster, to the left of the target cluster, or to the
57170b02f4c0SEric Whitney	 * right of the target cluster. The first two cases are handled here.
57180b02f4c0SEric Whitney	 * The last case indicates the target cluster is not mapped.
57190b02f4c0SEric Whitney	 */
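	/*
	 * Worked example with an illustrative 16-blocks-per-cluster ratio
	 * (values are hypothetical): an extent mapping logical blocks
	 * 40..59 covers clusters EXT4_B2C(40) == 2 through EXT4_B2C(59)
	 * == 3. Querying lclu == 3 hits the spanning case below;
	 * lclu == 4 leaves the extent to the left, so the cluster is
	 * mapped only if the next allocated block also falls in cluster 4;
	 * lclu == 1 leaves the extent to the right, and mapped stays 0.
	 */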
57200b02f4c0SEric Whitney	if (lclu >= first_lclu) {
57210b02f4c0SEric Whitney		last_lclu = EXT4_B2C(sbi, first_lblk +
57220b02f4c0SEric Whitney				     ext4_ext_get_actual_len(extent) - 1);
57230b02f4c0SEric Whitney		if (lclu <= last_lclu) {
57240b02f4c0SEric Whitney			mapped = 1;
57250b02f4c0SEric Whitney		} else {
57260b02f4c0SEric Whitney			first_lblk = ext4_ext_next_allocated_block(path);
57270b02f4c0SEric Whitney			first_lclu = EXT4_B2C(sbi, first_lblk);
57280b02f4c0SEric Whitney			if (lclu == first_lclu)
57290b02f4c0SEric Whitney				mapped = 1;
57300b02f4c0SEric Whitney		}
57310b02f4c0SEric Whitney	}
57320b02f4c0SEric Whitney
57330b02f4c0SEric Whitney out:
57340b02f4c0SEric Whitney	ext4_ext_drop_refs(path);
57350b02f4c0SEric Whitney	kfree(path);
57360b02f4c0SEric Whitney
57370b02f4c0SEric Whitney	return err ? err : mapped;
57380b02f4c0SEric Whitney }
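/*
 * Hypothetical caller sketch (not from this file): it only illustrates
 * the return-value contract documented above, i.e. a negative errno on
 * failure, 1 if the cluster has a physical allocation, 0 otherwise.
 * The function name is made up for the example.
 */
static int __maybe_unused example_probe_cluster(struct inode *inode,
						ext4_lblk_t lclu)
{
	int ret = ext4_clu_mapped(inode, lclu);	/* 1, 0, or -errno */

	if (ret < 0)
		return ret;	/* e.g. -EFSCORRUPTED for a bad extent tree */
	if (ret)
		pr_debug("cluster %u is backed by a physical cluster\n",
			 (unsigned int)lclu);
	return 0;
}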