/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails
					due to ENOSPC */
#define EXT4_EXT_MARK_UNWRIT1	0x2  /* mark first half unwritten */
#define EXT4_EXT_MARK_UNWRIT2	0x4  /* mark second half unwritten */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!ext4_has_metadata_csum(inode->i_sb))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}
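/*
 * A sketch of the on-disk layout the three helpers above rely on: each
 * extent tree block ends with a struct ext4_extent_tail placed at
 * EXT4_EXTENT_TAIL_OFFSET(eh), i.e. just past the eh_max possible
 * 12-byte entries.  The checksum is a crc32c over the block up to that
 * offset, seeded with the per-inode ei->i_csum_seed, so an identical
 * payload in two different inodes still checksums differently.
 */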
static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path **ppath,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path **ppath,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		BUFFER_TRACE(path->p_bh, "get_write_access");
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
		     struct inode *inode, struct ext4_ext_path *path)
{
	int err;

	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * ELF object sections out-of-order but in a way that
		 * eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}
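/*
 * Worked example for the sizing helpers above and below, assuming the
 * common 4KiB block size: the header and each entry are 12 bytes on
 * disk, so an external tree block holds (4096 - 12) / 12 = 340 extents
 * or indexes.  The root lives in the 60-byte i_data area of the inode,
 * so it holds only (60 - 12) / 12 = 4 entries.
 */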
static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

static inline int
ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
			   struct ext4_ext_path **ppath, ext4_lblk_t lblk,
			   int nofail)
{
	struct ext4_ext_path *path = *ppath;
	int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);

	return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
			EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
			EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
			(nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL : 0));
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
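/*
 * Worked example for the accounting above, assuming 4KiB blocks so that
 * idxs = (4096 - 12) / 12 = 340: while a delayed-allocation run stays
 * contiguous, most new blocks charge no new metadata (num = 0); every
 * 340th block charges one new metadata block, every 340^2-th charges a
 * second one for the level above, and at 340^3 the running length is
 * reset after a third is charged.  A non-contiguous block falls through
 * to the worst case: one new block per tree level, ext_depth() + 1 in
 * total.
 */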
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);
	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
	ext4_lblk_t last = lblock + len - 1;

	if (lblock > last)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
		ext4_fsblk_t pblock = 0;
		ext4_lblk_t lblock = 0;
		ext4_lblk_t prev = 0;
		int len = 0;
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;

			/* Check for overlapping extents */
			lblock = le32_to_cpu(ext->ee_block);
			len = ext4_ext_get_actual_len(ext);
			if ((lblock <= prev) && prev) {
				pblock = ext4_ext_pblock(ext);
				es->s_last_error_block = cpu_to_le64(pblock);
				return 0;
			}
			ext++;
			entries--;
			prev = lblock + len - 1;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth, ext4_fsblk_t pblk)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			 "pblk %llu bad header/extent: %s - magic %x, "
			 "entries %u, max %u(%u), depth %u(%u)",
			 (unsigned long long) pblk, error_msg,
			 le16_to_cpu(eh->eh_magic),
			 le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			 max, le16_to_cpu(eh->eh_depth), depth);
	return -EIO;
}

#define ext4_ext_check(inode, eh, depth, pblk)			\
	__ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}

static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
			 struct inode *inode, ext4_fsblk_t pblk, int depth,
			 int flags)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(inode->i_sb, pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);

	if (!bh_uptodate_or_lock(bh)) {
		trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
		err = bh_submit_read(bh);
		if (err < 0)
			goto errout;
	}
	if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
		return bh;
	err = __ext4_ext_check(function, line, inode,
			       ext_block_hdr(bh), depth, pblk);
	if (err)
		goto errout;
	set_buffer_verified(bh);
	/*
	 * If this is a leaf block, cache all of its entries
	 */
	if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
		struct ext4_extent_header *eh = ext_block_hdr(bh);
		struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
		ext4_lblk_t prev = 0;
		int i;

		for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
			unsigned int status = EXTENT_STATUS_WRITTEN;
			ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
			int len = ext4_ext_get_actual_len(ex);

			if (prev && (prev != lblk))
				ext4_es_cache_extent(inode, prev,
						     lblk - prev, ~0,
						     EXTENT_STATUS_HOLE);

			if (ext4_ext_is_unwritten(ex))
				status = EXTENT_STATUS_UNWRITTEN;
			ext4_es_cache_extent(inode, lblk, len,
					     ext4_ext_pblock(ex), status);
			prev = lblk + len;
		}
	}
	return bh;
errout:
	put_bh(bh);
	return ERR_PTR(err);

}

#define read_extent_tree_block(inode, pblk, depth, flags)		\
	__read_extent_tree_block(__func__, __LINE__, (inode), (pblk),	\
				 (depth), (flags))
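/*
 * Example of the leaf caching done above, with made-up numbers: a leaf
 * holding extents [0..7] -> P and [16..23] -> Q populates the extent
 * status tree with a WRITTEN range for [0..7], a HOLE for the gap
 * [8..15] (pseudo physical block ~0), and a WRITTEN (or UNWRITTEN)
 * range for [16..23], so later lookups in this leaf can be answered
 * without re-reading the block.
 */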
/*
 * This function is called to cache a file's extent information in the
 * extent status tree
 */
int ext4_ext_precache(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_ext_path *path = NULL;
	struct buffer_head *bh;
	int i = 0, depth, ret = 0;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return 0;	/* not an extent-mapped inode */

	down_read(&ei->i_data_sem);
	depth = ext_depth(inode);

	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
		       GFP_NOFS);
	if (path == NULL) {
		up_read(&ei->i_data_sem);
		return -ENOMEM;
	}

	/* Don't cache anything if there are no external extent blocks */
	if (depth == 0)
		goto out;
	path[0].p_hdr = ext_inode_hdr(inode);
	ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
	if (ret)
		goto out;
	path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
	while (i >= 0) {
		/*
		 * If this is a leaf block or we've reached the end of
		 * the index block, go up
		 */
		if ((i == depth) ||
		    path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}
		bh = read_extent_tree_block(inode,
					    ext4_idx_pblock(path[i].p_idx++),
					    depth - i - 1,
					    EXT4_EX_FORCE_CACHE);
		if (IS_ERR(bh)) {
			ret = PTR_ERR(bh);
			break;
		}
		i++;
		path[i].p_bh = bh;
		path[i].p_hdr = ext_block_hdr(bh);
		path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
	}
	ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
out:
	up_read(&ei->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	return ret;
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
%d->%llu", le32_to_cpu(path->p_idx->ei_block), 634bf89d16fSTheodore Ts'o ext4_idx_pblock(path->p_idx)); 635a86c6181SAlex Tomas } else if (path->p_ext) { 636553f9008SMingming ext_debug(" %d:[%d]%d:%llu ", 637a86c6181SAlex Tomas le32_to_cpu(path->p_ext->ee_block), 638556615dcSLukas Czerner ext4_ext_is_unwritten(path->p_ext), 639a2df2a63SAmit Arora ext4_ext_get_actual_len(path->p_ext), 640bf89d16fSTheodore Ts'o ext4_ext_pblock(path->p_ext)); 641a86c6181SAlex Tomas } else 642a86c6181SAlex Tomas ext_debug(" []"); 643a86c6181SAlex Tomas } 644a86c6181SAlex Tomas ext_debug("\n"); 645a86c6181SAlex Tomas } 646a86c6181SAlex Tomas 647a86c6181SAlex Tomas static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) 648a86c6181SAlex Tomas { 649a86c6181SAlex Tomas int depth = ext_depth(inode); 650a86c6181SAlex Tomas struct ext4_extent_header *eh; 651a86c6181SAlex Tomas struct ext4_extent *ex; 652a86c6181SAlex Tomas int i; 653a86c6181SAlex Tomas 654a86c6181SAlex Tomas if (!path) 655a86c6181SAlex Tomas return; 656a86c6181SAlex Tomas 657a86c6181SAlex Tomas eh = path[depth].p_hdr; 658a86c6181SAlex Tomas ex = EXT_FIRST_EXTENT(eh); 659a86c6181SAlex Tomas 660553f9008SMingming ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); 661553f9008SMingming 662a86c6181SAlex Tomas for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { 663553f9008SMingming ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), 664556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 665bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); 666a86c6181SAlex Tomas } 667a86c6181SAlex Tomas ext_debug("\n"); 668a86c6181SAlex Tomas } 6691b16da77SYongqiang Yang 6701b16da77SYongqiang Yang static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, 6711b16da77SYongqiang Yang ext4_fsblk_t newblock, int level) 6721b16da77SYongqiang Yang { 6731b16da77SYongqiang Yang int depth = ext_depth(inode); 6741b16da77SYongqiang Yang struct ext4_extent *ex; 6751b16da77SYongqiang Yang 6761b16da77SYongqiang Yang if (depth != level) { 6771b16da77SYongqiang Yang struct ext4_extent_idx *idx; 6781b16da77SYongqiang Yang idx = path[level].p_idx; 6791b16da77SYongqiang Yang while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { 6801b16da77SYongqiang Yang ext_debug("%d: move %d:%llu in new index %llu\n", level, 6811b16da77SYongqiang Yang le32_to_cpu(idx->ei_block), 6821b16da77SYongqiang Yang ext4_idx_pblock(idx), 6831b16da77SYongqiang Yang newblock); 6841b16da77SYongqiang Yang idx++; 6851b16da77SYongqiang Yang } 6861b16da77SYongqiang Yang 6871b16da77SYongqiang Yang return; 6881b16da77SYongqiang Yang } 6891b16da77SYongqiang Yang 6901b16da77SYongqiang Yang ex = path[depth].p_ext; 6911b16da77SYongqiang Yang while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { 6921b16da77SYongqiang Yang ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", 6931b16da77SYongqiang Yang le32_to_cpu(ex->ee_block), 6941b16da77SYongqiang Yang ext4_ext_pblock(ex), 695556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 6961b16da77SYongqiang Yang ext4_ext_get_actual_len(ex), 6971b16da77SYongqiang Yang newblock); 6981b16da77SYongqiang Yang ex++; 6991b16da77SYongqiang Yang } 7001b16da77SYongqiang Yang } 7011b16da77SYongqiang Yang 702a86c6181SAlex Tomas #else 703a86c6181SAlex Tomas #define ext4_ext_show_path(inode, path) 704a86c6181SAlex Tomas #define ext4_ext_show_leaf(inode, path) 7051b16da77SYongqiang Yang #define ext4_ext_show_move(inode, path, newblock, level) 706a86c6181SAlex Tomas #endif 707a86c6181SAlex Tomas 708b35905c1SAneesh Kumar K.V 
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth, i;

	if (!path)
		return;
	depth = path->p_depth;
	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
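/*
 * Example of the index search above, with made-up block numbers: for
 * indexes covering blocks {0, 100, 200, 300} and a lookup of block 150,
 * the loop narrows l to the entry for 200 and p_idx = l - 1 lands on
 * the entry for 100, i.e. the rightmost index with ei_block <= block.
 * Starting at EXT_FIRST_INDEX(eh) + 1 is safe because the first index
 * covers everything to the left of the second one.
 */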
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_unwritten(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	return 0;
}

struct ext4_ext_path *
ext4_find_extent(struct inode *inode, ext4_lblk_t block,
		 struct ext4_ext_path **orig_path, int flags)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
	short int depth, i, ppos = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	if (path) {
		ext4_ext_drop_refs(path);
		if (depth > path[0].p_maxdepth) {
			kfree(path);
			*orig_path = path = NULL;
		}
	}
	if (!path) {
		/* account possible depth increase */
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (unlikely(!path))
			return ERR_PTR(-ENOMEM);
		path[0].p_maxdepth = depth + 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries),
			  le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
					    flags);
		if (unlikely(IS_ERR(bh))) {
			ret = PTR_ERR(bh);
			goto err;
		}

		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			ret = -EIO;
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (orig_path)
		*orig_path = NULL;
	return ERR_PTR(ret);
}
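/*
 * Typical calling pattern for ext4_find_extent() (a sketch, not a
 * verbatim caller from this file):
 *
 *	path = ext4_find_extent(inode, lblk, NULL, 0);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	depth = ext_depth(inode);
 *	ex = path[depth].p_ext;		(may be NULL for an empty leaf)
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 *
 * On failure the function drops and frees the path itself (clearing
 * *orig_path when one was passed in), so callers must not free it again.
 */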
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EIO;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - decides where to split
 * - moves remaining extents and index entries (right of the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only. The index won't be inserted and
	 * the tree will be left in a consistent state. The next
	 * mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
path[i].p_idx, 11991b16da77SYongqiang Yang EXT_MAX_INDEX(path[i].p_hdr)); 12001b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, i); 1201a86c6181SAlex Tomas if (m) { 12021b16da77SYongqiang Yang memmove(++fidx, path[i].p_idx, 1203a86c6181SAlex Tomas sizeof(struct ext4_extent_idx) * m); 1204e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 1205a86c6181SAlex Tomas } 12067ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, neh); 1207a86c6181SAlex Tomas set_buffer_uptodate(bh); 1208a86c6181SAlex Tomas unlock_buffer(bh); 1209a86c6181SAlex Tomas 12100390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 12117e028976SAvantika Mathur if (err) 1212a86c6181SAlex Tomas goto cleanup; 1213a86c6181SAlex Tomas brelse(bh); 1214a86c6181SAlex Tomas bh = NULL; 1215a86c6181SAlex Tomas 1216a86c6181SAlex Tomas /* correct old index */ 1217a86c6181SAlex Tomas if (m) { 1218a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + i); 1219a86c6181SAlex Tomas if (err) 1220a86c6181SAlex Tomas goto cleanup; 1221e8546d06SMarcin Slusarz le16_add_cpu(&path[i].p_hdr->eh_entries, -m); 1222a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path + i); 1223a86c6181SAlex Tomas if (err) 1224a86c6181SAlex Tomas goto cleanup; 1225a86c6181SAlex Tomas } 1226a86c6181SAlex Tomas 1227a86c6181SAlex Tomas i--; 1228a86c6181SAlex Tomas } 1229a86c6181SAlex Tomas 1230a86c6181SAlex Tomas /* insert new index */ 1231a86c6181SAlex Tomas err = ext4_ext_insert_index(handle, inode, path + at, 1232a86c6181SAlex Tomas le32_to_cpu(border), newblock); 1233a86c6181SAlex Tomas 1234a86c6181SAlex Tomas cleanup: 1235a86c6181SAlex Tomas if (bh) { 1236a86c6181SAlex Tomas if (buffer_locked(bh)) 1237a86c6181SAlex Tomas unlock_buffer(bh); 1238a86c6181SAlex Tomas brelse(bh); 1239a86c6181SAlex Tomas } 1240a86c6181SAlex Tomas 1241a86c6181SAlex Tomas if (err) { 1242a86c6181SAlex Tomas /* free all allocated blocks in error case */ 1243a86c6181SAlex Tomas for (i = 0; i < depth; i++) { 1244a86c6181SAlex Tomas if (!ablocks[i]) 1245a86c6181SAlex Tomas continue; 12467dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, 1247e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA); 1248a86c6181SAlex Tomas } 1249a86c6181SAlex Tomas } 1250a86c6181SAlex Tomas kfree(ablocks); 1251a86c6181SAlex Tomas 1252a86c6181SAlex Tomas return err; 1253a86c6181SAlex Tomas } 1254a86c6181SAlex Tomas 1255a86c6181SAlex Tomas /* 1256d0d856e8SRandy Dunlap * ext4_ext_grow_indepth: 1257d0d856e8SRandy Dunlap * implements tree growing procedure: 1258a86c6181SAlex Tomas * - allocates new block 1259a86c6181SAlex Tomas * - moves top-level data (index block or leaf) into the new block 1260d0d856e8SRandy Dunlap * - initializes new top-level, creating index that points to the 1261a86c6181SAlex Tomas * just created block 1262a86c6181SAlex Tomas */ 1263a86c6181SAlex Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 1264be5cd90dSDmitry Monakhov unsigned int flags) 1265a86c6181SAlex Tomas { 1266a86c6181SAlex Tomas struct ext4_extent_header *neh; 1267a86c6181SAlex Tomas struct buffer_head *bh; 1268be5cd90dSDmitry Monakhov ext4_fsblk_t newblock, goal = 0; 1269be5cd90dSDmitry Monakhov struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 1270a86c6181SAlex Tomas int err = 0; 1271a86c6181SAlex Tomas 1272be5cd90dSDmitry Monakhov /* Try to prepend new index to old one */ 1273be5cd90dSDmitry Monakhov if (ext_depth(inode)) 1274be5cd90dSDmitry Monakhov goal = 
ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
1275be5cd90dSDmitry Monakhov 	if (goal > le32_to_cpu(es->s_first_data_block)) {
1276be5cd90dSDmitry Monakhov 		flags |= EXT4_MB_HINT_TRY_GOAL;
1277be5cd90dSDmitry Monakhov 		goal--;
1278be5cd90dSDmitry Monakhov 	} else
1279be5cd90dSDmitry Monakhov 		goal = ext4_inode_to_goal_block(inode);
1280be5cd90dSDmitry Monakhov 	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
1281be5cd90dSDmitry Monakhov 					NULL, &err);
1282a86c6181SAlex Tomas 	if (newblock == 0)
1283a86c6181SAlex Tomas 		return err;
1284a86c6181SAlex Tomas 
1285a86c6181SAlex Tomas 	bh = sb_getblk(inode->i_sb, newblock);
1286aebf0243SWang Shilong 	if (unlikely(!bh))
1287860d21e2STheodore Ts'o 		return -ENOMEM;
1288a86c6181SAlex Tomas 	lock_buffer(bh);
1289a86c6181SAlex Tomas 
12907e028976SAvantika Mathur 	err = ext4_journal_get_create_access(handle, bh);
12917e028976SAvantika Mathur 	if (err) {
1292a86c6181SAlex Tomas 		unlock_buffer(bh);
1293a86c6181SAlex Tomas 		goto out;
1294a86c6181SAlex Tomas 	}
1295a86c6181SAlex Tomas 
1296a86c6181SAlex Tomas 	/* move top-level index/leaf into new block */
12971939dd84SDmitry Monakhov 	memmove(bh->b_data, EXT4_I(inode)->i_data,
12981939dd84SDmitry Monakhov 		sizeof(EXT4_I(inode)->i_data));
1299a86c6181SAlex Tomas 
1300a86c6181SAlex Tomas 	/* set size of new block */
1301a86c6181SAlex Tomas 	neh = ext_block_hdr(bh);
1302a86c6181SAlex Tomas 	/* old root could have indexes or leaves
1303a86c6181SAlex Tomas 	 * so calculate eh_max the right way */
1304a86c6181SAlex Tomas 	if (ext_depth(inode))
130555ad63bfSTheodore Ts'o 		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1306a86c6181SAlex Tomas 	else
130755ad63bfSTheodore Ts'o 		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1308a86c6181SAlex Tomas 	neh->eh_magic = EXT4_EXT_MAGIC;
13097ac5990dSDarrick J.
Wong ext4_extent_block_csum_set(inode, neh); 1310a86c6181SAlex Tomas set_buffer_uptodate(bh); 1311a86c6181SAlex Tomas unlock_buffer(bh); 1312a86c6181SAlex Tomas 13130390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 13147e028976SAvantika Mathur if (err) 1315a86c6181SAlex Tomas goto out; 1316a86c6181SAlex Tomas 13171939dd84SDmitry Monakhov /* Update top-level index: num,max,pointer */ 1318a86c6181SAlex Tomas neh = ext_inode_hdr(inode); 13191939dd84SDmitry Monakhov neh->eh_entries = cpu_to_le16(1); 13201939dd84SDmitry Monakhov ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); 13211939dd84SDmitry Monakhov if (neh->eh_depth == 0) { 13221939dd84SDmitry Monakhov /* Root extent block becomes index block */ 13231939dd84SDmitry Monakhov neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); 13241939dd84SDmitry Monakhov EXT_FIRST_INDEX(neh)->ei_block = 13251939dd84SDmitry Monakhov EXT_FIRST_EXTENT(neh)->ee_block; 13261939dd84SDmitry Monakhov } 13272ae02107SMingming Cao ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", 1328a86c6181SAlex Tomas le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 13295a0790c2SAndi Kleen le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1330bf89d16fSTheodore Ts'o ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1331a86c6181SAlex Tomas 1332ba39ebb6SWei Yongjun le16_add_cpu(&neh->eh_depth, 1); 13331939dd84SDmitry Monakhov ext4_mark_inode_dirty(handle, inode); 1334a86c6181SAlex Tomas out: 1335a86c6181SAlex Tomas brelse(bh); 1336a86c6181SAlex Tomas 1337a86c6181SAlex Tomas return err; 1338a86c6181SAlex Tomas } 1339a86c6181SAlex Tomas 1340a86c6181SAlex Tomas /* 1341d0d856e8SRandy Dunlap * ext4_ext_create_new_leaf: 1342d0d856e8SRandy Dunlap * finds empty index and adds new leaf. 1343d0d856e8SRandy Dunlap * if no free index is found, then it requests in-depth growing. 
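 * For example (an illustrative case, not from the original comment): with a
 * depth-2 tree whose leaf and mid-level index block are both full while the
 * root still has a free slot, the walk below stops at the root and
 * ext4_ext_split() is called for the levels under it; only when every level
 * including the root is full does the tree grow one level deeper via
 * ext4_ext_grow_indepth().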
1344a86c6181SAlex Tomas */ 1345a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 1346107a7bd3STheodore Ts'o unsigned int mb_flags, 1347107a7bd3STheodore Ts'o unsigned int gb_flags, 1348dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 1349a86c6181SAlex Tomas struct ext4_extent *newext) 1350a86c6181SAlex Tomas { 1351dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 1352a86c6181SAlex Tomas struct ext4_ext_path *curp; 1353a86c6181SAlex Tomas int depth, i, err = 0; 1354a86c6181SAlex Tomas 1355a86c6181SAlex Tomas repeat: 1356a86c6181SAlex Tomas i = depth = ext_depth(inode); 1357a86c6181SAlex Tomas 1358a86c6181SAlex Tomas /* walk up to the tree and look for free index entry */ 1359a86c6181SAlex Tomas curp = path + depth; 1360a86c6181SAlex Tomas while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1361a86c6181SAlex Tomas i--; 1362a86c6181SAlex Tomas curp--; 1363a86c6181SAlex Tomas } 1364a86c6181SAlex Tomas 1365d0d856e8SRandy Dunlap /* we use already allocated block for index block, 1366d0d856e8SRandy Dunlap * so subsequent data blocks should be contiguous */ 1367a86c6181SAlex Tomas if (EXT_HAS_FREE_INDEX(curp)) { 1368a86c6181SAlex Tomas /* if we found index with free entry, then use that 1369a86c6181SAlex Tomas * entry: create all needed subtree and add new leaf */ 1370107a7bd3STheodore Ts'o err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); 1371787e0981SShen Feng if (err) 1372787e0981SShen Feng goto out; 1373a86c6181SAlex Tomas 1374a86c6181SAlex Tomas /* refill path */ 1375ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, 1376725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1377dfe50809STheodore Ts'o ppath, gb_flags); 1378a86c6181SAlex Tomas if (IS_ERR(path)) 1379a86c6181SAlex Tomas err = PTR_ERR(path); 1380a86c6181SAlex Tomas } else { 1381a86c6181SAlex Tomas /* tree is full, time to grow in depth */ 1382be5cd90dSDmitry Monakhov err = ext4_ext_grow_indepth(handle, inode, mb_flags); 1383a86c6181SAlex Tomas if (err) 1384a86c6181SAlex Tomas goto out; 1385a86c6181SAlex Tomas 1386a86c6181SAlex Tomas /* refill path */ 1387ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, 1388725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1389dfe50809STheodore Ts'o ppath, gb_flags); 1390a86c6181SAlex Tomas if (IS_ERR(path)) { 1391a86c6181SAlex Tomas err = PTR_ERR(path); 1392a86c6181SAlex Tomas goto out; 1393a86c6181SAlex Tomas } 1394a86c6181SAlex Tomas 1395a86c6181SAlex Tomas /* 1396d0d856e8SRandy Dunlap * only first (depth 0 -> 1) produces free space; 1397d0d856e8SRandy Dunlap * in all other cases we have to split the grown tree 1398a86c6181SAlex Tomas */ 1399a86c6181SAlex Tomas depth = ext_depth(inode); 1400a86c6181SAlex Tomas if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1401d0d856e8SRandy Dunlap /* now we need to split */ 1402a86c6181SAlex Tomas goto repeat; 1403a86c6181SAlex Tomas } 1404a86c6181SAlex Tomas } 1405a86c6181SAlex Tomas 1406a86c6181SAlex Tomas out: 1407a86c6181SAlex Tomas return err; 1408a86c6181SAlex Tomas } 1409a86c6181SAlex Tomas 1410a86c6181SAlex Tomas /* 14111988b51eSAlex Tomas * search the closest allocated block to the left for *logical 14121988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 14131988b51eSAlex Tomas * if *logical is the smallest allocated block, the function 14141988b51eSAlex Tomas * returns 0 at @phys 14151988b51eSAlex Tomas * return value contains 0 (success) or error code 14161988b51eSAlex Tomas */ 
14171f109d5aSTheodore Ts'o static int ext4_ext_search_left(struct inode *inode,
14181f109d5aSTheodore Ts'o 				struct ext4_ext_path *path,
14191988b51eSAlex Tomas 				ext4_lblk_t *logical, ext4_fsblk_t *phys)
14201988b51eSAlex Tomas {
14211988b51eSAlex Tomas 	struct ext4_extent_idx *ix;
14221988b51eSAlex Tomas 	struct ext4_extent *ex;
1423b939e376SAneesh Kumar K.V 	int depth, ee_len;
14241988b51eSAlex Tomas 
1425273df556SFrank Mayhar 	if (unlikely(path == NULL)) {
1426273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1427273df556SFrank Mayhar 		return -EIO;
1428273df556SFrank Mayhar 	}
14291988b51eSAlex Tomas 	depth = path->p_depth;
14301988b51eSAlex Tomas 	*phys = 0;
14311988b51eSAlex Tomas 
14321988b51eSAlex Tomas 	if (depth == 0 && path->p_ext == NULL)
14331988b51eSAlex Tomas 		return 0;
14341988b51eSAlex Tomas 
14351988b51eSAlex Tomas 	/* usually extent in the path covers blocks smaller
14361988b51eSAlex Tomas 	 * than *logical, but it can be that extent is the
14371988b51eSAlex Tomas 	 * first one in the file */
14381988b51eSAlex Tomas 
14391988b51eSAlex Tomas 	ex = path[depth].p_ext;
1440b939e376SAneesh Kumar K.V 	ee_len = ext4_ext_get_actual_len(ex);
14411988b51eSAlex Tomas 	if (*logical < le32_to_cpu(ex->ee_block)) {
1442273df556SFrank Mayhar 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1443273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
1444273df556SFrank Mayhar 					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1445273df556SFrank Mayhar 					 *logical, le32_to_cpu(ex->ee_block));
1446273df556SFrank Mayhar 			return -EIO;
1447273df556SFrank Mayhar 		}
14481988b51eSAlex Tomas 		while (--depth >= 0) {
14491988b51eSAlex Tomas 			ix = path[depth].p_idx;
1450273df556SFrank Mayhar 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1451273df556SFrank Mayhar 				EXT4_ERROR_INODE(inode,
1452273df556SFrank Mayhar 				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
14536ee3b212STao Ma 				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1454273df556SFrank Mayhar 				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
14556ee3b212STao Ma 		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1456273df556SFrank Mayhar 				  depth);
1457273df556SFrank Mayhar 				return -EIO;
1458273df556SFrank Mayhar 			}
14591988b51eSAlex Tomas 		}
14601988b51eSAlex Tomas 		return 0;
14611988b51eSAlex Tomas 	}
14621988b51eSAlex Tomas 
1463273df556SFrank Mayhar 	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1464273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode,
1465273df556SFrank Mayhar 				 "logical %d < ee_block %d + ee_len %d!",
1466273df556SFrank Mayhar 				 *logical, le32_to_cpu(ex->ee_block), ee_len);
1467273df556SFrank Mayhar 		return -EIO;
1468273df556SFrank Mayhar 	}
14691988b51eSAlex Tomas 
1470b939e376SAneesh Kumar K.V 	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1471bf89d16fSTheodore Ts'o 	*phys = ext4_ext_pblock(ex) + ee_len - 1;
14721988b51eSAlex Tomas 	return 0;
14731988b51eSAlex Tomas }
14741988b51eSAlex Tomas 
14751988b51eSAlex Tomas /*
14761988b51eSAlex Tomas  * search the closest allocated block to the right for *logical
14771988b51eSAlex Tomas  * and returns it at @logical + its physical address at @phys
1478df3ab170STao Ma  * if *logical is the largest allocated block, the function
14791988b51eSAlex Tomas  * returns 0 at @phys
14801988b51eSAlex Tomas  * return value contains 0 (success) or error code
14811988b51eSAlex Tomas  */
14821f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode,
14831f109d5aSTheodore Ts'o 				 struct ext4_ext_path *path,
14844d33b1efSTheodore Ts'o 				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
14854d33b1efSTheodore Ts'o 				 struct ext4_extent **ret_ex)
14861988b51eSAlex Tomas {
14871988b51eSAlex Tomas 	struct buffer_head *bh = NULL;
14881988b51eSAlex Tomas 	struct ext4_extent_header *eh;
14891988b51eSAlex Tomas 	struct ext4_extent_idx *ix;
14901988b51eSAlex Tomas 	struct ext4_extent *ex;
14911988b51eSAlex Tomas 	ext4_fsblk_t block;
1492395a87bfSEric Sandeen 	int depth;	/* Note, NOT eh_depth; depth from top of tree */
1493395a87bfSEric Sandeen 	int ee_len;
14941988b51eSAlex Tomas 
1495273df556SFrank Mayhar 	if (unlikely(path == NULL)) {
1496273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1497273df556SFrank Mayhar 		return -EIO;
1498273df556SFrank Mayhar 	}
14991988b51eSAlex Tomas 	depth = path->p_depth;
15001988b51eSAlex Tomas 	*phys = 0;
15011988b51eSAlex Tomas 
15021988b51eSAlex Tomas 	if (depth == 0 && path->p_ext == NULL)
15031988b51eSAlex Tomas 		return 0;
15041988b51eSAlex Tomas 
15051988b51eSAlex Tomas 	/* usually extent in the path covers blocks smaller
15061988b51eSAlex Tomas 	 * than *logical, but it can be that extent is the
15071988b51eSAlex Tomas 	 * first one in the file */
15081988b51eSAlex Tomas 
15091988b51eSAlex Tomas 	ex = path[depth].p_ext;
1510b939e376SAneesh Kumar K.V 	ee_len = ext4_ext_get_actual_len(ex);
15111988b51eSAlex Tomas 	if (*logical < le32_to_cpu(ex->ee_block)) {
1512273df556SFrank Mayhar 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1513273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
1514273df556SFrank Mayhar 					 "first_extent(path[%d].p_hdr) != ex",
1515273df556SFrank Mayhar 					 depth);
1516273df556SFrank Mayhar 			return -EIO;
1517273df556SFrank Mayhar 		}
15181988b51eSAlex Tomas 		while (--depth >= 0) {
15191988b51eSAlex Tomas 			ix = path[depth].p_idx;
1520273df556SFrank Mayhar 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1521273df556SFrank Mayhar 				EXT4_ERROR_INODE(inode,
1522273df556SFrank Mayhar 						 "ix != EXT_FIRST_INDEX *logical %d!",
1523273df556SFrank Mayhar 						 *logical);
1524273df556SFrank Mayhar 				return -EIO;
1525273df556SFrank
Mayhar } 15261988b51eSAlex Tomas } 15274d33b1efSTheodore Ts'o goto found_extent; 15281988b51eSAlex Tomas } 15291988b51eSAlex Tomas 1530273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1531273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1532273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1533273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1534273df556SFrank Mayhar return -EIO; 1535273df556SFrank Mayhar } 15361988b51eSAlex Tomas 15371988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 15381988b51eSAlex Tomas /* next allocated block in this leaf */ 15391988b51eSAlex Tomas ex++; 15404d33b1efSTheodore Ts'o goto found_extent; 15411988b51eSAlex Tomas } 15421988b51eSAlex Tomas 15431988b51eSAlex Tomas /* go up and search for index to the right */ 15441988b51eSAlex Tomas while (--depth >= 0) { 15451988b51eSAlex Tomas ix = path[depth].p_idx; 15461988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 154725f1ee3aSWu Fengguang goto got_index; 15481988b51eSAlex Tomas } 15491988b51eSAlex Tomas 155025f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */ 15511988b51eSAlex Tomas return 0; 15521988b51eSAlex Tomas 155325f1ee3aSWu Fengguang got_index: 15541988b51eSAlex Tomas /* we've found index to the right, let's 15551988b51eSAlex Tomas * follow it and find the closest allocated 15561988b51eSAlex Tomas * block to the right */ 15571988b51eSAlex Tomas ix++; 1558bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 15591988b51eSAlex Tomas while (++depth < path->p_depth) { 1560395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */ 15617d7ea89eSTheodore Ts'o bh = read_extent_tree_block(inode, block, 1562107a7bd3STheodore Ts'o path->p_depth - depth, 0); 15637d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 15647d7ea89eSTheodore Ts'o return PTR_ERR(bh); 15657d7ea89eSTheodore Ts'o eh = ext_block_hdr(bh); 15661988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh); 1567bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 15681988b51eSAlex Tomas put_bh(bh); 15691988b51eSAlex Tomas } 15701988b51eSAlex Tomas 1571107a7bd3STheodore Ts'o bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); 15727d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 15737d7ea89eSTheodore Ts'o return PTR_ERR(bh); 15741988b51eSAlex Tomas eh = ext_block_hdr(bh); 15751988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh); 15764d33b1efSTheodore Ts'o found_extent: 15771988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block); 1578bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex); 15794d33b1efSTheodore Ts'o *ret_ex = ex; 15804d33b1efSTheodore Ts'o if (bh) 15811988b51eSAlex Tomas put_bh(bh); 15821988b51eSAlex Tomas return 0; 15831988b51eSAlex Tomas } 15841988b51eSAlex Tomas 15851988b51eSAlex Tomas /* 1586d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block: 1587f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1588d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as 1589d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent 1590d0d856e8SRandy Dunlap * with leaves. 
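 * For example, if the current leaf still has an extent to the right of
 * path[depth].p_ext, that extent's ee_block is returned; otherwise the
 * walk moves up until an index block with an entry to the right is
 * found, and that entry's ei_block is returned.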
1591a86c6181SAlex Tomas */ 1592fcf6b1b7SDmitry Monakhov ext4_lblk_t 1593a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1594a86c6181SAlex Tomas { 1595a86c6181SAlex Tomas int depth; 1596a86c6181SAlex Tomas 1597a86c6181SAlex Tomas BUG_ON(path == NULL); 1598a86c6181SAlex Tomas depth = path->p_depth; 1599a86c6181SAlex Tomas 1600a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL) 1601f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1602a86c6181SAlex Tomas 1603a86c6181SAlex Tomas while (depth >= 0) { 1604a86c6181SAlex Tomas if (depth == path->p_depth) { 1605a86c6181SAlex Tomas /* leaf */ 16066f8ff537SCurt Wohlgemuth if (path[depth].p_ext && 16076f8ff537SCurt Wohlgemuth path[depth].p_ext != 1608a86c6181SAlex Tomas EXT_LAST_EXTENT(path[depth].p_hdr)) 1609a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_ext[1].ee_block); 1610a86c6181SAlex Tomas } else { 1611a86c6181SAlex Tomas /* index */ 1612a86c6181SAlex Tomas if (path[depth].p_idx != 1613a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1614a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_idx[1].ei_block); 1615a86c6181SAlex Tomas } 1616a86c6181SAlex Tomas depth--; 1617a86c6181SAlex Tomas } 1618a86c6181SAlex Tomas 1619f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1620a86c6181SAlex Tomas } 1621a86c6181SAlex Tomas 1622a86c6181SAlex Tomas /* 1623d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block: 1624f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1625a86c6181SAlex Tomas */ 16265718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1627a86c6181SAlex Tomas { 1628a86c6181SAlex Tomas int depth; 1629a86c6181SAlex Tomas 1630a86c6181SAlex Tomas BUG_ON(path == NULL); 1631a86c6181SAlex Tomas depth = path->p_depth; 1632a86c6181SAlex Tomas 1633a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */ 1634a86c6181SAlex Tomas if (depth == 0) 1635f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1636a86c6181SAlex Tomas 1637a86c6181SAlex Tomas /* go to index block */ 1638a86c6181SAlex Tomas depth--; 1639a86c6181SAlex Tomas 1640a86c6181SAlex Tomas while (depth >= 0) { 1641a86c6181SAlex Tomas if (path[depth].p_idx != 1642a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1643725d26d3SAneesh Kumar K.V return (ext4_lblk_t) 1644725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block); 1645a86c6181SAlex Tomas depth--; 1646a86c6181SAlex Tomas } 1647a86c6181SAlex Tomas 1648f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1649a86c6181SAlex Tomas } 1650a86c6181SAlex Tomas 1651a86c6181SAlex Tomas /* 1652d0d856e8SRandy Dunlap * ext4_ext_correct_indexes: 1653d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf, 1654d0d856e8SRandy Dunlap * then we have to correct all indexes above. 1655a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases? 
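 * Example: if the leaf's first extent now starts at logical block 100
 * while the parent index still records 96, the index entries on the
 * path are rewritten to 100, walking upward for as long as the child
 * entry is the leftmost one in its block.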
1656a86c6181SAlex Tomas */ 16571d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1658a86c6181SAlex Tomas struct ext4_ext_path *path) 1659a86c6181SAlex Tomas { 1660a86c6181SAlex Tomas struct ext4_extent_header *eh; 1661a86c6181SAlex Tomas int depth = ext_depth(inode); 1662a86c6181SAlex Tomas struct ext4_extent *ex; 1663a86c6181SAlex Tomas __le32 border; 1664a86c6181SAlex Tomas int k, err = 0; 1665a86c6181SAlex Tomas 1666a86c6181SAlex Tomas eh = path[depth].p_hdr; 1667a86c6181SAlex Tomas ex = path[depth].p_ext; 1668273df556SFrank Mayhar 1669273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) { 1670273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1671273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh); 1672273df556SFrank Mayhar return -EIO; 1673273df556SFrank Mayhar } 1674a86c6181SAlex Tomas 1675a86c6181SAlex Tomas if (depth == 0) { 1676a86c6181SAlex Tomas /* there is no tree at all */ 1677a86c6181SAlex Tomas return 0; 1678a86c6181SAlex Tomas } 1679a86c6181SAlex Tomas 1680a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) { 1681a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */ 1682a86c6181SAlex Tomas return 0; 1683a86c6181SAlex Tomas } 1684a86c6181SAlex Tomas 1685a86c6181SAlex Tomas /* 1686d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one 1687a86c6181SAlex Tomas */ 1688a86c6181SAlex Tomas k = depth - 1; 1689a86c6181SAlex Tomas border = path[depth].p_ext->ee_block; 16907e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 16917e028976SAvantika Mathur if (err) 1692a86c6181SAlex Tomas return err; 1693a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 16947e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 16957e028976SAvantika Mathur if (err) 1696a86c6181SAlex Tomas return err; 1697a86c6181SAlex Tomas 1698a86c6181SAlex Tomas while (k--) { 1699a86c6181SAlex Tomas /* change all left-side indexes */ 1700a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1701a86c6181SAlex Tomas break; 17027e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 17037e028976SAvantika Mathur if (err) 1704a86c6181SAlex Tomas break; 1705a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 17067e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 17077e028976SAvantika Mathur if (err) 1708a86c6181SAlex Tomas break; 1709a86c6181SAlex Tomas } 1710a86c6181SAlex Tomas 1711a86c6181SAlex Tomas return err; 1712a86c6181SAlex Tomas } 1713a86c6181SAlex Tomas 1714748de673SAkira Fujita int 1715a86c6181SAlex Tomas ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, 1716a86c6181SAlex Tomas struct ext4_extent *ex2) 1717a86c6181SAlex Tomas { 1718da0169b3SEric Sandeen unsigned short ext1_ee_len, ext2_ee_len; 1719a2df2a63SAmit Arora 1720a2df2a63SAmit Arora /* 1721ec22ba8eSDmitry Monakhov * Make sure that both extents are initialized. We don't merge 1722556615dcSLukas Czerner * unwritten extents so that we can be sure that end_io code has 1723ec22ba8eSDmitry Monakhov * the extent that was written properly split out and conversion to 1724ec22ba8eSDmitry Monakhov * initialized is trivial. 
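 *
 * A worked example of the checks below (illustrative numbers): ex1 =
 * [lblk 100, len 8, pblk 5000] and ex2 = [lblk 108, len 4, pblk 5008]
 * are mergeable: both initialized, logically contiguous (100 + 8 == 108),
 * combined length 12 <= EXT_INIT_MAX_LEN, and physically contiguous
 * (5000 + 8 == 5008).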
1725a2df2a63SAmit Arora 	 */
1726556615dcSLukas Czerner 	if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
1727a2df2a63SAmit Arora 		return 0;
1728a2df2a63SAmit Arora 
1729a2df2a63SAmit Arora 	ext1_ee_len = ext4_ext_get_actual_len(ex1);
1730a2df2a63SAmit Arora 	ext2_ee_len = ext4_ext_get_actual_len(ex2);
1731a2df2a63SAmit Arora 
1732a2df2a63SAmit Arora 	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
173363f57933SAndrew Morton 			le32_to_cpu(ex2->ee_block))
1734a86c6181SAlex Tomas 		return 0;
1735a86c6181SAlex Tomas 
1736471d4011SSuparna Bhattacharya 	/*
1737471d4011SSuparna Bhattacharya 	 * To allow future support for preallocated extents to be added
1738471d4011SSuparna Bhattacharya 	 * as an RO_COMPAT feature, refuse to merge two extents if
1739d0d856e8SRandy Dunlap 	 * this can result in the top bit of ee_len being set.
1740471d4011SSuparna Bhattacharya 	 */
1741da0169b3SEric Sandeen 	if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1742471d4011SSuparna Bhattacharya 		return 0;
1743556615dcSLukas Czerner 	if (ext4_ext_is_unwritten(ex1) &&
1744a9b82415SDarrick J. Wong 	    (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
1745a9b82415SDarrick J. Wong 	     atomic_read(&EXT4_I(inode)->i_unwritten) ||
1746556615dcSLukas Czerner 	     (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
1747a9b82415SDarrick J. Wong 		return 0;
1748bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
1749b939e376SAneesh Kumar K.V 	if (ext1_ee_len >= 4)
1750a86c6181SAlex Tomas 		return 0;
1751a86c6181SAlex Tomas #endif
1752a86c6181SAlex Tomas 
1753bf89d16fSTheodore Ts'o 	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1754a86c6181SAlex Tomas 		return 1;
1755a86c6181SAlex Tomas 	return 0;
1756a86c6181SAlex Tomas }
1757a86c6181SAlex Tomas 
1758a86c6181SAlex Tomas /*
175956055d3aSAmit Arora  * This function tries to merge the "ex" extent to the next extent in the tree.
176056055d3aSAmit Arora  * It always tries to merge towards right. If you want to merge towards
176156055d3aSAmit Arora  * left, pass "ex - 1" as argument instead of "ex".
176256055d3aSAmit Arora  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
176356055d3aSAmit Arora  * 1 if they got merged.
176456055d3aSAmit Arora  */
1765197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode,
176656055d3aSAmit Arora 				 struct ext4_ext_path *path,
176756055d3aSAmit Arora 				 struct ext4_extent *ex)
176856055d3aSAmit Arora {
176956055d3aSAmit Arora 	struct ext4_extent_header *eh;
177056055d3aSAmit Arora 	unsigned int depth, len;
1771556615dcSLukas Czerner 	int merge_done = 0, unwritten;
177256055d3aSAmit Arora 
177356055d3aSAmit Arora 	depth = ext_depth(inode);
177456055d3aSAmit Arora 	BUG_ON(path[depth].p_hdr == NULL);
177556055d3aSAmit Arora 	eh = path[depth].p_hdr;
177656055d3aSAmit Arora 
177756055d3aSAmit Arora 	while (ex < EXT_LAST_EXTENT(eh)) {
177856055d3aSAmit Arora 		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
177956055d3aSAmit Arora 			break;
178056055d3aSAmit Arora 		/* merge with next extent!
*/ 1781556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 178256055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 178356055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1)); 1784556615dcSLukas Czerner if (unwritten) 1785556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 178656055d3aSAmit Arora 178756055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) { 178856055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1) 178956055d3aSAmit Arora * sizeof(struct ext4_extent); 179056055d3aSAmit Arora memmove(ex + 1, ex + 2, len); 179156055d3aSAmit Arora } 1792e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1); 179356055d3aSAmit Arora merge_done = 1; 179456055d3aSAmit Arora WARN_ON(eh->eh_entries == 0); 179556055d3aSAmit Arora if (!eh->eh_entries) 179624676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 179756055d3aSAmit Arora } 179856055d3aSAmit Arora 179956055d3aSAmit Arora return merge_done; 180056055d3aSAmit Arora } 180156055d3aSAmit Arora 180256055d3aSAmit Arora /* 1803ecb94f5fSTheodore Ts'o * This function does a very simple check to see if we can collapse 1804ecb94f5fSTheodore Ts'o * an extent tree with a single extent tree leaf block into the inode. 1805ecb94f5fSTheodore Ts'o */ 1806ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle, 1807ecb94f5fSTheodore Ts'o struct inode *inode, 1808ecb94f5fSTheodore Ts'o struct ext4_ext_path *path) 1809ecb94f5fSTheodore Ts'o { 1810ecb94f5fSTheodore Ts'o size_t s; 1811ecb94f5fSTheodore Ts'o unsigned max_root = ext4_ext_space_root(inode, 0); 1812ecb94f5fSTheodore Ts'o ext4_fsblk_t blk; 1813ecb94f5fSTheodore Ts'o 1814ecb94f5fSTheodore Ts'o if ((path[0].p_depth != 1) || 1815ecb94f5fSTheodore Ts'o (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || 1816ecb94f5fSTheodore Ts'o (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) 1817ecb94f5fSTheodore Ts'o return; 1818ecb94f5fSTheodore Ts'o 1819ecb94f5fSTheodore Ts'o /* 1820ecb94f5fSTheodore Ts'o * We need to modify the block allocation bitmap and the block 1821ecb94f5fSTheodore Ts'o * group descriptor to release the extent tree block. If we 1822ecb94f5fSTheodore Ts'o * can't get the journal credits, give up. 
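 * (The call below asks to extend the handle by 2 credits: one for the
 * block bitmap and one for the group descriptor touched when the extent
 * tree block is freed.)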
1823ecb94f5fSTheodore Ts'o 	 */
1824ecb94f5fSTheodore Ts'o 	if (ext4_journal_extend(handle, 2))
1825ecb94f5fSTheodore Ts'o 		return;
1826ecb94f5fSTheodore Ts'o 
1827ecb94f5fSTheodore Ts'o 	/*
1828ecb94f5fSTheodore Ts'o 	 * Copy the extent data up to the inode
1829ecb94f5fSTheodore Ts'o 	 */
1830ecb94f5fSTheodore Ts'o 	blk = ext4_idx_pblock(path[0].p_idx);
1831ecb94f5fSTheodore Ts'o 	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1832ecb94f5fSTheodore Ts'o 		sizeof(struct ext4_extent_idx);
1833ecb94f5fSTheodore Ts'o 	s += sizeof(struct ext4_extent_header);
1834ecb94f5fSTheodore Ts'o 
183510809df8STheodore Ts'o 	path[1].p_maxdepth = path[0].p_maxdepth;
1836ecb94f5fSTheodore Ts'o 	memcpy(path[0].p_hdr, path[1].p_hdr, s);
1837ecb94f5fSTheodore Ts'o 	path[0].p_depth = 0;
1838ecb94f5fSTheodore Ts'o 	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1839ecb94f5fSTheodore Ts'o 		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1840ecb94f5fSTheodore Ts'o 	path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1841ecb94f5fSTheodore Ts'o 
1842ecb94f5fSTheodore Ts'o 	brelse(path[1].p_bh);
1843ecb94f5fSTheodore Ts'o 	ext4_free_blocks(handle, inode, NULL, blk, 1,
184471d4f7d0STheodore Ts'o 			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
1845ecb94f5fSTheodore Ts'o }
1846ecb94f5fSTheodore Ts'o 
1847ecb94f5fSTheodore Ts'o /*
1848197217a5SYongqiang Yang  * This function tries to merge the @ex extent to neighbours in the tree:
1849197217a5SYongqiang Yang  * towards the left neighbour first, then towards the right one, and
1850197217a5SYongqiang Yang  * finally it tries to collapse the whole tree into the inode root.
1851ecb94f5fSTheodore Ts'o  */
1851ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle,
1852ecb94f5fSTheodore Ts'o 				  struct inode *inode,
1853197217a5SYongqiang Yang 				  struct ext4_ext_path *path,
1854197217a5SYongqiang Yang 				  struct ext4_extent *ex) {
1855197217a5SYongqiang Yang 	struct ext4_extent_header *eh;
1856197217a5SYongqiang Yang 	unsigned int depth;
1857197217a5SYongqiang Yang 	int merge_done = 0;
1858197217a5SYongqiang Yang 
1859197217a5SYongqiang Yang 	depth = ext_depth(inode);
1860197217a5SYongqiang Yang 	BUG_ON(path[depth].p_hdr == NULL);
1861197217a5SYongqiang Yang 	eh = path[depth].p_hdr;
1862197217a5SYongqiang Yang 
1863197217a5SYongqiang Yang 	if (ex > EXT_FIRST_EXTENT(eh))
1864197217a5SYongqiang Yang 		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1865197217a5SYongqiang Yang 
1866197217a5SYongqiang Yang 	if (!merge_done)
1867ecb94f5fSTheodore Ts'o 		(void) ext4_ext_try_to_merge_right(inode, path, ex);
1868197217a5SYongqiang Yang 
1869ecb94f5fSTheodore Ts'o 	ext4_ext_try_to_merge_up(handle, inode, path);
1870197217a5SYongqiang Yang }
1871197217a5SYongqiang Yang 
1872197217a5SYongqiang Yang /*
187325d14f98SAmit Arora  * check if a portion of the "newext" extent overlaps with an
187425d14f98SAmit Arora  * existing extent.
187525d14f98SAmit Arora  *
187625d14f98SAmit Arora  * If there is an overlap discovered, it updates the length of the newext
187725d14f98SAmit Arora  * such that there will be no overlap, and then returns 1.
187825d14f98SAmit Arora  * If there is no overlap found, it returns 0.
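 *
 * Worked example (illustrative, on a non-bigalloc file system): if newext
 * covers logical blocks [100, 119] and an allocated extent already starts
 * at block 110, newext->ee_len is trimmed to 10 so that newext covers
 * [100, 109] only, and 1 is returned.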
187925d14f98SAmit Arora  */
18804d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
18814d33b1efSTheodore Ts'o 					   struct inode *inode,
188225d14f98SAmit Arora 					   struct ext4_extent *newext,
188325d14f98SAmit Arora 					   struct ext4_ext_path *path)
188425d14f98SAmit Arora {
1885725d26d3SAneesh Kumar K.V 	ext4_lblk_t b1, b2;
188625d14f98SAmit Arora 	unsigned int depth, len1;
188725d14f98SAmit Arora 	unsigned int ret = 0;
188825d14f98SAmit Arora 
188925d14f98SAmit Arora 	b1 = le32_to_cpu(newext->ee_block);
1890a2df2a63SAmit Arora 	len1 = ext4_ext_get_actual_len(newext);
189125d14f98SAmit Arora 	depth = ext_depth(inode);
189225d14f98SAmit Arora 	if (!path[depth].p_ext)
189325d14f98SAmit Arora 		goto out;
1894f5a44db5STheodore Ts'o 	b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
189525d14f98SAmit Arora 
189625d14f98SAmit Arora 	/*
189725d14f98SAmit Arora 	 * get the next allocated block if the extent in the path
189825d14f98SAmit Arora 	 * is before the requested block(s)
189925d14f98SAmit Arora 	 */
190025d14f98SAmit Arora 	if (b2 < b1) {
190125d14f98SAmit Arora 		b2 = ext4_ext_next_allocated_block(path);
1902f17722f9SLukas Czerner 		if (b2 == EXT_MAX_BLOCKS)
190325d14f98SAmit Arora 			goto out;
1904f5a44db5STheodore Ts'o 		b2 = EXT4_LBLK_CMASK(sbi, b2);
190525d14f98SAmit Arora 	}
190625d14f98SAmit Arora 
1907725d26d3SAneesh Kumar K.V 	/* check for wrap through zero on extent logical start block */
190825d14f98SAmit Arora 	if (b1 + len1 < b1) {
1909f17722f9SLukas Czerner 		len1 = EXT_MAX_BLOCKS - b1;
191025d14f98SAmit Arora 		newext->ee_len = cpu_to_le16(len1);
191125d14f98SAmit Arora 		ret = 1;
191225d14f98SAmit Arora 	}
191325d14f98SAmit Arora 
191425d14f98SAmit Arora 	/* check for overlap */
191525d14f98SAmit Arora 	if (b1 + len1 > b2) {
191625d14f98SAmit Arora 		newext->ee_len = cpu_to_le16(b2 - b1);
191725d14f98SAmit Arora 		ret = 1;
191825d14f98SAmit Arora 	}
191925d14f98SAmit Arora out:
192025d14f98SAmit Arora 	return ret;
192125d14f98SAmit Arora }
192225d14f98SAmit Arora 
192325d14f98SAmit Arora /*
1924d0d856e8SRandy Dunlap  * ext4_ext_insert_extent:
1925d0d856e8SRandy Dunlap  * tries to merge requested extent into the existing extent or
1926d0d856e8SRandy Dunlap  * inserts requested extent as new one into the tree,
1927d0d856e8SRandy Dunlap  * creating new leaf in the no-space case.
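 *
 * Rough order of operations below: first try to append or prepend the new
 * extent to a neighbouring extent in the current leaf; failing that, insert
 * it into the current leaf if there is room, or into the next leaf; as a
 * last resort, make room by splitting or growing the tree via
 * ext4_ext_create_new_leaf().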
1928a86c6181SAlex Tomas */ 1929a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1930dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 1931107a7bd3STheodore Ts'o struct ext4_extent *newext, int gb_flags) 1932a86c6181SAlex Tomas { 1933dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 1934a86c6181SAlex Tomas struct ext4_extent_header *eh; 1935a86c6181SAlex Tomas struct ext4_extent *ex, *fex; 1936a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */ 1937a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL; 1938725d26d3SAneesh Kumar K.V int depth, len, err; 1939725d26d3SAneesh Kumar K.V ext4_lblk_t next; 1940556615dcSLukas Czerner int mb_flags = 0, unwritten; 1941a86c6181SAlex Tomas 1942e3cf5d5dSTheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 1943e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_DELALLOC_RESERVED; 1944273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1945273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1946273df556SFrank Mayhar return -EIO; 1947273df556SFrank Mayhar } 1948a86c6181SAlex Tomas depth = ext_depth(inode); 1949a86c6181SAlex Tomas ex = path[depth].p_ext; 1950be8981beSLukas Czerner eh = path[depth].p_hdr; 1951273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1952273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1953273df556SFrank Mayhar return -EIO; 1954273df556SFrank Mayhar } 1955a86c6181SAlex Tomas 1956a86c6181SAlex Tomas /* try to insert block into found extent and return */ 1957107a7bd3STheodore Ts'o if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { 1958be8981beSLukas Czerner 1959be8981beSLukas Czerner /* 1960be8981beSLukas Czerner * Try to see whether we should rather test the extent on 1961be8981beSLukas Czerner * right from ex, or from the left of ex. This is because 1962ed8a1a76STheodore Ts'o * ext4_find_extent() can return either extent on the 1963be8981beSLukas Czerner * left, or on the right from the searched position. This 1964be8981beSLukas Czerner * will make merging more effective. 
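 * For instance, if newext begins past the end of ex, the candidate is
 * moved one extent to the right; if newext ends before ex begins, it is
 * moved one to the left.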
1965be8981beSLukas Czerner */ 1966be8981beSLukas Czerner if (ex < EXT_LAST_EXTENT(eh) && 1967be8981beSLukas Czerner (le32_to_cpu(ex->ee_block) + 1968be8981beSLukas Czerner ext4_ext_get_actual_len(ex) < 1969be8981beSLukas Czerner le32_to_cpu(newext->ee_block))) { 1970be8981beSLukas Czerner ex += 1; 1971be8981beSLukas Czerner goto prepend; 1972be8981beSLukas Czerner } else if ((ex > EXT_FIRST_EXTENT(eh)) && 1973be8981beSLukas Czerner (le32_to_cpu(newext->ee_block) + 1974be8981beSLukas Czerner ext4_ext_get_actual_len(newext) < 1975be8981beSLukas Czerner le32_to_cpu(ex->ee_block))) 1976be8981beSLukas Czerner ex -= 1; 1977be8981beSLukas Czerner 1978be8981beSLukas Czerner /* Try to append newex to the ex */ 1979be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, ex, newext)) { 1980be8981beSLukas Czerner ext_debug("append [%d]%d block to %u:[%d]%d" 1981be8981beSLukas Czerner "(from %llu)\n", 1982556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 1983a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 1984a86c6181SAlex Tomas le32_to_cpu(ex->ee_block), 1985556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 1986bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), 1987bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 1988be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 1989be8981beSLukas Czerner path + depth); 19907e028976SAvantika Mathur if (err) 1991a86c6181SAlex Tomas return err; 1992556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 1993a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1994a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext)); 1995556615dcSLukas Czerner if (unwritten) 1996556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 1997a86c6181SAlex Tomas eh = path[depth].p_hdr; 1998a86c6181SAlex Tomas nearex = ex; 1999a86c6181SAlex Tomas goto merge; 2000a86c6181SAlex Tomas } 2001a86c6181SAlex Tomas 2002be8981beSLukas Czerner prepend: 2003be8981beSLukas Czerner /* Try to prepend newex to the ex */ 2004be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, newext, ex)) { 2005be8981beSLukas Czerner ext_debug("prepend %u[%d]%d block to %u:[%d]%d" 2006be8981beSLukas Czerner "(from %llu)\n", 2007be8981beSLukas Czerner le32_to_cpu(newext->ee_block), 2008556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2009be8981beSLukas Czerner ext4_ext_get_actual_len(newext), 2010be8981beSLukas Czerner le32_to_cpu(ex->ee_block), 2011556615dcSLukas Czerner ext4_ext_is_unwritten(ex), 2012be8981beSLukas Czerner ext4_ext_get_actual_len(ex), 2013be8981beSLukas Czerner ext4_ext_pblock(ex)); 2014be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 2015be8981beSLukas Czerner path + depth); 2016be8981beSLukas Czerner if (err) 2017be8981beSLukas Czerner return err; 2018be8981beSLukas Czerner 2019556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 2020be8981beSLukas Czerner ex->ee_block = newext->ee_block; 2021be8981beSLukas Czerner ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 2022be8981beSLukas Czerner ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 2023be8981beSLukas Czerner + ext4_ext_get_actual_len(newext)); 2024556615dcSLukas Czerner if (unwritten) 2025556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 2026be8981beSLukas Czerner eh = path[depth].p_hdr; 2027be8981beSLukas Czerner nearex = ex; 2028be8981beSLukas Czerner goto merge; 2029be8981beSLukas Czerner } 2030be8981beSLukas Czerner } 2031be8981beSLukas Czerner 2032a86c6181SAlex Tomas depth = ext_depth(inode); 2033a86c6181SAlex Tomas eh = path[depth].p_hdr; 
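	/* no merge happened above: insert newext as a separate entry */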
2034a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 2035a86c6181SAlex Tomas goto has_space; 2036a86c6181SAlex Tomas 2037a86c6181SAlex Tomas /* probably next leaf has space for us? */ 2038a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh); 2039598dbdf2SRobin Dong next = EXT_MAX_BLOCKS; 2040598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 20415718789dSRobin Dong next = ext4_ext_next_leaf_block(path); 2042598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) { 204332de6756SYongqiang Yang ext_debug("next leaf block - %u\n", next); 2044a86c6181SAlex Tomas BUG_ON(npath != NULL); 2045ed8a1a76STheodore Ts'o npath = ext4_find_extent(inode, next, NULL, 0); 2046a86c6181SAlex Tomas if (IS_ERR(npath)) 2047a86c6181SAlex Tomas return PTR_ERR(npath); 2048a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth); 2049a86c6181SAlex Tomas eh = npath[depth].p_hdr; 2050a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 205125985edcSLucas De Marchi ext_debug("next leaf isn't full(%d)\n", 2052a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries)); 2053a86c6181SAlex Tomas path = npath; 2054ffb505ffSRobin Dong goto has_space; 2055a86c6181SAlex Tomas } 2056a86c6181SAlex Tomas ext_debug("next leaf has no free space(%d,%d)\n", 2057a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2058a86c6181SAlex Tomas } 2059a86c6181SAlex Tomas 2060a86c6181SAlex Tomas /* 2061d0d856e8SRandy Dunlap * There is no free space in the found leaf. 2062d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree. 2063a86c6181SAlex Tomas */ 2064107a7bd3STheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2065e3cf5d5dSTheodore Ts'o mb_flags |= EXT4_MB_USE_RESERVED; 2066107a7bd3STheodore Ts'o err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2067dfe50809STheodore Ts'o ppath, newext); 2068a86c6181SAlex Tomas if (err) 2069a86c6181SAlex Tomas goto cleanup; 2070a86c6181SAlex Tomas depth = ext_depth(inode); 2071a86c6181SAlex Tomas eh = path[depth].p_hdr; 2072a86c6181SAlex Tomas 2073a86c6181SAlex Tomas has_space: 2074a86c6181SAlex Tomas nearex = path[depth].p_ext; 2075a86c6181SAlex Tomas 20767e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 20777e028976SAvantika Mathur if (err) 2078a86c6181SAlex Tomas goto cleanup; 2079a86c6181SAlex Tomas 2080a86c6181SAlex Tomas if (!nearex) { 2081a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */ 208232de6756SYongqiang Yang ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", 2083a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2084bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2085556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2086a2df2a63SAmit Arora ext4_ext_get_actual_len(newext)); 208780e675f9SEric Gouriou nearex = EXT_FIRST_EXTENT(eh); 2088a86c6181SAlex Tomas } else { 208980e675f9SEric Gouriou if (le32_to_cpu(newext->ee_block) 209080e675f9SEric Gouriou > le32_to_cpu(nearex->ee_block)) { 209180e675f9SEric Gouriou /* Insert after */ 209232de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d before: " 209332de6756SYongqiang Yang "nearest %p\n", 2094a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2095bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2096556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 2097a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 209880e675f9SEric Gouriou nearex); 209980e675f9SEric Gouriou nearex++; 210080e675f9SEric Gouriou } else { 210180e675f9SEric Gouriou /* Insert before */ 
210280e675f9SEric Gouriou BUG_ON(newext->ee_block == nearex->ee_block); 210332de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d after: " 210432de6756SYongqiang Yang "nearest %p\n", 210580e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 210680e675f9SEric Gouriou ext4_ext_pblock(newext), 2107556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 210880e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 210980e675f9SEric Gouriou nearex); 211080e675f9SEric Gouriou } 211180e675f9SEric Gouriou len = EXT_LAST_EXTENT(eh) - nearex + 1; 211280e675f9SEric Gouriou if (len > 0) { 211332de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d: " 211480e675f9SEric Gouriou "move %d extents from 0x%p to 0x%p\n", 211580e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 211680e675f9SEric Gouriou ext4_ext_pblock(newext), 2117556615dcSLukas Czerner ext4_ext_is_unwritten(newext), 211880e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 211980e675f9SEric Gouriou len, nearex, nearex + 1); 212080e675f9SEric Gouriou memmove(nearex + 1, nearex, 212180e675f9SEric Gouriou len * sizeof(struct ext4_extent)); 212280e675f9SEric Gouriou } 2123a86c6181SAlex Tomas } 2124a86c6181SAlex Tomas 2125e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1); 212680e675f9SEric Gouriou path[depth].p_ext = nearex; 2127a86c6181SAlex Tomas nearex->ee_block = newext->ee_block; 2128bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2129a86c6181SAlex Tomas nearex->ee_len = newext->ee_len; 2130a86c6181SAlex Tomas 2131a86c6181SAlex Tomas merge: 2132e7bcf823SHaiboLiu /* try to merge extents */ 2133107a7bd3STheodore Ts'o if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) 2134ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, nearex); 2135a86c6181SAlex Tomas 2136a86c6181SAlex Tomas 2137a86c6181SAlex Tomas /* time to correct all indexes above */ 2138a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2139a86c6181SAlex Tomas if (err) 2140a86c6181SAlex Tomas goto cleanup; 2141a86c6181SAlex Tomas 2142ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2143a86c6181SAlex Tomas 2144a86c6181SAlex Tomas cleanup: 2145a86c6181SAlex Tomas ext4_ext_drop_refs(npath); 2146a86c6181SAlex Tomas kfree(npath); 2147a86c6181SAlex Tomas return err; 2148a86c6181SAlex Tomas } 2149a86c6181SAlex Tomas 215091dd8c11SLukas Czerner static int ext4_fill_fiemap_extents(struct inode *inode, 215191dd8c11SLukas Czerner ext4_lblk_t block, ext4_lblk_t num, 215291dd8c11SLukas Czerner struct fiemap_extent_info *fieinfo) 21536873fa0dSEric Sandeen { 21546873fa0dSEric Sandeen struct ext4_ext_path *path = NULL; 21556873fa0dSEric Sandeen struct ext4_extent *ex; 215669eb33dcSZheng Liu struct extent_status es; 215791dd8c11SLukas Czerner ext4_lblk_t next, next_del, start = 0, end = 0; 21586873fa0dSEric Sandeen ext4_lblk_t last = block + num; 215991dd8c11SLukas Czerner int exists, depth = 0, err = 0; 216091dd8c11SLukas Czerner unsigned int flags = 0; 216191dd8c11SLukas Czerner unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 21626873fa0dSEric Sandeen 2163f17722f9SLukas Czerner while (block < last && block != EXT_MAX_BLOCKS) { 21646873fa0dSEric Sandeen num = last - block; 21656873fa0dSEric Sandeen /* find extent for this block */ 2166fab3a549STheodore Ts'o down_read(&EXT4_I(inode)->i_data_sem); 216791dd8c11SLukas Czerner 2168ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, block, &path, 0); 21696873fa0dSEric Sandeen if (IS_ERR(path)) { 217091dd8c11SLukas Czerner 
up_read(&EXT4_I(inode)->i_data_sem); 21716873fa0dSEric Sandeen err = PTR_ERR(path); 21726873fa0dSEric Sandeen path = NULL; 21736873fa0dSEric Sandeen break; 21746873fa0dSEric Sandeen } 21756873fa0dSEric Sandeen 21766873fa0dSEric Sandeen depth = ext_depth(inode); 2177273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 217891dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 2179273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2180273df556SFrank Mayhar err = -EIO; 2181273df556SFrank Mayhar break; 2182273df556SFrank Mayhar } 21836873fa0dSEric Sandeen ex = path[depth].p_ext; 21846873fa0dSEric Sandeen next = ext4_ext_next_allocated_block(path); 21856873fa0dSEric Sandeen 218691dd8c11SLukas Czerner flags = 0; 21876873fa0dSEric Sandeen exists = 0; 21886873fa0dSEric Sandeen if (!ex) { 21896873fa0dSEric Sandeen /* there is no extent yet, so try to allocate 21906873fa0dSEric Sandeen * all requested space */ 21916873fa0dSEric Sandeen start = block; 21926873fa0dSEric Sandeen end = block + num; 21936873fa0dSEric Sandeen } else if (le32_to_cpu(ex->ee_block) > block) { 21946873fa0dSEric Sandeen /* need to allocate space before found extent */ 21956873fa0dSEric Sandeen start = block; 21966873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block); 21976873fa0dSEric Sandeen if (block + num < end) 21986873fa0dSEric Sandeen end = block + num; 21996873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block) 22006873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex)) { 22016873fa0dSEric Sandeen /* need to allocate space after found extent */ 22026873fa0dSEric Sandeen start = block; 22036873fa0dSEric Sandeen end = block + num; 22046873fa0dSEric Sandeen if (end >= next) 22056873fa0dSEric Sandeen end = next; 22066873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block)) { 22076873fa0dSEric Sandeen /* 22086873fa0dSEric Sandeen * some part of requested space is covered 22096873fa0dSEric Sandeen * by found extent 22106873fa0dSEric Sandeen */ 22116873fa0dSEric Sandeen start = block; 22126873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block) 22136873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex); 22146873fa0dSEric Sandeen if (block + num < end) 22156873fa0dSEric Sandeen end = block + num; 22166873fa0dSEric Sandeen exists = 1; 22176873fa0dSEric Sandeen } else { 22186873fa0dSEric Sandeen BUG(); 22196873fa0dSEric Sandeen } 22206873fa0dSEric Sandeen BUG_ON(end <= start); 22216873fa0dSEric Sandeen 22226873fa0dSEric Sandeen if (!exists) { 222369eb33dcSZheng Liu es.es_lblk = start; 222469eb33dcSZheng Liu es.es_len = end - start; 222569eb33dcSZheng Liu es.es_pblk = 0; 22266873fa0dSEric Sandeen } else { 222769eb33dcSZheng Liu es.es_lblk = le32_to_cpu(ex->ee_block); 222869eb33dcSZheng Liu es.es_len = ext4_ext_get_actual_len(ex); 222969eb33dcSZheng Liu es.es_pblk = ext4_ext_pblock(ex); 2230556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex)) 223191dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_UNWRITTEN; 22326873fa0dSEric Sandeen } 22336873fa0dSEric Sandeen 223491dd8c11SLukas Czerner /* 223569eb33dcSZheng Liu * Find delayed extent and update es accordingly. We call 223669eb33dcSZheng Liu * it even in !exists case to find out whether es is the 223791dd8c11SLukas Czerner * last existing extent or not. 
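 * (If ext4_find_delayed_extent() filled es for a range we thought was a
 * hole, the range is reported below as a delayed-allocation extent with
 * the DELALLOC|UNKNOWN flags instead of being skipped.)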
223891dd8c11SLukas Czerner 		 */
223969eb33dcSZheng Liu 		next_del = ext4_find_delayed_extent(inode, &es);
224091dd8c11SLukas Czerner 		if (!exists && next_del) {
224191dd8c11SLukas Czerner 			exists = 1;
224272dac95dSJie Liu 			flags |= (FIEMAP_EXTENT_DELALLOC |
224372dac95dSJie Liu 				  FIEMAP_EXTENT_UNKNOWN);
224491dd8c11SLukas Czerner 		}
224591dd8c11SLukas Czerner 		up_read(&EXT4_I(inode)->i_data_sem);
224691dd8c11SLukas Czerner 
224769eb33dcSZheng Liu 		if (unlikely(es.es_len == 0)) {
224869eb33dcSZheng Liu 			EXT4_ERROR_INODE(inode, "es.es_len == 0");
2249273df556SFrank Mayhar 			err = -EIO;
2250273df556SFrank Mayhar 			break;
2251273df556SFrank Mayhar 		}
22526873fa0dSEric Sandeen 
2253f7fec032SZheng Liu 		/*
2254f7fec032SZheng Liu 		 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
2255f7fec032SZheng Liu 		 * We need to check next == EXT_MAX_BLOCKS because an extent
2256f7fec032SZheng Liu 		 * can carry both unwritten and delayed status at once: when
2257f7fec032SZheng Liu 		 * a delayed-allocated range is later allocated as unwritten
2258f7fec032SZheng Liu 		 * by fallocate, the extent status tree tracks both states
2259f7fec032SZheng Liu 		 * in a single extent.
2260f7fec032SZheng Liu 		 *
2261f7fec032SZheng Liu 		 * So we could return an unwritten and delayed extent whose
2262f7fec032SZheng Liu 		 * block is equal to 'next'.
2263f7fec032SZheng Liu 		 */
2264f7fec032SZheng Liu 		if (next == next_del && next == EXT_MAX_BLOCKS) {
226591dd8c11SLukas Czerner 			flags |= FIEMAP_EXTENT_LAST;
226691dd8c11SLukas Czerner 			if (unlikely(next_del != EXT_MAX_BLOCKS ||
226791dd8c11SLukas Czerner 				     next != EXT_MAX_BLOCKS)) {
226891dd8c11SLukas Czerner 				EXT4_ERROR_INODE(inode,
226991dd8c11SLukas Czerner 						 "next extent == %u, next "
227091dd8c11SLukas Czerner 						 "delalloc extent = %u",
227191dd8c11SLukas Czerner 						 next, next_del);
227291dd8c11SLukas Czerner 				err = -EIO;
227391dd8c11SLukas Czerner 				break;
227491dd8c11SLukas Czerner 			}
227591dd8c11SLukas Czerner 		}
227691dd8c11SLukas Czerner 
227791dd8c11SLukas Czerner 		if (exists) {
227891dd8c11SLukas Czerner 			err = fiemap_fill_next_extent(fieinfo,
227969eb33dcSZheng Liu 				(__u64)es.es_lblk << blksize_bits,
228069eb33dcSZheng Liu 				(__u64)es.es_pblk << blksize_bits,
228169eb33dcSZheng Liu 				(__u64)es.es_len << blksize_bits,
228291dd8c11SLukas Czerner 				flags);
22836873fa0dSEric Sandeen 			if (err < 0)
22846873fa0dSEric Sandeen 				break;
228591dd8c11SLukas Czerner 			if (err == 1) {
22866873fa0dSEric Sandeen 				err = 0;
22876873fa0dSEric Sandeen 				break;
22886873fa0dSEric Sandeen 			}
22896873fa0dSEric Sandeen 		}
22906873fa0dSEric Sandeen 
229169eb33dcSZheng Liu 		block = es.es_lblk + es.es_len;
22926873fa0dSEric Sandeen 	}
22936873fa0dSEric Sandeen 
22946873fa0dSEric Sandeen 	ext4_ext_drop_refs(path);
22956873fa0dSEric Sandeen 	kfree(path);
22966873fa0dSEric Sandeen 	return err;
22976873fa0dSEric Sandeen }
22986873fa0dSEric Sandeen 
2299a86c6181SAlex Tomas /*
2300d0d856e8SRandy Dunlap  * ext4_ext_put_gap_in_cache:
2301d0d856e8SRandy Dunlap  * calculate boundaries of the gap that the requested block fits into
2302a86c6181SAlex Tomas  * and cache this gap
2303a86c6181SAlex Tomas  */
230409b88252SAvantika Mathur static void
2305a86c6181SAlex Tomas ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2306725d26d3SAneesh Kumar K.V 				ext4_lblk_t block)
2307a86c6181SAlex Tomas {
2308a86c6181SAlex Tomas 	int depth = ext_depth(inode);
230927b1b228SAndi Shyti 	unsigned long len = 0;
231027b1b228SAndi Shyti 	ext4_lblk_t lblock = 0;
2311a86c6181SAlex Tomas 	struct ext4_extent *ex;
2312a86c6181SAlex Tomas 
2313a86c6181SAlex Tomas 	ex = path[depth].p_ext;
2314a86c6181SAlex
Tomas if (ex == NULL) { 231569eb33dcSZheng Liu /* 231669eb33dcSZheng Liu * there is no extent yet, so gap is [0;-] and we 231769eb33dcSZheng Liu * don't cache it 231869eb33dcSZheng Liu */ 2319a86c6181SAlex Tomas ext_debug("cache gap(whole file):"); 2320a86c6181SAlex Tomas } else if (block < le32_to_cpu(ex->ee_block)) { 2321a86c6181SAlex Tomas lblock = block; 2322a86c6181SAlex Tomas len = le32_to_cpu(ex->ee_block) - block; 2323bba90743SEric Sandeen ext_debug("cache gap(before): %u [%u:%u]", 2324bba90743SEric Sandeen block, 2325bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2326bba90743SEric Sandeen ext4_ext_get_actual_len(ex)); 2327d100eef2SZheng Liu if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2328d100eef2SZheng Liu ext4_es_insert_extent(inode, lblock, len, ~0, 2329d100eef2SZheng Liu EXTENT_STATUS_HOLE); 2330a86c6181SAlex Tomas } else if (block >= le32_to_cpu(ex->ee_block) 2331a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex)) { 2332725d26d3SAneesh Kumar K.V ext4_lblk_t next; 2333a86c6181SAlex Tomas lblock = le32_to_cpu(ex->ee_block) 2334a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex); 2335725d26d3SAneesh Kumar K.V 2336725d26d3SAneesh Kumar K.V next = ext4_ext_next_allocated_block(path); 2337bba90743SEric Sandeen ext_debug("cache gap(after): [%u:%u] %u", 2338bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2339bba90743SEric Sandeen ext4_ext_get_actual_len(ex), 2340bba90743SEric Sandeen block); 2341725d26d3SAneesh Kumar K.V BUG_ON(next == lblock); 2342725d26d3SAneesh Kumar K.V len = next - lblock; 2343d100eef2SZheng Liu if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2344d100eef2SZheng Liu ext4_es_insert_extent(inode, lblock, len, ~0, 2345d100eef2SZheng Liu EXTENT_STATUS_HOLE); 2346a86c6181SAlex Tomas } else { 2347a86c6181SAlex Tomas BUG(); 2348a86c6181SAlex Tomas } 2349a86c6181SAlex Tomas 2350bba90743SEric Sandeen ext_debug(" -> %u:%lu\n", lblock, len); 2351a86c6181SAlex Tomas } 2352a86c6181SAlex Tomas 2353a86c6181SAlex Tomas /* 2354d0d856e8SRandy Dunlap * ext4_ext_rm_idx: 2355d0d856e8SRandy Dunlap * removes index from the index block. 
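 * It also frees the block that the removed entry pointed to, and when
 * the remaining entry at this level is the leftmost one in its block,
 * the parent index levels are updated so their entries carry the new
 * first logical block number.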
2356a86c6181SAlex Tomas */ 23571d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2358c36575e6SForrest Liu struct ext4_ext_path *path, int depth) 2359a86c6181SAlex Tomas { 2360a86c6181SAlex Tomas int err; 2361f65e6fbaSAlex Tomas ext4_fsblk_t leaf; 2362a86c6181SAlex Tomas 2363a86c6181SAlex Tomas /* free index block */ 2364c36575e6SForrest Liu depth--; 2365c36575e6SForrest Liu path = path + depth; 2366bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx); 2367273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) { 2368273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2369273df556SFrank Mayhar return -EIO; 2370273df556SFrank Mayhar } 23717e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path); 23727e028976SAvantika Mathur if (err) 2373a86c6181SAlex Tomas return err; 23740e1147b0SRobin Dong 23750e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 23760e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 23770e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx); 23780e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len); 23790e1147b0SRobin Dong } 23800e1147b0SRobin Dong 2381e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1); 23827e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path); 23837e028976SAvantika Mathur if (err) 2384a86c6181SAlex Tomas return err; 23852ae02107SMingming Cao ext_debug("index is empty, remove it, free block %llu\n", leaf); 2386d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf); 2387d8990240SAditya Kali 23887dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1, 2389e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2390c36575e6SForrest Liu 2391c36575e6SForrest Liu while (--depth >= 0) { 2392c36575e6SForrest Liu if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2393c36575e6SForrest Liu break; 2394c36575e6SForrest Liu path--; 2395c36575e6SForrest Liu err = ext4_ext_get_access(handle, inode, path); 2396c36575e6SForrest Liu if (err) 2397c36575e6SForrest Liu break; 2398c36575e6SForrest Liu path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2399c36575e6SForrest Liu err = ext4_ext_dirty(handle, inode, path); 2400c36575e6SForrest Liu if (err) 2401c36575e6SForrest Liu break; 2402c36575e6SForrest Liu } 2403a86c6181SAlex Tomas return err; 2404a86c6181SAlex Tomas } 2405a86c6181SAlex Tomas 2406a86c6181SAlex Tomas /* 2407ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent: 2408ee12b630SMingming Cao * This routine returns the maximum number of credits needed to insert 2409ee12b630SMingming Cao * an extent into the extent tree. 2410ee12b630SMingming Cao * When passing the actual path, the caller should calculate credits 2411ee12b630SMingming Cao * under i_data_sem. 2412a86c6181SAlex Tomas */ 2413525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2414a86c6181SAlex Tomas struct ext4_ext_path *path) 2415a86c6181SAlex Tomas { 2416a86c6181SAlex Tomas if (path) { 2417ee12b630SMingming Cao int depth = ext_depth(inode); 2418f3bd1f3fSMingming Cao int ret = 0; 2419ee12b630SMingming Cao 2420a86c6181SAlex Tomas /* probably there is space in leaf?
*/ 2421a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2422ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2423ee12b630SMingming Cao 2424ee12b630SMingming Cao /* 2425ee12b630SMingming Cao * There is some space in the leaf, so there is no 2426ee12b630SMingming Cao * need to account for the leaf block credit 2427ee12b630SMingming Cao * 2428ee12b630SMingming Cao * bitmaps and block group descriptor blocks 2429df3ab170STao Ma * and other metadata blocks still need to be 2430ee12b630SMingming Cao * accounted for. 2431ee12b630SMingming Cao */ 2432525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */ 2433ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 24345887e98bSAneesh Kumar K.V return ret; 2435ee12b630SMingming Cao } 2436ee12b630SMingming Cao } 2437ee12b630SMingming Cao 2438525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks); 2439a86c6181SAlex Tomas } 2440a86c6181SAlex Tomas 2441a86c6181SAlex Tomas /* 2442fffb2739SJan Kara * How many index/leaf blocks need to change/allocate to add @extents extents? 2443ee12b630SMingming Cao * 2444fffb2739SJan Kara * If we add a single extent, then in the worst case, each tree level's 2445fffb2739SJan Kara * index/leaf needs to be changed in case the tree splits. 2446ee12b630SMingming Cao * 2447fffb2739SJan Kara * If more extents are inserted, they could cause the whole tree to split 2448fffb2739SJan Kara * more than once, but this is really rare. 2449a86c6181SAlex Tomas */ 2450fffb2739SJan Kara int ext4_ext_index_trans_blocks(struct inode *inode, int extents) 2451ee12b630SMingming Cao { 2452ee12b630SMingming Cao int index; 2453f19d5870STao Ma int depth; 2454f19d5870STao Ma 2455f19d5870STao Ma /* If we are converting the inline data, only one is needed here. */ 2456f19d5870STao Ma if (ext4_has_inline_data(inode)) 2457f19d5870STao Ma return 1; 2458f19d5870STao Ma 2459f19d5870STao Ma depth = ext_depth(inode); 2460a86c6181SAlex Tomas 2461fffb2739SJan Kara if (extents <= 1) 2462ee12b630SMingming Cao index = depth * 2; 2463ee12b630SMingming Cao else 2464ee12b630SMingming Cao index = depth * 3; 2465a86c6181SAlex Tomas 2466ee12b630SMingming Cao return index; 2467a86c6181SAlex Tomas } 2468a86c6181SAlex Tomas 2469981250caSTheodore Ts'o static inline int get_default_free_blocks_flags(struct inode *inode) 2470981250caSTheodore Ts'o { 2471981250caSTheodore Ts'o if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2472981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 2473981250caSTheodore Ts'o else if (ext4_should_journal_data(inode)) 2474981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_FORGET; 2475981250caSTheodore Ts'o return 0; 2476981250caSTheodore Ts'o } 2477981250caSTheodore Ts'o 2478a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2479a86c6181SAlex Tomas struct ext4_extent *ex, 2480d23142c6SLukas Czerner long long *partial_cluster, 2481725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to) 2482a86c6181SAlex Tomas { 24830aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2484a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex); 24850aa06000STheodore Ts'o ext4_fsblk_t pblk; 2486981250caSTheodore Ts'o int flags = get_default_free_blocks_flags(inode); 248718888cf0SAndrey Sidorov 24880aa06000STheodore Ts'o /* 24890aa06000STheodore Ts'o * For bigalloc file systems, we never free a partial cluster 24900aa06000STheodore Ts'o * at the beginning of the extent.
Instead, we make a note 24910aa06000STheodore Ts'o * that we tried freeing the cluster, and check to see if we 24920aa06000STheodore Ts'o * need to free it on a subsequent call to ext4_remove_blocks, 2493*345ee947SEric Whitney * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space. 24940aa06000STheodore Ts'o */ 24950aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 24960aa06000STheodore Ts'o 2497d8990240SAditya Kali trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); 24980aa06000STheodore Ts'o /* 24990aa06000STheodore Ts'o * If we have a partial cluster, and it's different from the 25000aa06000STheodore Ts'o * cluster of the last block, we need to explicitly free the 25010aa06000STheodore Ts'o * partial cluster here. 25020aa06000STheodore Ts'o */ 25030aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - 1; 2504*345ee947SEric Whitney if (*partial_cluster > 0 && 2505*345ee947SEric Whitney *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) { 25060aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 25070aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 25080aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 25090aa06000STheodore Ts'o *partial_cluster = 0; 25100aa06000STheodore Ts'o } 25110aa06000STheodore Ts'o 2512a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2513a86c6181SAlex Tomas { 2514a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2515a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock); 2516a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len; 2517a86c6181SAlex Tomas sbi->s_ext_extents++; 2518a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min) 2519a86c6181SAlex Tomas sbi->s_ext_min = ee_len; 2520a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max) 2521a86c6181SAlex Tomas sbi->s_ext_max = ee_len; 2522a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max) 2523a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode); 2524a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock); 2525a86c6181SAlex Tomas } 2526a86c6181SAlex Tomas #endif 2527a86c6181SAlex Tomas if (from >= le32_to_cpu(ex->ee_block) 2528a2df2a63SAmit Arora && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2529a86c6181SAlex Tomas /* tail removal */ 2530725d26d3SAneesh Kumar K.V ext4_lblk_t num; 2531*345ee947SEric Whitney long long first_cluster; 2532725d26d3SAneesh Kumar K.V 2533a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from; 25340aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num; 2535d23142c6SLukas Czerner /* 2536d23142c6SLukas Czerner * Usually we want to free partial cluster at the end of the 2537d23142c6SLukas Czerner * extent, except for the situation when the cluster is still 2538d23142c6SLukas Czerner * used by any other extent (partial_cluster is negative). 
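 *
 * The cluster bookkeeping above is plain shift arithmetic once the
 * cluster ratio is known. A minimal sketch with stand-in helpers
 * (not the kernel's EXT4_B2C()/EXT4_PBLK_COFF() macros):
 */
#if 0	/* not built */
#include <stdint.h>

#define TOY_CLUSTER_BITS 4	/* e.g. 16 blocks per cluster (bigalloc) */

static inline uint64_t toy_b2c(uint64_t pblk)
{
	return pblk >> TOY_CLUSTER_BITS;	/* block -> cluster number */
}

static inline uint64_t toy_pblk_coff(uint64_t pblk)
{
	return pblk & ((1ULL << TOY_CLUSTER_BITS) - 1);	/* offset in cluster */
}

/*
 * With 16 blocks per cluster: block 37 lies in cluster 2. If a partial
 * cluster of 1 was noted earlier, the clusters differ, so cluster 1 is
 * freed here; a negative note of -2 would instead protect cluster 2
 * because some of its blocks are still in use.
 */
#endif
/*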
2539d23142c6SLukas Czerner */ 2540d23142c6SLukas Czerner if (*partial_cluster < 0 && 2541*345ee947SEric Whitney *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1)) 2542d23142c6SLukas Czerner flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2543d23142c6SLukas Czerner 2544d23142c6SLukas Czerner ext_debug("free last %u blocks starting %llu partial %lld\n", 2545d23142c6SLukas Czerner num, pblk, *partial_cluster); 25460aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 25470aa06000STheodore Ts'o /* 25480aa06000STheodore Ts'o * If the block range to be freed didn't start at the 25490aa06000STheodore Ts'o * beginning of a cluster, and we removed the entire 2550d23142c6SLukas Czerner * extent and the cluster is not used by any other extent, 2551d23142c6SLukas Czerner * save the partial cluster here, since we might need to 2552*345ee947SEric Whitney * delete if we determine that the truncate or punch hole 2553*345ee947SEric Whitney * operation has removed all of the blocks in the cluster. 2554*345ee947SEric Whitney * If that cluster is used by another extent, preserve its 2555*345ee947SEric Whitney * negative value so it isn't freed later on. 2556d23142c6SLukas Czerner * 2557*345ee947SEric Whitney * If the whole extent wasn't freed, we've reached the 2558*345ee947SEric Whitney * start of the truncated/punched region and have finished 2559*345ee947SEric Whitney * removing blocks. If there's a partial cluster here it's 2560*345ee947SEric Whitney * shared with the remainder of the extent and is no longer 2561*345ee947SEric Whitney * a candidate for removal. 25620aa06000STheodore Ts'o */ 2563*345ee947SEric Whitney if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) { 2564*345ee947SEric Whitney first_cluster = (long long) EXT4_B2C(sbi, pblk); 2565*345ee947SEric Whitney if (first_cluster != -*partial_cluster) 2566*345ee947SEric Whitney *partial_cluster = first_cluster; 2567*345ee947SEric Whitney } else { 25680aa06000STheodore Ts'o *partial_cluster = 0; 2569*345ee947SEric Whitney } 257078fb9cdfSLukas Czerner } else 257178fb9cdfSLukas Czerner ext4_error(sbi->s_sb, "strange request: removal(2) " 2572725d26d3SAneesh Kumar K.V "%u-%u from %u:%u\n", 2573a2df2a63SAmit Arora from, to, le32_to_cpu(ex->ee_block), ee_len); 2574a86c6181SAlex Tomas return 0; 2575a86c6181SAlex Tomas } 2576a86c6181SAlex Tomas 2577d583fb87SAllison Henderson 2578d583fb87SAllison Henderson /* 2579d583fb87SAllison Henderson * ext4_ext_rm_leaf() Removes the extents associated with the 25805bf43760SEric Whitney * blocks appearing between "start" and "end". Both "start" 25815bf43760SEric Whitney * and "end" must appear in the same extent or EIO is returned. 2582d583fb87SAllison Henderson * 2583d583fb87SAllison Henderson * @handle: The journal handle 2584d583fb87SAllison Henderson * @inode: The files inode 2585d583fb87SAllison Henderson * @path: The path to the leaf 2586d23142c6SLukas Czerner * @partial_cluster: The cluster which we'll have to free if all extents 25875bf43760SEric Whitney * has been released from it. However, if this value is 25885bf43760SEric Whitney * negative, it's a cluster just to the right of the 25895bf43760SEric Whitney * punched region and it must not be freed. 
2590d583fb87SAllison Henderson * @start: The first block to remove 2591d583fb87SAllison Henderson * @end: The last block to remove 2592d583fb87SAllison Henderson */ 2593a86c6181SAlex Tomas static int 2594a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2595d23142c6SLukas Czerner struct ext4_ext_path *path, 2596d23142c6SLukas Czerner long long *partial_cluster, 25970aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end) 2598a86c6181SAlex Tomas { 25990aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2600a86c6181SAlex Tomas int err = 0, correct_index = 0; 2601a86c6181SAlex Tomas int depth = ext_depth(inode), credits; 2602a86c6181SAlex Tomas struct ext4_extent_header *eh; 2603750c9c47SDmitry Monakhov ext4_lblk_t a, b; 2604725d26d3SAneesh Kumar K.V unsigned num; 2605725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block; 2606a86c6181SAlex Tomas unsigned short ex_ee_len; 2607556615dcSLukas Czerner unsigned unwritten = 0; 2608a86c6181SAlex Tomas struct ext4_extent *ex; 2609d23142c6SLukas Czerner ext4_fsblk_t pblk; 2610a86c6181SAlex Tomas 2611c29c0ae7SAlex Tomas /* the header must be checked already in ext4_ext_remove_space() */ 26125f95d21fSLukas Czerner ext_debug("truncate since %u in leaf to %u\n", start, end); 2613a86c6181SAlex Tomas if (!path[depth].p_hdr) 2614a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2615a86c6181SAlex Tomas eh = path[depth].p_hdr; 2616273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 2617273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2618273df556SFrank Mayhar return -EIO; 2619273df556SFrank Mayhar } 2620a86c6181SAlex Tomas /* find where to start removing */ 26216ae06ff5SAshish Sangwan ex = path[depth].p_ext; 26226ae06ff5SAshish Sangwan if (!ex) 2623a86c6181SAlex Tomas ex = EXT_LAST_EXTENT(eh); 2624a86c6181SAlex Tomas 2625a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2626a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2627a86c6181SAlex Tomas 2628d8990240SAditya Kali trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); 2629d8990240SAditya Kali 2630a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) && 2631a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) { 2632a41f2071SAneesh Kumar K.V 2633556615dcSLukas Czerner if (ext4_ext_is_unwritten(ex)) 2634556615dcSLukas Czerner unwritten = 1; 2635a41f2071SAneesh Kumar K.V else 2636556615dcSLukas Czerner unwritten = 0; 2637a41f2071SAneesh Kumar K.V 2638553f9008SMingming ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2639556615dcSLukas Czerner unwritten, ex_ee_len); 2640a86c6181SAlex Tomas path[depth].p_ext = ex; 2641a86c6181SAlex Tomas 2642a86c6181SAlex Tomas a = ex_ee_block > start ? ex_ee_block : start; 2643d583fb87SAllison Henderson b = ex_ee_block+ex_ee_len - 1 < end ? 2644d583fb87SAllison Henderson ex_ee_block+ex_ee_len - 1 : end; 2645a86c6181SAlex Tomas 2646a86c6181SAlex Tomas ext_debug(" border %u:%u\n", a, b); 2647a86c6181SAlex Tomas 2648d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */ 26495f95d21fSLukas Czerner if (end < ex_ee_block) { 2650d23142c6SLukas Czerner /* 2651d23142c6SLukas Czerner * We're going to skip this extent and move to another, 2652f4226d9eSEric Whitney * so note that its first cluster is in use to avoid 2653f4226d9eSEric Whitney * freeing it when removing blocks. Eventually, the 2654f4226d9eSEric Whitney * right edge of the truncated/punched region will 2655f4226d9eSEric Whitney * be just to the left. 
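 *
 * Each pass of the removal loop below clamps the extent against
 * [start, end] and picks one of three outcomes, as shown in this
 * standalone model (simplified, stand-in types; the kernel splits
 * extents beforehand so the unsupported head/middle case cannot
 * normally occur):
 */
#if 0	/* not built */
#include <stdint.h>

enum toy_action { TOY_SKIP, TOY_UNSUPPORTED, TOY_TAIL, TOY_WHOLE };

static enum toy_action toy_classify(uint32_t ex_start, uint32_t ex_len,
				    uint32_t start, uint32_t end)
{
	uint32_t ex_end = ex_start + ex_len - 1;
	uint32_t a = ex_start > start ? ex_start : start;
	uint32_t b = ex_end < end ? ex_end : end;

	if (end < ex_start)
		return TOY_SKIP;		/* extent is beyond the hole */
	if (b != ex_end)
		return TOY_UNSUPPORTED;		/* would cut head or middle */
	if (a != ex_start)
		return TOY_TAIL;		/* remove blocks a..ex_end */
	return TOY_WHOLE;			/* remove the entire extent */
}
#endif
/*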
2656d23142c6SLukas Czerner */ 2657f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) { 2658d23142c6SLukas Czerner pblk = ext4_ext_pblock(ex); 2659d23142c6SLukas Czerner *partial_cluster = 2660f4226d9eSEric Whitney -(long long) EXT4_B2C(sbi, pblk); 2661f4226d9eSEric Whitney } 2662d583fb87SAllison Henderson ex--; 2663d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block); 2664d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex); 2665d583fb87SAllison Henderson continue; 2666750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) { 2667dc1841d6SLukas Czerner EXT4_ERROR_INODE(inode, 2668dc1841d6SLukas Czerner "can not handle truncate %u:%u " 2669dc1841d6SLukas Czerner "on extent %u:%u", 2670dc1841d6SLukas Czerner start, end, ex_ee_block, 2671dc1841d6SLukas Czerner ex_ee_block + ex_ee_len - 1); 2672d583fb87SAllison Henderson err = -EIO; 2673d583fb87SAllison Henderson goto out; 2674a86c6181SAlex Tomas } else if (a != ex_ee_block) { 2675a86c6181SAlex Tomas /* remove tail of the extent */ 2676750c9c47SDmitry Monakhov num = a - ex_ee_block; 2677a86c6181SAlex Tomas } else { 2678a86c6181SAlex Tomas /* remove whole extent: excellent! */ 2679a86c6181SAlex Tomas num = 0; 2680d583fb87SAllison Henderson } 268134071da7STheodore Ts'o /* 268234071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group 268334071da7STheodore Ts'o * descriptor) for each block group; assume two block 268434071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for 268534071da7STheodore Ts'o * the worst case 268634071da7STheodore Ts'o */ 268734071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2688a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) { 2689a86c6181SAlex Tomas correct_index = 1; 2690a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1; 2691a86c6181SAlex Tomas } 26925aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2693a86c6181SAlex Tomas 2694487caeefSJan Kara err = ext4_ext_truncate_extend_restart(handle, inode, credits); 26959102e4faSShen Feng if (err) 2696a86c6181SAlex Tomas goto out; 2697a86c6181SAlex Tomas 2698a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth); 2699a86c6181SAlex Tomas if (err) 2700a86c6181SAlex Tomas goto out; 2701a86c6181SAlex Tomas 27020aa06000STheodore Ts'o err = ext4_remove_blocks(handle, inode, ex, partial_cluster, 27030aa06000STheodore Ts'o a, b); 2704a86c6181SAlex Tomas if (err) 2705a86c6181SAlex Tomas goto out; 2706a86c6181SAlex Tomas 2707750c9c47SDmitry Monakhov if (num == 0) 2708d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */ 2709f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0); 2710a86c6181SAlex Tomas 2711a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num); 2712749269faSAmit Arora /* 2713556615dcSLukas Czerner * Do not mark unwritten if all the blocks in the 2714749269faSAmit Arora * extent have been removed. 
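 *
 * Worked example of the credit estimate above, with illustrative
 * numbers: 4k blocks give 32768 blocks per group, so an extent of
 * 70000 blocks adds 70000 / 32768 = 2 (integer division), i.e.
 * credits = 7 + 2 * 2 = 11 before the index and quota credits are
 * added. The same arithmetic as a sketch (stand-in function, not
 * kernel code):
 */
#if 0	/* not built */
static int toy_rm_leaf_credits(unsigned int ex_len,
			       unsigned int blocks_per_group,
			       int depth, int first_in_leaf,
			       int quota_credits)
{
	/* 3 (leaf, sb, inode) + 2 (bitmap, group desc) for each of two
	 * assumed groups, plus 2 per extra group the extent spans */
	int credits = 7 + 2 * (ex_len / blocks_per_group);

	if (first_in_leaf)		/* index blocks may change too */
		credits += depth + 1;
	return credits + quota_credits;
}
#endif
/*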
2715749269faSAmit Arora */ 2716556615dcSLukas Czerner if (unwritten && num) 2717556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 2718d583fb87SAllison Henderson /* 2719d583fb87SAllison Henderson * If the extent was completely released, 2720d583fb87SAllison Henderson * we need to remove it from the leaf 2721d583fb87SAllison Henderson */ 2722d583fb87SAllison Henderson if (num == 0) { 2723f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) { 2724d583fb87SAllison Henderson /* 2725d583fb87SAllison Henderson * For hole punching, we need to scoot all the 2726d583fb87SAllison Henderson * extents up when an extent is removed so that 2727d583fb87SAllison Henderson * we dont have blank extents in the middle 2728d583fb87SAllison Henderson */ 2729d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2730d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2731d583fb87SAllison Henderson 2732d583fb87SAllison Henderson /* Now get rid of the one at the end */ 2733d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0, 2734d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2735d583fb87SAllison Henderson } 2736d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1); 27375bf43760SEric Whitney } 2738d583fb87SAllison Henderson 2739750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth); 2740750c9c47SDmitry Monakhov if (err) 2741750c9c47SDmitry Monakhov goto out; 2742750c9c47SDmitry Monakhov 2743bf52c6f7SYongqiang Yang ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2744bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 2745a86c6181SAlex Tomas ex--; 2746a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2747a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2748a86c6181SAlex Tomas } 2749a86c6181SAlex Tomas 2750a86c6181SAlex Tomas if (correct_index && eh->eh_entries) 2751a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2752a86c6181SAlex Tomas 27530aa06000STheodore Ts'o /* 2754ad6599abSEric Whitney * If there's a partial cluster and at least one extent remains in 2755ad6599abSEric Whitney * the leaf, free the partial cluster if it isn't shared with the 27565bf43760SEric Whitney * current extent. If it is shared with the current extent 27575bf43760SEric Whitney * we zero partial_cluster because we've reached the start of the 27585bf43760SEric Whitney * truncated/punched region and we're done removing blocks. 
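 *
 * That final check is compact enough to model directly (stand-in
 * types; cluster_of() and free_cluster() are hypothetical helpers
 * standing in for EXT4_B2C() and ext4_free_blocks()):
 */
#if 0	/* not built */
static long long cluster_of(unsigned long long pblk);	/* stand-in */
static void free_cluster(long long cluster);		/* stand-in */

/* partial > 0: candidate to free; partial < 0: shared, must be kept */
static void toy_finish_leaf(long long *partial,
			    unsigned long long last_pblk,
			    int leaf_has_extents)
{
	if (*partial > 0 && leaf_has_extents) {
		if (*partial != cluster_of(last_pblk))
			free_cluster(*partial);
		*partial = 0;	/* done either way: no candidate remains */
	}
}
#endif
/*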
27590aa06000STheodore Ts'o */ 27605bf43760SEric Whitney if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) { 27615bf43760SEric Whitney pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 27625bf43760SEric Whitney if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) { 27630aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 27640aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 27655bf43760SEric Whitney sbi->s_cluster_ratio, 27665bf43760SEric Whitney get_default_free_blocks_flags(inode)); 27675bf43760SEric Whitney } 27680aa06000STheodore Ts'o *partial_cluster = 0; 27690aa06000STheodore Ts'o } 27700aa06000STheodore Ts'o 2771a86c6181SAlex Tomas /* if this leaf is free, then we should 2772a86c6181SAlex Tomas * remove it from index block above */ 2773a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2774c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, depth); 2775a86c6181SAlex Tomas 2776a86c6181SAlex Tomas out: 2777a86c6181SAlex Tomas return err; 2778a86c6181SAlex Tomas } 2779a86c6181SAlex Tomas 2780a86c6181SAlex Tomas /* 2781d0d856e8SRandy Dunlap * ext4_ext_more_to_rm: 2782d0d856e8SRandy Dunlap * returns 1 if current index has to be freed (even partial) 2783a86c6181SAlex Tomas */ 278409b88252SAvantika Mathur static int 2785a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path) 2786a86c6181SAlex Tomas { 2787a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL); 2788a86c6181SAlex Tomas 2789a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2790a86c6181SAlex Tomas return 0; 2791a86c6181SAlex Tomas 2792a86c6181SAlex Tomas /* 2793d0d856e8SRandy Dunlap * if truncate on deeper level happened, it wasn't partial, 2794a86c6181SAlex Tomas * so we have to consider current index for truncation 2795a86c6181SAlex Tomas */ 2796a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2797a86c6181SAlex Tomas return 0; 2798a86c6181SAlex Tomas return 1; 2799a86c6181SAlex Tomas } 2800a86c6181SAlex Tomas 280126a4c0c6STheodore Ts'o int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 28025f95d21fSLukas Czerner ext4_lblk_t end) 2803a86c6181SAlex Tomas { 2804f4226d9eSEric Whitney struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2805a86c6181SAlex Tomas int depth = ext_depth(inode); 2806968dee77SAshish Sangwan struct ext4_ext_path *path = NULL; 2807d23142c6SLukas Czerner long long partial_cluster = 0; 2808a86c6181SAlex Tomas handle_t *handle; 28096f2080e6SDmitry Monakhov int i = 0, err = 0; 2810a86c6181SAlex Tomas 28115f95d21fSLukas Czerner ext_debug("truncate since %u to %u\n", start, end); 2812a86c6181SAlex Tomas 2813a86c6181SAlex Tomas /* probably first extent we're gonna free will be last in block */ 28149924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1); 2815a86c6181SAlex Tomas if (IS_ERR(handle)) 2816a86c6181SAlex Tomas return PTR_ERR(handle); 2817a86c6181SAlex Tomas 28180617b83fSDmitry Monakhov again: 281961801325SLukas Czerner trace_ext4_ext_remove_space(inode, start, end, depth); 2820d8990240SAditya Kali 2821a86c6181SAlex Tomas /* 28225f95d21fSLukas Czerner * Check if we are removing extents inside the extent tree. If that 28235f95d21fSLukas Czerner * is the case, we are going to punch a hole inside the extent tree 28245f95d21fSLukas Czerner * so we have to check whether we need to split the extent covering 28255f95d21fSLukas Czerner * the last block to remove so we can easily remove the part of it 28265f95d21fSLukas Czerner * in ext4_ext_rm_leaf(). 
28275f95d21fSLukas Czerner */ 28285f95d21fSLukas Czerner if (end < EXT_MAX_BLOCKS - 1) { 28295f95d21fSLukas Czerner struct ext4_extent *ex; 2830f4226d9eSEric Whitney ext4_lblk_t ee_block, ex_end, lblk; 2831f4226d9eSEric Whitney ext4_fsblk_t pblk; 28325f95d21fSLukas Czerner 2833f4226d9eSEric Whitney /* find the extent for, or the closest extent to, this block */ 2834ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); 28355f95d21fSLukas Czerner if (IS_ERR(path)) { 28365f95d21fSLukas Czerner ext4_journal_stop(handle); 28375f95d21fSLukas Czerner return PTR_ERR(path); 28385f95d21fSLukas Czerner } 28395f95d21fSLukas Czerner depth = ext_depth(inode); 28406f2080e6SDmitry Monakhov /* A leaf may not exist only if the inode has no blocks at all */ 28415f95d21fSLukas Czerner ex = path[depth].p_ext; 2842968dee77SAshish Sangwan if (!ex) { 28436f2080e6SDmitry Monakhov if (depth) { 28446f2080e6SDmitry Monakhov EXT4_ERROR_INODE(inode, 28456f2080e6SDmitry Monakhov "path[%d].p_hdr == NULL", 28466f2080e6SDmitry Monakhov depth); 28476f2080e6SDmitry Monakhov err = -EIO; 28486f2080e6SDmitry Monakhov } 28496f2080e6SDmitry Monakhov goto out; 2850968dee77SAshish Sangwan } 28515f95d21fSLukas Czerner 28525f95d21fSLukas Czerner ee_block = le32_to_cpu(ex->ee_block); 2853f4226d9eSEric Whitney ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1; 28545f95d21fSLukas Czerner 28555f95d21fSLukas Czerner /* 28565f95d21fSLukas Czerner * See if the last block is inside the extent; if so, split 28575f95d21fSLukas Czerner * the extent at the 'end' block so we can easily remove the 28585f95d21fSLukas Czerner * tail of the first part of the split extent in 28595f95d21fSLukas Czerner * ext4_ext_rm_leaf(). 28605f95d21fSLukas Czerner */ 2861f4226d9eSEric Whitney if (end >= ee_block && end < ex_end) { 2862f4226d9eSEric Whitney 2863f4226d9eSEric Whitney /* 2864f4226d9eSEric Whitney * If we're going to split the extent, note that 2865f4226d9eSEric Whitney * the cluster containing the block after 'end' is 2866f4226d9eSEric Whitney * in use to avoid freeing it when removing blocks. 2867f4226d9eSEric Whitney */ 2868f4226d9eSEric Whitney if (sbi->s_cluster_ratio > 1) { 2869f4226d9eSEric Whitney pblk = ext4_ext_pblock(ex) + end - ee_block + 2; 2870f4226d9eSEric Whitney partial_cluster = 2871f4226d9eSEric Whitney -(long long) EXT4_B2C(sbi, pblk); 2872f4226d9eSEric Whitney } 2873f4226d9eSEric Whitney 28745f95d21fSLukas Czerner /* 28755f95d21fSLukas Czerner * Split the extent in two so that 'end' is the last 287627dd4385SLukas Czerner * block in the first new extent. Also, we should not 287727dd4385SLukas Czerner * fail removing space due to ENOSPC, so try to use a 287827dd4385SLukas Czerner * reserved block if that happens. 28795f95d21fSLukas Czerner */ 2880dfe50809STheodore Ts'o err = ext4_force_split_extent_at(handle, inode, &path, 2881fcf6b1b7SDmitry Monakhov end + 1, 1); 28825f95d21fSLukas Czerner if (err < 0) 28835f95d21fSLukas Czerner goto out; 2884f4226d9eSEric Whitney 2885f4226d9eSEric Whitney } else if (sbi->s_cluster_ratio > 1 && end >= ex_end) { 2886f4226d9eSEric Whitney /* 2887f4226d9eSEric Whitney * If there's an extent to the right, its first cluster 2888f4226d9eSEric Whitney * contains the immediate right boundary of the 2889f4226d9eSEric Whitney * truncated/punched region. Set partial_cluster to 2890f4226d9eSEric Whitney * its negative value so it won't be freed if shared 2891f4226d9eSEric Whitney * with the current extent. The end < ee_block case 2892f4226d9eSEric Whitney * is handled in ext4_ext_rm_leaf().
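 *
 * A number line makes the first case concrete. Punching blocks
 * 10..19 out of an extent covering 0..29 first splits at end + 1:
 *
 *	[0 ......................... 29]	original extent
 *	[0 .......... 19][20 ....... 29]	after split at block 20
 *
 * so ext4_ext_rm_leaf() only ever trims a tail (blocks 10..19 of the
 * first piece) and never has to carve blocks out of the middle of an
 * extent.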
2893f4226d9eSEric Whitney */ 2894f4226d9eSEric Whitney lblk = ex_end + 1; 2895f4226d9eSEric Whitney err = ext4_ext_search_right(inode, path, &lblk, &pblk, 2896f4226d9eSEric Whitney &ex); 2897f4226d9eSEric Whitney if (err) 2898f4226d9eSEric Whitney goto out; 2899f4226d9eSEric Whitney if (pblk) 2900f4226d9eSEric Whitney partial_cluster = 2901f4226d9eSEric Whitney -(long long) EXT4_B2C(sbi, pblk); 29025f95d21fSLukas Czerner } 29035f95d21fSLukas Czerner } 29045f95d21fSLukas Czerner /* 2905d0d856e8SRandy Dunlap * We start scanning from right side, freeing all the blocks 2906d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise. 2907a86c6181SAlex Tomas */ 29080617b83fSDmitry Monakhov depth = ext_depth(inode); 2909968dee77SAshish Sangwan if (path) { 2910968dee77SAshish Sangwan int k = i = depth; 2911968dee77SAshish Sangwan while (--k > 0) 2912968dee77SAshish Sangwan path[k].p_block = 2913968dee77SAshish Sangwan le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2914968dee77SAshish Sangwan } else { 2915968dee77SAshish Sangwan path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), 2916968dee77SAshish Sangwan GFP_NOFS); 2917a86c6181SAlex Tomas if (path == NULL) { 2918a86c6181SAlex Tomas ext4_journal_stop(handle); 2919a86c6181SAlex Tomas return -ENOMEM; 2920a86c6181SAlex Tomas } 292110809df8STheodore Ts'o path[0].p_maxdepth = path[0].p_depth = depth; 2922a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode); 292389a4e48fSTheodore Ts'o i = 0; 29245f95d21fSLukas Czerner 2925c349179bSTheodore Ts'o if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { 2926a86c6181SAlex Tomas err = -EIO; 2927a86c6181SAlex Tomas goto out; 2928a86c6181SAlex Tomas } 2929968dee77SAshish Sangwan } 2930968dee77SAshish Sangwan err = 0; 2931a86c6181SAlex Tomas 2932a86c6181SAlex Tomas while (i >= 0 && err == 0) { 2933a86c6181SAlex Tomas if (i == depth) { 2934a86c6181SAlex Tomas /* this is leaf block */ 2935d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 29360aa06000STheodore Ts'o &partial_cluster, start, 29375f95d21fSLukas Czerner end); 2938d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2939a86c6181SAlex Tomas brelse(path[i].p_bh); 2940a86c6181SAlex Tomas path[i].p_bh = NULL; 2941a86c6181SAlex Tomas i--; 2942a86c6181SAlex Tomas continue; 2943a86c6181SAlex Tomas } 2944a86c6181SAlex Tomas 2945a86c6181SAlex Tomas /* this is index block */ 2946a86c6181SAlex Tomas if (!path[i].p_hdr) { 2947a86c6181SAlex Tomas ext_debug("initialize header\n"); 2948a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2949a86c6181SAlex Tomas } 2950a86c6181SAlex Tomas 2951a86c6181SAlex Tomas if (!path[i].p_idx) { 2952d0d856e8SRandy Dunlap /* this level hasn't been touched yet */ 2953a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2954a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2955a86c6181SAlex Tomas ext_debug("init index ptr: hdr 0x%p, num %d\n", 2956a86c6181SAlex Tomas path[i].p_hdr, 2957a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries)); 2958a86c6181SAlex Tomas } else { 2959d0d856e8SRandy Dunlap /* we were already here, see at next index */ 2960a86c6181SAlex Tomas path[i].p_idx--; 2961a86c6181SAlex Tomas } 2962a86c6181SAlex Tomas 2963a86c6181SAlex Tomas ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2964a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr), 2965a86c6181SAlex Tomas path[i].p_idx); 2966a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) { 2967c29c0ae7SAlex Tomas struct buffer_head *bh; 
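			/*
			 * The loop below is an explicit-stack, right-to-left
			 * depth-first walk: path[] is the stack, p_idx the
			 * per-level cursor, and p_block a saved entry count
			 * used to detect rebalancing. The shape, as a rough
			 * sketch (hypothetical helpers, not kernel code):
			 *
			 *	while (level >= 0) {
			 *		if (level == depth)
			 *			rm_leaf(), level--;
			 *		else if (more_to_rm(level))
			 *			cursor[level]--, level++;
			 *		else
			 *			rm_empty_idx(), level--;
			 *	}
			 */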
2968a86c6181SAlex Tomas /* go to the next level */ 29692ae02107SMingming Cao ext_debug("move to level %d (block %llu)\n", 2970bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx)); 2971a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path)); 29727d7ea89eSTheodore Ts'o bh = read_extent_tree_block(inode, 2973107a7bd3STheodore Ts'o ext4_idx_pblock(path[i].p_idx), depth - i - 1, 2974107a7bd3STheodore Ts'o EXT4_EX_NOCACHE); 29757d7ea89eSTheodore Ts'o if (IS_ERR(bh)) { 2976a86c6181SAlex Tomas /* should we reset i_size? */ 29777d7ea89eSTheodore Ts'o err = PTR_ERR(bh); 2978a86c6181SAlex Tomas break; 2979a86c6181SAlex Tomas } 298076828c88STheodore Ts'o /* Yield here to deal with large extent trees. 298176828c88STheodore Ts'o * Should be a no-op if we did IO above. */ 298276828c88STheodore Ts'o cond_resched(); 2983c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) { 2984c29c0ae7SAlex Tomas err = -EIO; 2985c29c0ae7SAlex Tomas break; 2986c29c0ae7SAlex Tomas } 2987c29c0ae7SAlex Tomas path[i + 1].p_bh = bh; 2988a86c6181SAlex Tomas 2989d0d856e8SRandy Dunlap /* save actual number of indexes since this 2990d0d856e8SRandy Dunlap * number is changed at the next iteration */ 2991a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2992a86c6181SAlex Tomas i++; 2993a86c6181SAlex Tomas } else { 2994d0d856e8SRandy Dunlap /* we finished processing this index, go up */ 2995a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2996d0d856e8SRandy Dunlap /* index is empty, remove it; 2997a86c6181SAlex Tomas * handle must be already prepared by the 2998a86c6181SAlex Tomas * truncatei_leaf() */ 2999c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, i); 3000a86c6181SAlex Tomas } 3001d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 3002a86c6181SAlex Tomas brelse(path[i].p_bh); 3003a86c6181SAlex Tomas path[i].p_bh = NULL; 3004a86c6181SAlex Tomas i--; 3005a86c6181SAlex Tomas ext_debug("return to level %d\n", i); 3006a86c6181SAlex Tomas } 3007a86c6181SAlex Tomas } 3008a86c6181SAlex Tomas 300961801325SLukas Czerner trace_ext4_ext_remove_space_done(inode, start, end, depth, 301061801325SLukas Czerner partial_cluster, path->p_hdr->eh_entries); 3011d8990240SAditya Kali 30127b415bf6SAditya Kali /* If we still have something in the partial cluster and we have removed 30137b415bf6SAditya Kali * even the first extent, then we should free the blocks in the partial 30147b415bf6SAditya Kali * cluster as well. 
*/ 3015d23142c6SLukas Czerner if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) { 3016981250caSTheodore Ts'o int flags = get_default_free_blocks_flags(inode); 30177b415bf6SAditya Kali 30187b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL, 3019f4226d9eSEric Whitney EXT4_C2B(sbi, partial_cluster), 3020f4226d9eSEric Whitney sbi->s_cluster_ratio, flags); 30217b415bf6SAditya Kali partial_cluster = 0; 30227b415bf6SAditya Kali } 30237b415bf6SAditya Kali 3024a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */ 3025a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) { 3026a86c6181SAlex Tomas /* 3027d0d856e8SRandy Dunlap * truncate to zero freed all the tree, 3028d0d856e8SRandy Dunlap * so we need to correct eh_depth 3029a86c6181SAlex Tomas */ 3030a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path); 3031a86c6181SAlex Tomas if (err == 0) { 3032a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0; 3033a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max = 303455ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0)); 3035a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path); 3036a86c6181SAlex Tomas } 3037a86c6181SAlex Tomas } 3038a86c6181SAlex Tomas out: 3039a86c6181SAlex Tomas ext4_ext_drop_refs(path); 3040a86c6181SAlex Tomas kfree(path); 3041968dee77SAshish Sangwan path = NULL; 3042dfe50809STheodore Ts'o if (err == -EAGAIN) 3043dfe50809STheodore Ts'o goto again; 3044a86c6181SAlex Tomas ext4_journal_stop(handle); 3045a86c6181SAlex Tomas 3046a86c6181SAlex Tomas return err; 3047a86c6181SAlex Tomas } 3048a86c6181SAlex Tomas 3049a86c6181SAlex Tomas /* 3050a86c6181SAlex Tomas * called at mount time 3051a86c6181SAlex Tomas */ 3052a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb) 3053a86c6181SAlex Tomas { 3054a86c6181SAlex Tomas /* 3055a86c6181SAlex Tomas * possible initialization would be here 3056a86c6181SAlex Tomas */ 3057a86c6181SAlex Tomas 305883982b6fSTheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 305990576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 306092b97816STheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled" 3061bbf2f9fbSRobert P. J. 
Day #ifdef AGGRESSIVE_TEST 306292b97816STheodore Ts'o ", aggressive tests" 3063a86c6181SAlex Tomas #endif 3064a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 306592b97816STheodore Ts'o ", check binsearch" 3066a86c6181SAlex Tomas #endif 3067a86c6181SAlex Tomas #ifdef EXTENTS_STATS 306892b97816STheodore Ts'o ", stats" 3069a86c6181SAlex Tomas #endif 307092b97816STheodore Ts'o "\n"); 307190576c0bSTheodore Ts'o #endif 3072a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3073a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3074a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30; 3075a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0; 3076a86c6181SAlex Tomas #endif 3077a86c6181SAlex Tomas } 3078a86c6181SAlex Tomas } 3079a86c6181SAlex Tomas 3080a86c6181SAlex Tomas /* 3081a86c6181SAlex Tomas * called at umount time 3082a86c6181SAlex Tomas */ 3083a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb) 3084a86c6181SAlex Tomas { 308583982b6fSTheodore Ts'o if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 3086a86c6181SAlex Tomas return; 3087a86c6181SAlex Tomas 3088a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3089a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3090a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 3091a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3092a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents, 3093a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents); 3094a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 3095a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 3096a86c6181SAlex Tomas } 3097a86c6181SAlex Tomas #endif 3098a86c6181SAlex Tomas } 3099a86c6181SAlex Tomas 3100d7b2a00cSZheng Liu static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) 3101d7b2a00cSZheng Liu { 3102d7b2a00cSZheng Liu ext4_lblk_t ee_block; 3103d7b2a00cSZheng Liu ext4_fsblk_t ee_pblock; 3104d7b2a00cSZheng Liu unsigned int ee_len; 3105d7b2a00cSZheng Liu 3106d7b2a00cSZheng Liu ee_block = le32_to_cpu(ex->ee_block); 3107d7b2a00cSZheng Liu ee_len = ext4_ext_get_actual_len(ex); 3108d7b2a00cSZheng Liu ee_pblock = ext4_ext_pblock(ex); 3109d7b2a00cSZheng Liu 3110d7b2a00cSZheng Liu if (ee_len == 0) 3111d7b2a00cSZheng Liu return 0; 3112d7b2a00cSZheng Liu 3113d7b2a00cSZheng Liu return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, 3114d7b2a00cSZheng Liu EXTENT_STATUS_WRITTEN); 3115d7b2a00cSZheng Liu } 3116d7b2a00cSZheng Liu 3117093a088bSAneesh Kumar K.V /* FIXME!! we need to try to merge to left or right after zero-out */ 3118093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 3119093a088bSAneesh Kumar K.V { 31202407518dSLukas Czerner ext4_fsblk_t ee_pblock; 31212407518dSLukas Czerner unsigned int ee_len; 3122b720303dSJing Zhang int ret; 3123093a088bSAneesh Kumar K.V 3124093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 3125bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex); 3126093a088bSAneesh Kumar K.V 3127a107e5a3STheodore Ts'o ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 31282407518dSLukas Czerner if (ret > 0) 31292407518dSLukas Czerner ret = 0; 3130093a088bSAneesh Kumar K.V 31312407518dSLukas Czerner return ret; 3132093a088bSAneesh Kumar K.V } 3133093a088bSAneesh Kumar K.V 313447ea3bb5SYongqiang Yang /* 313547ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at given block. 
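 *
 * Worked example (illustrative numbers): splitting [a, b] = [100, 149]
 * backed by physical block 5000 at @split = 120 yields [100, 119] at
 * pblk 5000 (len 20) and [120, 149] at pblk 5020 (len 30), where
 * 5020 = 5000 + (120 - 100): the new physical start is offset from the
 * old one by exactly the logical offset of @split within the extent.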
313647ea3bb5SYongqiang Yang * 313747ea3bb5SYongqiang Yang * @handle: the journal handle 313847ea3bb5SYongqiang Yang * @inode: the file inode 313947ea3bb5SYongqiang Yang * @path: the path to the extent 314047ea3bb5SYongqiang Yang * @split: the logical block where the extent is split. 314147ea3bb5SYongqiang Yang * @split_flag: indicates whether the extent could be zeroed out if the split fails, and 3142556615dcSLukas Czerner * the states (init or unwritten) of the new extents. 314347ea3bb5SYongqiang Yang * @flags: flags used to insert the new extent into the extent tree. 314447ea3bb5SYongqiang Yang * 314547ea3bb5SYongqiang Yang * 314647ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], states 314747ea3bb5SYongqiang Yang * of which are determined by split_flag. 314847ea3bb5SYongqiang Yang * 314947ea3bb5SYongqiang Yang * There are two cases: 315047ea3bb5SYongqiang Yang * a> the extent is split into two extents. 315147ea3bb5SYongqiang Yang * b> a split is not needed, and we just mark the extent. 315247ea3bb5SYongqiang Yang * 315347ea3bb5SYongqiang Yang * return 0 on success. 315447ea3bb5SYongqiang Yang */ 315547ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle, 315647ea3bb5SYongqiang Yang struct inode *inode, 3157dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 315847ea3bb5SYongqiang Yang ext4_lblk_t split, 315947ea3bb5SYongqiang Yang int split_flag, 316047ea3bb5SYongqiang Yang int flags) 316147ea3bb5SYongqiang Yang { 3162dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 316347ea3bb5SYongqiang Yang ext4_fsblk_t newblock; 316447ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 3165adb23551SZheng Liu struct ext4_extent *ex, newex, orig_ex, zero_ex; 316647ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL; 316747ea3bb5SYongqiang Yang unsigned int ee_len, depth; 316847ea3bb5SYongqiang Yang int err = 0; 316947ea3bb5SYongqiang Yang 3170dee1f973SDmitry Monakhov BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 3171dee1f973SDmitry Monakhov (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 3172dee1f973SDmitry Monakhov 317347ea3bb5SYongqiang Yang ext_debug("ext4_split_extents_at: inode %lu, logical" 317447ea3bb5SYongqiang Yang "block %llu\n", inode->i_ino, (unsigned long long)split); 317547ea3bb5SYongqiang Yang 317647ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 317747ea3bb5SYongqiang Yang 317847ea3bb5SYongqiang Yang depth = ext_depth(inode); 317947ea3bb5SYongqiang Yang ex = path[depth].p_ext; 318047ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 318147ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 318247ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex); 318347ea3bb5SYongqiang Yang 318447ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3185556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex) && 3186357b66fdSDmitry Monakhov split_flag & (EXT4_EXT_MAY_ZEROOUT | 3187556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT1 | 3188556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2)); 318947ea3bb5SYongqiang Yang 319047ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth); 319147ea3bb5SYongqiang Yang if (err) 319247ea3bb5SYongqiang Yang goto out; 319347ea3bb5SYongqiang Yang 319447ea3bb5SYongqiang Yang if (split == ee_block) { 319547ea3bb5SYongqiang Yang /* 319647ea3bb5SYongqiang Yang * case b: block @split is the block that the extent begins with 319747ea3bb5SYongqiang Yang * then we just change the state of the extent, and splitting
319847ea3bb5SYongqiang Yang * is not needed. 319947ea3bb5SYongqiang Yang */ 3200556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3201556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 320247ea3bb5SYongqiang Yang else 320347ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex); 320447ea3bb5SYongqiang Yang 320547ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3206ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 320747ea3bb5SYongqiang Yang 3208ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 320947ea3bb5SYongqiang Yang goto out; 321047ea3bb5SYongqiang Yang } 321147ea3bb5SYongqiang Yang 321247ea3bb5SYongqiang Yang /* case a */ 321347ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex)); 321447ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block); 3215556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT1) 3216556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); 321747ea3bb5SYongqiang Yang 321847ea3bb5SYongqiang Yang /* 321947ea3bb5SYongqiang Yang * path may lead to new leaf, not to original leaf any more 322047ea3bb5SYongqiang Yang * after ext4_ext_insert_extent() returns, 322147ea3bb5SYongqiang Yang */ 322247ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 322347ea3bb5SYongqiang Yang if (err) 322447ea3bb5SYongqiang Yang goto fix_extent_len; 322547ea3bb5SYongqiang Yang 322647ea3bb5SYongqiang Yang ex2 = &newex; 322747ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split); 322847ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 322947ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock); 3230556615dcSLukas Czerner if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3231556615dcSLukas Czerner ext4_ext_mark_unwritten(ex2); 323247ea3bb5SYongqiang Yang 3233dfe50809STheodore Ts'o err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); 323447ea3bb5SYongqiang Yang if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3235dee1f973SDmitry Monakhov if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3236adb23551SZheng Liu if (split_flag & EXT4_EXT_DATA_VALID1) { 3237dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex2); 3238adb23551SZheng Liu zero_ex.ee_block = ex2->ee_block; 32398cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32408cde7ad1SZheng Liu ext4_ext_get_actual_len(ex2)); 3241adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3242adb23551SZheng Liu ext4_ext_pblock(ex2)); 3243adb23551SZheng Liu } else { 3244dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex); 3245adb23551SZheng Liu zero_ex.ee_block = ex->ee_block; 32468cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32478cde7ad1SZheng Liu ext4_ext_get_actual_len(ex)); 3248adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3249adb23551SZheng Liu ext4_ext_pblock(ex)); 3250adb23551SZheng Liu } 3251adb23551SZheng Liu } else { 325247ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex); 3253adb23551SZheng Liu zero_ex.ee_block = orig_ex.ee_block; 32548cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32558cde7ad1SZheng Liu ext4_ext_get_actual_len(&orig_ex)); 3256adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3257adb23551SZheng Liu ext4_ext_pblock(&orig_ex)); 3258adb23551SZheng Liu } 3259dee1f973SDmitry Monakhov 326047ea3bb5SYongqiang Yang if (err) 326147ea3bb5SYongqiang Yang goto fix_extent_len; 326247ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */ 3263af1584f5SAl Viro ex->ee_len = cpu_to_le16(ee_len); 3264ecb94f5fSTheodore 
Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3265ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3266adb23551SZheng Liu if (err) 3267adb23551SZheng Liu goto fix_extent_len; 3268adb23551SZheng Liu 3269adb23551SZheng Liu /* update extent status tree */ 3270d7b2a00cSZheng Liu err = ext4_zeroout_es(inode, &zero_ex); 3271adb23551SZheng Liu 327247ea3bb5SYongqiang Yang goto out; 327347ea3bb5SYongqiang Yang } else if (err) 327447ea3bb5SYongqiang Yang goto fix_extent_len; 327547ea3bb5SYongqiang Yang 327647ea3bb5SYongqiang Yang out: 327747ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 327847ea3bb5SYongqiang Yang return err; 327947ea3bb5SYongqiang Yang 328047ea3bb5SYongqiang Yang fix_extent_len: 328147ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len; 328229faed16SDmitry Monakhov ext4_ext_dirty(handle, inode, path + path->p_depth); 328347ea3bb5SYongqiang Yang return err; 328447ea3bb5SYongqiang Yang } 328547ea3bb5SYongqiang Yang 328647ea3bb5SYongqiang Yang /* 328747ea3bb5SYongqiang Yang * ext4_split_extent() splits an extent and marks the extent covered 328847ea3bb5SYongqiang Yang * by @map as @split_flag indicates 328947ea3bb5SYongqiang Yang * 329047ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (up to three) 329147ea3bb5SYongqiang Yang * There are three possibilities: 329247ea3bb5SYongqiang Yang * a> There is no split required 329347ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the extent 329447ea3bb5SYongqiang Yang * c> Splits in three extents: Someone is splitting in the middle of the extent 329547ea3bb5SYongqiang Yang * 329647ea3bb5SYongqiang Yang */ 329747ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle, 329847ea3bb5SYongqiang Yang struct inode *inode, 3299dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 330047ea3bb5SYongqiang Yang struct ext4_map_blocks *map, 330147ea3bb5SYongqiang Yang int split_flag, 330247ea3bb5SYongqiang Yang int flags) 330347ea3bb5SYongqiang Yang { 3304dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 330547ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 330647ea3bb5SYongqiang Yang struct ext4_extent *ex; 330747ea3bb5SYongqiang Yang unsigned int ee_len, depth; 330847ea3bb5SYongqiang Yang int err = 0; 3309556615dcSLukas Czerner int unwritten; 331047ea3bb5SYongqiang Yang int split_flag1, flags1; 33113a225670SZheng Liu int allocated = map->m_len; 331247ea3bb5SYongqiang Yang 331347ea3bb5SYongqiang Yang depth = ext_depth(inode); 331447ea3bb5SYongqiang Yang ex = path[depth].p_ext; 331547ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 331647ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 3317556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 331847ea3bb5SYongqiang Yang 331947ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) { 3320dee1f973SDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 332147ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 3322556615dcSLukas Czerner if (unwritten) 3323556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1 | 3324556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2; 3325dee1f973SDmitry Monakhov if (split_flag & EXT4_EXT_DATA_VALID2) 3326dee1f973SDmitry Monakhov split_flag1 |= EXT4_EXT_DATA_VALID1; 3327dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath, 332847ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1); 332993917411SYongqiang Yang if (err) 333093917411SYongqiang Yang
goto out; 33313a225670SZheng Liu } else { 33323a225670SZheng Liu allocated = ee_len - (map->m_lblk - ee_block); 333347ea3bb5SYongqiang Yang } 3334357b66fdSDmitry Monakhov /* 3335357b66fdSDmitry Monakhov * Update path is required because previous ext4_split_extent_at() may 3336357b66fdSDmitry Monakhov * result in split of original leaf or extent zeroout. 3337357b66fdSDmitry Monakhov */ 3338ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 333947ea3bb5SYongqiang Yang if (IS_ERR(path)) 334047ea3bb5SYongqiang Yang return PTR_ERR(path); 3341357b66fdSDmitry Monakhov depth = ext_depth(inode); 3342357b66fdSDmitry Monakhov ex = path[depth].p_ext; 3343a18ed359SDmitry Monakhov if (!ex) { 3344a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3345a18ed359SDmitry Monakhov (unsigned long) map->m_lblk); 3346a18ed359SDmitry Monakhov return -EIO; 3347a18ed359SDmitry Monakhov } 3348556615dcSLukas Czerner unwritten = ext4_ext_is_unwritten(ex); 3349357b66fdSDmitry Monakhov split_flag1 = 0; 335047ea3bb5SYongqiang Yang 335147ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) { 3352357b66fdSDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3353556615dcSLukas Czerner if (unwritten) { 3354556615dcSLukas Czerner split_flag1 |= EXT4_EXT_MARK_UNWRIT1; 3355357b66fdSDmitry Monakhov split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3356556615dcSLukas Czerner EXT4_EXT_MARK_UNWRIT2); 3357357b66fdSDmitry Monakhov } 3358dfe50809STheodore Ts'o err = ext4_split_extent_at(handle, inode, ppath, 335947ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags); 336047ea3bb5SYongqiang Yang if (err) 336147ea3bb5SYongqiang Yang goto out; 336247ea3bb5SYongqiang Yang } 336347ea3bb5SYongqiang Yang 336447ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 336547ea3bb5SYongqiang Yang out: 33663a225670SZheng Liu return err ? err : allocated; 336747ea3bb5SYongqiang Yang } 336847ea3bb5SYongqiang Yang 336956055d3aSAmit Arora /* 3370e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write 3371556615dcSLukas Czerner * to an unwritten extent. It may result in splitting the unwritten 337256055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two 3373556615dcSLukas Czerner * unwritten). 337456055d3aSAmit Arora * There are three possibilities: 337556055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized 337656055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent 337756055d3aSAmit Arora * c> Splits in three extents: Somone is writing in middle of the extent 33786f91bc5fSEric Gouriou * 33796f91bc5fSEric Gouriou * Pre-conditions: 3380556615dcSLukas Czerner * - The extent pointed to by 'path' is unwritten. 33816f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset 33826f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 33836f91bc5fSEric Gouriou * 33846f91bc5fSEric Gouriou * Post-conditions on success: 33856f91bc5fSEric Gouriou * - the returned value is the number of blocks beyond map->l_lblk 33866f91bc5fSEric Gouriou * that are allocated and initialized. 33876f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len. 
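 *
 * Worked example of the eof_block clamp computed below (illustrative
 * numbers): with 4k blocks (s_blocksize_bits == 12) and i_size ==
 * 10000 bytes, eof_block = (10000 + 4096 - 1) >> 12 = 3, the first
 * block past EOF rounded up; if map->m_lblk + map->m_len reaches
 * further, eof_block is raised to match so the zeroout heuristics
 * never treat blocks of an in-flight write as being beyond EOF.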
338856055d3aSAmit Arora */ 3389725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle, 3390725d26d3SAneesh Kumar K.V struct inode *inode, 3391e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3392dfe50809STheodore Ts'o struct ext4_ext_path **ppath, 339327dd4385SLukas Czerner int flags) 339456055d3aSAmit Arora { 3395dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 339667a5da56SZheng Liu struct ext4_sb_info *sbi; 33976f91bc5fSEric Gouriou struct ext4_extent_header *eh; 3398667eff35SYongqiang Yang struct ext4_map_blocks split_map; 3399667eff35SYongqiang Yang struct ext4_extent zero_ex; 3400bc2d9db4SLukas Czerner struct ext4_extent *ex, *abut_ex; 340121ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block; 3402bc2d9db4SLukas Czerner unsigned int ee_len, depth, map_len = map->m_len; 3403bc2d9db4SLukas Czerner int allocated = 0, max_zeroout = 0; 340456055d3aSAmit Arora int err = 0; 3405667eff35SYongqiang Yang int split_flag = 0; 340621ca087aSDmitry Monakhov 340721ca087aSDmitry Monakhov ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 340821ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3409bc2d9db4SLukas Czerner (unsigned long long)map->m_lblk, map_len); 341021ca087aSDmitry Monakhov 341167a5da56SZheng Liu sbi = EXT4_SB(inode->i_sb); 341221ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 341321ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3414bc2d9db4SLukas Czerner if (eof_block < map->m_lblk + map_len) 3415bc2d9db4SLukas Czerner eof_block = map->m_lblk + map_len; 341656055d3aSAmit Arora 341756055d3aSAmit Arora depth = ext_depth(inode); 34186f91bc5fSEric Gouriou eh = path[depth].p_hdr; 341956055d3aSAmit Arora ex = path[depth].p_ext; 342056055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block); 342156055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex); 3422adb23551SZheng Liu zero_ex.ee_len = 0; 342321ca087aSDmitry Monakhov 34246f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 34256f91bc5fSEric Gouriou 34266f91bc5fSEric Gouriou /* Pre-conditions */ 3427556615dcSLukas Czerner BUG_ON(!ext4_ext_is_unwritten(ex)); 34286f91bc5fSEric Gouriou BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 34296f91bc5fSEric Gouriou 34306f91bc5fSEric Gouriou /* 34316f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently 3432556615dcSLukas Czerner * unwritten extent to its neighbor. This is much cheaper 34336f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly 3434bc2d9db4SLukas Czerner * memmove() calls. Transferring to the left is the common case in 3435bc2d9db4SLukas Czerner * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3436bc2d9db4SLukas Czerner * followed by append writes. 34376f91bc5fSEric Gouriou * 34386f91bc5fSEric Gouriou * Limitations of the current logic: 3439bc2d9db4SLukas Czerner * - L1: we do not deal with writes covering the whole extent. 34406f91bc5fSEric Gouriou * This would require removing the extent if the transfer 34416f91bc5fSEric Gouriou * is possible. 3442bc2d9db4SLukas Czerner * - L2: we only attempt to merge with an extent stored in the 34436f91bc5fSEric Gouriou * same extent tree node. 
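 *
 * The left-merge fast path boils down to a single predicate over the
 * neighboring extents. A standalone sketch with stand-in types (not
 * the kernel's):
 */
#if 0	/* not built; models conditions C1..C4 checked below */
#include <stdint.h>

struct toy_ext {
	uint32_t lblk;		/* first logical block */
	uint64_t pblk;		/* first physical block */
	uint16_t len;		/* length in blocks */
	int unwritten;		/* extent state */
};

static int toy_can_merge_left(const struct toy_ext *prev,
			      const struct toy_ext *ex,
			      uint16_t map_len, uint16_t init_max_len)
{
	return !prev->unwritten &&			/* C1 */
	       prev->lblk + prev->len == ex->lblk &&	/* C2 logical abut */
	       prev->pblk + prev->len == ex->pblk &&	/* C3 physical abut */
	       prev->len < init_max_len - map_len;	/* C4 room to grow */
}
#endif
/*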
34446f91bc5fSEric Gouriou */ 3445bc2d9db4SLukas Czerner if ((map->m_lblk == ee_block) && 3446bc2d9db4SLukas Czerner /* See if we can merge left */ 3447bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3448bc2d9db4SLukas Czerner (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 34496f91bc5fSEric Gouriou ext4_lblk_t prev_lblk; 34506f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk; 3451bc2d9db4SLukas Czerner unsigned int prev_len; 34526f91bc5fSEric Gouriou 3453bc2d9db4SLukas Czerner abut_ex = ex - 1; 3454bc2d9db4SLukas Czerner prev_lblk = le32_to_cpu(abut_ex->ee_block); 3455bc2d9db4SLukas Czerner prev_len = ext4_ext_get_actual_len(abut_ex); 3456bc2d9db4SLukas Czerner prev_pblk = ext4_ext_pblock(abut_ex); 34576f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex); 34586f91bc5fSEric Gouriou 34596f91bc5fSEric Gouriou /* 3460bc2d9db4SLukas Czerner * A transfer of blocks from 'ex' to 'abut_ex' is allowed 34616f91bc5fSEric Gouriou * upon those conditions: 3462bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3463bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3464bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3465bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 34666f91bc5fSEric Gouriou * overflowing the (initialized) length limit. 34676f91bc5fSEric Gouriou */ 3468556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 34696f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/ 34706f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3471bc2d9db4SLukas Czerner (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 34726f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth); 34736f91bc5fSEric Gouriou if (err) 34746f91bc5fSEric Gouriou goto out; 34756f91bc5fSEric Gouriou 34766f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode, 3477bc2d9db4SLukas Czerner map, ex, abut_ex); 34786f91bc5fSEric Gouriou 3479bc2d9db4SLukas Czerner /* Shift the start of ex by 'map_len' blocks */ 3480bc2d9db4SLukas Czerner ex->ee_block = cpu_to_le32(ee_block + map_len); 3481bc2d9db4SLukas Czerner ext4_ext_store_pblock(ex, ee_pblk + map_len); 3482bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3483556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */ 34846f91bc5fSEric Gouriou 3485bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3486bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 34876f91bc5fSEric Gouriou 3488bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3489bc2d9db4SLukas Czerner allocated = map_len; 3490bc2d9db4SLukas Czerner } 3491bc2d9db4SLukas Czerner } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3492bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3493bc2d9db4SLukas Czerner ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3494bc2d9db4SLukas Czerner /* See if we can merge right */ 3495bc2d9db4SLukas Czerner ext4_lblk_t next_lblk; 3496bc2d9db4SLukas Czerner ext4_fsblk_t next_pblk, ee_pblk; 3497bc2d9db4SLukas Czerner unsigned int next_len; 3498bc2d9db4SLukas Czerner 3499bc2d9db4SLukas Czerner abut_ex = ex + 1; 3500bc2d9db4SLukas Czerner next_lblk = le32_to_cpu(abut_ex->ee_block); 3501bc2d9db4SLukas Czerner next_len = ext4_ext_get_actual_len(abut_ex); 3502bc2d9db4SLukas Czerner next_pblk = ext4_ext_pblock(abut_ex); 3503bc2d9db4SLukas Czerner ee_pblk = ext4_ext_pblock(ex); 3504bc2d9db4SLukas Czerner 3505bc2d9db4SLukas Czerner /* 3506bc2d9db4SLukas Czerner * A 
transfer of blocks from 'ex' to 'abut_ex' is allowed 3507bc2d9db4SLukas Czerner * upon those conditions: 3508bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3509bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3510bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3511bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 3512bc2d9db4SLukas Czerner * overflowing the (initialized) length limit. 3513bc2d9db4SLukas Czerner */ 3514556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/ 3515bc2d9db4SLukas Czerner ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3516bc2d9db4SLukas Czerner ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3517bc2d9db4SLukas Czerner (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3518bc2d9db4SLukas Czerner err = ext4_ext_get_access(handle, inode, path + depth); 3519bc2d9db4SLukas Czerner if (err) 3520bc2d9db4SLukas Czerner goto out; 3521bc2d9db4SLukas Czerner 3522bc2d9db4SLukas Czerner trace_ext4_ext_convert_to_initialized_fastpath(inode, 3523bc2d9db4SLukas Czerner map, ex, abut_ex); 3524bc2d9db4SLukas Czerner 3525bc2d9db4SLukas Czerner /* Shift the start of abut_ex by 'map_len' blocks */ 3526bc2d9db4SLukas Czerner abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3527bc2d9db4SLukas Czerner ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3528bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3529556615dcSLukas Czerner ext4_ext_mark_unwritten(ex); /* Restore the flag */ 3530bc2d9db4SLukas Czerner 3531bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3532bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3533bc2d9db4SLukas Czerner 3534bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3535bc2d9db4SLukas Czerner allocated = map_len; 3536bc2d9db4SLukas Czerner } 3537bc2d9db4SLukas Czerner } 3538bc2d9db4SLukas Czerner if (allocated) { 35396f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */ 35406f91bc5fSEric Gouriou ext4_ext_dirty(handle, inode, path + depth); 35416f91bc5fSEric Gouriou 35426f91bc5fSEric Gouriou /* Update path to point to the right extent */ 3543bc2d9db4SLukas Czerner path[depth].p_ext = abut_ex; 35446f91bc5fSEric Gouriou goto out; 3545bc2d9db4SLukas Czerner } else 3546bc2d9db4SLukas Czerner allocated = ee_len - (map->m_lblk - ee_block); 35476f91bc5fSEric Gouriou 3548667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block); 354921ca087aSDmitry Monakhov /* 355021ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 35519e740568SYongqiang Yang * zeroout only if extent is fully inside i_size or new_size. 355221ca087aSDmitry Monakhov */ 3553667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? 
EXT4_EXT_MAY_ZEROOUT : 0; 355421ca087aSDmitry Monakhov 355567a5da56SZheng Liu if (EXT4_EXT_MAY_ZEROOUT & split_flag) 355667a5da56SZheng Liu max_zeroout = sbi->s_extent_max_zeroout_kb >> 35574f42f80aSLukas Czerner (inode->i_sb->s_blocksize_bits - 10); 355867a5da56SZheng Liu 355967a5da56SZheng Liu /* If extent is less than s_max_zeroout_kb, zeroout directly */ 356067a5da56SZheng Liu if (max_zeroout && (ee_len <= max_zeroout)) { 3561667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, ex); 35623977c965SAneesh Kumar K.V if (err) 356356055d3aSAmit Arora goto out; 3564adb23551SZheng Liu zero_ex.ee_block = ex->ee_block; 35658cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); 3566adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); 35679df5643aSAneesh Kumar K.V 35689df5643aSAneesh Kumar K.V err = ext4_ext_get_access(handle, inode, path + depth); 35699df5643aSAneesh Kumar K.V if (err) 35709df5643aSAneesh Kumar K.V goto out; 3571667eff35SYongqiang Yang ext4_ext_mark_initialized(ex); 3572ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3573ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 357456055d3aSAmit Arora goto out; 3575667eff35SYongqiang Yang } 3576093a088bSAneesh Kumar K.V 3577667eff35SYongqiang Yang /* 3578667eff35SYongqiang Yang * four cases: 3579667eff35SYongqiang Yang * 1. split the extent into three extents. 3580667eff35SYongqiang Yang * 2. split the extent into two extents, zeroout the first half. 3581667eff35SYongqiang Yang * 3. split the extent into two extents, zeroout the second half. 3582667eff35SYongqiang Yang * 4. split the extent into two extents with out zeroout. 3583667eff35SYongqiang Yang */ 3584667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3585667eff35SYongqiang Yang split_map.m_len = map->m_len; 3586667eff35SYongqiang Yang 358767a5da56SZheng Liu if (max_zeroout && (allocated > map->m_len)) { 358867a5da56SZheng Liu if (allocated <= max_zeroout) { 3589667eff35SYongqiang Yang /* case 3 */ 3590667eff35SYongqiang Yang zero_ex.ee_block = 35919b940f8eSAllison Henderson cpu_to_le32(map->m_lblk); 35929b940f8eSAllison Henderson zero_ex.ee_len = cpu_to_le16(allocated); 3593667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3594667eff35SYongqiang Yang ext4_ext_pblock(ex) + map->m_lblk - ee_block); 3595667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3596667eff35SYongqiang Yang if (err) 3597667eff35SYongqiang Yang goto out; 3598667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3599667eff35SYongqiang Yang split_map.m_len = allocated; 360067a5da56SZheng Liu } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { 3601667eff35SYongqiang Yang /* case 2 */ 3602667eff35SYongqiang Yang if (map->m_lblk != ee_block) { 3603667eff35SYongqiang Yang zero_ex.ee_block = ex->ee_block; 3604667eff35SYongqiang Yang zero_ex.ee_len = cpu_to_le16(map->m_lblk - 3605667eff35SYongqiang Yang ee_block); 3606667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3607667eff35SYongqiang Yang ext4_ext_pblock(ex)); 3608667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3609667eff35SYongqiang Yang if (err) 3610667eff35SYongqiang Yang goto out; 3611667eff35SYongqiang Yang } 3612667eff35SYongqiang Yang 3613667eff35SYongqiang Yang split_map.m_lblk = ee_block; 36149b940f8eSAllison Henderson split_map.m_len = map->m_lblk - ee_block + map->m_len; 36159b940f8eSAllison Henderson allocated = map->m_len; 3616667eff35SYongqiang Yang } 3617667eff35SYongqiang Yang } 
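	/*
	 * Editor's illustration of the case analysis above (not from the
	 * original source; the numbers are hypothetical): ex covers
	 * [100, 120) unwritten and the write maps [108, 112), so
	 * allocated = 12.  If allocated <= max_zeroout, case 3 zeroes out
	 * [108, 120) and widens split_map to [108, 120), leaving a single
	 * split at block 108.  Otherwise, if map->m_lblk - ee_block +
	 * map->m_len < max_zeroout, case 2 zeroes out the head [100, 108)
	 * and widens split_map to [100, 112).
	 */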
3618667eff35SYongqiang Yang
3619ae9e9c6aSJan Kara err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3620ae9e9c6aSJan Kara flags);
3621ae9e9c6aSJan Kara if (err > 0)
3622ae9e9c6aSJan Kara err = 0;
3623667eff35SYongqiang Yang out:
3624adb23551SZheng Liu /* If we have gotten a failure, don't zero out the status tree */
3625adb23551SZheng Liu if (!err)
3626d7b2a00cSZheng Liu err = ext4_zeroout_es(inode, &zero_ex);
3627667eff35SYongqiang Yang return err ? err : allocated;
362856055d3aSAmit Arora }
362956055d3aSAmit Arora
3630c278bfecSAneesh Kumar K.V /*
3631e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() from
36320031462bSMingming Cao * ext4_get_blocks_dio_write() when DIO is used to write
3633556615dcSLukas Czerner * to an unwritten extent.
36340031462bSMingming Cao *
3635556615dcSLukas Czerner * Writing to an unwritten extent may result in splitting the unwritten
3636556615dcSLukas Czerner * extent into multiple initialized/unwritten extents (up to three).
36370031462bSMingming Cao * There are three possibilities:
3638556615dcSLukas Czerner * a> There is no split required: Entire extent should be unwritten
36390031462bSMingming Cao * b> Splits in two extents: Write is happening at either end of the extent
36400031462bSMingming Cao * c> Splits in three extents: Someone is writing in the middle of the extent
36410031462bSMingming Cao *
3642b8a86845SLukas Czerner * This works the same way in the case of initialized -> unwritten conversion.
3643b8a86845SLukas Czerner *
36440031462bSMingming Cao * One or more index blocks may be needed if the extent tree grows after
3645556615dcSLukas Czerner * the unwritten extent is split. To prevent ENOSPC from occurring when
3646556615dcSLukas Czerner * the IO completes, we need to split the unwritten extent before the
3647556615dcSLukas Czerner * DIO submits the IO. The unwritten extent split at this time will
3648556615dcSLukas Czerner * produce at most three unwritten extents. After the IO completes, the
36490031462bSMingming Cao * part being filled will be converted to initialized by the end_io
36500031462bSMingming Cao * callback via ext4_convert_unwritten_extents().
3651ba230c3fSMingming *
3652556615dcSLukas Czerner * Returns the size of the unwritten extent to be written on success.
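 *
 * Editor's illustration (not from the original source): a DIO write into
 * blocks [40, 50) of an unwritten extent [0, 100) is split here, before
 * the IO is submitted, into [0, 40), [40, 50) and [50, 100), all still
 * unwritten; the end_io callback later converts only [40, 50) to
 * initialized.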
36530031462bSMingming Cao */
3654b8a86845SLukas Czerner static int ext4_split_convert_extents(handle_t *handle,
36550031462bSMingming Cao struct inode *inode,
3656e35fd660STheodore Ts'o struct ext4_map_blocks *map,
3657dfe50809STheodore Ts'o struct ext4_ext_path **ppath,
36580031462bSMingming Cao int flags)
36590031462bSMingming Cao {
3660dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
3661667eff35SYongqiang Yang ext4_lblk_t eof_block;
3662667eff35SYongqiang Yang ext4_lblk_t ee_block;
3663667eff35SYongqiang Yang struct ext4_extent *ex;
3664667eff35SYongqiang Yang unsigned int ee_len;
3665667eff35SYongqiang Yang int split_flag = 0, depth;
36660031462bSMingming Cao
3667b8a86845SLukas Czerner ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
3668b8a86845SLukas Czerner __func__, inode->i_ino,
3669e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len);
367021ca087aSDmitry Monakhov
367121ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
367221ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits;
3673e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len)
3674e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len;
36750031462bSMingming Cao /*
367621ca087aSDmitry Monakhov * It is safe to convert an extent to initialized via explicit
367721ca087aSDmitry Monakhov * zeroout only if the extent is fully inside i_size or new_size.
367821ca087aSDmitry Monakhov */
3679667eff35SYongqiang Yang depth = ext_depth(inode);
36800031462bSMingming Cao ex = path[depth].p_ext;
3681667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block);
3682667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex);
36830031462bSMingming Cao
3684b8a86845SLukas Czerner /* Convert to unwritten */
3685b8a86845SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3686b8a86845SLukas Czerner split_flag |= EXT4_EXT_DATA_VALID1;
3687b8a86845SLukas Czerner /* Convert to initialized */
3688b8a86845SLukas Czerner } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3689b8a86845SLukas Czerner split_flag |= ee_block + ee_len <= eof_block ?
3690b8a86845SLukas Czerner EXT4_EXT_MAY_ZEROOUT : 0;
3691556615dcSLukas Czerner split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3692b8a86845SLukas Czerner }
3693667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO;
3694dfe50809STheodore Ts'o return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
36950031462bSMingming Cao }
3696197217a5SYongqiang Yang
3697c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle,
36980031462bSMingming Cao struct inode *inode,
3699dee1f973SDmitry Monakhov struct ext4_map_blocks *map,
3700dfe50809STheodore Ts'o struct ext4_ext_path **ppath)
37010031462bSMingming Cao {
3702dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath;
37030031462bSMingming Cao struct ext4_extent *ex;
3704dee1f973SDmitry Monakhov ext4_lblk_t ee_block;
3705dee1f973SDmitry Monakhov unsigned int ee_len;
37060031462bSMingming Cao int depth;
37070031462bSMingming Cao int err = 0;
37080031462bSMingming Cao
37090031462bSMingming Cao depth = ext_depth(inode);
37100031462bSMingming Cao ex = path[depth].p_ext;
3711dee1f973SDmitry Monakhov ee_block = le32_to_cpu(ex->ee_block);
3712dee1f973SDmitry Monakhov ee_len = ext4_ext_get_actual_len(ex);
37130031462bSMingming Cao
3714197217a5SYongqiang Yang ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3715197217a5SYongqiang Yang "block %llu, max_blocks %u\n", inode->i_ino,
3716dee1f973SDmitry Monakhov (unsigned long long)ee_block, ee_len);
3717dee1f973SDmitry Monakhov
3718ff95ec22SDmitry Monakhov /* If the extent is larger than requested, it is a clear sign that we
3719ff95ec22SDmitry Monakhov * still have some extent state machine issues left. So extent_split
3720ff95ec22SDmitry Monakhov * is still required.
3721ff95ec22SDmitry Monakhov * TODO: Once all related issues are fixed, this situation should be
3722ff95ec22SDmitry Monakhov * illegal.
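 *
 * Editor's illustration (not from the original source; the numbers are
 * hypothetical): if the completed IO covered blocks [40, 48) but the
 * extent found is [32, 64), the extent is first split so that [40, 48)
 * stands alone before it is marked initialized.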
3723ff95ec22SDmitry Monakhov */ 3724dee1f973SDmitry Monakhov if (ee_block != map->m_lblk || ee_len > map->m_len) { 3725ff95ec22SDmitry Monakhov #ifdef EXT4_DEBUG 3726ff95ec22SDmitry Monakhov ext4_warning("Inode (%ld) finished: extent logical block %llu," 3727ff95ec22SDmitry Monakhov " len %u; IO logical block %llu, len %u\n", 3728ff95ec22SDmitry Monakhov inode->i_ino, (unsigned long long)ee_block, ee_len, 3729ff95ec22SDmitry Monakhov (unsigned long long)map->m_lblk, map->m_len); 3730ff95ec22SDmitry Monakhov #endif 3731dfe50809STheodore Ts'o err = ext4_split_convert_extents(handle, inode, map, ppath, 3732dee1f973SDmitry Monakhov EXT4_GET_BLOCKS_CONVERT); 3733dee1f973SDmitry Monakhov if (err < 0) 3734dfe50809STheodore Ts'o return err; 3735ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3736dfe50809STheodore Ts'o if (IS_ERR(path)) 3737dfe50809STheodore Ts'o return PTR_ERR(path); 3738dee1f973SDmitry Monakhov depth = ext_depth(inode); 3739dee1f973SDmitry Monakhov ex = path[depth].p_ext; 3740dee1f973SDmitry Monakhov } 3741197217a5SYongqiang Yang 37420031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth); 37430031462bSMingming Cao if (err) 37440031462bSMingming Cao goto out; 37450031462bSMingming Cao /* first mark the extent as initialized */ 37460031462bSMingming Cao ext4_ext_mark_initialized(ex); 37470031462bSMingming Cao 3748197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because 3749197217a5SYongqiang Yang * borders are not changed 37500031462bSMingming Cao */ 3751ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3752197217a5SYongqiang Yang 37530031462bSMingming Cao /* Mark modified extent as dirty */ 3754ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 37550031462bSMingming Cao out: 37560031462bSMingming Cao ext4_ext_show_leaf(inode, path); 37570031462bSMingming Cao return err; 37580031462bSMingming Cao } 37590031462bSMingming Cao 3760515f41c3SAneesh Kumar K.V static void unmap_underlying_metadata_blocks(struct block_device *bdev, 3761515f41c3SAneesh Kumar K.V sector_t block, int count) 3762515f41c3SAneesh Kumar K.V { 3763515f41c3SAneesh Kumar K.V int i; 3764515f41c3SAneesh Kumar K.V for (i = 0; i < count; i++) 3765515f41c3SAneesh Kumar K.V unmap_underlying_metadata(bdev, block + i); 3766515f41c3SAneesh Kumar K.V } 3767515f41c3SAneesh Kumar K.V 376858590b06STheodore Ts'o /* 376958590b06STheodore Ts'o * Handle EOFBLOCKS_FL flag, clearing it if necessary 377058590b06STheodore Ts'o */ 377158590b06STheodore Ts'o static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3772d002ebf1SEric Sandeen ext4_lblk_t lblk, 377358590b06STheodore Ts'o struct ext4_ext_path *path, 377458590b06STheodore Ts'o unsigned int len) 377558590b06STheodore Ts'o { 377658590b06STheodore Ts'o int i, depth; 377758590b06STheodore Ts'o struct ext4_extent_header *eh; 377865922cb5SSergey Senozhatsky struct ext4_extent *last_ex; 377958590b06STheodore Ts'o 378058590b06STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 378158590b06STheodore Ts'o return 0; 378258590b06STheodore Ts'o 378358590b06STheodore Ts'o depth = ext_depth(inode); 378458590b06STheodore Ts'o eh = path[depth].p_hdr; 378558590b06STheodore Ts'o 3786afcff5d8SLukas Czerner /* 3787afcff5d8SLukas Czerner * We're going to remove EOFBLOCKS_FL entirely in future so we 3788afcff5d8SLukas Czerner * do not care for this case anymore. Simply remove the flag 3789afcff5d8SLukas Czerner * if there are no extents. 
3790afcff5d8SLukas Czerner */ 3791afcff5d8SLukas Czerner if (unlikely(!eh->eh_entries)) 3792afcff5d8SLukas Czerner goto out; 379358590b06STheodore Ts'o last_ex = EXT_LAST_EXTENT(eh); 379458590b06STheodore Ts'o /* 379558590b06STheodore Ts'o * We should clear the EOFBLOCKS_FL flag if we are writing the 379658590b06STheodore Ts'o * last block in the last extent in the file. We test this by 379758590b06STheodore Ts'o * first checking to see if the caller to 379858590b06STheodore Ts'o * ext4_ext_get_blocks() was interested in the last block (or 379958590b06STheodore Ts'o * a block beyond the last block) in the current extent. If 380058590b06STheodore Ts'o * this turns out to be false, we can bail out from this 380158590b06STheodore Ts'o * function immediately. 380258590b06STheodore Ts'o */ 3803d002ebf1SEric Sandeen if (lblk + len < le32_to_cpu(last_ex->ee_block) + 380458590b06STheodore Ts'o ext4_ext_get_actual_len(last_ex)) 380558590b06STheodore Ts'o return 0; 380658590b06STheodore Ts'o /* 380758590b06STheodore Ts'o * If the caller does appear to be planning to write at or 380858590b06STheodore Ts'o * beyond the end of the current extent, we then test to see 380958590b06STheodore Ts'o * if the current extent is the last extent in the file, by 381058590b06STheodore Ts'o * checking to make sure it was reached via the rightmost node 381158590b06STheodore Ts'o * at each level of the tree. 381258590b06STheodore Ts'o */ 381358590b06STheodore Ts'o for (i = depth-1; i >= 0; i--) 381458590b06STheodore Ts'o if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 381558590b06STheodore Ts'o return 0; 3816afcff5d8SLukas Czerner out: 381758590b06STheodore Ts'o ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 381858590b06STheodore Ts'o return ext4_mark_inode_dirty(handle, inode); 381958590b06STheodore Ts'o } 382058590b06STheodore Ts'o 38217b415bf6SAditya Kali /** 38227b415bf6SAditya Kali * ext4_find_delalloc_range: find delayed allocated block in the given range. 38237b415bf6SAditya Kali * 38247d1b1fbcSZheng Liu * Return 1 if there is a delalloc block in the range, otherwise 0. 
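 *
 * Editor's illustration (not from the original source; the numbers are
 * hypothetical): with a single delayed extent cached for blocks
 * [10, 20), a query for the range [15, 30] returns 1, while a query for
 * [20, 30] returns 0.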
38257b415bf6SAditya Kali */ 3826f7fec032SZheng Liu int ext4_find_delalloc_range(struct inode *inode, 38277b415bf6SAditya Kali ext4_lblk_t lblk_start, 38287d1b1fbcSZheng Liu ext4_lblk_t lblk_end) 38297b415bf6SAditya Kali { 38307d1b1fbcSZheng Liu struct extent_status es; 38317b415bf6SAditya Kali 3832e30b5dcaSYan, Zheng ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es); 383306b0c886SZheng Liu if (es.es_len == 0) 38347d1b1fbcSZheng Liu return 0; /* there is no delay extent in this tree */ 383506b0c886SZheng Liu else if (es.es_lblk <= lblk_start && 383606b0c886SZheng Liu lblk_start < es.es_lblk + es.es_len) 38377b415bf6SAditya Kali return 1; 383806b0c886SZheng Liu else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end) 38397d1b1fbcSZheng Liu return 1; 38407b415bf6SAditya Kali else 38417b415bf6SAditya Kali return 0; 38427b415bf6SAditya Kali } 38437b415bf6SAditya Kali 38447d1b1fbcSZheng Liu int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) 38457b415bf6SAditya Kali { 38467b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 38477b415bf6SAditya Kali ext4_lblk_t lblk_start, lblk_end; 3848f5a44db5STheodore Ts'o lblk_start = EXT4_LBLK_CMASK(sbi, lblk); 38497b415bf6SAditya Kali lblk_end = lblk_start + sbi->s_cluster_ratio - 1; 38507b415bf6SAditya Kali 38517d1b1fbcSZheng Liu return ext4_find_delalloc_range(inode, lblk_start, lblk_end); 38527b415bf6SAditya Kali } 38537b415bf6SAditya Kali 38547b415bf6SAditya Kali /** 38557b415bf6SAditya Kali * Determines how many complete clusters (out of those specified by the 'map') 38567b415bf6SAditya Kali * are under delalloc and were reserved quota for. 38577b415bf6SAditya Kali * This function is called when we are writing out the blocks that were 38587b415bf6SAditya Kali * originally written with their allocation delayed, but then the space was 38597b415bf6SAditya Kali * allocated using fallocate() before the delayed allocation could be resolved. 38607b415bf6SAditya Kali * The cases to look for are: 38617b415bf6SAditya Kali * ('=' indicated delayed allocated blocks 38627b415bf6SAditya Kali * '-' indicates non-delayed allocated blocks) 38637b415bf6SAditya Kali * (a) partial clusters towards beginning and/or end outside of allocated range 38647b415bf6SAditya Kali * are not delalloc'ed. 38657b415bf6SAditya Kali * Ex: 38667b415bf6SAditya Kali * |----c---=|====c====|====c====|===-c----| 38677b415bf6SAditya Kali * |++++++ allocated ++++++| 38687b415bf6SAditya Kali * ==> 4 complete clusters in above example 38697b415bf6SAditya Kali * 38707b415bf6SAditya Kali * (b) partial cluster (outside of allocated range) towards either end is 38717b415bf6SAditya Kali * marked for delayed allocation. In this case, we will exclude that 38727b415bf6SAditya Kali * cluster. 38737b415bf6SAditya Kali * Ex: 38747b415bf6SAditya Kali * |----====c========|========c========| 38757b415bf6SAditya Kali * |++++++ allocated ++++++| 38767b415bf6SAditya Kali * ==> 1 complete clusters in above example 38777b415bf6SAditya Kali * 38787b415bf6SAditya Kali * Ex: 38797b415bf6SAditya Kali * |================c================| 38807b415bf6SAditya Kali * |++++++ allocated ++++++| 38817b415bf6SAditya Kali * ==> 0 complete clusters in above example 38827b415bf6SAditya Kali * 38837b415bf6SAditya Kali * The ext4_da_update_reserve_space will be called only if we 38847b415bf6SAditya Kali * determine here that there were some "entire" clusters that span 38857b415bf6SAditya Kali * this 'allocated' range. 
38867b415bf6SAditya Kali * In the non-bigalloc case, this function will just end up returning num_blks 38877b415bf6SAditya Kali * without ever calling ext4_find_delalloc_range. 38887b415bf6SAditya Kali */ 38897b415bf6SAditya Kali static unsigned int 38907b415bf6SAditya Kali get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, 38917b415bf6SAditya Kali unsigned int num_blks) 38927b415bf6SAditya Kali { 38937b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 38947b415bf6SAditya Kali ext4_lblk_t alloc_cluster_start, alloc_cluster_end; 38957b415bf6SAditya Kali ext4_lblk_t lblk_from, lblk_to, c_offset; 38967b415bf6SAditya Kali unsigned int allocated_clusters = 0; 38977b415bf6SAditya Kali 38987b415bf6SAditya Kali alloc_cluster_start = EXT4_B2C(sbi, lblk_start); 38997b415bf6SAditya Kali alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); 39007b415bf6SAditya Kali 39017b415bf6SAditya Kali /* max possible clusters for this allocation */ 39027b415bf6SAditya Kali allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; 39037b415bf6SAditya Kali 3904d8990240SAditya Kali trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); 3905d8990240SAditya Kali 39067b415bf6SAditya Kali /* Check towards left side */ 3907f5a44db5STheodore Ts'o c_offset = EXT4_LBLK_COFF(sbi, lblk_start); 39087b415bf6SAditya Kali if (c_offset) { 3909f5a44db5STheodore Ts'o lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); 39107b415bf6SAditya Kali lblk_to = lblk_from + c_offset - 1; 39117b415bf6SAditya Kali 39127d1b1fbcSZheng Liu if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 39137b415bf6SAditya Kali allocated_clusters--; 39147b415bf6SAditya Kali } 39157b415bf6SAditya Kali 39167b415bf6SAditya Kali /* Now check towards right. */ 3917f5a44db5STheodore Ts'o c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); 39187b415bf6SAditya Kali if (allocated_clusters && c_offset) { 39197b415bf6SAditya Kali lblk_from = lblk_start + num_blks; 39207b415bf6SAditya Kali lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; 39217b415bf6SAditya Kali 39227d1b1fbcSZheng Liu if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 39237b415bf6SAditya Kali allocated_clusters--; 39247b415bf6SAditya Kali } 39257b415bf6SAditya Kali 39267b415bf6SAditya Kali return allocated_clusters; 39277b415bf6SAditya Kali } 39287b415bf6SAditya Kali 39290031462bSMingming Cao static int 3930e8b83d93STheodore Ts'o convert_initialized_extent(handle_t *handle, struct inode *inode, 3931b8a86845SLukas Czerner struct ext4_map_blocks *map, 39324f224b8bSTheodore Ts'o struct ext4_ext_path **ppath, int flags, 3933b8a86845SLukas Czerner unsigned int allocated, ext4_fsblk_t newblock) 3934b8a86845SLukas Czerner { 39354f224b8bSTheodore Ts'o struct ext4_ext_path *path = *ppath; 3936e8b83d93STheodore Ts'o struct ext4_extent *ex; 3937e8b83d93STheodore Ts'o ext4_lblk_t ee_block; 3938e8b83d93STheodore Ts'o unsigned int ee_len; 3939e8b83d93STheodore Ts'o int depth; 3940b8a86845SLukas Czerner int err = 0; 3941b8a86845SLukas Czerner 3942b8a86845SLukas Czerner /* 3943b8a86845SLukas Czerner * Make sure that the extent is no bigger than we support with 3944556615dcSLukas Czerner * unwritten extent 3945b8a86845SLukas Czerner */ 3946556615dcSLukas Czerner if (map->m_len > EXT_UNWRITTEN_MAX_LEN) 3947556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; 3948b8a86845SLukas Czerner 3949e8b83d93STheodore Ts'o depth = ext_depth(inode); 3950e8b83d93STheodore Ts'o ex = path[depth].p_ext; 3951e8b83d93STheodore Ts'o ee_block = 
le32_to_cpu(ex->ee_block); 3952e8b83d93STheodore Ts'o ee_len = ext4_ext_get_actual_len(ex); 3953e8b83d93STheodore Ts'o 3954e8b83d93STheodore Ts'o ext_debug("%s: inode %lu, logical" 3955e8b83d93STheodore Ts'o "block %llu, max_blocks %u\n", __func__, inode->i_ino, 3956e8b83d93STheodore Ts'o (unsigned long long)ee_block, ee_len); 3957e8b83d93STheodore Ts'o 3958e8b83d93STheodore Ts'o if (ee_block != map->m_lblk || ee_len > map->m_len) { 3959dfe50809STheodore Ts'o err = ext4_split_convert_extents(handle, inode, map, ppath, 3960e8b83d93STheodore Ts'o EXT4_GET_BLOCKS_CONVERT_UNWRITTEN); 3961e8b83d93STheodore Ts'o if (err < 0) 3962e8b83d93STheodore Ts'o return err; 3963ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, ppath, 0); 3964e8b83d93STheodore Ts'o if (IS_ERR(path)) 3965e8b83d93STheodore Ts'o return PTR_ERR(path); 3966e8b83d93STheodore Ts'o depth = ext_depth(inode); 3967e8b83d93STheodore Ts'o ex = path[depth].p_ext; 3968e8b83d93STheodore Ts'o if (!ex) { 3969e8b83d93STheodore Ts'o EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 3970e8b83d93STheodore Ts'o (unsigned long) map->m_lblk); 3971e8b83d93STheodore Ts'o return -EIO; 3972e8b83d93STheodore Ts'o } 3973e8b83d93STheodore Ts'o } 3974e8b83d93STheodore Ts'o 3975e8b83d93STheodore Ts'o err = ext4_ext_get_access(handle, inode, path + depth); 3976e8b83d93STheodore Ts'o if (err) 3977e8b83d93STheodore Ts'o return err; 3978e8b83d93STheodore Ts'o /* first mark the extent as unwritten */ 3979e8b83d93STheodore Ts'o ext4_ext_mark_unwritten(ex); 3980e8b83d93STheodore Ts'o 3981e8b83d93STheodore Ts'o /* note: ext4_ext_correct_indexes() isn't needed here because 3982e8b83d93STheodore Ts'o * borders are not changed 3983e8b83d93STheodore Ts'o */ 3984e8b83d93STheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3985e8b83d93STheodore Ts'o 3986e8b83d93STheodore Ts'o /* Mark modified extent as dirty */ 3987e8b83d93STheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3988e8b83d93STheodore Ts'o if (err) 3989e8b83d93STheodore Ts'o return err; 3990e8b83d93STheodore Ts'o ext4_ext_show_leaf(inode, path); 3991e8b83d93STheodore Ts'o 3992b8a86845SLukas Czerner ext4_update_inode_fsync_trans(handle, inode, 1); 3993e8b83d93STheodore Ts'o err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); 3994e8b83d93STheodore Ts'o if (err) 3995e8b83d93STheodore Ts'o return err; 3996b8a86845SLukas Czerner map->m_flags |= EXT4_MAP_UNWRITTEN; 3997b8a86845SLukas Czerner if (allocated > map->m_len) 3998b8a86845SLukas Czerner allocated = map->m_len; 3999b8a86845SLukas Czerner map->m_len = allocated; 4000e8b83d93STheodore Ts'o return allocated; 4001b8a86845SLukas Czerner } 4002b8a86845SLukas Czerner 4003b8a86845SLukas Czerner static int 4004556615dcSLukas Czerner ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, 4005e35fd660STheodore Ts'o struct ext4_map_blocks *map, 4006dfe50809STheodore Ts'o struct ext4_ext_path **ppath, int flags, 4007e35fd660STheodore Ts'o unsigned int allocated, ext4_fsblk_t newblock) 40080031462bSMingming Cao { 4009dfe50809STheodore Ts'o struct ext4_ext_path *path = *ppath; 40100031462bSMingming Cao int ret = 0; 40110031462bSMingming Cao int err = 0; 4012f45ee3a1SDmitry Monakhov ext4_io_end_t *io = ext4_inode_aio(inode); 40130031462bSMingming Cao 4014556615dcSLukas Czerner ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical " 401588635ca2SZheng Liu "block %llu, max_blocks %u, flags %x, allocated %u\n", 4016e35fd660STheodore Ts'o inode->i_ino, (unsigned long 
long)map->m_lblk, map->m_len,
40170031462bSMingming Cao flags, allocated);
40180031462bSMingming Cao ext4_ext_show_leaf(inode, path);
40190031462bSMingming Cao
402027dd4385SLukas Czerner /*
4021556615dcSLukas Czerner * When writing into unwritten space, we should not fail to
402227dd4385SLukas Czerner * allocate metadata blocks for the new extent block if needed.
402327dd4385SLukas Czerner */
402427dd4385SLukas Czerner flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
402527dd4385SLukas Czerner
4026556615dcSLukas Czerner trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
4027b5645534SZheng Liu allocated, newblock);
4028d8990240SAditya Kali
4029c7064ef1SJiaying Zhang /* get_block() called before submitting the IO: split the extent */
4030c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_PRE_IO) {
4031dfe50809STheodore Ts'o ret = ext4_split_convert_extents(handle, inode, map, ppath,
4032dfe50809STheodore Ts'o flags | EXT4_GET_BLOCKS_CONVERT);
403382e54229SDmitry Monakhov if (ret <= 0)
403482e54229SDmitry Monakhov goto out;
40355f524950SMingming /*
40365f524950SMingming * Flag the inode (non-aio case) or end_io struct (aio case)
403725985edcSLucas De Marchi * that this IO needs conversion to written when the IO is
40385f524950SMingming * completed
40395f524950SMingming */
40400edeb71dSTao Ma if (io)
40410edeb71dSTao Ma ext4_set_io_unwritten_flag(inode, io);
40420edeb71dSTao Ma else
404319f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
4044a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN;
40450031462bSMingming Cao goto out;
40460031462bSMingming Cao }
4047c7064ef1SJiaying Zhang /* IO end_io completed: convert the filled extent to written */
4048c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_CONVERT) {
4049dee1f973SDmitry Monakhov ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
4050dfe50809STheodore Ts'o ppath);
405158590b06STheodore Ts'o if (ret >= 0) {
4052b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1);
4053d002ebf1SEric Sandeen err = check_eofblocks_fl(handle, inode, map->m_lblk,
4054d002ebf1SEric Sandeen path, map->m_len);
405558590b06STheodore Ts'o } else
405658590b06STheodore Ts'o err = ret;
4057cdee7843SZheng Liu map->m_flags |= EXT4_MAP_MAPPED;
405815cc1767SEric Whitney map->m_pblk = newblock;
4059cdee7843SZheng Liu if (allocated > map->m_len)
4060cdee7843SZheng Liu allocated = map->m_len;
4061cdee7843SZheng Liu map->m_len = allocated;
40620031462bSMingming Cao goto out2;
40630031462bSMingming Cao }
40640031462bSMingming Cao /* buffered IO case */
40650031462bSMingming Cao /*
40660031462bSMingming Cao * repeated fallocate creation request:
40670031462bSMingming Cao * we already have an unwritten extent
40680031462bSMingming Cao */
4069556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
4070a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN;
40710031462bSMingming Cao goto map_out;
4072a25a4e1aSZheng Liu }
40730031462bSMingming Cao
40740031462bSMingming Cao /* buffered READ or buffered write_begin() lookup */
40750031462bSMingming Cao if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
40760031462bSMingming Cao /*
40770031462bSMingming Cao * We have blocks reserved already. We
40780031462bSMingming Cao * return allocated blocks so that delalloc
40790031462bSMingming Cao * won't do block reservation for us. But
40800031462bSMingming Cao * the buffer head will be unmapped so that
40810031462bSMingming Cao * a read from the block returns 0s.
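 *
 * Editor's note (not from the original source): a buffered read over a
 * fallocated but still unwritten range takes this path with
 * create == 0; the caller gets the allocated length back, but the
 * buffer head stays unmapped, so the read sees zeroes.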
40820031462bSMingming Cao */ 4083e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNWRITTEN; 40840031462bSMingming Cao goto out1; 40850031462bSMingming Cao } 40860031462bSMingming Cao 40870031462bSMingming Cao /* buffered write, writepage time, convert*/ 4088dfe50809STheodore Ts'o ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); 4089a4e5d88bSDmitry Monakhov if (ret >= 0) 4090b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 40910031462bSMingming Cao out: 40920031462bSMingming Cao if (ret <= 0) { 40930031462bSMingming Cao err = ret; 40940031462bSMingming Cao goto out2; 40950031462bSMingming Cao } else 40960031462bSMingming Cao allocated = ret; 4097e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 4098515f41c3SAneesh Kumar K.V /* 4099515f41c3SAneesh Kumar K.V * if we allocated more blocks than requested 4100515f41c3SAneesh Kumar K.V * we need to make sure we unmap the extra block 4101515f41c3SAneesh Kumar K.V * allocated. The actual needed block will get 4102515f41c3SAneesh Kumar K.V * unmapped later when we find the buffer_head marked 4103515f41c3SAneesh Kumar K.V * new. 4104515f41c3SAneesh Kumar K.V */ 4105e35fd660STheodore Ts'o if (allocated > map->m_len) { 4106515f41c3SAneesh Kumar K.V unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 4107e35fd660STheodore Ts'o newblock + map->m_len, 4108e35fd660STheodore Ts'o allocated - map->m_len); 4109e35fd660STheodore Ts'o allocated = map->m_len; 4110515f41c3SAneesh Kumar K.V } 41113a225670SZheng Liu map->m_len = allocated; 41125f634d06SAneesh Kumar K.V 41135f634d06SAneesh Kumar K.V /* 41145f634d06SAneesh Kumar K.V * If we have done fallocate with the offset that is already 41155f634d06SAneesh Kumar K.V * delayed allocated, we would have block reservation 41165f634d06SAneesh Kumar K.V * and quota reservation done in the delayed write path. 41175f634d06SAneesh Kumar K.V * But fallocate would have already updated quota and block 41185f634d06SAneesh Kumar K.V * count for this offset. So cancel these reservation 41195f634d06SAneesh Kumar K.V */ 41207b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 41217b415bf6SAditya Kali unsigned int reserved_clusters; 41227b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode, 41237b415bf6SAditya Kali map->m_lblk, map->m_len); 41247b415bf6SAditya Kali if (reserved_clusters) 41257b415bf6SAditya Kali ext4_da_update_reserve_space(inode, 41267b415bf6SAditya Kali reserved_clusters, 41277b415bf6SAditya Kali 0); 41287b415bf6SAditya Kali } 41295f634d06SAneesh Kumar K.V 41300031462bSMingming Cao map_out: 4131e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 4132a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { 4133a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, path, 4134a4e5d88bSDmitry Monakhov map->m_len); 4135a4e5d88bSDmitry Monakhov if (err < 0) 4136a4e5d88bSDmitry Monakhov goto out2; 4137a4e5d88bSDmitry Monakhov } 41380031462bSMingming Cao out1: 4139e35fd660STheodore Ts'o if (allocated > map->m_len) 4140e35fd660STheodore Ts'o allocated = map->m_len; 41410031462bSMingming Cao ext4_ext_show_leaf(inode, path); 4142e35fd660STheodore Ts'o map->m_pblk = newblock; 4143e35fd660STheodore Ts'o map->m_len = allocated; 41440031462bSMingming Cao out2: 41450031462bSMingming Cao return err ? 
err : allocated;
41460031462bSMingming Cao }
414758590b06STheodore Ts'o
41480031462bSMingming Cao /*
41494d33b1efSTheodore Ts'o * get_implied_cluster_alloc - check to see if the requested
41504d33b1efSTheodore Ts'o * allocation (in the map structure) overlaps with a cluster already
41514d33b1efSTheodore Ts'o * allocated in an extent.
4152d8990240SAditya Kali * @sb The filesystem superblock structure
41534d33b1efSTheodore Ts'o * @map The requested lblk->pblk mapping
41544d33b1efSTheodore Ts'o * @ex The extent structure which might contain an implied
41554d33b1efSTheodore Ts'o * cluster allocation
41564d33b1efSTheodore Ts'o *
41574d33b1efSTheodore Ts'o * This function is called by ext4_ext_map_blocks() after we failed to
41584d33b1efSTheodore Ts'o * find blocks that were already in the inode's extent tree. Hence,
41594d33b1efSTheodore Ts'o * we know that the beginning of the requested region cannot overlap
41604d33b1efSTheodore Ts'o * the extent from the inode's extent tree. There are three cases we
41614d33b1efSTheodore Ts'o * want to catch. The first is this case:
41624d33b1efSTheodore Ts'o *
41634d33b1efSTheodore Ts'o * |--- cluster # N--|
41644d33b1efSTheodore Ts'o * |--- extent ---| |---- requested region ---|
41654d33b1efSTheodore Ts'o * |==========|
41664d33b1efSTheodore Ts'o *
41674d33b1efSTheodore Ts'o * The second case that we need to test for is this one:
41684d33b1efSTheodore Ts'o *
41694d33b1efSTheodore Ts'o * |--------- cluster # N ----------------|
41704d33b1efSTheodore Ts'o * |--- requested region --| |------- extent ----|
41714d33b1efSTheodore Ts'o * |=======================|
41724d33b1efSTheodore Ts'o *
41734d33b1efSTheodore Ts'o * The third case is when the requested region lies between two extents
41744d33b1efSTheodore Ts'o * within the same cluster:
41754d33b1efSTheodore Ts'o * |------------- cluster # N-------------|
41764d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----|
41774d33b1efSTheodore Ts'o * |------ requested region ------|
41784d33b1efSTheodore Ts'o * |================|
41794d33b1efSTheodore Ts'o *
41804d33b1efSTheodore Ts'o * In each of the above cases, we need to set map->m_pblk and
41814d33b1efSTheodore Ts'o * map->m_len so that they correspond to the extent labelled "|====|"
41824d33b1efSTheodore Ts'o * from cluster #N, since it is already in use for data in
41834d33b1efSTheodore Ts'o * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
41844d33b1efSTheodore Ts'o * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
41854d33b1efSTheodore Ts'o * as a new "allocated" block region. Otherwise, we will return 0 and
41864d33b1efSTheodore Ts'o * ext4_ext_map_blocks() will then allocate one or more new clusters
41874d33b1efSTheodore Ts'o * by calling ext4_mb_new_blocks().
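 *
 * Editor's illustration (not from the original source; the numbers are
 * hypothetical): with s_cluster_ratio = 16, an extent mapping logical
 * blocks [96, 100) to physical blocks [992, 996) shares cluster #6 with
 * a request starting at logical block 100, so map->m_pblk is set to
 * 996, map->m_len is capped at no more than 12 (the blocks left in the
 * cluster) and 1 is returned.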
41884d33b1efSTheodore Ts'o */ 4189d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb, 41904d33b1efSTheodore Ts'o struct ext4_map_blocks *map, 41914d33b1efSTheodore Ts'o struct ext4_extent *ex, 41924d33b1efSTheodore Ts'o struct ext4_ext_path *path) 41934d33b1efSTheodore Ts'o { 4194d8990240SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(sb); 4195f5a44db5STheodore Ts'o ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 41964d33b1efSTheodore Ts'o ext4_lblk_t ex_cluster_start, ex_cluster_end; 419714d7f3efSCurt Wohlgemuth ext4_lblk_t rr_cluster_start; 41984d33b1efSTheodore Ts'o ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 41994d33b1efSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 42004d33b1efSTheodore Ts'o unsigned short ee_len = ext4_ext_get_actual_len(ex); 42014d33b1efSTheodore Ts'o 42024d33b1efSTheodore Ts'o /* The extent passed in that we are trying to match */ 42034d33b1efSTheodore Ts'o ex_cluster_start = EXT4_B2C(sbi, ee_block); 42044d33b1efSTheodore Ts'o ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 42054d33b1efSTheodore Ts'o 42064d33b1efSTheodore Ts'o /* The requested region passed into ext4_map_blocks() */ 42074d33b1efSTheodore Ts'o rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 42084d33b1efSTheodore Ts'o 42094d33b1efSTheodore Ts'o if ((rr_cluster_start == ex_cluster_end) || 42104d33b1efSTheodore Ts'o (rr_cluster_start == ex_cluster_start)) { 42114d33b1efSTheodore Ts'o if (rr_cluster_start == ex_cluster_end) 42124d33b1efSTheodore Ts'o ee_start += ee_len - 1; 4213f5a44db5STheodore Ts'o map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; 42144d33b1efSTheodore Ts'o map->m_len = min(map->m_len, 42154d33b1efSTheodore Ts'o (unsigned) sbi->s_cluster_ratio - c_offset); 42164d33b1efSTheodore Ts'o /* 42174d33b1efSTheodore Ts'o * Check for and handle this case: 42184d33b1efSTheodore Ts'o * 42194d33b1efSTheodore Ts'o * |--------- cluster # N-------------| 42204d33b1efSTheodore Ts'o * |------- extent ----| 42214d33b1efSTheodore Ts'o * |--- requested region ---| 42224d33b1efSTheodore Ts'o * |===========| 42234d33b1efSTheodore Ts'o */ 42244d33b1efSTheodore Ts'o 42254d33b1efSTheodore Ts'o if (map->m_lblk < ee_block) 42264d33b1efSTheodore Ts'o map->m_len = min(map->m_len, ee_block - map->m_lblk); 42274d33b1efSTheodore Ts'o 42284d33b1efSTheodore Ts'o /* 42294d33b1efSTheodore Ts'o * Check for the case where there is already another allocated 42304d33b1efSTheodore Ts'o * block to the right of 'ex' but before the end of the cluster. 
42314d33b1efSTheodore Ts'o *
42324d33b1efSTheodore Ts'o * |------------- cluster # N-------------|
42334d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----|
42344d33b1efSTheodore Ts'o * |------ requested region ------|
42354d33b1efSTheodore Ts'o * |================|
42364d33b1efSTheodore Ts'o */
42374d33b1efSTheodore Ts'o if (map->m_lblk > ee_block) {
42384d33b1efSTheodore Ts'o ext4_lblk_t next = ext4_ext_next_allocated_block(path);
42394d33b1efSTheodore Ts'o map->m_len = min(map->m_len, next - map->m_lblk);
42404d33b1efSTheodore Ts'o }
4241d8990240SAditya Kali
4242d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
42434d33b1efSTheodore Ts'o return 1;
42444d33b1efSTheodore Ts'o }
4245d8990240SAditya Kali
4246d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
42474d33b1efSTheodore Ts'o return 0;
42484d33b1efSTheodore Ts'o }
42494d33b1efSTheodore Ts'o
42504d33b1efSTheodore Ts'o
42514d33b1efSTheodore Ts'o /*
4252f5ab0d1fSMingming Cao * Block allocation/map/preallocation routine for extent-based files
4253f5ab0d1fSMingming Cao *
4254f5ab0d1fSMingming Cao *
4255c278bfecSAneesh Kumar K.V * Needs to be called with
42560e855ac8SAneesh Kumar K.V * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
42570e855ac8SAneesh Kumar K.V * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem).
4258f5ab0d1fSMingming Cao *
4259f5ab0d1fSMingming Cao * return > 0, number of blocks already mapped/allocated
4260f5ab0d1fSMingming Cao * if create == 0 and these are pre-allocated blocks
4261f5ab0d1fSMingming Cao * buffer head is unmapped
4262f5ab0d1fSMingming Cao * otherwise blocks are mapped
4263f5ab0d1fSMingming Cao *
4264f5ab0d1fSMingming Cao * return = 0, if plain lookup failed (blocks have not been allocated)
4265f5ab0d1fSMingming Cao * buffer head is unmapped
4266f5ab0d1fSMingming Cao *
4267f5ab0d1fSMingming Cao * return < 0, error case.
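 *
 * Editor's sketch of a typical call (not from the original source;
 * 'lblk' and 'len' are placeholders):
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	int ret = ext4_ext_map_blocks(handle, inode, &map,
 *				      EXT4_GET_BLOCKS_CREATE);
 *
 * On ret > 0, physical blocks map.m_pblk .. map.m_pblk + map.m_len - 1
 * back the logical range and map.m_flags carries the EXT4_MAP_* bits.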
4268c278bfecSAneesh Kumar K.V */ 4269e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4270e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 4271a86c6181SAlex Tomas { 4272a86c6181SAlex Tomas struct ext4_ext_path *path = NULL; 42734d33b1efSTheodore Ts'o struct ext4_extent newex, *ex, *ex2; 42744d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 42750562e0baSJiaying Zhang ext4_fsblk_t newblock = 0; 4276ce37c429SEric Whitney int free_on_err = 0, err = 0, depth, ret; 42774d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0; 427881fdbb4aSYongqiang Yang unsigned int allocated_clusters = 0; 4279c9de560dSAlex Tomas struct ext4_allocation_request ar; 4280f45ee3a1SDmitry Monakhov ext4_io_end_t *io = ext4_inode_aio(inode); 42814d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset; 428282e54229SDmitry Monakhov int set_unwritten = 0; 4283a86c6181SAlex Tomas 428484fe3befSMingming ext_debug("blocks %u/%u requested for inode %lu\n", 4285e35fd660STheodore Ts'o map->m_lblk, map->m_len, inode->i_ino); 42860562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4287a86c6181SAlex Tomas 4288a86c6181SAlex Tomas /* find extent for this block */ 4289ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, map->m_lblk, NULL, 0); 4290a86c6181SAlex Tomas if (IS_ERR(path)) { 4291a86c6181SAlex Tomas err = PTR_ERR(path); 4292a86c6181SAlex Tomas path = NULL; 4293a86c6181SAlex Tomas goto out2; 4294a86c6181SAlex Tomas } 4295a86c6181SAlex Tomas 4296a86c6181SAlex Tomas depth = ext_depth(inode); 4297a86c6181SAlex Tomas 4298a86c6181SAlex Tomas /* 4299d0d856e8SRandy Dunlap * consistent leaf must not be empty; 4300d0d856e8SRandy Dunlap * this situation is possible, though, _during_ tree modification; 4301ed8a1a76STheodore Ts'o * this is why assert can't be put in ext4_find_extent() 4302a86c6181SAlex Tomas */ 4303273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4304273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address " 4305f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld", 4306f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth, 4307f70f362bSTheodore Ts'o path[depth].p_block); 4308034fb4c9SSurbhi Palande err = -EIO; 4309034fb4c9SSurbhi Palande goto out2; 4310034fb4c9SSurbhi Palande } 4311a86c6181SAlex Tomas 43127e028976SAvantika Mathur ex = path[depth].p_ext; 43137e028976SAvantika Mathur if (ex) { 4314725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4315bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4316a2df2a63SAmit Arora unsigned short ee_len; 4317471d4011SSuparna Bhattacharya 4318b8a86845SLukas Czerner 4319471d4011SSuparna Bhattacharya /* 4320556615dcSLukas Czerner * unwritten extents are treated as holes, except that 432156055d3aSAmit Arora * we split out initialized portions during a write. 
4322471d4011SSuparna Bhattacharya */ 4323a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex); 4324d8990240SAditya Kali 4325d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4326d8990240SAditya Kali 4327d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */ 4328e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) { 4329e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start; 4330d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 4331e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 4332e35fd660STheodore Ts'o ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 4333a86c6181SAlex Tomas ee_block, ee_len, newblock); 433456055d3aSAmit Arora 4335b8a86845SLukas Czerner /* 4336b8a86845SLukas Czerner * If the extent is initialized check whether the 4337b8a86845SLukas Czerner * caller wants to convert it to unwritten. 4338b8a86845SLukas Czerner */ 4339556615dcSLukas Czerner if ((!ext4_ext_is_unwritten(ex)) && 4340b8a86845SLukas Czerner (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4341e8b83d93STheodore Ts'o allocated = convert_initialized_extent( 43424f224b8bSTheodore Ts'o handle, inode, map, &path, 43434f224b8bSTheodore Ts'o flags, allocated, newblock); 4344b8a86845SLukas Czerner goto out2; 4345556615dcSLukas Czerner } else if (!ext4_ext_is_unwritten(ex)) 4346a86c6181SAlex Tomas goto out; 434769eb33dcSZheng Liu 4348556615dcSLukas Czerner ret = ext4_ext_handle_unwritten_extents( 4349dfe50809STheodore Ts'o handle, inode, map, &path, flags, 4350e861304bSAllison Henderson allocated, newblock); 4351ce37c429SEric Whitney if (ret < 0) 4352ce37c429SEric Whitney err = ret; 4353ce37c429SEric Whitney else 4354ce37c429SEric Whitney allocated = ret; 435531cf0f2cSEric Whitney goto out2; 435656055d3aSAmit Arora } 4357a86c6181SAlex Tomas } 4358a86c6181SAlex Tomas 43597b415bf6SAditya Kali if ((sbi->s_cluster_ratio > 1) && 43607d1b1fbcSZheng Liu ext4_find_delalloc_cluster(inode, map->m_lblk)) 43617b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 43627b415bf6SAditya Kali 4363a86c6181SAlex Tomas /* 4364d0d856e8SRandy Dunlap * requested block isn't allocated yet; 4365a86c6181SAlex Tomas * we couldn't try to create block if create flag is zero 4366a86c6181SAlex Tomas */ 4367c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 436856055d3aSAmit Arora /* 436956055d3aSAmit Arora * put just found gap into cache to speed up 437056055d3aSAmit Arora * subsequent requests 437156055d3aSAmit Arora */ 4372d100eef2SZheng Liu if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0) 4373e35fd660STheodore Ts'o ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); 4374a86c6181SAlex Tomas goto out2; 4375a86c6181SAlex Tomas } 43764d33b1efSTheodore Ts'o 4377a86c6181SAlex Tomas /* 4378c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation. 4379a86c6181SAlex Tomas */ 43807b415bf6SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 43814d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk); 4382d0abafacSEric Whitney cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 43834d33b1efSTheodore Ts'o 43844d33b1efSTheodore Ts'o /* 43854d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned 4386ed8a1a76STheodore Ts'o * by ext4_find_extent() implies a cluster we can use. 
43874d33b1efSTheodore Ts'o */ 43884d33b1efSTheodore Ts'o if (cluster_offset && ex && 4389d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 43904d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 43914d33b1efSTheodore Ts'o newblock = map->m_pblk; 43927b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 43934d33b1efSTheodore Ts'o goto got_allocated_blocks; 43944d33b1efSTheodore Ts'o } 4395a86c6181SAlex Tomas 4396c9de560dSAlex Tomas /* find neighbour allocated blocks */ 4397e35fd660STheodore Ts'o ar.lleft = map->m_lblk; 4398c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4399c9de560dSAlex Tomas if (err) 4400c9de560dSAlex Tomas goto out2; 4401e35fd660STheodore Ts'o ar.lright = map->m_lblk; 44024d33b1efSTheodore Ts'o ex2 = NULL; 44034d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4404c9de560dSAlex Tomas if (err) 4405c9de560dSAlex Tomas goto out2; 440625d14f98SAmit Arora 44074d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a 44084d33b1efSTheodore Ts'o * cluster we can use. */ 44094d33b1efSTheodore Ts'o if ((sbi->s_cluster_ratio > 1) && ex2 && 4410d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 44114d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 44124d33b1efSTheodore Ts'o newblock = map->m_pblk; 44137b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 44144d33b1efSTheodore Ts'o goto got_allocated_blocks; 44154d33b1efSTheodore Ts'o } 44164d33b1efSTheodore Ts'o 4417749269faSAmit Arora /* 4418749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in 4419749269faSAmit Arora * a single extent. For an initialized extent this limit is 4420556615dcSLukas Czerner * EXT_INIT_MAX_LEN and for an unwritten extent this limit is 4421556615dcSLukas Czerner * EXT_UNWRITTEN_MAX_LEN. 4422749269faSAmit Arora */ 4423e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN && 4424556615dcSLukas Czerner !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4425e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN; 4426556615dcSLukas Czerner else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && 4427556615dcSLukas Czerner (flags & EXT4_GET_BLOCKS_UNWRIT_EXT)) 4428556615dcSLukas Czerner map->m_len = EXT_UNWRITTEN_MAX_LEN; 4429749269faSAmit Arora 4430e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4431e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len); 44324d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path); 443325d14f98SAmit Arora if (err) 4434b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 443525d14f98SAmit Arora else 4436e35fd660STheodore Ts'o allocated = map->m_len; 4437c9de560dSAlex Tomas 4438c9de560dSAlex Tomas /* allocate new block */ 4439c9de560dSAlex Tomas ar.inode = inode; 4440e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4441e35fd660STheodore Ts'o ar.logical = map->m_lblk; 44424d33b1efSTheodore Ts'o /* 44434d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster 44444d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a 44454d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the 44464d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is 44474d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc() 44484d33b1efSTheodore Ts'o * work correctly. 
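 *
 * Editor's illustration (not from the original source; the numbers are
 * hypothetical): with s_cluster_ratio = 16 and map->m_lblk = 100,
 * offset = 4, so ar.logical and ar.goal are pulled back to the cluster
 * boundary at block 96 and ar.len becomes
 * EXT4_NUM_B2C(sbi, 4 + allocated) clusters.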
44494d33b1efSTheodore Ts'o */ 4450f5a44db5STheodore Ts'o offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 44514d33b1efSTheodore Ts'o ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 44524d33b1efSTheodore Ts'o ar.goal -= offset; 44534d33b1efSTheodore Ts'o ar.logical -= offset; 4454c9de560dSAlex Tomas if (S_ISREG(inode->i_mode)) 4455c9de560dSAlex Tomas ar.flags = EXT4_MB_HINT_DATA; 4456c9de560dSAlex Tomas else 4457c9de560dSAlex Tomas /* disable in-core preallocation for non-regular files */ 4458c9de560dSAlex Tomas ar.flags = 0; 4459556b27abSVivek Haldar if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4460556b27abSVivek Haldar ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4461e3cf5d5dSTheodore Ts'o if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 4462e3cf5d5dSTheodore Ts'o ar.flags |= EXT4_MB_DELALLOC_RESERVED; 4463c9de560dSAlex Tomas newblock = ext4_mb_new_blocks(handle, &ar, &err); 4464a86c6181SAlex Tomas if (!newblock) 4465a86c6181SAlex Tomas goto out2; 446684fe3befSMingming ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4467498e5f24STheodore Ts'o ar.goal, newblock, allocated); 44684d33b1efSTheodore Ts'o free_on_err = 1; 44697b415bf6SAditya Kali allocated_clusters = ar.len; 44704d33b1efSTheodore Ts'o ar.len = EXT4_C2B(sbi, ar.len) - offset; 44714d33b1efSTheodore Ts'o if (ar.len > allocated) 44724d33b1efSTheodore Ts'o ar.len = allocated; 4473a86c6181SAlex Tomas 44744d33b1efSTheodore Ts'o got_allocated_blocks: 4475a86c6181SAlex Tomas /* try to insert new extent into found leaf and return */ 44764d33b1efSTheodore Ts'o ext4_ext_store_pblock(&newex, newblock + offset); 4477c9de560dSAlex Tomas newex.ee_len = cpu_to_le16(ar.len); 4478556615dcSLukas Czerner /* Mark unwritten */ 4479556615dcSLukas Czerner if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){ 4480556615dcSLukas Czerner ext4_ext_mark_unwritten(&newex); 4481a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 44828d5d02e6SMingming Cao /* 4483744692dcSJiaying Zhang * io_end structure was created for every IO write to an 4484556615dcSLukas Czerner * unwritten extent. To avoid unnecessary conversion, 4485744692dcSJiaying Zhang * here we flag the IO that really needs the conversion. 44865f524950SMingming * For non asycn direct IO case, flag the inode state 448725985edcSLucas De Marchi * that we need to perform conversion when IO is done. 44888d5d02e6SMingming Cao */ 4489c8b459f4SLukas Czerner if (flags & EXT4_GET_BLOCKS_PRE_IO) 449082e54229SDmitry Monakhov set_unwritten = 1; 44918d5d02e6SMingming Cao } 4492c8d46e41SJiaying Zhang 4493a4e5d88bSDmitry Monakhov err = 0; 4494a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) 4495a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, 4496a4e5d88bSDmitry Monakhov path, ar.len); 4497575a1d4bSJiaying Zhang if (!err) 4498dfe50809STheodore Ts'o err = ext4_ext_insert_extent(handle, inode, &path, 4499575a1d4bSJiaying Zhang &newex, flags); 450082e54229SDmitry Monakhov 450182e54229SDmitry Monakhov if (!err && set_unwritten) { 450282e54229SDmitry Monakhov if (io) 450382e54229SDmitry Monakhov ext4_set_io_unwritten_flag(inode, io); 450482e54229SDmitry Monakhov else 450582e54229SDmitry Monakhov ext4_set_inode_state(inode, 450682e54229SDmitry Monakhov EXT4_STATE_DIO_UNWRITTEN); 450782e54229SDmitry Monakhov } 450882e54229SDmitry Monakhov 45094d33b1efSTheodore Ts'o if (err && free_on_err) { 45107132de74SMaxim Patlasov int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? 
45117132de74SMaxim Patlasov EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; 4512315054f0SAlex Tomas /* free data blocks we just allocated */ 4513c9de560dSAlex Tomas /* not a good idea to call discard here directly, 4514c9de560dSAlex Tomas * but otherwise we'd need to call it for every free() */ 4515c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 4516c8e15130STheodore Ts'o ext4_free_blocks(handle, inode, NULL, newblock, 4517c8e15130STheodore Ts'o EXT4_C2B(sbi, allocated_clusters), fb_flags); 4518a86c6181SAlex Tomas goto out2; 4519315054f0SAlex Tomas } 4520a86c6181SAlex Tomas 4521a86c6181SAlex Tomas /* previous routine could use block we allocated */ 4522bf89d16fSTheodore Ts'o newblock = ext4_ext_pblock(&newex); 4523b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 4524e35fd660STheodore Ts'o if (allocated > map->m_len) 4525e35fd660STheodore Ts'o allocated = map->m_len; 4526e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 4527a86c6181SAlex Tomas 4528b436b9beSJan Kara /* 45295f634d06SAneesh Kumar K.V * Update reserved blocks/metadata blocks after successful 45305f634d06SAneesh Kumar K.V * block allocation which had been deferred till now. 45315f634d06SAneesh Kumar K.V */ 45327b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 453381fdbb4aSYongqiang Yang unsigned int reserved_clusters; 45347b415bf6SAditya Kali /* 453581fdbb4aSYongqiang Yang * Check how many clusters we had reserved for this allocated range 45367b415bf6SAditya Kali */ 45377b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode, 45387b415bf6SAditya Kali map->m_lblk, allocated); 45397b415bf6SAditya Kali if (map->m_flags & EXT4_MAP_FROM_CLUSTER) { 45407b415bf6SAditya Kali if (reserved_clusters) { 45417b415bf6SAditya Kali /* 45427b415bf6SAditya Kali * We have clusters reserved for this range. 45437b415bf6SAditya Kali * But since we are not doing actual allocation 45447b415bf6SAditya Kali * and are simply using blocks from previously 45457b415bf6SAditya Kali * allocated cluster, we should release the 45467b415bf6SAditya Kali * reservation and not claim quota. 45477b415bf6SAditya Kali */ 45487b415bf6SAditya Kali ext4_da_update_reserve_space(inode, 45497b415bf6SAditya Kali reserved_clusters, 0); 45507b415bf6SAditya Kali } 45517b415bf6SAditya Kali } else { 45527b415bf6SAditya Kali BUG_ON(allocated_clusters < reserved_clusters); 45537b415bf6SAditya Kali if (reserved_clusters < allocated_clusters) { 45545356f261SAditya Kali struct ext4_inode_info *ei = EXT4_I(inode); 45557b415bf6SAditya Kali int reservation = allocated_clusters - 45567b415bf6SAditya Kali reserved_clusters; 45577b415bf6SAditya Kali /* 45587b415bf6SAditya Kali * It seems we claimed a few clusters outside of 45597b415bf6SAditya Kali * the range of this allocation. We should give 45607b415bf6SAditya Kali * them back to the reservation pool. This can 45617b415bf6SAditya Kali * happen in the following case: 45627b415bf6SAditya Kali * 45637b415bf6SAditya Kali * * Suppose s_cluster_ratio is 4 (i.e., each 45647b415bf6SAditya Kali * cluster has 4 blocks). Thus, the clusters 45657b415bf6SAditya Kali * are [0-3],[4-7],[8-11]... 45667b415bf6SAditya Kali * * First comes delayed allocation write for 45677b415bf6SAditya Kali * logical blocks 10 & 11. Since there were no 45687b415bf6SAditya Kali * previous delayed allocated blocks in the 45697b415bf6SAditya Kali * range [8-11], we would reserve 1 cluster 45707b415bf6SAditya Kali * for this write. 45717b415bf6SAditya Kali * * Next comes write for logical blocks 3 to 8.
45727b415bf6SAditya Kali * In this case, we will reserve 2 clusters 45737b415bf6SAditya Kali * (for [0-3] and [4-7]; and not for [8-11], as 45747b415bf6SAditya Kali * that range has delayed allocated blocks). 45757b415bf6SAditya Kali * Thus total reserved clusters now becomes 3. 45767b415bf6SAditya Kali * * Now, during the delayed allocation writeout 45777b415bf6SAditya Kali * time, we will first write blocks [3-8] and 45787b415bf6SAditya Kali * allocate 3 clusters for writing these 45797b415bf6SAditya Kali * blocks. Also, we would claim all these 45807b415bf6SAditya Kali * three clusters above. 45817b415bf6SAditya Kali * * Now when we come here to writeout the 45827b415bf6SAditya Kali * blocks [10-11], we would expect to claim 45837b415bf6SAditya Kali * the reservation of 1 cluster we had made 45847b415bf6SAditya Kali * (and we would claim it since there are no 45857b415bf6SAditya Kali * more delayed allocated blocks in the range 45867b415bf6SAditya Kali * [8-11]). But our reserved cluster count had 45877b415bf6SAditya Kali * already gone to 0. 45887b415bf6SAditya Kali * 45897b415bf6SAditya Kali * Thus, at step 4 above when we determine 45907b415bf6SAditya Kali * that there are still some unwritten delayed 45917b415bf6SAditya Kali * allocated blocks outside of our current 45927b415bf6SAditya Kali * block range, we should increment the 45937b415bf6SAditya Kali * reserved clusters count so that when the 45947b415bf6SAditya Kali * remaining blocks finally get written, we 45957b415bf6SAditya Kali * could claim them. 45967b415bf6SAditya Kali */ 45975356f261SAditya Kali dquot_reserve_block(inode, 45985356f261SAditya Kali EXT4_C2B(sbi, reservation)); 45995356f261SAditya Kali spin_lock(&ei->i_block_reservation_lock); 46005356f261SAditya Kali ei->i_reserved_data_blocks += reservation; 46015356f261SAditya Kali spin_unlock(&ei->i_block_reservation_lock); 46027b415bf6SAditya Kali } 4603232ec872SLukas Czerner /* 4604232ec872SLukas Czerner * We will claim quota for all newly allocated blocks. 4605232ec872SLukas Czerner * We're updating the reserved space *after* the 4606232ec872SLukas Czerner * correction above so we do not accidentally free 4607232ec872SLukas Czerner * all the metadata reservation because we might 4608232ec872SLukas Czerner * actually need it later on. 4609232ec872SLukas Czerner */ 4610232ec872SLukas Czerner ext4_da_update_reserve_space(inode, allocated_clusters, 4611232ec872SLukas Czerner 1); 46127b415bf6SAditya Kali } 46137b415bf6SAditya Kali } 46145f634d06SAneesh Kumar K.V 46155f634d06SAneesh Kumar K.V /* 4616b436b9beSJan Kara * Cache the extent and update transaction to commit on fdatasync only 4617556615dcSLukas Czerner * when it is _not_ an unwritten extent.
4618b436b9beSJan Kara */ 4619556615dcSLukas Czerner if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0) 4620b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 462169eb33dcSZheng Liu else 4622b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0); 4623a86c6181SAlex Tomas out: 4624e35fd660STheodore Ts'o if (allocated > map->m_len) 4625e35fd660STheodore Ts'o allocated = map->m_len; 4626a86c6181SAlex Tomas ext4_ext_show_leaf(inode, path); 4627e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 4628e35fd660STheodore Ts'o map->m_pblk = newblock; 4629e35fd660STheodore Ts'o map->m_len = allocated; 4630a86c6181SAlex Tomas out2: 4631a86c6181SAlex Tomas ext4_ext_drop_refs(path); 4632a86c6181SAlex Tomas kfree(path); 4633e861304bSAllison Henderson 463463b99968STheodore Ts'o trace_ext4_ext_map_blocks_exit(inode, flags, map, 463563b99968STheodore Ts'o err ? err : allocated); 463663b99968STheodore Ts'o ext4_es_lru_add(inode); 46377877191cSLukas Czerner return err ? err : allocated; 4638a86c6181SAlex Tomas } 4639a86c6181SAlex Tomas 4640819c4920STheodore Ts'o void ext4_ext_truncate(handle_t *handle, struct inode *inode) 4641a86c6181SAlex Tomas { 4642a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 4643725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 4644a86c6181SAlex Tomas int err = 0; 4645a86c6181SAlex Tomas 4646a86c6181SAlex Tomas /* 4647d0d856e8SRandy Dunlap * TODO: optimization is possible here. 4648d0d856e8SRandy Dunlap * Probably we need not scan at all, 4649d0d856e8SRandy Dunlap * because page truncation is enough. 4650a86c6181SAlex Tomas */ 4651a86c6181SAlex Tomas 4652a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */ 4653a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size; 4654a86c6181SAlex Tomas ext4_mark_inode_dirty(handle, inode); 4655a86c6181SAlex Tomas 4656a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1) 4657a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb); 46588acd5e9bSTheodore Ts'o retry: 465951865fdaSZheng Liu err = ext4_es_remove_extent(inode, last_block, 466051865fdaSZheng Liu EXT_MAX_BLOCKS - last_block); 466194eec0fcSTheodore Ts'o if (err == -ENOMEM) { 46628acd5e9bSTheodore Ts'o cond_resched(); 46638acd5e9bSTheodore Ts'o congestion_wait(BLK_RW_ASYNC, HZ/50); 46648acd5e9bSTheodore Ts'o goto retry; 46658acd5e9bSTheodore Ts'o } 46668acd5e9bSTheodore Ts'o if (err) { 46678acd5e9bSTheodore Ts'o ext4_std_error(inode->i_sb, err); 46688acd5e9bSTheodore Ts'o return; 46698acd5e9bSTheodore Ts'o } 46705f95d21fSLukas Czerner err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 46718acd5e9bSTheodore Ts'o ext4_std_error(inode->i_sb, err); 4672a86c6181SAlex Tomas } 4673a86c6181SAlex Tomas 46740e8b6879SLukas Czerner static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4675c174e6d6SDmitry Monakhov ext4_lblk_t len, loff_t new_size, 4676c174e6d6SDmitry Monakhov int flags, int mode) 4677a2df2a63SAmit Arora { 4678496ad9aaSAl Viro struct inode *inode = file_inode(file); 4679a2df2a63SAmit Arora handle_t *handle; 4680a2df2a63SAmit Arora int ret = 0; 4681a2df2a63SAmit Arora int ret2 = 0; 4682a2df2a63SAmit Arora int retries = 0; 46832ed88685STheodore Ts'o struct ext4_map_blocks map; 46840e8b6879SLukas Czerner unsigned int credits; 4685c174e6d6SDmitry Monakhov loff_t epos; 4686a2df2a63SAmit Arora 46870e8b6879SLukas Czerner map.m_lblk = offset; 4688c174e6d6SDmitry Monakhov map.m_len = len; 46893c6fe770SGreg Harm /* 46903c6fe770SGreg Harm * Don't normalize the request if it can fit in one 
extent so 46913c6fe770SGreg Harm * that it doesn't get unnecessarily split into multiple 46923c6fe770SGreg Harm * extents. 46933c6fe770SGreg Harm */ 4694556615dcSLukas Czerner if (len <= EXT_UNWRITTEN_MAX_LEN) 46953c6fe770SGreg Harm flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 469660d4616fSDmitry Monakhov 46970e8b6879SLukas Czerner /* 46980e8b6879SLukas Czerner * credits to insert 1 extent into extent tree 46990e8b6879SLukas Czerner */ 47000e8b6879SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len); 47010e8b6879SLukas Czerner 4702a2df2a63SAmit Arora retry: 4703c174e6d6SDmitry Monakhov while (ret >= 0 && len) { 47049924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 47059924a92aSTheodore Ts'o credits); 4706a2df2a63SAmit Arora if (IS_ERR(handle)) { 4707a2df2a63SAmit Arora ret = PTR_ERR(handle); 4708a2df2a63SAmit Arora break; 4709a2df2a63SAmit Arora } 4710a4e5d88bSDmitry Monakhov ret = ext4_map_blocks(handle, inode, &map, flags); 4711221879c9SAneesh Kumar K.V if (ret <= 0) { 4712f282ac19SLukas Czerner ext4_debug("inode #%lu: block %u: len %u: " 4713b06acd38SLukas Czerner "ext4_ext_map_blocks returned %d", 4714b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 4715b06acd38SLukas Czerner map.m_len, ret); 4716a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode); 4717a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4718a2df2a63SAmit Arora break; 4719a2df2a63SAmit Arora } 4720c174e6d6SDmitry Monakhov map.m_lblk += ret; 4721c174e6d6SDmitry Monakhov map.m_len = len = len - ret; 4722c174e6d6SDmitry Monakhov epos = (loff_t)map.m_lblk << inode->i_blkbits; 4723c174e6d6SDmitry Monakhov inode->i_ctime = ext4_current_time(inode); 4724c174e6d6SDmitry Monakhov if (new_size) { 4725c174e6d6SDmitry Monakhov if (epos > new_size) 4726c174e6d6SDmitry Monakhov epos = new_size; 4727c174e6d6SDmitry Monakhov if (ext4_update_inode_size(inode, epos) & 0x1) 4728c174e6d6SDmitry Monakhov inode->i_mtime = inode->i_ctime; 4729c174e6d6SDmitry Monakhov } else { 4730c174e6d6SDmitry Monakhov if (epos > inode->i_size) 4731c174e6d6SDmitry Monakhov ext4_set_inode_flag(inode, 4732c174e6d6SDmitry Monakhov EXT4_INODE_EOFBLOCKS); 4733c174e6d6SDmitry Monakhov } 4734c174e6d6SDmitry Monakhov ext4_mark_inode_dirty(handle, inode); 4735a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4736a2df2a63SAmit Arora if (ret2) 4737a2df2a63SAmit Arora break; 4738a2df2a63SAmit Arora } 4739fd28784aSAneesh Kumar K.V if (ret == -ENOSPC && 4740fd28784aSAneesh Kumar K.V ext4_should_retry_alloc(inode->i_sb, &retries)) { 4741fd28784aSAneesh Kumar K.V ret = 0; 4742a2df2a63SAmit Arora goto retry; 4743a2df2a63SAmit Arora } 4744f282ac19SLukas Czerner 47450e8b6879SLukas Czerner return ret > 0 ? 
ret2 : ret; 47460e8b6879SLukas Czerner } 47470e8b6879SLukas Czerner 4748b8a86845SLukas Czerner static long ext4_zero_range(struct file *file, loff_t offset, 4749b8a86845SLukas Czerner loff_t len, int mode) 4750b8a86845SLukas Czerner { 4751b8a86845SLukas Czerner struct inode *inode = file_inode(file); 4752b8a86845SLukas Czerner handle_t *handle = NULL; 4753b8a86845SLukas Czerner unsigned int max_blocks; 4754b8a86845SLukas Czerner loff_t new_size = 0; 4755b8a86845SLukas Czerner int ret = 0; 4756b8a86845SLukas Czerner int flags; 475769dc9536SDmitry Monakhov int credits; 4758c174e6d6SDmitry Monakhov int partial_begin, partial_end; 4759b8a86845SLukas Czerner loff_t start, end; 4760b8a86845SLukas Czerner ext4_lblk_t lblk; 4761b8a86845SLukas Czerner struct address_space *mapping = inode->i_mapping; 4762b8a86845SLukas Czerner unsigned int blkbits = inode->i_blkbits; 4763b8a86845SLukas Czerner 4764b8a86845SLukas Czerner trace_ext4_zero_range(inode, offset, len, mode); 4765b8a86845SLukas Czerner 47666c5e73d3Sjon ernst if (!S_ISREG(inode->i_mode)) 47676c5e73d3Sjon ernst return -EINVAL; 47686c5e73d3Sjon ernst 4769e1ee60fdSNamjae Jeon /* Call ext4_force_commit to flush all data in case of data=journal. */ 4770e1ee60fdSNamjae Jeon if (ext4_should_journal_data(inode)) { 4771e1ee60fdSNamjae Jeon ret = ext4_force_commit(inode->i_sb); 4772e1ee60fdSNamjae Jeon if (ret) 4773e1ee60fdSNamjae Jeon return ret; 4774e1ee60fdSNamjae Jeon } 4775e1ee60fdSNamjae Jeon 4776b8a86845SLukas Czerner /* 4777b8a86845SLukas Czerner * Write out all dirty pages to avoid race conditions, 4778b8a86845SLukas Czerner * then release them. 4779b8a86845SLukas Czerner */ 4780b8a86845SLukas Czerner if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 4781b8a86845SLukas Czerner ret = filemap_write_and_wait_range(mapping, offset, 4782b8a86845SLukas Czerner offset + len - 1); 4783b8a86845SLukas Czerner if (ret) 4784b8a86845SLukas Czerner return ret; 4785b8a86845SLukas Czerner } 4786b8a86845SLukas Czerner 4787b8a86845SLukas Czerner /* 4788b8a86845SLukas Czerner * Round up offset. This is not fallocate, we need to zero out 4789b8a86845SLukas Czerner * blocks, so convert interior block aligned part of the range to 4790b8a86845SLukas Czerner * unwritten and possibly manually zero out unaligned parts of the 4791b8a86845SLukas Czerner * range.
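 *
 * A worked example (illustrative numbers, assuming 4k blocks, blkbits = 12):
 * for offset = 3000 and len = 10000, start = round_up(3000, 4096) = 4096 and
 * end = round_down(13000, 4096) = 12288, so logical blocks 1-2 are converted
 * to unwritten extents while the partial byte ranges [3000, 4095] and
 * [12288, 12999] are zeroed out manually via ext4_zero_partial_blocks().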
4792b8a86845SLukas Czerner */ 4793b8a86845SLukas Czerner start = round_up(offset, 1 << blkbits); 4794b8a86845SLukas Czerner end = round_down((offset + len), 1 << blkbits); 4795b8a86845SLukas Czerner 4796b8a86845SLukas Czerner if (start < offset || end > offset + len) 4797b8a86845SLukas Czerner return -EINVAL; 4798c174e6d6SDmitry Monakhov partial_begin = offset & ((1 << blkbits) - 1); 4799c174e6d6SDmitry Monakhov partial_end = (offset + len) & ((1 << blkbits) - 1); 4800b8a86845SLukas Czerner 4801b8a86845SLukas Czerner lblk = start >> blkbits; 4802b8a86845SLukas Czerner max_blocks = (end >> blkbits); 4803b8a86845SLukas Czerner if (max_blocks < lblk) 4804b8a86845SLukas Czerner max_blocks = 0; 4805b8a86845SLukas Czerner else 4806b8a86845SLukas Czerner max_blocks -= lblk; 4807b8a86845SLukas Czerner 4808556615dcSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT | 4809713e8ddeSTheodore Ts'o EXT4_GET_BLOCKS_CONVERT_UNWRITTEN | 4810713e8ddeSTheodore Ts'o EXT4_EX_NOCACHE; 4811b8a86845SLukas Czerner if (mode & FALLOC_FL_KEEP_SIZE) 4812b8a86845SLukas Czerner flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 4813b8a86845SLukas Czerner 4814b8a86845SLukas Czerner mutex_lock(&inode->i_mutex); 4815b8a86845SLukas Czerner 4816b8a86845SLukas Czerner /* 4817b8a86845SLukas Czerner * Indirect files do not support unwritten extents 4818b8a86845SLukas Czerner */ 4819b8a86845SLukas Czerner if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4820b8a86845SLukas Czerner ret = -EOPNOTSUPP; 4821b8a86845SLukas Czerner goto out_mutex; 4822b8a86845SLukas Czerner } 4823b8a86845SLukas Czerner 4824b8a86845SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) && 4825b8a86845SLukas Czerner offset + len > i_size_read(inode)) { 4826b8a86845SLukas Czerner new_size = offset + len; 4827b8a86845SLukas Czerner ret = inode_newsize_ok(inode, new_size); 4828b8a86845SLukas Czerner if (ret) 4829b8a86845SLukas Czerner goto out_mutex; 4830b8a86845SLukas Czerner /* 4831b8a86845SLukas Czerner * If we have a partial block after EOF we have to allocate 4832b8a86845SLukas Czerner * the entire block. 4833b8a86845SLukas Czerner */ 4834c174e6d6SDmitry Monakhov if (partial_end) 4835b8a86845SLukas Czerner max_blocks += 1; 4836b8a86845SLukas Czerner } 4837b8a86845SLukas Czerner 4838b8a86845SLukas Czerner if (max_blocks > 0) { 4839b8a86845SLukas Czerner 4840b8a86845SLukas Czerner /* Now release the pages and zero block aligned part of pages */ 4841b8a86845SLukas Czerner truncate_pagecache_range(inode, start, end - 1); 4842c174e6d6SDmitry Monakhov inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4843b8a86845SLukas Czerner 4844b8a86845SLukas Czerner /* Wait for all existing dio workers, newcomers will block on i_mutex */ 4845b8a86845SLukas Czerner ext4_inode_block_unlocked_dio(inode); 4846b8a86845SLukas Czerner inode_dio_wait(inode); 4847b8a86845SLukas Czerner 4848c174e6d6SDmitry Monakhov ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 4849c174e6d6SDmitry Monakhov flags, mode); 4850b8a86845SLukas Czerner if (ret) 4851b8a86845SLukas Czerner goto out_dio; 4852713e8ddeSTheodore Ts'o /* 4853713e8ddeSTheodore Ts'o * Remove entire range from the extent status tree. 4854713e8ddeSTheodore Ts'o * 4855713e8ddeSTheodore Ts'o * ext4_es_remove_extent(inode, lblk, max_blocks) is 4856713e8ddeSTheodore Ts'o * NOT sufficient. I'm not sure why this is the case, 4857713e8ddeSTheodore Ts'o * but let's be conservative and remove the extent 4858713e8ddeSTheodore Ts'o * status tree for the entire inode.
There should be 4859713e8ddeSTheodore Ts'o * no outstanding delalloc extents thanks to the 4860713e8ddeSTheodore Ts'o * filemap_write_and_wait_range() call above. 4861713e8ddeSTheodore Ts'o */ 4862713e8ddeSTheodore Ts'o ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); 4863713e8ddeSTheodore Ts'o if (ret) 4864713e8ddeSTheodore Ts'o goto out_dio; 4865b8a86845SLukas Czerner } 4866c174e6d6SDmitry Monakhov if (!partial_begin && !partial_end) 4867c174e6d6SDmitry Monakhov goto out_dio; 4868c174e6d6SDmitry Monakhov 486969dc9536SDmitry Monakhov /* 487069dc9536SDmitry Monakhov * In worst case we have to writeout two nonadjacent unwritten 487169dc9536SDmitry Monakhov * blocks and update the inode 487269dc9536SDmitry Monakhov */ 487369dc9536SDmitry Monakhov credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; 487469dc9536SDmitry Monakhov if (ext4_should_journal_data(inode)) 487569dc9536SDmitry Monakhov credits += 2; 487669dc9536SDmitry Monakhov handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); 4877b8a86845SLukas Czerner if (IS_ERR(handle)) { 4878b8a86845SLukas Czerner ret = PTR_ERR(handle); 4879b8a86845SLukas Czerner ext4_std_error(inode->i_sb, ret); 4880b8a86845SLukas Czerner goto out_dio; 4881b8a86845SLukas Czerner } 4882b8a86845SLukas Czerner 4883b8a86845SLukas Czerner inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4884e5b30416SLukas Czerner if (new_size) { 48854631dbf6SDmitry Monakhov ext4_update_inode_size(inode, new_size); 4886e5b30416SLukas Czerner } else { 4887b8a86845SLukas Czerner /* 4888b8a86845SLukas Czerner * Mark that we allocate beyond EOF so the subsequent truncate 4889b8a86845SLukas Czerner * can proceed even if the new size is the same as i_size. 4890b8a86845SLukas Czerner */ 4891b8a86845SLukas Czerner if ((offset + len) > i_size_read(inode)) 4892b8a86845SLukas Czerner ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4893b8a86845SLukas Czerner } 4894b8a86845SLukas Czerner ext4_mark_inode_dirty(handle, inode); 4895b8a86845SLukas Czerner 4896b8a86845SLukas Czerner /* Zero out partial block at the edges of the range */ 4897b8a86845SLukas Czerner ret = ext4_zero_partial_blocks(handle, inode, offset, len); 4898b8a86845SLukas Czerner 4899b8a86845SLukas Czerner if (file->f_flags & O_SYNC) 4900b8a86845SLukas Czerner ext4_handle_sync(handle); 4901b8a86845SLukas Czerner 4902b8a86845SLukas Czerner ext4_journal_stop(handle); 4903b8a86845SLukas Czerner out_dio: 4904b8a86845SLukas Czerner ext4_inode_resume_unlocked_dio(inode); 4905b8a86845SLukas Czerner out_mutex: 4906b8a86845SLukas Czerner mutex_unlock(&inode->i_mutex); 4907b8a86845SLukas Czerner return ret; 4908b8a86845SLukas Czerner } 4909b8a86845SLukas Czerner 49100e8b6879SLukas Czerner /* 49110e8b6879SLukas Czerner * preallocate space for a file. This implements ext4's fallocate file 49120e8b6879SLukas Czerner * operation, which gets called from sys_fallocate system call. 49130e8b6879SLukas Czerner * For block-mapped files, posix_fallocate should fall back to the method 49140e8b6879SLukas Czerner * of writing zeroes to the required new blocks (the same behavior which is 49150e8b6879SLukas Czerner * expected for file systems which do not support fallocate() system call). 
49160e8b6879SLukas Czerner */ 49170e8b6879SLukas Czerner long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 49180e8b6879SLukas Czerner { 49190e8b6879SLukas Czerner struct inode *inode = file_inode(file); 49200e8b6879SLukas Czerner loff_t new_size = 0; 49210e8b6879SLukas Czerner unsigned int max_blocks; 49220e8b6879SLukas Czerner int ret = 0; 49230e8b6879SLukas Czerner int flags; 49240e8b6879SLukas Czerner ext4_lblk_t lblk; 49250e8b6879SLukas Czerner unsigned int blkbits = inode->i_blkbits; 49260e8b6879SLukas Czerner 49270e8b6879SLukas Czerner /* Return error if mode is not supported */ 49280e8b6879SLukas Czerner if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4929b8a86845SLukas Czerner FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)) 49300e8b6879SLukas Czerner return -EOPNOTSUPP; 49310e8b6879SLukas Czerner 49320e8b6879SLukas Czerner if (mode & FALLOC_FL_PUNCH_HOLE) 49330e8b6879SLukas Czerner return ext4_punch_hole(inode, offset, len); 49340e8b6879SLukas Czerner 49350e8b6879SLukas Czerner ret = ext4_convert_inline_data(inode); 49360e8b6879SLukas Czerner if (ret) 49370e8b6879SLukas Czerner return ret; 49380e8b6879SLukas Czerner 49390e8b6879SLukas Czerner /* 49400e8b6879SLukas Czerner * currently supporting (pre)allocate mode for extent-based 49410e8b6879SLukas Czerner * files _only_ 49420e8b6879SLukas Czerner */ 49430e8b6879SLukas Czerner if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 49440e8b6879SLukas Czerner return -EOPNOTSUPP; 49450e8b6879SLukas Czerner 494640c406c7STheodore Ts'o if (mode & FALLOC_FL_COLLAPSE_RANGE) 494740c406c7STheodore Ts'o return ext4_collapse_range(inode, offset, len); 494840c406c7STheodore Ts'o 4949b8a86845SLukas Czerner if (mode & FALLOC_FL_ZERO_RANGE) 4950b8a86845SLukas Czerner return ext4_zero_range(file, offset, len, mode); 4951b8a86845SLukas Czerner 49520e8b6879SLukas Czerner trace_ext4_fallocate_enter(inode, offset, len, mode); 49530e8b6879SLukas Czerner lblk = offset >> blkbits; 49540e8b6879SLukas Czerner /* 49550e8b6879SLukas Czerner * We can't just convert len to max_blocks because, if blocksize = 4096, 49560e8b6879SLukas Czerner * offset = 3072 and len = 2048, the request spans two blocks even 49570e8b6879SLukas Czerner * though len is only half a block. */ 49580e8b6879SLukas Czerner max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 49590e8b6879SLukas Czerner - lblk; 49600e8b6879SLukas Czerner 4961556615dcSLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT; 49620e8b6879SLukas Czerner if (mode & FALLOC_FL_KEEP_SIZE) 49630e8b6879SLukas Czerner flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 49640e8b6879SLukas Czerner 49650e8b6879SLukas Czerner mutex_lock(&inode->i_mutex); 49660e8b6879SLukas Czerner 49670e8b6879SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) && 49680e8b6879SLukas Czerner offset + len > i_size_read(inode)) { 49690e8b6879SLukas Czerner new_size = offset + len; 49700e8b6879SLukas Czerner ret = inode_newsize_ok(inode, new_size); 49710e8b6879SLukas Czerner if (ret) 49720e8b6879SLukas Czerner goto out; 49730e8b6879SLukas Czerner } 49740e8b6879SLukas Czerner 4975c174e6d6SDmitry Monakhov ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, 4976c174e6d6SDmitry Monakhov flags, mode); 49770e8b6879SLukas Czerner if (ret) 49780e8b6879SLukas Czerner goto out; 49790e8b6879SLukas Czerner 4980c174e6d6SDmitry Monakhov if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { 4981c174e6d6SDmitry Monakhov ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, 4982c174e6d6SDmitry Monakhov EXT4_I(inode)->i_sync_tid); 4983f282ac19SLukas Czerner }
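	/*
	 * The jbd2_complete_transaction() call above waits for the commit of
	 * the transaction recorded in i_sync_tid, so for O_SYNC files the
	 * preallocation is durable before fallocate() returns, mirroring
	 * what an explicit fsync() would provide.
	 */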
4984f282ac19SLukas Czerner out: 498555bd725aSAneesh Kumar K.V mutex_unlock(&inode->i_mutex); 49860e8b6879SLukas Czerner trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 49870e8b6879SLukas Czerner return ret; 4988a2df2a63SAmit Arora } 49896873fa0dSEric Sandeen 49896873fa0dSEric Sandeen /* 49910031462bSMingming Cao * This function converts a range of blocks to written extents. 49920031462bSMingming Cao * The caller of this function will pass the start offset and the size. 49930031462bSMingming Cao * All unwritten extents within this range will be converted to 49940031462bSMingming Cao * written extents. 49950031462bSMingming Cao * 49960031462bSMingming Cao * This function is called from the direct IO end io call back 49970031462bSMingming Cao * function, to convert the fallocated extents after IO is completed. 4998109f5565SMingming * Returns 0 on success. 49990031462bSMingming Cao */ 50006b523df4SJan Kara int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 50016b523df4SJan Kara loff_t offset, ssize_t len) 50020031462bSMingming Cao { 50030031462bSMingming Cao unsigned int max_blocks; 50040031462bSMingming Cao int ret = 0; 50050031462bSMingming Cao int ret2 = 0; 50062ed88685STheodore Ts'o struct ext4_map_blocks map; 50070031462bSMingming Cao unsigned int credits, blkbits = inode->i_blkbits; 50080031462bSMingming Cao 50092ed88685STheodore Ts'o map.m_lblk = offset >> blkbits; 50100031462bSMingming Cao /* 50110031462bSMingming Cao * We can't just convert len to max_blocks because, if blocksize = 4096, 50120031462bSMingming Cao * offset = 3072 and len = 2048, the request spans two blocks. 50130031462bSMingming Cao */ 50142ed88685STheodore Ts'o max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - 50152ed88685STheodore Ts'o map.m_lblk); 50160031462bSMingming Cao /* 50176b523df4SJan Kara * This is somewhat ugly but the idea is clear: When the transaction is 50186b523df4SJan Kara * reserved, everything goes into it. Otherwise we'd rather start several 50196b523df4SJan Kara * smaller transactions for conversion of each extent separately.
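 * (Concretely: with a reserved handle, credits stays 0 below and every
 * iteration of the conversion loop reuses that single restarted handle;
 * without one, each iteration starts and stops its own
 * EXT4_HT_MAP_BLOCKS handle.)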
50206b523df4SJan Kara */ 50216b523df4SJan Kara if (handle) { 50226b523df4SJan Kara handle = ext4_journal_start_reserved(handle, 50236b523df4SJan Kara EXT4_HT_EXT_CONVERT); 50246b523df4SJan Kara if (IS_ERR(handle)) 50256b523df4SJan Kara return PTR_ERR(handle); 50266b523df4SJan Kara credits = 0; 50276b523df4SJan Kara } else { 50286b523df4SJan Kara /* 50290031462bSMingming Cao * credits to insert 1 extent into extent tree 50300031462bSMingming Cao */ 50310031462bSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks); 50326b523df4SJan Kara } 50330031462bSMingming Cao while (ret >= 0 && ret < max_blocks) { 50342ed88685STheodore Ts'o map.m_lblk += ret; 50352ed88685STheodore Ts'o map.m_len = (max_blocks -= ret); 50366b523df4SJan Kara if (credits) { 50376b523df4SJan Kara handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 50386b523df4SJan Kara credits); 50390031462bSMingming Cao if (IS_ERR(handle)) { 50400031462bSMingming Cao ret = PTR_ERR(handle); 50410031462bSMingming Cao break; 50420031462bSMingming Cao } 50436b523df4SJan Kara } 50442ed88685STheodore Ts'o ret = ext4_map_blocks(handle, inode, &map, 5045c7064ef1SJiaying Zhang EXT4_GET_BLOCKS_IO_CONVERT_EXT); 5046b06acd38SLukas Czerner if (ret <= 0) 5047b06acd38SLukas Czerner ext4_warning(inode->i_sb, 5048b06acd38SLukas Czerner "inode #%lu: block %u: len %u: " 504992b97816STheodore Ts'o "ext4_ext_map_blocks returned %d", 5050b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 505192b97816STheodore Ts'o map.m_len, ret); 50520031462bSMingming Cao ext4_mark_inode_dirty(handle, inode); 50536b523df4SJan Kara if (credits) 50540031462bSMingming Cao ret2 = ext4_journal_stop(handle); 50550031462bSMingming Cao if (ret <= 0 || ret2) 50560031462bSMingming Cao break; 50570031462bSMingming Cao } 50586b523df4SJan Kara if (!credits) 50596b523df4SJan Kara ret2 = ext4_journal_stop(handle); 50600031462bSMingming Cao return ret > 0 ? ret2 : ret; 50610031462bSMingming Cao } 50626d9c85ebSYongqiang Yang 50630031462bSMingming Cao /* 506469eb33dcSZheng Liu * If newes is not an existing extent (newes->es_pblk equals zero), find 506569eb33dcSZheng Liu * the delayed extent at the start of newes, update newes accordingly and 506691dd8c11SLukas Czerner * return the start of the next delayed extent. 506791dd8c11SLukas Czerner * 506869eb33dcSZheng Liu * If newes is an existing extent (newes->es_pblk is not equal to zero), 506991dd8c11SLukas Czerner * return the start of the next delayed extent or EXT_MAX_BLOCKS if no delayed 507069eb33dcSZheng Liu * extent is found. Leave newes unmodified. 50716873fa0dSEric Sandeen */ 507291dd8c11SLukas Czerner static int ext4_find_delayed_extent(struct inode *inode, 507369eb33dcSZheng Liu struct extent_status *newes) 50746873fa0dSEric Sandeen { 5075b3aff3e3SZheng Liu struct extent_status es; 5076be401363SZheng Liu ext4_lblk_t block, next_del; 50776873fa0dSEric Sandeen 507869eb33dcSZheng Liu if (newes->es_pblk == 0) { 5079e30b5dcaSYan, Zheng ext4_es_find_delayed_extent_range(inode, newes->es_lblk, 5080e30b5dcaSYan, Zheng newes->es_lblk + newes->es_len - 1, &es); 5081e30b5dcaSYan, Zheng 50826d9c85ebSYongqiang Yang /* 508369eb33dcSZheng Liu * If no extent in the extent tree contains block @newes->es_pblk, 50846d9c85ebSYongqiang Yang * then the block may lie in 1) a hole or 2) a delayed extent. 50856d9c85ebSYongqiang Yang */ 508606b0c886SZheng Liu if (es.es_len == 0) 5087b3aff3e3SZheng Liu /* A hole found. */ 508891dd8c11SLukas Czerner return 0; 50896d9c85ebSYongqiang Yang 509069eb33dcSZheng Liu if (es.es_lblk > newes->es_lblk) { 5091b3aff3e3SZheng Liu /* A hole found.
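 * (For example, if newes starts at block 100 with es_len 100 and the
 * first delayed extent in that range begins at block 120, newes is
 * trimmed just below to the 20-block hole [100, 119].)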
*/ 509269eb33dcSZheng Liu newes->es_len = min(es.es_lblk - newes->es_lblk, 509369eb33dcSZheng Liu newes->es_len); 509491dd8c11SLukas Czerner return 0; 50956873fa0dSEric Sandeen } 50966d9c85ebSYongqiang Yang 509769eb33dcSZheng Liu newes->es_len = es.es_lblk + es.es_len - newes->es_lblk; 50986d9c85ebSYongqiang Yang } 50996873fa0dSEric Sandeen 510069eb33dcSZheng Liu block = newes->es_lblk + newes->es_len; 5101e30b5dcaSYan, Zheng ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es); 5102be401363SZheng Liu if (es.es_len == 0) 5103be401363SZheng Liu next_del = EXT_MAX_BLOCKS; 5104be401363SZheng Liu else 5105be401363SZheng Liu next_del = es.es_lblk; 5106be401363SZheng Liu 510791dd8c11SLukas Czerner return next_del; 51086873fa0dSEric Sandeen } 51096873fa0dSEric Sandeen /* fiemap flags we can handle specified here */ 51106873fa0dSEric Sandeen #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 51116873fa0dSEric Sandeen 51123a06d778SAneesh Kumar K.V static int ext4_xattr_fiemap(struct inode *inode, 51133a06d778SAneesh Kumar K.V struct fiemap_extent_info *fieinfo) 51146873fa0dSEric Sandeen { 51156873fa0dSEric Sandeen __u64 physical = 0; 51166873fa0dSEric Sandeen __u64 length; 51176873fa0dSEric Sandeen __u32 flags = FIEMAP_EXTENT_LAST; 51186873fa0dSEric Sandeen int blockbits = inode->i_sb->s_blocksize_bits; 51196873fa0dSEric Sandeen int error = 0; 51206873fa0dSEric Sandeen 51216873fa0dSEric Sandeen /* in-inode? */ 512219f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 51236873fa0dSEric Sandeen struct ext4_iloc iloc; 51246873fa0dSEric Sandeen int offset; /* offset of xattr in inode */ 51256873fa0dSEric Sandeen 51266873fa0dSEric Sandeen error = ext4_get_inode_loc(inode, &iloc); 51276873fa0dSEric Sandeen if (error) 51286873fa0dSEric Sandeen return error; 5129a60697f4SJan Kara physical = (__u64)iloc.bh->b_blocknr << blockbits; 51306873fa0dSEric Sandeen offset = EXT4_GOOD_OLD_INODE_SIZE + 51316873fa0dSEric Sandeen EXT4_I(inode)->i_extra_isize; 51326873fa0dSEric Sandeen physical += offset; 51336873fa0dSEric Sandeen length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 51346873fa0dSEric Sandeen flags |= FIEMAP_EXTENT_DATA_INLINE; 5135fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 51366873fa0dSEric Sandeen } else { /* external block */ 5137a60697f4SJan Kara physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 51386873fa0dSEric Sandeen length = inode->i_sb->s_blocksize; 51396873fa0dSEric Sandeen } 51406873fa0dSEric Sandeen 51416873fa0dSEric Sandeen if (physical) 51426873fa0dSEric Sandeen error = fiemap_fill_next_extent(fieinfo, 0, physical, 51436873fa0dSEric Sandeen length, flags); 51446873fa0dSEric Sandeen return (error < 0 ? 
error : 0); 51456873fa0dSEric Sandeen } 51466873fa0dSEric Sandeen 51476873fa0dSEric Sandeen int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 51486873fa0dSEric Sandeen __u64 start, __u64 len) 51496873fa0dSEric Sandeen { 51506873fa0dSEric Sandeen ext4_lblk_t start_blk; 51516873fa0dSEric Sandeen int error = 0; 51526873fa0dSEric Sandeen 515394191985STao Ma if (ext4_has_inline_data(inode)) { 515494191985STao Ma int has_inline = 1; 515594191985STao Ma 515694191985STao Ma error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline); 515794191985STao Ma 515894191985STao Ma if (has_inline) 515994191985STao Ma return error; 516094191985STao Ma } 516194191985STao Ma 51627869a4a6STheodore Ts'o if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 51637869a4a6STheodore Ts'o error = ext4_ext_precache(inode); 51647869a4a6STheodore Ts'o if (error) 51657869a4a6STheodore Ts'o return error; 51667869a4a6STheodore Ts'o } 51677869a4a6STheodore Ts'o 51686873fa0dSEric Sandeen /* fallback to generic here if not in extents fmt */ 516912e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 51706873fa0dSEric Sandeen return generic_block_fiemap(inode, fieinfo, start, len, 51716873fa0dSEric Sandeen ext4_get_block); 51726873fa0dSEric Sandeen 51736873fa0dSEric Sandeen if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 51746873fa0dSEric Sandeen return -EBADR; 51756873fa0dSEric Sandeen 51766873fa0dSEric Sandeen if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 51776873fa0dSEric Sandeen error = ext4_xattr_fiemap(inode, fieinfo); 51786873fa0dSEric Sandeen } else { 5179aca92ff6SLeonard Michlmayr ext4_lblk_t len_blks; 5180aca92ff6SLeonard Michlmayr __u64 last_blk; 5181aca92ff6SLeonard Michlmayr 51826873fa0dSEric Sandeen start_blk = start >> inode->i_sb->s_blocksize_bits; 5183aca92ff6SLeonard Michlmayr last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 5184f17722f9SLukas Czerner if (last_blk >= EXT_MAX_BLOCKS) 5185f17722f9SLukas Czerner last_blk = EXT_MAX_BLOCKS-1; 5186aca92ff6SLeonard Michlmayr len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 51876873fa0dSEric Sandeen 51886873fa0dSEric Sandeen /* 518991dd8c11SLukas Czerner * Walk the extent tree gathering extent information 519091dd8c11SLukas Czerner * and pushing extents back to the user. 51916873fa0dSEric Sandeen */ 519291dd8c11SLukas Czerner error = ext4_fill_fiemap_extents(inode, start_blk, 519391dd8c11SLukas Czerner len_blks, fieinfo); 51946873fa0dSEric Sandeen } 5195107a7bd3STheodore Ts'o ext4_es_lru_add(inode); 51966873fa0dSEric Sandeen return error; 51976873fa0dSEric Sandeen } 51989eb79482SNamjae Jeon 51999eb79482SNamjae Jeon /* 52009eb79482SNamjae Jeon * ext4_access_path: 52019eb79482SNamjae Jeon * Function to access the path buffer for marking it dirty. 52029eb79482SNamjae Jeon * It also checks if there are sufficient credits left in the journal handle 52039eb79482SNamjae Jeon * to update path. 
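 * (The 7-credit threshold checked in the body comes from 3 credits for the
 * leaf, superblock and inode plus 2 per block group touched, assuming two
 * block groups, as the comment inside the function spells out.)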
52049eb79482SNamjae Jeon */ 52059eb79482SNamjae Jeon static int 52069eb79482SNamjae Jeon ext4_access_path(handle_t *handle, struct inode *inode, 52079eb79482SNamjae Jeon struct ext4_ext_path *path) 52089eb79482SNamjae Jeon { 52099eb79482SNamjae Jeon int credits, err; 52109eb79482SNamjae Jeon 52119eb79482SNamjae Jeon if (!ext4_handle_valid(handle)) 52129eb79482SNamjae Jeon return 0; 52139eb79482SNamjae Jeon 52149eb79482SNamjae Jeon /* 52159eb79482SNamjae Jeon * Check if we need to extend journal credits: 52169eb79482SNamjae Jeon * 3 for leaf, sb, and inode plus 2 (bmap and group 52179eb79482SNamjae Jeon * descriptor) for each block group; assume two block 52189eb79482SNamjae Jeon * groups 52199eb79482SNamjae Jeon */ 52209eb79482SNamjae Jeon if (handle->h_buffer_credits < 7) { 52219eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 52229eb79482SNamjae Jeon err = ext4_ext_truncate_extend_restart(handle, inode, credits); 52239eb79482SNamjae Jeon /* EAGAIN is success */ 52249eb79482SNamjae Jeon if (err && err != -EAGAIN) 52259eb79482SNamjae Jeon return err; 52269eb79482SNamjae Jeon } 52279eb79482SNamjae Jeon 52289eb79482SNamjae Jeon err = ext4_ext_get_access(handle, inode, path); 52299eb79482SNamjae Jeon return err; 52309eb79482SNamjae Jeon } 52319eb79482SNamjae Jeon 52329eb79482SNamjae Jeon /* 52339eb79482SNamjae Jeon * ext4_ext_shift_path_extents: 52349eb79482SNamjae Jeon * Shift the extents of a path structure lying between path[depth].p_ext 52359eb79482SNamjae Jeon * and EXT_LAST_EXTENT(path[depth].p_hdr) downwards, by subtracting shift 52369eb79482SNamjae Jeon * from the starting block of each extent. 52379eb79482SNamjae Jeon */ 52389eb79482SNamjae Jeon static int 52399eb79482SNamjae Jeon ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 52409eb79482SNamjae Jeon struct inode *inode, handle_t *handle, 52419eb79482SNamjae Jeon ext4_lblk_t *start) 52429eb79482SNamjae Jeon { 52439eb79482SNamjae Jeon int depth, err = 0; 52449eb79482SNamjae Jeon struct ext4_extent *ex_start, *ex_last; 52459eb79482SNamjae Jeon bool update = false; 52469eb79482SNamjae Jeon depth = path->p_depth; 52479eb79482SNamjae Jeon 52489eb79482SNamjae Jeon while (depth >= 0) { 52499eb79482SNamjae Jeon if (depth == path->p_depth) { 52509eb79482SNamjae Jeon ex_start = path[depth].p_ext; 52519eb79482SNamjae Jeon if (!ex_start) 52529eb79482SNamjae Jeon return -EIO; 52539eb79482SNamjae Jeon 52549eb79482SNamjae Jeon ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 52559eb79482SNamjae Jeon if (!ex_last) 52569eb79482SNamjae Jeon return -EIO; 52579eb79482SNamjae Jeon 52589eb79482SNamjae Jeon err = ext4_access_path(handle, inode, path + depth); 52599eb79482SNamjae Jeon if (err) 52609eb79482SNamjae Jeon goto out; 52619eb79482SNamjae Jeon 52629eb79482SNamjae Jeon if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) 52639eb79482SNamjae Jeon update = true; 52649eb79482SNamjae Jeon 5265847c6c42SZheng Liu *start = le32_to_cpu(ex_last->ee_block) + 52669eb79482SNamjae Jeon ext4_ext_get_actual_len(ex_last); 52679eb79482SNamjae Jeon 52689eb79482SNamjae Jeon while (ex_start <= ex_last) { 5269847c6c42SZheng Liu le32_add_cpu(&ex_start->ee_block, -shift); 52706dd834efSLukas Czerner /* Try to merge to the left.
*/ 52716dd834efSLukas Czerner if ((ex_start > 52726dd834efSLukas Czerner EXT_FIRST_EXTENT(path[depth].p_hdr)) && 52736dd834efSLukas Czerner ext4_ext_try_to_merge_right(inode, 52749eb79482SNamjae Jeon path, ex_start - 1)) 52759eb79482SNamjae Jeon ex_last--; 52766dd834efSLukas Czerner else 52779eb79482SNamjae Jeon ex_start++; 52789eb79482SNamjae Jeon } 52799eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth); 52809eb79482SNamjae Jeon if (err) 52819eb79482SNamjae Jeon goto out; 52829eb79482SNamjae Jeon 52839eb79482SNamjae Jeon if (--depth < 0 || !update) 52849eb79482SNamjae Jeon break; 52859eb79482SNamjae Jeon } 52869eb79482SNamjae Jeon 52879eb79482SNamjae Jeon /* Update index too */ 52889eb79482SNamjae Jeon err = ext4_access_path(handle, inode, path + depth); 52899eb79482SNamjae Jeon if (err) 52909eb79482SNamjae Jeon goto out; 52919eb79482SNamjae Jeon 5292847c6c42SZheng Liu le32_add_cpu(&path[depth].p_idx->ei_block, -shift); 52939eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth); 52949eb79482SNamjae Jeon if (err) 52959eb79482SNamjae Jeon goto out; 52969eb79482SNamjae Jeon 52979eb79482SNamjae Jeon /* we are done if current index is not a starting index */ 52989eb79482SNamjae Jeon if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr)) 52999eb79482SNamjae Jeon break; 53009eb79482SNamjae Jeon 53019eb79482SNamjae Jeon depth--; 53029eb79482SNamjae Jeon } 53039eb79482SNamjae Jeon 53049eb79482SNamjae Jeon out: 53059eb79482SNamjae Jeon return err; 53069eb79482SNamjae Jeon } 53079eb79482SNamjae Jeon 53089eb79482SNamjae Jeon /* 53099eb79482SNamjae Jeon * ext4_ext_shift_extents: 53109eb79482SNamjae Jeon * All the extents which lie in the range from start to the last allocated 53119eb79482SNamjae Jeon * block for the file are shifted downwards by shift blocks. 53129eb79482SNamjae Jeon * On success, 0 is returned, error otherwise. 53139eb79482SNamjae Jeon */ 53149eb79482SNamjae Jeon static int 53159eb79482SNamjae Jeon ext4_ext_shift_extents(struct inode *inode, handle_t *handle, 53169eb79482SNamjae Jeon ext4_lblk_t start, ext4_lblk_t shift) 53179eb79482SNamjae Jeon { 53189eb79482SNamjae Jeon struct ext4_ext_path *path; 53199eb79482SNamjae Jeon int ret = 0, depth; 53209eb79482SNamjae Jeon struct ext4_extent *extent; 5321f8fb4f41SDmitry Monakhov ext4_lblk_t stop_block; 53229eb79482SNamjae Jeon ext4_lblk_t ex_start, ex_end; 53239eb79482SNamjae Jeon 53249eb79482SNamjae Jeon /* Let path point to the last extent */ 5325ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); 53269eb79482SNamjae Jeon if (IS_ERR(path)) 53279eb79482SNamjae Jeon return PTR_ERR(path); 53289eb79482SNamjae Jeon 53299eb79482SNamjae Jeon depth = path->p_depth; 53309eb79482SNamjae Jeon extent = path[depth].p_ext; 5331ee4bd0d9STheodore Ts'o if (!extent) 5332ee4bd0d9STheodore Ts'o goto out; 53339eb79482SNamjae Jeon 5334847c6c42SZheng Liu stop_block = le32_to_cpu(extent->ee_block) + 5335847c6c42SZheng Liu ext4_ext_get_actual_len(extent); 53369eb79482SNamjae Jeon 53379eb79482SNamjae Jeon /* Nothing to shift, if hole is at the end of file */ 53389eb79482SNamjae Jeon if (start >= stop_block) 5339ee4bd0d9STheodore Ts'o goto out; 53409eb79482SNamjae Jeon 53419eb79482SNamjae Jeon /* 53429eb79482SNamjae Jeon * Don't start shifting extents until we make sure the hole is big 53439eb79482SNamjae Jeon * enough to accommodate the shift.
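 * (Illustrative: if collapsing created the hole [100, 199] and the extent
 * to the left covers [0, 99], then start is 200, ex_end is 100 and a shift
 * of at most start - ex_end = 100 blocks passes the check below.)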
53449eb79482SNamjae Jeon */ 5345ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, start - 1, &path, 0); 53468dc79ec4SDmitry Monakhov if (IS_ERR(path)) 53478dc79ec4SDmitry Monakhov return PTR_ERR(path); 53489eb79482SNamjae Jeon depth = path->p_depth; 53499eb79482SNamjae Jeon extent = path[depth].p_ext; 53508dc79ec4SDmitry Monakhov if (extent) { 5351847c6c42SZheng Liu ex_start = le32_to_cpu(extent->ee_block); 5352847c6c42SZheng Liu ex_end = le32_to_cpu(extent->ee_block) + 5353847c6c42SZheng Liu ext4_ext_get_actual_len(extent); 53548dc79ec4SDmitry Monakhov } else { 53558dc79ec4SDmitry Monakhov ex_start = 0; 53568dc79ec4SDmitry Monakhov ex_end = 0; 53578dc79ec4SDmitry Monakhov } 53589eb79482SNamjae Jeon 53599eb79482SNamjae Jeon if ((start == ex_start && shift > ex_start) || 53609eb79482SNamjae Jeon (shift > start - ex_end)) 53619eb79482SNamjae Jeon return -EINVAL; 53629eb79482SNamjae Jeon 53639eb79482SNamjae Jeon /* It's safe to start updating extents */ 53649eb79482SNamjae Jeon while (start < stop_block) { 5365ed8a1a76STheodore Ts'o path = ext4_find_extent(inode, start, &path, 0); 53669eb79482SNamjae Jeon if (IS_ERR(path)) 53679eb79482SNamjae Jeon return PTR_ERR(path); 53689eb79482SNamjae Jeon depth = path->p_depth; 53699eb79482SNamjae Jeon extent = path[depth].p_ext; 5370a18ed359SDmitry Monakhov if (!extent) { 5371a18ed359SDmitry Monakhov EXT4_ERROR_INODE(inode, "unexpected hole at %lu", 5372a18ed359SDmitry Monakhov (unsigned long) start); 5373a18ed359SDmitry Monakhov return -EIO; 5374a18ed359SDmitry Monakhov } 5375f8fb4f41SDmitry Monakhov if (start > le32_to_cpu(extent->ee_block)) { 53769eb79482SNamjae Jeon /* Hole, move to the next extent */ 5377f8fb4f41SDmitry Monakhov if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) { 5378f8fb4f41SDmitry Monakhov path[depth].p_ext++; 5379f8fb4f41SDmitry Monakhov } else { 5380f8fb4f41SDmitry Monakhov start = ext4_ext_next_allocated_block(path); 5381f8fb4f41SDmitry Monakhov continue; 53829eb79482SNamjae Jeon } 53839eb79482SNamjae Jeon } 53849eb79482SNamjae Jeon ret = ext4_ext_shift_path_extents(path, shift, inode, 53859eb79482SNamjae Jeon handle, &start); 53869eb79482SNamjae Jeon if (ret) 53879eb79482SNamjae Jeon break; 53889eb79482SNamjae Jeon } 5389ee4bd0d9STheodore Ts'o out: 5390ee4bd0d9STheodore Ts'o ext4_ext_drop_refs(path); 5391ee4bd0d9STheodore Ts'o kfree(path); 53929eb79482SNamjae Jeon return ret; 53939eb79482SNamjae Jeon } 53949eb79482SNamjae Jeon 53959eb79482SNamjae Jeon /* 53969eb79482SNamjae Jeon * ext4_collapse_range: 53979eb79482SNamjae Jeon * This implements fallocate's collapse-range functionality for ext4. 53989eb79482SNamjae Jeon * Returns 0 on success, non-zero on error. 53999eb79482SNamjae Jeon */ 54009eb79482SNamjae Jeon int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) 54019eb79482SNamjae Jeon { 54029eb79482SNamjae Jeon struct super_block *sb = inode->i_sb; 54039eb79482SNamjae Jeon ext4_lblk_t punch_start, punch_stop; 54049eb79482SNamjae Jeon handle_t *handle; 54059eb79482SNamjae Jeon unsigned int credits; 5406a8680e0dSNamjae Jeon loff_t new_size, ioffset; 54079eb79482SNamjae Jeon int ret; 54089eb79482SNamjae Jeon 54099eb79482SNamjae Jeon /* Collapse range works only on fs block size aligned offsets.
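 * (e.g. with 4k clusters, offset = 8192 and len = 4096 remove logical
 * block 2 and shift every extent above it down by one block, while an
 * unaligned offset such as 3000 is rejected with -EINVAL below.)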
*/ 5410ee98fa3aSNamjae Jeon if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) || 5411ee98fa3aSNamjae Jeon len & (EXT4_CLUSTER_SIZE(sb) - 1)) 54129eb79482SNamjae Jeon return -EINVAL; 54139eb79482SNamjae Jeon 54149eb79482SNamjae Jeon if (!S_ISREG(inode->i_mode)) 541586f1ca38STheodore Ts'o return -EINVAL; 54169eb79482SNamjae Jeon 54179eb79482SNamjae Jeon trace_ext4_collapse_range(inode, offset, len); 54189eb79482SNamjae Jeon 54199eb79482SNamjae Jeon punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 54209eb79482SNamjae Jeon punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 54219eb79482SNamjae Jeon 54221ce01c4aSNamjae Jeon /* Call ext4_force_commit to flush all data in case of data=journal. */ 54231ce01c4aSNamjae Jeon if (ext4_should_journal_data(inode)) { 54241ce01c4aSNamjae Jeon ret = ext4_force_commit(inode->i_sb); 54251ce01c4aSNamjae Jeon if (ret) 54261ce01c4aSNamjae Jeon return ret; 54271ce01c4aSNamjae Jeon } 54281ce01c4aSNamjae Jeon 5429a8680e0dSNamjae Jeon /* 5430a8680e0dSNamjae Jeon * Need to round down offset to be aligned with page size boundary 5431a8680e0dSNamjae Jeon * for page size > block size. 5432a8680e0dSNamjae Jeon */ 5433a8680e0dSNamjae Jeon ioffset = round_down(offset, PAGE_SIZE); 5434a8680e0dSNamjae Jeon 54359eb79482SNamjae Jeon /* Write out all dirty pages */ 5436a8680e0dSNamjae Jeon ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, 5437a8680e0dSNamjae Jeon LLONG_MAX); 54389eb79482SNamjae Jeon if (ret) 54399eb79482SNamjae Jeon return ret; 54409eb79482SNamjae Jeon 54419eb79482SNamjae Jeon /* Take mutex lock */ 54429eb79482SNamjae Jeon mutex_lock(&inode->i_mutex); 54439eb79482SNamjae Jeon 544423fffa92SLukas Czerner /* 544523fffa92SLukas Czerner * There is no need to overlap collapse range with EOF, in which case 544623fffa92SLukas Czerner * it is effectively a truncate operation 544723fffa92SLukas Czerner */ 544823fffa92SLukas Czerner if (offset + len >= i_size_read(inode)) { 544923fffa92SLukas Czerner ret = -EINVAL; 545023fffa92SLukas Czerner goto out_mutex; 545123fffa92SLukas Czerner } 545223fffa92SLukas Czerner 54539eb79482SNamjae Jeon /* Currently just for extent based files */ 54549eb79482SNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 54559eb79482SNamjae Jeon ret = -EOPNOTSUPP; 54569eb79482SNamjae Jeon goto out_mutex; 54579eb79482SNamjae Jeon } 54589eb79482SNamjae Jeon 5459a8680e0dSNamjae Jeon truncate_pagecache(inode, ioffset); 54609eb79482SNamjae Jeon 54619eb79482SNamjae Jeon /* Wait for existing dio to complete */ 54629eb79482SNamjae Jeon ext4_inode_block_unlocked_dio(inode); 54639eb79482SNamjae Jeon inode_dio_wait(inode); 54649eb79482SNamjae Jeon 54659eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 54669eb79482SNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 54679eb79482SNamjae Jeon if (IS_ERR(handle)) { 54689eb79482SNamjae Jeon ret = PTR_ERR(handle); 54699eb79482SNamjae Jeon goto out_dio; 54709eb79482SNamjae Jeon } 54719eb79482SNamjae Jeon 54729eb79482SNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem); 54739eb79482SNamjae Jeon ext4_discard_preallocations(inode); 54749eb79482SNamjae Jeon 54759eb79482SNamjae Jeon ret = ext4_es_remove_extent(inode, punch_start, 54762c1d2328SLukas Czerner EXT_MAX_BLOCKS - punch_start); 54779eb79482SNamjae Jeon if (ret) { 54789eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54799eb79482SNamjae Jeon goto out_stop; 54809eb79482SNamjae Jeon } 54819eb79482SNamjae Jeon 54829eb79482SNamjae Jeon ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 
1); 54839eb79482SNamjae Jeon if (ret) { 54849eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54859eb79482SNamjae Jeon goto out_stop; 54869eb79482SNamjae Jeon } 5487ef24f6c2SLukas Czerner ext4_discard_preallocations(inode); 54889eb79482SNamjae Jeon 54899eb79482SNamjae Jeon ret = ext4_ext_shift_extents(inode, handle, punch_stop, 54909eb79482SNamjae Jeon punch_stop - punch_start); 54919eb79482SNamjae Jeon if (ret) { 54929eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 54939eb79482SNamjae Jeon goto out_stop; 54949eb79482SNamjae Jeon } 54959eb79482SNamjae Jeon 54969eb79482SNamjae Jeon new_size = i_size_read(inode) - len; 54979337d5d3SLukas Czerner i_size_write(inode, new_size); 54989eb79482SNamjae Jeon EXT4_I(inode)->i_disksize = new_size; 54999eb79482SNamjae Jeon 55009eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 55019eb79482SNamjae Jeon if (IS_SYNC(inode)) 55029eb79482SNamjae Jeon ext4_handle_sync(handle); 55039eb79482SNamjae Jeon inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 55049eb79482SNamjae Jeon ext4_mark_inode_dirty(handle, inode); 55059eb79482SNamjae Jeon 55069eb79482SNamjae Jeon out_stop: 55079eb79482SNamjae Jeon ext4_journal_stop(handle); 55089eb79482SNamjae Jeon out_dio: 55099eb79482SNamjae Jeon ext4_inode_resume_unlocked_dio(inode); 55109eb79482SNamjae Jeon out_mutex: 55119eb79482SNamjae Jeon mutex_unlock(&inode->i_mutex); 55129eb79482SNamjae Jeon return ret; 55139eb79482SNamjae Jeon } 5514fcf6b1b7SDmitry Monakhov 5515fcf6b1b7SDmitry Monakhov /** 5516fcf6b1b7SDmitry Monakhov * ext4_swap_extents - Swap extents between two inodes 5517fcf6b1b7SDmitry Monakhov * 5518fcf6b1b7SDmitry Monakhov * @handle: Journal handle 5518fcf6b1b7SDmitry Monakhov * @inode1: First inode 5519fcf6b1b7SDmitry Monakhov * @inode2: Second inode 5520fcf6b1b7SDmitry Monakhov * @lblk1: Start block for first inode 5521fcf6b1b7SDmitry Monakhov * @lblk2: Start block for second inode 5522fcf6b1b7SDmitry Monakhov * @count: Number of blocks to swap 5523fcf6b1b7SDmitry Monakhov * @unwritten: Mark second inode's extents as unwritten after swap 5524fcf6b1b7SDmitry Monakhov * @erp: Pointer to save error value 5525fcf6b1b7SDmitry Monakhov * 5526fcf6b1b7SDmitry Monakhov * This helper routine does exactly what it promises: "swap extents". All other 5527fcf6b1b7SDmitry Monakhov * stuff such as page-cache locking consistency, bh mapping consistency or 5528fcf6b1b7SDmitry Monakhov * extent's data copying must be performed by the caller.
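 * (The online defragmentation ioctl, EXT4_IOC_MOVE_EXT, is the expected
 * caller; see ext4_move_extents() in move_extent.c.)
 *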
5529fcf6b1b7SDmitry Monakhov * Locking: 5530fcf6b1b7SDmitry Monakhov * i_mutex is held for both inodes 5531fcf6b1b7SDmitry Monakhov * i_data_sem is locked for write for both inodes 5532fcf6b1b7SDmitry Monakhov * Assumptions: 5533fcf6b1b7SDmitry Monakhov * All pages from requested range are locked for both inodes 5534fcf6b1b7SDmitry Monakhov */ 5535fcf6b1b7SDmitry Monakhov int 5536fcf6b1b7SDmitry Monakhov ext4_swap_extents(handle_t *handle, struct inode *inode1, 5537fcf6b1b7SDmitry Monakhov struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2, 5538fcf6b1b7SDmitry Monakhov ext4_lblk_t count, int unwritten, int *erp) 5539fcf6b1b7SDmitry Monakhov { 5540fcf6b1b7SDmitry Monakhov struct ext4_ext_path *path1 = NULL; 5541fcf6b1b7SDmitry Monakhov struct ext4_ext_path *path2 = NULL; 5542fcf6b1b7SDmitry Monakhov int replaced_count = 0; 5543fcf6b1b7SDmitry Monakhov 5544fcf6b1b7SDmitry Monakhov BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem)); 5545fcf6b1b7SDmitry Monakhov BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem)); 5546fcf6b1b7SDmitry Monakhov BUG_ON(!mutex_is_locked(&inode1->i_mutex)); 5547fcf6b1b7SDmitry Monakhov BUG_ON(!mutex_is_locked(&inode2->i_mutex)); 5548fcf6b1b7SDmitry Monakhov 5549fcf6b1b7SDmitry Monakhov *erp = ext4_es_remove_extent(inode1, lblk1, count); 555019008f6dSTheodore Ts'o if (unlikely(*erp)) 5551fcf6b1b7SDmitry Monakhov return 0; 5552fcf6b1b7SDmitry Monakhov *erp = ext4_es_remove_extent(inode2, lblk2, count); 555319008f6dSTheodore Ts'o if (unlikely(*erp)) 5554fcf6b1b7SDmitry Monakhov return 0; 5555fcf6b1b7SDmitry Monakhov 5556fcf6b1b7SDmitry Monakhov while (count) { 5557fcf6b1b7SDmitry Monakhov struct ext4_extent *ex1, *ex2, tmp_ex; 5558fcf6b1b7SDmitry Monakhov ext4_lblk_t e1_blk, e2_blk; 5559fcf6b1b7SDmitry Monakhov int e1_len, e2_len, len; 5560fcf6b1b7SDmitry Monakhov int split = 0; 5561fcf6b1b7SDmitry Monakhov 5562ed8a1a76STheodore Ts'o path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE); 556319008f6dSTheodore Ts'o if (unlikely(IS_ERR(path1))) { 5564fcf6b1b7SDmitry Monakhov *erp = PTR_ERR(path1); 556519008f6dSTheodore Ts'o path1 = NULL; 556619008f6dSTheodore Ts'o finish: 556719008f6dSTheodore Ts'o count = 0; 556819008f6dSTheodore Ts'o goto repeat; 5569fcf6b1b7SDmitry Monakhov } 5570ed8a1a76STheodore Ts'o path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE); 557119008f6dSTheodore Ts'o if (unlikely(IS_ERR(path2))) { 5572fcf6b1b7SDmitry Monakhov *erp = PTR_ERR(path2); 557319008f6dSTheodore Ts'o path2 = NULL; 557419008f6dSTheodore Ts'o goto finish; 5575fcf6b1b7SDmitry Monakhov } 5576fcf6b1b7SDmitry Monakhov ex1 = path1[path1->p_depth].p_ext; 5577fcf6b1b7SDmitry Monakhov ex2 = path2[path2->p_depth].p_ext; 5578fcf6b1b7SDmitry Monakhov /* Do we have something to swap?
*/ 5579fcf6b1b7SDmitry Monakhov if (unlikely(!ex2 || !ex1)) 558019008f6dSTheodore Ts'o goto finish; 5581fcf6b1b7SDmitry Monakhov 5582fcf6b1b7SDmitry Monakhov e1_blk = le32_to_cpu(ex1->ee_block); 5583fcf6b1b7SDmitry Monakhov e2_blk = le32_to_cpu(ex2->ee_block); 5584fcf6b1b7SDmitry Monakhov e1_len = ext4_ext_get_actual_len(ex1); 5585fcf6b1b7SDmitry Monakhov e2_len = ext4_ext_get_actual_len(ex2); 5586fcf6b1b7SDmitry Monakhov 5587fcf6b1b7SDmitry Monakhov /* Hole handling */ 5588fcf6b1b7SDmitry Monakhov if (!in_range(lblk1, e1_blk, e1_len) || 5589fcf6b1b7SDmitry Monakhov !in_range(lblk2, e2_blk, e2_len)) { 5590fcf6b1b7SDmitry Monakhov ext4_lblk_t next1, next2; 5591fcf6b1b7SDmitry Monakhov 5592fcf6b1b7SDmitry Monakhov /* if hole after extent, then go to next extent */ 5593fcf6b1b7SDmitry Monakhov next1 = ext4_ext_next_allocated_block(path1); 5594fcf6b1b7SDmitry Monakhov next2 = ext4_ext_next_allocated_block(path2); 5595fcf6b1b7SDmitry Monakhov /* If hole before extent, then shift to that extent */ 5596fcf6b1b7SDmitry Monakhov if (e1_blk > lblk1) 5597fcf6b1b7SDmitry Monakhov next1 = e1_blk; 5598fcf6b1b7SDmitry Monakhov if (e2_blk > lblk2) 5599fcf6b1b7SDmitry Monakhov next2 = e2_blk; 5600fcf6b1b7SDmitry Monakhov /* Do we have something to swap */ 5601fcf6b1b7SDmitry Monakhov if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS) 560219008f6dSTheodore Ts'o goto finish; 5603fcf6b1b7SDmitry Monakhov /* Move to the rightmost boundary */ 5604fcf6b1b7SDmitry Monakhov len = next1 - lblk1; 5605fcf6b1b7SDmitry Monakhov if (len < next2 - lblk2) 5606fcf6b1b7SDmitry Monakhov len = next2 - lblk2; 5607fcf6b1b7SDmitry Monakhov if (len > count) 5608fcf6b1b7SDmitry Monakhov len = count; 5609fcf6b1b7SDmitry Monakhov lblk1 += len; 5610fcf6b1b7SDmitry Monakhov lblk2 += len; 5611fcf6b1b7SDmitry Monakhov count -= len; 5612fcf6b1b7SDmitry Monakhov goto repeat; 5613fcf6b1b7SDmitry Monakhov } 5614fcf6b1b7SDmitry Monakhov 5615fcf6b1b7SDmitry Monakhov /* Prepare left boundary */ 5616fcf6b1b7SDmitry Monakhov if (e1_blk < lblk1) { 5617fcf6b1b7SDmitry Monakhov split = 1; 5618fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode1, 5619dfe50809STheodore Ts'o &path1, lblk1, 0); 562019008f6dSTheodore Ts'o if (unlikely(*erp)) 562119008f6dSTheodore Ts'o goto finish; 5622fcf6b1b7SDmitry Monakhov } 5623fcf6b1b7SDmitry Monakhov if (e2_blk < lblk2) { 5624fcf6b1b7SDmitry Monakhov split = 1; 5625fcf6b1b7SDmitry Monakhov *erp = ext4_force_split_extent_at(handle, inode2, 5626dfe50809STheodore Ts'o &path2, lblk2, 0); 562719008f6dSTheodore Ts'o if (unlikely(*erp)) 562819008f6dSTheodore Ts'o goto finish; 5629fcf6b1b7SDmitry Monakhov } 5630dfe50809STheodore Ts'o /* ext4_split_extent_at() may result in leaf extent split, 5631fcf6b1b7SDmitry Monakhov * so the path must be revalidated.
5632fcf6b1b7SDmitry Monakhov 		if (split)
5633fcf6b1b7SDmitry Monakhov 			goto repeat;
5634fcf6b1b7SDmitry Monakhov 
5635fcf6b1b7SDmitry Monakhov 		/* Prepare right boundary */
5636fcf6b1b7SDmitry Monakhov 		len = count;
5637fcf6b1b7SDmitry Monakhov 		if (len > e1_blk + e1_len - lblk1)
5638fcf6b1b7SDmitry Monakhov 			len = e1_blk + e1_len - lblk1;
5639fcf6b1b7SDmitry Monakhov 		if (len > e2_blk + e2_len - lblk2)
5640fcf6b1b7SDmitry Monakhov 			len = e2_blk + e2_len - lblk2;
5641fcf6b1b7SDmitry Monakhov 
5642fcf6b1b7SDmitry Monakhov 		if (len != e1_len) {
5643fcf6b1b7SDmitry Monakhov 			split = 1;
5644fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode1,
5645dfe50809STheodore Ts'o 							  &path1, lblk1 + len, 0);
564619008f6dSTheodore Ts'o 			if (unlikely(*erp))
564719008f6dSTheodore Ts'o 				goto finish;
5648fcf6b1b7SDmitry Monakhov 		}
5649fcf6b1b7SDmitry Monakhov 		if (len != e2_len) {
5650fcf6b1b7SDmitry Monakhov 			split = 1;
5651fcf6b1b7SDmitry Monakhov 			*erp = ext4_force_split_extent_at(handle, inode2,
5652dfe50809STheodore Ts'o 							  &path2, lblk2 + len, 0);
5653fcf6b1b7SDmitry Monakhov 			if (unlikely(*erp))
565419008f6dSTheodore Ts'o 				goto finish;
5655fcf6b1b7SDmitry Monakhov 		}
5656dfe50809STheodore Ts'o 		/* ext4_split_extent_at() may result in a leaf extent split,
5657fcf6b1b7SDmitry Monakhov 		 * so the path must be revalidated. */
5658fcf6b1b7SDmitry Monakhov 		if (split)
5659fcf6b1b7SDmitry Monakhov 			goto repeat;
5660fcf6b1b7SDmitry Monakhov 
5661fcf6b1b7SDmitry Monakhov 		BUG_ON(e2_len != e1_len);
5662fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
566319008f6dSTheodore Ts'o 		if (unlikely(*erp))
566419008f6dSTheodore Ts'o 			goto finish;
5665fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
566619008f6dSTheodore Ts'o 		if (unlikely(*erp))
566719008f6dSTheodore Ts'o 			goto finish;
5668fcf6b1b7SDmitry Monakhov 
5669fcf6b1b7SDmitry Monakhov 		/* Both extents are fully inside boundaries. Swap them now */
5670fcf6b1b7SDmitry Monakhov 		tmp_ex = *ex1;
5671fcf6b1b7SDmitry Monakhov 		ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5672fcf6b1b7SDmitry Monakhov 		ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5673fcf6b1b7SDmitry Monakhov 		ex1->ee_len = cpu_to_le16(e2_len);
5674fcf6b1b7SDmitry Monakhov 		ex2->ee_len = cpu_to_le16(e1_len);
5675fcf6b1b7SDmitry Monakhov 		if (unwritten)
5676fcf6b1b7SDmitry Monakhov 			ext4_ext_mark_unwritten(ex2);
5677fcf6b1b7SDmitry Monakhov 		if (ext4_ext_is_unwritten(&tmp_ex))
5678fcf6b1b7SDmitry Monakhov 			ext4_ext_mark_unwritten(ex1);
5679fcf6b1b7SDmitry Monakhov 
5680fcf6b1b7SDmitry Monakhov 		ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5681fcf6b1b7SDmitry Monakhov 		ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5682fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_dirty(handle, inode2, path2 +
5683fcf6b1b7SDmitry Monakhov 				      path2->p_depth);
568419008f6dSTheodore Ts'o 		if (unlikely(*erp))
568519008f6dSTheodore Ts'o 			goto finish;
5686fcf6b1b7SDmitry Monakhov 		*erp = ext4_ext_dirty(handle, inode1, path1 +
5687fcf6b1b7SDmitry Monakhov 				      path1->p_depth);
5688fcf6b1b7SDmitry Monakhov 		/*
5689fcf6b1b7SDmitry Monakhov 		 * Looks scary, huh?  The second inode already points to the
5690fcf6b1b7SDmitry Monakhov 		 * new blocks and was successfully dirtied, but an error here
5691fcf6b1b7SDmitry Monakhov 		 * can only be a journal error, so the full transaction will
5692fcf6b1b7SDmitry Monakhov 		 * be aborted anyway.
5693fcf6b1b7SDmitry Monakhov 		 */
569419008f6dSTheodore Ts'o 		if (unlikely(*erp))
569519008f6dSTheodore Ts'o 			goto finish;
5696fcf6b1b7SDmitry Monakhov 		lblk1 += len;
5697fcf6b1b7SDmitry Monakhov 		lblk2 += len;
5698fcf6b1b7SDmitry Monakhov 		replaced_count += len;
5699fcf6b1b7SDmitry Monakhov 		count -= len;
5700fcf6b1b7SDmitry Monakhov 
5701fcf6b1b7SDmitry Monakhov 	repeat:
5702fcf6b1b7SDmitry Monakhov 		ext4_ext_drop_refs(path1);
5703fcf6b1b7SDmitry Monakhov 		kfree(path1);
5704fcf6b1b7SDmitry Monakhov 		ext4_ext_drop_refs(path2);
5705fcf6b1b7SDmitry Monakhov 		kfree(path2);
5706b7ea89adSTheodore Ts'o 		path1 = path2 = NULL;
5707fcf6b1b7SDmitry Monakhov 	}
5708fcf6b1b7SDmitry Monakhov 	return replaced_count;
5709fcf6b1b7SDmitry Monakhov }
5710
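/*
 * Usage sketch (illustrative only, not part of this file): ext4_swap_extents()
 * relies on its caller to satisfy the locking rules spelled out in the header
 * comment above.  In-tree, the online defrag path in fs/ext4/move_extent.c
 * does this by holding i_mutex on both inodes, locking the pages in the
 * affected range, and taking both i_data_sem semaphores for write via
 * ext4_double_down_write_data_sem().  The fragment below is a minimal,
 * hedged outline of that calling convention; the variable names are
 * hypothetical, and page locking and journal-handle setup are elided.
 */
#if 0	/* illustration only, never compiled */
	int err = 0;
	int swapped;

	/* caller already holds i_mutex on inode1 and inode2 and has the
	 * pages covering [lblk1, lblk1 + count) / [lblk2, lblk2 + count)
	 * locked */
	ext4_double_down_write_data_sem(inode1, inode2);
	/* swap `count' logical blocks between the two inodes; a non-zero
	 * `unwritten' marks the extents moved into inode2 as unwritten */
	swapped = ext4_swap_extents(handle, inode1, inode2, lblk1, lblk2,
				    count, unwritten, &err);
	ext4_double_up_write_data_sem(inode1, inode2);
	/* on error some blocks may already have been swapped: the return
	 * value tells the caller how many were actually replaced */
#endif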