/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct ext4_ext_cache *newex);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
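/*
 * For example, assuming a 4 KiB block size each on-disk node holds
 * (4096 - 12) / 12 = 340 struct ext4_extent (or ext4_extent_idx)
 * entries, while the root kept in the inode's 60-byte i_data holds
 * (60 - 12) / 12 = 4 entries; AGGRESSIVE_TEST artificially shrinks
 * these limits so that deep trees can be exercised with small files.
 */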
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
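/*
 * Worked example, assuming a 4 KiB block size: idxs = (4096 - 12) / 12
 * = 340.  Within a contiguous run of delayed-allocated blocks the
 * function returns 0 for most blocks, 1 every 340th block, 2 every
 * 340^2-th block and 3 every 340^3-th block (after which the counter
 * resets); the first block of a new, discontiguous run is charged the
 * full worst case of ext_depth(inode) + 1 metadata blocks.
 */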
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	if (len == 0)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth) \
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

static int __ext4_ext_check_block(const char *function, unsigned int line,
				  struct inode *inode,
				  struct ext4_extent_header *eh,
				  int depth,
				  struct buffer_head *bh)
{
	int ret;

	if (buffer_verified(bh))
		return 0;
	ret = ext4_ext_check(inode, eh, depth);
	if (ret)
		return ret;
	set_buffer_verified(bh);
	return ret;
}

#define ext4_ext_check_block(inode, eh, depth, bh)	\
	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
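/*
 * Illustrative example: with index entries for logical blocks 0, 100
 * and 200, a lookup of block 150 leaves 'l' pointing at the 200 entry
 * when the loop exits, so p_idx = l - 1 selects the 100 entry, i.e.
 * the last index whose ei_block is <= the searched block.  The search
 * starts at EXT_FIRST_INDEX(eh) + 1 because the first index always
 * covers everything below the second index's ei_block.
 */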
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto err;
		}
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			ret = bh_submit_read(bh);
			if (ret < 0) {
				put_bh(bh);
				goto err;
			}
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			ret = -EIO;
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		ret = ext4_ext_check_block(inode, eh, i, bh);
		if (ret < 0)
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(ret);
}
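/*
 * Note on the returned path: path[0] describes the root header kept in
 * the inode's i_data, path[k] for k > 0 describes the index or leaf
 * block read at level k, and path[depth].p_ext points at the extent
 * closest to 'block' (NULL for an empty leaf).  Callers release the
 * buffer references with ext4_ext_drop_refs() and kfree() the array
 * if they did not pass one in.
 */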
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EIO;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_extent *newext)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data,
		sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate e_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}
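/*
 * Growing in depth always copies the whole current root (which lives
 * in the inode's i_data) into the freshly allocated block and leaves
 * the in-inode root with a single index entry pointing at that block,
 * so the tree depth increases by exactly one and the root itself
 * always stays in the inode.
 */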
1203a86c6181SAlex Tomas */ 1204a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 120555f020dbSAllison Henderson unsigned int flags, 1206a86c6181SAlex Tomas struct ext4_ext_path *path, 1207a86c6181SAlex Tomas struct ext4_extent *newext) 1208a86c6181SAlex Tomas { 1209a86c6181SAlex Tomas struct ext4_ext_path *curp; 1210a86c6181SAlex Tomas int depth, i, err = 0; 1211a86c6181SAlex Tomas 1212a86c6181SAlex Tomas repeat: 1213a86c6181SAlex Tomas i = depth = ext_depth(inode); 1214a86c6181SAlex Tomas 1215a86c6181SAlex Tomas /* walk up to the tree and look for free index entry */ 1216a86c6181SAlex Tomas curp = path + depth; 1217a86c6181SAlex Tomas while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1218a86c6181SAlex Tomas i--; 1219a86c6181SAlex Tomas curp--; 1220a86c6181SAlex Tomas } 1221a86c6181SAlex Tomas 1222d0d856e8SRandy Dunlap /* we use already allocated block for index block, 1223d0d856e8SRandy Dunlap * so subsequent data blocks should be contiguous */ 1224a86c6181SAlex Tomas if (EXT_HAS_FREE_INDEX(curp)) { 1225a86c6181SAlex Tomas /* if we found index with free entry, then use that 1226a86c6181SAlex Tomas * entry: create all needed subtree and add new leaf */ 122755f020dbSAllison Henderson err = ext4_ext_split(handle, inode, flags, path, newext, i); 1228787e0981SShen Feng if (err) 1229787e0981SShen Feng goto out; 1230a86c6181SAlex Tomas 1231a86c6181SAlex Tomas /* refill path */ 1232a86c6181SAlex Tomas ext4_ext_drop_refs(path); 1233a86c6181SAlex Tomas path = ext4_ext_find_extent(inode, 1234725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1235a86c6181SAlex Tomas path); 1236a86c6181SAlex Tomas if (IS_ERR(path)) 1237a86c6181SAlex Tomas err = PTR_ERR(path); 1238a86c6181SAlex Tomas } else { 1239a86c6181SAlex Tomas /* tree is full, time to grow in depth */ 12401939dd84SDmitry Monakhov err = ext4_ext_grow_indepth(handle, inode, flags, newext); 1241a86c6181SAlex Tomas if (err) 1242a86c6181SAlex Tomas goto out; 1243a86c6181SAlex Tomas 1244a86c6181SAlex Tomas /* refill path */ 1245a86c6181SAlex Tomas ext4_ext_drop_refs(path); 1246a86c6181SAlex Tomas path = ext4_ext_find_extent(inode, 1247725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1248a86c6181SAlex Tomas path); 1249a86c6181SAlex Tomas if (IS_ERR(path)) { 1250a86c6181SAlex Tomas err = PTR_ERR(path); 1251a86c6181SAlex Tomas goto out; 1252a86c6181SAlex Tomas } 1253a86c6181SAlex Tomas 1254a86c6181SAlex Tomas /* 1255d0d856e8SRandy Dunlap * only first (depth 0 -> 1) produces free space; 1256d0d856e8SRandy Dunlap * in all other cases we have to split the grown tree 1257a86c6181SAlex Tomas */ 1258a86c6181SAlex Tomas depth = ext_depth(inode); 1259a86c6181SAlex Tomas if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1260d0d856e8SRandy Dunlap /* now we need to split */ 1261a86c6181SAlex Tomas goto repeat; 1262a86c6181SAlex Tomas } 1263a86c6181SAlex Tomas } 1264a86c6181SAlex Tomas 1265a86c6181SAlex Tomas out: 1266a86c6181SAlex Tomas return err; 1267a86c6181SAlex Tomas } 1268a86c6181SAlex Tomas 1269a86c6181SAlex Tomas /* 12701988b51eSAlex Tomas * search the closest allocated block to the left for *logical 12711988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 12721988b51eSAlex Tomas * if *logical is the smallest allocated block, the function 12731988b51eSAlex Tomas * returns 0 at @phys 12741988b51eSAlex Tomas * return value contains 0 (success) or error code 12751988b51eSAlex Tomas */ 12761f109d5aSTheodore Ts'o static int 
ext4_ext_search_left(struct inode *inode,
12771f109d5aSTheodore Ts'o 			    struct ext4_ext_path *path,
12781988b51eSAlex Tomas 			    ext4_lblk_t *logical, ext4_fsblk_t *phys)
12791988b51eSAlex Tomas {
12801988b51eSAlex Tomas 	struct ext4_extent_idx *ix;
12811988b51eSAlex Tomas 	struct ext4_extent *ex;
1282b939e376SAneesh Kumar K.V 	int depth, ee_len;
12831988b51eSAlex Tomas 
1284273df556SFrank Mayhar 	if (unlikely(path == NULL)) {
1285273df556SFrank Mayhar 		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1286273df556SFrank Mayhar 		return -EIO;
1287273df556SFrank Mayhar 	}
12881988b51eSAlex Tomas 	depth = path->p_depth;
12891988b51eSAlex Tomas 	*phys = 0;
12901988b51eSAlex Tomas 
12911988b51eSAlex Tomas 	if (depth == 0 && path->p_ext == NULL)
12921988b51eSAlex Tomas 		return 0;
12931988b51eSAlex Tomas 
12941988b51eSAlex Tomas 	/* usually extent in the path covers blocks smaller
12951988b51eSAlex Tomas 	 * than *logical, but it can be that extent is the
12961988b51eSAlex Tomas 	 * first one in the file */
12971988b51eSAlex Tomas 
12981988b51eSAlex Tomas 	ex = path[depth].p_ext;
1299b939e376SAneesh Kumar K.V 	ee_len = ext4_ext_get_actual_len(ex);
13001988b51eSAlex Tomas 	if (*logical < le32_to_cpu(ex->ee_block)) {
1301273df556SFrank Mayhar 		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1302273df556SFrank Mayhar 			EXT4_ERROR_INODE(inode,
1303273df556SFrank Mayhar 					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1304273df556SFrank Mayhar 					 *logical, le32_to_cpu(ex->ee_block));
1305273df556SFrank Mayhar 			return -EIO;
1306273df556SFrank Mayhar 		}
13071988b51eSAlex Tomas 		while (--depth >= 0) {
13081988b51eSAlex Tomas 			ix = path[depth].p_idx;
1309273df556SFrank Mayhar 			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1310273df556SFrank Mayhar 				EXT4_ERROR_INODE(inode,
1311273df556SFrank Mayhar 						 "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
13126ee3b212STao Ma 						 ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1313273df556SFrank Mayhar 						 EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
13146ee3b212STao Ma le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, 1315273df556SFrank Mayhar depth); 1316273df556SFrank Mayhar return -EIO; 1317273df556SFrank Mayhar } 13181988b51eSAlex Tomas } 13191988b51eSAlex Tomas return 0; 13201988b51eSAlex Tomas } 13211988b51eSAlex Tomas 1322273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1323273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1324273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1325273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1326273df556SFrank Mayhar return -EIO; 1327273df556SFrank Mayhar } 13281988b51eSAlex Tomas 1329b939e376SAneesh Kumar K.V *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1330bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex) + ee_len - 1; 13311988b51eSAlex Tomas return 0; 13321988b51eSAlex Tomas } 13331988b51eSAlex Tomas 13341988b51eSAlex Tomas /* 13351988b51eSAlex Tomas * search the closest allocated block to the right for *logical 13361988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 1337df3ab170STao Ma * if *logical is the largest allocated block, the function 13381988b51eSAlex Tomas * returns 0 at @phys 13391988b51eSAlex Tomas * return value contains 0 (success) or error code 13401988b51eSAlex Tomas */ 13411f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode, 13421f109d5aSTheodore Ts'o struct ext4_ext_path *path, 13434d33b1efSTheodore Ts'o ext4_lblk_t *logical, ext4_fsblk_t *phys, 13444d33b1efSTheodore Ts'o struct ext4_extent **ret_ex) 13451988b51eSAlex Tomas { 13461988b51eSAlex Tomas struct buffer_head *bh = NULL; 13471988b51eSAlex Tomas struct ext4_extent_header *eh; 13481988b51eSAlex Tomas struct ext4_extent_idx *ix; 13491988b51eSAlex Tomas struct ext4_extent *ex; 13501988b51eSAlex Tomas ext4_fsblk_t block; 1351395a87bfSEric Sandeen int depth; /* Note, NOT eh_depth; depth from top of tree */ 1352395a87bfSEric Sandeen int ee_len; 13531988b51eSAlex Tomas 1354273df556SFrank Mayhar if (unlikely(path == NULL)) { 1355273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1356273df556SFrank Mayhar return -EIO; 1357273df556SFrank Mayhar } 13581988b51eSAlex Tomas depth = path->p_depth; 13591988b51eSAlex Tomas *phys = 0; 13601988b51eSAlex Tomas 13611988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 13621988b51eSAlex Tomas return 0; 13631988b51eSAlex Tomas 13641988b51eSAlex Tomas /* usually extent in the path covers blocks smaller 13651988b51eSAlex Tomas * then *logical, but it can be that extent is the 13661988b51eSAlex Tomas * first one in the file */ 13671988b51eSAlex Tomas 13681988b51eSAlex Tomas ex = path[depth].p_ext; 1369b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 13701988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1371273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1372273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1373273df556SFrank Mayhar "first_extent(path[%d].p_hdr) != ex", 1374273df556SFrank Mayhar depth); 1375273df556SFrank Mayhar return -EIO; 1376273df556SFrank Mayhar } 13771988b51eSAlex Tomas while (--depth >= 0) { 13781988b51eSAlex Tomas ix = path[depth].p_idx; 1379273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1380273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1381273df556SFrank Mayhar "ix != EXT_FIRST_INDEX *logical %d!", 1382273df556SFrank Mayhar *logical); 1383273df556SFrank Mayhar return -EIO; 1384273df556SFrank 
Mayhar } 13851988b51eSAlex Tomas } 13864d33b1efSTheodore Ts'o goto found_extent; 13871988b51eSAlex Tomas } 13881988b51eSAlex Tomas 1389273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1390273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1391273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1392273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1393273df556SFrank Mayhar return -EIO; 1394273df556SFrank Mayhar } 13951988b51eSAlex Tomas 13961988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 13971988b51eSAlex Tomas /* next allocated block in this leaf */ 13981988b51eSAlex Tomas ex++; 13994d33b1efSTheodore Ts'o goto found_extent; 14001988b51eSAlex Tomas } 14011988b51eSAlex Tomas 14021988b51eSAlex Tomas /* go up and search for index to the right */ 14031988b51eSAlex Tomas while (--depth >= 0) { 14041988b51eSAlex Tomas ix = path[depth].p_idx; 14051988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 140625f1ee3aSWu Fengguang goto got_index; 14071988b51eSAlex Tomas } 14081988b51eSAlex Tomas 140925f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */ 14101988b51eSAlex Tomas return 0; 14111988b51eSAlex Tomas 141225f1ee3aSWu Fengguang got_index: 14131988b51eSAlex Tomas /* we've found index to the right, let's 14141988b51eSAlex Tomas * follow it and find the closest allocated 14151988b51eSAlex Tomas * block to the right */ 14161988b51eSAlex Tomas ix++; 1417bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 14181988b51eSAlex Tomas while (++depth < path->p_depth) { 14191988b51eSAlex Tomas bh = sb_bread(inode->i_sb, block); 14201988b51eSAlex Tomas if (bh == NULL) 14211988b51eSAlex Tomas return -EIO; 14221988b51eSAlex Tomas eh = ext_block_hdr(bh); 1423395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */ 1424f8489128SDarrick J. Wong if (ext4_ext_check_block(inode, eh, 1425f8489128SDarrick J. Wong path->p_depth - depth, bh)) { 14261988b51eSAlex Tomas put_bh(bh); 14271988b51eSAlex Tomas return -EIO; 14281988b51eSAlex Tomas } 14291988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh); 1430bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 14311988b51eSAlex Tomas put_bh(bh); 14321988b51eSAlex Tomas } 14331988b51eSAlex Tomas 14341988b51eSAlex Tomas bh = sb_bread(inode->i_sb, block); 14351988b51eSAlex Tomas if (bh == NULL) 14361988b51eSAlex Tomas return -EIO; 14371988b51eSAlex Tomas eh = ext_block_hdr(bh); 1438f8489128SDarrick J. Wong if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) { 14391988b51eSAlex Tomas put_bh(bh); 14401988b51eSAlex Tomas return -EIO; 14411988b51eSAlex Tomas } 14421988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh); 14434d33b1efSTheodore Ts'o found_extent: 14441988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block); 1445bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex); 14464d33b1efSTheodore Ts'o *ret_ex = ex; 14474d33b1efSTheodore Ts'o if (bh) 14481988b51eSAlex Tomas put_bh(bh); 14491988b51eSAlex Tomas return 0; 14501988b51eSAlex Tomas } 14511988b51eSAlex Tomas 14521988b51eSAlex Tomas /* 1453d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block: 1454f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1455d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as 1456d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent 1457d0d856e8SRandy Dunlap * with leaves. 
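 *
 * The scan below starts at the leaf level and moves toward the root: at
 * the leaf it returns the logical start of the following extent, at an
 * index level it returns ei_block of the following index entry, and only
 * when every level is already positioned at its last entry does it fall
 * through to EXT_MAX_BLOCKS.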
1458a86c6181SAlex Tomas */ 1459725d26d3SAneesh Kumar K.V static ext4_lblk_t 1460a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1461a86c6181SAlex Tomas { 1462a86c6181SAlex Tomas int depth; 1463a86c6181SAlex Tomas 1464a86c6181SAlex Tomas BUG_ON(path == NULL); 1465a86c6181SAlex Tomas depth = path->p_depth; 1466a86c6181SAlex Tomas 1467a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL) 1468f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1469a86c6181SAlex Tomas 1470a86c6181SAlex Tomas while (depth >= 0) { 1471a86c6181SAlex Tomas if (depth == path->p_depth) { 1472a86c6181SAlex Tomas /* leaf */ 14736f8ff537SCurt Wohlgemuth if (path[depth].p_ext && 14746f8ff537SCurt Wohlgemuth path[depth].p_ext != 1475a86c6181SAlex Tomas EXT_LAST_EXTENT(path[depth].p_hdr)) 1476a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_ext[1].ee_block); 1477a86c6181SAlex Tomas } else { 1478a86c6181SAlex Tomas /* index */ 1479a86c6181SAlex Tomas if (path[depth].p_idx != 1480a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1481a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_idx[1].ei_block); 1482a86c6181SAlex Tomas } 1483a86c6181SAlex Tomas depth--; 1484a86c6181SAlex Tomas } 1485a86c6181SAlex Tomas 1486f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1487a86c6181SAlex Tomas } 1488a86c6181SAlex Tomas 1489a86c6181SAlex Tomas /* 1490d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block: 1491f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1492a86c6181SAlex Tomas */ 14935718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1494a86c6181SAlex Tomas { 1495a86c6181SAlex Tomas int depth; 1496a86c6181SAlex Tomas 1497a86c6181SAlex Tomas BUG_ON(path == NULL); 1498a86c6181SAlex Tomas depth = path->p_depth; 1499a86c6181SAlex Tomas 1500a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */ 1501a86c6181SAlex Tomas if (depth == 0) 1502f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1503a86c6181SAlex Tomas 1504a86c6181SAlex Tomas /* go to index block */ 1505a86c6181SAlex Tomas depth--; 1506a86c6181SAlex Tomas 1507a86c6181SAlex Tomas while (depth >= 0) { 1508a86c6181SAlex Tomas if (path[depth].p_idx != 1509a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1510725d26d3SAneesh Kumar K.V return (ext4_lblk_t) 1511725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block); 1512a86c6181SAlex Tomas depth--; 1513a86c6181SAlex Tomas } 1514a86c6181SAlex Tomas 1515f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1516a86c6181SAlex Tomas } 1517a86c6181SAlex Tomas 1518a86c6181SAlex Tomas /* 1519d0d856e8SRandy Dunlap * ext4_ext_correct_indexes: 1520d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf, 1521d0d856e8SRandy Dunlap * then we have to correct all indexes above. 1522a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases? 
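 *
 * Concretely: the ee_block of the (new) first extent in the leaf is
 * written into the parent index entry, and the update keeps moving up
 * the tree as long as the just-corrected index entry is itself the
 * first entry of its node.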
1523a86c6181SAlex Tomas */ 15241d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1525a86c6181SAlex Tomas struct ext4_ext_path *path) 1526a86c6181SAlex Tomas { 1527a86c6181SAlex Tomas struct ext4_extent_header *eh; 1528a86c6181SAlex Tomas int depth = ext_depth(inode); 1529a86c6181SAlex Tomas struct ext4_extent *ex; 1530a86c6181SAlex Tomas __le32 border; 1531a86c6181SAlex Tomas int k, err = 0; 1532a86c6181SAlex Tomas 1533a86c6181SAlex Tomas eh = path[depth].p_hdr; 1534a86c6181SAlex Tomas ex = path[depth].p_ext; 1535273df556SFrank Mayhar 1536273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) { 1537273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1538273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh); 1539273df556SFrank Mayhar return -EIO; 1540273df556SFrank Mayhar } 1541a86c6181SAlex Tomas 1542a86c6181SAlex Tomas if (depth == 0) { 1543a86c6181SAlex Tomas /* there is no tree at all */ 1544a86c6181SAlex Tomas return 0; 1545a86c6181SAlex Tomas } 1546a86c6181SAlex Tomas 1547a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) { 1548a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */ 1549a86c6181SAlex Tomas return 0; 1550a86c6181SAlex Tomas } 1551a86c6181SAlex Tomas 1552a86c6181SAlex Tomas /* 1553d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one 1554a86c6181SAlex Tomas */ 1555a86c6181SAlex Tomas k = depth - 1; 1556a86c6181SAlex Tomas border = path[depth].p_ext->ee_block; 15577e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 15587e028976SAvantika Mathur if (err) 1559a86c6181SAlex Tomas return err; 1560a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 15617e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 15627e028976SAvantika Mathur if (err) 1563a86c6181SAlex Tomas return err; 1564a86c6181SAlex Tomas 1565a86c6181SAlex Tomas while (k--) { 1566a86c6181SAlex Tomas /* change all left-side indexes */ 1567a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1568a86c6181SAlex Tomas break; 15697e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 15707e028976SAvantika Mathur if (err) 1571a86c6181SAlex Tomas break; 1572a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 15737e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 15747e028976SAvantika Mathur if (err) 1575a86c6181SAlex Tomas break; 1576a86c6181SAlex Tomas } 1577a86c6181SAlex Tomas 1578a86c6181SAlex Tomas return err; 1579a86c6181SAlex Tomas } 1580a86c6181SAlex Tomas 1581748de673SAkira Fujita int 1582a86c6181SAlex Tomas ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, 1583a86c6181SAlex Tomas struct ext4_extent *ex2) 1584a86c6181SAlex Tomas { 1585749269faSAmit Arora unsigned short ext1_ee_len, ext2_ee_len, max_len; 1586a2df2a63SAmit Arora 1587a2df2a63SAmit Arora /* 1588a2df2a63SAmit Arora * Make sure that either both extents are uninitialized, or 1589a2df2a63SAmit Arora * both are _not_. 
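 * A merged extent can only carry a single uninitialized bit, so an
 * uninitialized extent is never combined with an initialized one.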
1590a2df2a63SAmit Arora */ 1591a2df2a63SAmit Arora if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) 1592a2df2a63SAmit Arora return 0; 1593a2df2a63SAmit Arora 1594749269faSAmit Arora if (ext4_ext_is_uninitialized(ex1)) 1595749269faSAmit Arora max_len = EXT_UNINIT_MAX_LEN; 1596749269faSAmit Arora else 1597749269faSAmit Arora max_len = EXT_INIT_MAX_LEN; 1598749269faSAmit Arora 1599a2df2a63SAmit Arora ext1_ee_len = ext4_ext_get_actual_len(ex1); 1600a2df2a63SAmit Arora ext2_ee_len = ext4_ext_get_actual_len(ex2); 1601a2df2a63SAmit Arora 1602a2df2a63SAmit Arora if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 160363f57933SAndrew Morton le32_to_cpu(ex2->ee_block)) 1604a86c6181SAlex Tomas return 0; 1605a86c6181SAlex Tomas 1606471d4011SSuparna Bhattacharya /* 1607471d4011SSuparna Bhattacharya * To allow future support for preallocated extents to be added 1608471d4011SSuparna Bhattacharya * as an RO_COMPAT feature, refuse to merge to extents if 1609d0d856e8SRandy Dunlap * this can result in the top bit of ee_len being set. 1610471d4011SSuparna Bhattacharya */ 1611749269faSAmit Arora if (ext1_ee_len + ext2_ee_len > max_len) 1612471d4011SSuparna Bhattacharya return 0; 1613bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 1614b939e376SAneesh Kumar K.V if (ext1_ee_len >= 4) 1615a86c6181SAlex Tomas return 0; 1616a86c6181SAlex Tomas #endif 1617a86c6181SAlex Tomas 1618bf89d16fSTheodore Ts'o if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1619a86c6181SAlex Tomas return 1; 1620a86c6181SAlex Tomas return 0; 1621a86c6181SAlex Tomas } 1622a86c6181SAlex Tomas 1623a86c6181SAlex Tomas /* 162456055d3aSAmit Arora * This function tries to merge the "ex" extent to the next extent in the tree. 162556055d3aSAmit Arora * It always tries to merge towards right. If you want to merge towards 162656055d3aSAmit Arora * left, pass "ex - 1" as argument instead of "ex". 162756055d3aSAmit Arora * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 162856055d3aSAmit Arora * 1 if they got merged. 162956055d3aSAmit Arora */ 1630197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode, 163156055d3aSAmit Arora struct ext4_ext_path *path, 163256055d3aSAmit Arora struct ext4_extent *ex) 163356055d3aSAmit Arora { 163456055d3aSAmit Arora struct ext4_extent_header *eh; 163556055d3aSAmit Arora unsigned int depth, len; 163656055d3aSAmit Arora int merge_done = 0; 163756055d3aSAmit Arora int uninitialized = 0; 163856055d3aSAmit Arora 163956055d3aSAmit Arora depth = ext_depth(inode); 164056055d3aSAmit Arora BUG_ON(path[depth].p_hdr == NULL); 164156055d3aSAmit Arora eh = path[depth].p_hdr; 164256055d3aSAmit Arora 164356055d3aSAmit Arora while (ex < EXT_LAST_EXTENT(eh)) { 164456055d3aSAmit Arora if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 164556055d3aSAmit Arora break; 164656055d3aSAmit Arora /* merge with next extent! 
*/ 164756055d3aSAmit Arora if (ext4_ext_is_uninitialized(ex)) 164856055d3aSAmit Arora uninitialized = 1; 164956055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 165056055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1)); 165156055d3aSAmit Arora if (uninitialized) 165256055d3aSAmit Arora ext4_ext_mark_uninitialized(ex); 165356055d3aSAmit Arora 165456055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) { 165556055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1) 165656055d3aSAmit Arora * sizeof(struct ext4_extent); 165756055d3aSAmit Arora memmove(ex + 1, ex + 2, len); 165856055d3aSAmit Arora } 1659e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1); 166056055d3aSAmit Arora merge_done = 1; 166156055d3aSAmit Arora WARN_ON(eh->eh_entries == 0); 166256055d3aSAmit Arora if (!eh->eh_entries) 166324676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 166456055d3aSAmit Arora } 166556055d3aSAmit Arora 166656055d3aSAmit Arora return merge_done; 166756055d3aSAmit Arora } 166856055d3aSAmit Arora 166956055d3aSAmit Arora /* 1670ecb94f5fSTheodore Ts'o * This function does a very simple check to see if we can collapse 1671ecb94f5fSTheodore Ts'o * an extent tree with a single extent tree leaf block into the inode. 1672ecb94f5fSTheodore Ts'o */ 1673ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle, 1674ecb94f5fSTheodore Ts'o struct inode *inode, 1675ecb94f5fSTheodore Ts'o struct ext4_ext_path *path) 1676ecb94f5fSTheodore Ts'o { 1677ecb94f5fSTheodore Ts'o size_t s; 1678ecb94f5fSTheodore Ts'o unsigned max_root = ext4_ext_space_root(inode, 0); 1679ecb94f5fSTheodore Ts'o ext4_fsblk_t blk; 1680ecb94f5fSTheodore Ts'o 1681ecb94f5fSTheodore Ts'o if ((path[0].p_depth != 1) || 1682ecb94f5fSTheodore Ts'o (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || 1683ecb94f5fSTheodore Ts'o (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) 1684ecb94f5fSTheodore Ts'o return; 1685ecb94f5fSTheodore Ts'o 1686ecb94f5fSTheodore Ts'o /* 1687ecb94f5fSTheodore Ts'o * We need to modify the block allocation bitmap and the block 1688ecb94f5fSTheodore Ts'o * group descriptor to release the extent tree block. If we 1689ecb94f5fSTheodore Ts'o * can't get the journal credits, give up. 
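 *
 * That is why ext4_journal_extend() below asks for exactly two extra
 * credits: one for the block bitmap and one for the group descriptor.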
1690ecb94f5fSTheodore Ts'o */ 1691ecb94f5fSTheodore Ts'o if (ext4_journal_extend(handle, 2)) 1692ecb94f5fSTheodore Ts'o return; 1693ecb94f5fSTheodore Ts'o 1694ecb94f5fSTheodore Ts'o /* 1695ecb94f5fSTheodore Ts'o * Copy the extent data up to the inode 1696ecb94f5fSTheodore Ts'o */ 1697ecb94f5fSTheodore Ts'o blk = ext4_idx_pblock(path[0].p_idx); 1698ecb94f5fSTheodore Ts'o s = le16_to_cpu(path[1].p_hdr->eh_entries) * 1699ecb94f5fSTheodore Ts'o sizeof(struct ext4_extent_idx); 1700ecb94f5fSTheodore Ts'o s += sizeof(struct ext4_extent_header); 1701ecb94f5fSTheodore Ts'o 1702ecb94f5fSTheodore Ts'o memcpy(path[0].p_hdr, path[1].p_hdr, s); 1703ecb94f5fSTheodore Ts'o path[0].p_depth = 0; 1704ecb94f5fSTheodore Ts'o path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + 1705ecb94f5fSTheodore Ts'o (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); 1706ecb94f5fSTheodore Ts'o path[0].p_hdr->eh_max = cpu_to_le16(max_root); 1707ecb94f5fSTheodore Ts'o 1708ecb94f5fSTheodore Ts'o brelse(path[1].p_bh); 1709ecb94f5fSTheodore Ts'o ext4_free_blocks(handle, inode, NULL, blk, 1, 1710ecb94f5fSTheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 1711ecb94f5fSTheodore Ts'o } 1712ecb94f5fSTheodore Ts'o 1713ecb94f5fSTheodore Ts'o /* 1714197217a5SYongqiang Yang * This function tries to merge the @ex extent to neighbours in the tree. 1715197217a5SYongqiang Yang * return 1 if merge left else 0. 1716197217a5SYongqiang Yang */ 1717ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle, 1718ecb94f5fSTheodore Ts'o struct inode *inode, 1719197217a5SYongqiang Yang struct ext4_ext_path *path, 1720197217a5SYongqiang Yang struct ext4_extent *ex) { 1721197217a5SYongqiang Yang struct ext4_extent_header *eh; 1722197217a5SYongqiang Yang unsigned int depth; 1723197217a5SYongqiang Yang int merge_done = 0; 1724197217a5SYongqiang Yang 1725197217a5SYongqiang Yang depth = ext_depth(inode); 1726197217a5SYongqiang Yang BUG_ON(path[depth].p_hdr == NULL); 1727197217a5SYongqiang Yang eh = path[depth].p_hdr; 1728197217a5SYongqiang Yang 1729197217a5SYongqiang Yang if (ex > EXT_FIRST_EXTENT(eh)) 1730197217a5SYongqiang Yang merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1731197217a5SYongqiang Yang 1732197217a5SYongqiang Yang if (!merge_done) 1733ecb94f5fSTheodore Ts'o (void) ext4_ext_try_to_merge_right(inode, path, ex); 1734197217a5SYongqiang Yang 1735ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge_up(handle, inode, path); 1736197217a5SYongqiang Yang } 1737197217a5SYongqiang Yang 1738197217a5SYongqiang Yang /* 173925d14f98SAmit Arora * check if a portion of the "newext" extent overlaps with an 174025d14f98SAmit Arora * existing extent. 174125d14f98SAmit Arora * 174225d14f98SAmit Arora * If there is an overlap discovered, it updates the length of the newext 174325d14f98SAmit Arora * such that there will be no overlap, and then returns 1. 174425d14f98SAmit Arora * If there is no overlap found, it returns 0. 
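 *
 * Worked example (assuming a cluster ratio of 1): if newext starts at
 * logical block 100 with length 50 and an existing extent begins at
 * block 120, then b1 + len1 (150) exceeds b2 (120), so newext->ee_len is
 * trimmed to 20 blocks (covering 100..119) and 1 is returned.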
174525d14f98SAmit Arora */ 17464d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 17474d33b1efSTheodore Ts'o struct inode *inode, 174825d14f98SAmit Arora struct ext4_extent *newext, 174925d14f98SAmit Arora struct ext4_ext_path *path) 175025d14f98SAmit Arora { 1751725d26d3SAneesh Kumar K.V ext4_lblk_t b1, b2; 175225d14f98SAmit Arora unsigned int depth, len1; 175325d14f98SAmit Arora unsigned int ret = 0; 175425d14f98SAmit Arora 175525d14f98SAmit Arora b1 = le32_to_cpu(newext->ee_block); 1756a2df2a63SAmit Arora len1 = ext4_ext_get_actual_len(newext); 175725d14f98SAmit Arora depth = ext_depth(inode); 175825d14f98SAmit Arora if (!path[depth].p_ext) 175925d14f98SAmit Arora goto out; 176025d14f98SAmit Arora b2 = le32_to_cpu(path[depth].p_ext->ee_block); 17614d33b1efSTheodore Ts'o b2 &= ~(sbi->s_cluster_ratio - 1); 176225d14f98SAmit Arora 176325d14f98SAmit Arora /* 176425d14f98SAmit Arora * get the next allocated block if the extent in the path 176525d14f98SAmit Arora * is before the requested block(s) 176625d14f98SAmit Arora */ 176725d14f98SAmit Arora if (b2 < b1) { 176825d14f98SAmit Arora b2 = ext4_ext_next_allocated_block(path); 1769f17722f9SLukas Czerner if (b2 == EXT_MAX_BLOCKS) 177025d14f98SAmit Arora goto out; 17714d33b1efSTheodore Ts'o b2 &= ~(sbi->s_cluster_ratio - 1); 177225d14f98SAmit Arora } 177325d14f98SAmit Arora 1774725d26d3SAneesh Kumar K.V /* check for wrap through zero on extent logical start block*/ 177525d14f98SAmit Arora if (b1 + len1 < b1) { 1776f17722f9SLukas Czerner len1 = EXT_MAX_BLOCKS - b1; 177725d14f98SAmit Arora newext->ee_len = cpu_to_le16(len1); 177825d14f98SAmit Arora ret = 1; 177925d14f98SAmit Arora } 178025d14f98SAmit Arora 178125d14f98SAmit Arora /* check for overlap */ 178225d14f98SAmit Arora if (b1 + len1 > b2) { 178325d14f98SAmit Arora newext->ee_len = cpu_to_le16(b2 - b1); 178425d14f98SAmit Arora ret = 1; 178525d14f98SAmit Arora } 178625d14f98SAmit Arora out: 178725d14f98SAmit Arora return ret; 178825d14f98SAmit Arora } 178925d14f98SAmit Arora 179025d14f98SAmit Arora /* 1791d0d856e8SRandy Dunlap * ext4_ext_insert_extent: 1792d0d856e8SRandy Dunlap * tries to merge requsted extent into the existing extent or 1793d0d856e8SRandy Dunlap * inserts requested extent as new one into the tree, 1794d0d856e8SRandy Dunlap * creating new leaf in the no-space case. 
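 *
 * The fast path simply extends the extent found in @path when
 * ext4_can_extents_be_merged() allows it (and EXT4_GET_BLOCKS_PRE_IO is
 * not set); otherwise a leaf with free space is chosen (the current one,
 * the next one, or a freshly created one), the new extent is inserted in
 * logical-block order, neighbours are merged and the parent indexes are
 * corrected.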
1795a86c6181SAlex Tomas */ 1796a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1797a86c6181SAlex Tomas struct ext4_ext_path *path, 17980031462bSMingming Cao struct ext4_extent *newext, int flag) 1799a86c6181SAlex Tomas { 1800a86c6181SAlex Tomas struct ext4_extent_header *eh; 1801a86c6181SAlex Tomas struct ext4_extent *ex, *fex; 1802a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */ 1803a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL; 1804725d26d3SAneesh Kumar K.V int depth, len, err; 1805725d26d3SAneesh Kumar K.V ext4_lblk_t next; 1806a2df2a63SAmit Arora unsigned uninitialized = 0; 180755f020dbSAllison Henderson int flags = 0; 1808a86c6181SAlex Tomas 1809273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1810273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1811273df556SFrank Mayhar return -EIO; 1812273df556SFrank Mayhar } 1813a86c6181SAlex Tomas depth = ext_depth(inode); 1814a86c6181SAlex Tomas ex = path[depth].p_ext; 1815273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1816273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1817273df556SFrank Mayhar return -EIO; 1818273df556SFrank Mayhar } 1819a86c6181SAlex Tomas 1820a86c6181SAlex Tomas /* try to insert block into found extent and return */ 1821744692dcSJiaying Zhang if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) 18220031462bSMingming Cao && ext4_can_extents_be_merged(inode, ex, newext)) { 182332de6756SYongqiang Yang ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n", 1824553f9008SMingming ext4_ext_is_uninitialized(newext), 1825a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 1826a86c6181SAlex Tomas le32_to_cpu(ex->ee_block), 1827553f9008SMingming ext4_ext_is_uninitialized(ex), 1828bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), 1829bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 18307e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 18317e028976SAvantika Mathur if (err) 1832a86c6181SAlex Tomas return err; 1833a2df2a63SAmit Arora 1834a2df2a63SAmit Arora /* 1835a2df2a63SAmit Arora * ext4_can_extents_be_merged should have checked that either 1836a2df2a63SAmit Arora * both extents are uninitialized, or both aren't. Thus we 1837a2df2a63SAmit Arora * need to check only one of them here. 1838a2df2a63SAmit Arora */ 1839a2df2a63SAmit Arora if (ext4_ext_is_uninitialized(ex)) 1840a2df2a63SAmit Arora uninitialized = 1; 1841a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1842a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext)); 1843a2df2a63SAmit Arora if (uninitialized) 1844a2df2a63SAmit Arora ext4_ext_mark_uninitialized(ex); 1845a86c6181SAlex Tomas eh = path[depth].p_hdr; 1846a86c6181SAlex Tomas nearex = ex; 1847a86c6181SAlex Tomas goto merge; 1848a86c6181SAlex Tomas } 1849a86c6181SAlex Tomas 1850a86c6181SAlex Tomas depth = ext_depth(inode); 1851a86c6181SAlex Tomas eh = path[depth].p_hdr; 1852a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 1853a86c6181SAlex Tomas goto has_space; 1854a86c6181SAlex Tomas 1855a86c6181SAlex Tomas /* probably next leaf has space for us? 
*/ 1856a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh); 1857598dbdf2SRobin Dong next = EXT_MAX_BLOCKS; 1858598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 18595718789dSRobin Dong next = ext4_ext_next_leaf_block(path); 1860598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) { 186132de6756SYongqiang Yang ext_debug("next leaf block - %u\n", next); 1862a86c6181SAlex Tomas BUG_ON(npath != NULL); 1863a86c6181SAlex Tomas npath = ext4_ext_find_extent(inode, next, NULL); 1864a86c6181SAlex Tomas if (IS_ERR(npath)) 1865a86c6181SAlex Tomas return PTR_ERR(npath); 1866a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth); 1867a86c6181SAlex Tomas eh = npath[depth].p_hdr; 1868a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 186925985edcSLucas De Marchi ext_debug("next leaf isn't full(%d)\n", 1870a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries)); 1871a86c6181SAlex Tomas path = npath; 1872ffb505ffSRobin Dong goto has_space; 1873a86c6181SAlex Tomas } 1874a86c6181SAlex Tomas ext_debug("next leaf has no free space(%d,%d)\n", 1875a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 1876a86c6181SAlex Tomas } 1877a86c6181SAlex Tomas 1878a86c6181SAlex Tomas /* 1879d0d856e8SRandy Dunlap * There is no free space in the found leaf. 1880d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree. 1881a86c6181SAlex Tomas */ 188255f020dbSAllison Henderson if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) 188355f020dbSAllison Henderson flags = EXT4_MB_USE_ROOT_BLOCKS; 188455f020dbSAllison Henderson err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext); 1885a86c6181SAlex Tomas if (err) 1886a86c6181SAlex Tomas goto cleanup; 1887a86c6181SAlex Tomas depth = ext_depth(inode); 1888a86c6181SAlex Tomas eh = path[depth].p_hdr; 1889a86c6181SAlex Tomas 1890a86c6181SAlex Tomas has_space: 1891a86c6181SAlex Tomas nearex = path[depth].p_ext; 1892a86c6181SAlex Tomas 18937e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 18947e028976SAvantika Mathur if (err) 1895a86c6181SAlex Tomas goto cleanup; 1896a86c6181SAlex Tomas 1897a86c6181SAlex Tomas if (!nearex) { 1898a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */ 189932de6756SYongqiang Yang ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", 1900a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 1901bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 1902553f9008SMingming ext4_ext_is_uninitialized(newext), 1903a2df2a63SAmit Arora ext4_ext_get_actual_len(newext)); 190480e675f9SEric Gouriou nearex = EXT_FIRST_EXTENT(eh); 1905a86c6181SAlex Tomas } else { 190680e675f9SEric Gouriou if (le32_to_cpu(newext->ee_block) 190780e675f9SEric Gouriou > le32_to_cpu(nearex->ee_block)) { 190880e675f9SEric Gouriou /* Insert after */ 190932de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d before: " 191032de6756SYongqiang Yang "nearest %p\n", 1911a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 1912bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 1913553f9008SMingming ext4_ext_is_uninitialized(newext), 1914a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 191580e675f9SEric Gouriou nearex); 191680e675f9SEric Gouriou nearex++; 191780e675f9SEric Gouriou } else { 191880e675f9SEric Gouriou /* Insert before */ 191980e675f9SEric Gouriou BUG_ON(newext->ee_block == nearex->ee_block); 192032de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d after: " 192132de6756SYongqiang Yang "nearest %p\n", 192280e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 
192380e675f9SEric Gouriou ext4_ext_pblock(newext), 192480e675f9SEric Gouriou ext4_ext_is_uninitialized(newext), 192580e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 192680e675f9SEric Gouriou nearex); 192780e675f9SEric Gouriou } 192880e675f9SEric Gouriou len = EXT_LAST_EXTENT(eh) - nearex + 1; 192980e675f9SEric Gouriou if (len > 0) { 193032de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d: " 193180e675f9SEric Gouriou "move %d extents from 0x%p to 0x%p\n", 193280e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 193380e675f9SEric Gouriou ext4_ext_pblock(newext), 193480e675f9SEric Gouriou ext4_ext_is_uninitialized(newext), 193580e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 193680e675f9SEric Gouriou len, nearex, nearex + 1); 193780e675f9SEric Gouriou memmove(nearex + 1, nearex, 193880e675f9SEric Gouriou len * sizeof(struct ext4_extent)); 193980e675f9SEric Gouriou } 1940a86c6181SAlex Tomas } 1941a86c6181SAlex Tomas 1942e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1); 194380e675f9SEric Gouriou path[depth].p_ext = nearex; 1944a86c6181SAlex Tomas nearex->ee_block = newext->ee_block; 1945bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 1946a86c6181SAlex Tomas nearex->ee_len = newext->ee_len; 1947a86c6181SAlex Tomas 1948a86c6181SAlex Tomas merge: 1949e7bcf823SHaiboLiu /* try to merge extents */ 1950744692dcSJiaying Zhang if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) 1951ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, nearex); 1952a86c6181SAlex Tomas 1953a86c6181SAlex Tomas 1954a86c6181SAlex Tomas /* time to correct all indexes above */ 1955a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 1956a86c6181SAlex Tomas if (err) 1957a86c6181SAlex Tomas goto cleanup; 1958a86c6181SAlex Tomas 1959ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 1960a86c6181SAlex Tomas 1961a86c6181SAlex Tomas cleanup: 1962a86c6181SAlex Tomas if (npath) { 1963a86c6181SAlex Tomas ext4_ext_drop_refs(npath); 1964a86c6181SAlex Tomas kfree(npath); 1965a86c6181SAlex Tomas } 1966a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 1967a86c6181SAlex Tomas return err; 1968a86c6181SAlex Tomas } 1969a86c6181SAlex Tomas 197091dd8c11SLukas Czerner static int ext4_fill_fiemap_extents(struct inode *inode, 197191dd8c11SLukas Czerner ext4_lblk_t block, ext4_lblk_t num, 197291dd8c11SLukas Czerner struct fiemap_extent_info *fieinfo) 19736873fa0dSEric Sandeen { 19746873fa0dSEric Sandeen struct ext4_ext_path *path = NULL; 197506348679SLukas Czerner struct ext4_ext_cache newex; 19766873fa0dSEric Sandeen struct ext4_extent *ex; 197791dd8c11SLukas Czerner ext4_lblk_t next, next_del, start = 0, end = 0; 19786873fa0dSEric Sandeen ext4_lblk_t last = block + num; 197991dd8c11SLukas Czerner int exists, depth = 0, err = 0; 198091dd8c11SLukas Czerner unsigned int flags = 0; 198191dd8c11SLukas Czerner unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 19826873fa0dSEric Sandeen 1983f17722f9SLukas Czerner while (block < last && block != EXT_MAX_BLOCKS) { 19846873fa0dSEric Sandeen num = last - block; 19856873fa0dSEric Sandeen /* find extent for this block */ 1986fab3a549STheodore Ts'o down_read(&EXT4_I(inode)->i_data_sem); 198791dd8c11SLukas Czerner 198891dd8c11SLukas Czerner if (path && ext_depth(inode) != depth) { 198991dd8c11SLukas Czerner /* depth was changed. 
we have to realloc path */ 199091dd8c11SLukas Czerner kfree(path); 199191dd8c11SLukas Czerner path = NULL; 199291dd8c11SLukas Czerner } 199391dd8c11SLukas Czerner 19946873fa0dSEric Sandeen path = ext4_ext_find_extent(inode, block, path); 19956873fa0dSEric Sandeen if (IS_ERR(path)) { 199691dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 19976873fa0dSEric Sandeen err = PTR_ERR(path); 19986873fa0dSEric Sandeen path = NULL; 19996873fa0dSEric Sandeen break; 20006873fa0dSEric Sandeen } 20016873fa0dSEric Sandeen 20026873fa0dSEric Sandeen depth = ext_depth(inode); 2003273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 200491dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 2005273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2006273df556SFrank Mayhar err = -EIO; 2007273df556SFrank Mayhar break; 2008273df556SFrank Mayhar } 20096873fa0dSEric Sandeen ex = path[depth].p_ext; 20106873fa0dSEric Sandeen next = ext4_ext_next_allocated_block(path); 201191dd8c11SLukas Czerner ext4_ext_drop_refs(path); 20126873fa0dSEric Sandeen 201391dd8c11SLukas Czerner flags = 0; 20146873fa0dSEric Sandeen exists = 0; 20156873fa0dSEric Sandeen if (!ex) { 20166873fa0dSEric Sandeen /* there is no extent yet, so try to allocate 20176873fa0dSEric Sandeen * all requested space */ 20186873fa0dSEric Sandeen start = block; 20196873fa0dSEric Sandeen end = block + num; 20206873fa0dSEric Sandeen } else if (le32_to_cpu(ex->ee_block) > block) { 20216873fa0dSEric Sandeen /* need to allocate space before found extent */ 20226873fa0dSEric Sandeen start = block; 20236873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block); 20246873fa0dSEric Sandeen if (block + num < end) 20256873fa0dSEric Sandeen end = block + num; 20266873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block) 20276873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex)) { 20286873fa0dSEric Sandeen /* need to allocate space after found extent */ 20296873fa0dSEric Sandeen start = block; 20306873fa0dSEric Sandeen end = block + num; 20316873fa0dSEric Sandeen if (end >= next) 20326873fa0dSEric Sandeen end = next; 20336873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block)) { 20346873fa0dSEric Sandeen /* 20356873fa0dSEric Sandeen * some part of requested space is covered 20366873fa0dSEric Sandeen * by found extent 20376873fa0dSEric Sandeen */ 20386873fa0dSEric Sandeen start = block; 20396873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block) 20406873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex); 20416873fa0dSEric Sandeen if (block + num < end) 20426873fa0dSEric Sandeen end = block + num; 20436873fa0dSEric Sandeen exists = 1; 20446873fa0dSEric Sandeen } else { 20456873fa0dSEric Sandeen BUG(); 20466873fa0dSEric Sandeen } 20476873fa0dSEric Sandeen BUG_ON(end <= start); 20486873fa0dSEric Sandeen 20496873fa0dSEric Sandeen if (!exists) { 205006348679SLukas Czerner newex.ec_block = start; 205106348679SLukas Czerner newex.ec_len = end - start; 205206348679SLukas Czerner newex.ec_start = 0; 20536873fa0dSEric Sandeen } else { 205406348679SLukas Czerner newex.ec_block = le32_to_cpu(ex->ee_block); 205506348679SLukas Czerner newex.ec_len = ext4_ext_get_actual_len(ex); 205606348679SLukas Czerner newex.ec_start = ext4_ext_pblock(ex); 205791dd8c11SLukas Czerner if (ext4_ext_is_uninitialized(ex)) 205891dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_UNWRITTEN; 20596873fa0dSEric Sandeen } 20606873fa0dSEric Sandeen 206191dd8c11SLukas Czerner /* 206206348679SLukas Czerner * Find delayed extent and update newex accordingly. 
We call 206306348679SLukas Czerner * it even in !exists case to find out whether newex is the 206491dd8c11SLukas Czerner * last existing extent or not. 206591dd8c11SLukas Czerner */ 206606348679SLukas Czerner next_del = ext4_find_delayed_extent(inode, &newex); 206791dd8c11SLukas Czerner if (!exists && next_del) { 206891dd8c11SLukas Czerner exists = 1; 206991dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_DELALLOC; 207091dd8c11SLukas Czerner } 207191dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 207291dd8c11SLukas Czerner 207306348679SLukas Czerner if (unlikely(newex.ec_len == 0)) { 207406348679SLukas Czerner EXT4_ERROR_INODE(inode, "newex.ec_len == 0"); 2075273df556SFrank Mayhar err = -EIO; 2076273df556SFrank Mayhar break; 2077273df556SFrank Mayhar } 20786873fa0dSEric Sandeen 2079f7fec032SZheng Liu /* 2080f7fec032SZheng Liu * This is possible iff next == next_del == EXT_MAX_BLOCKS. 2081f7fec032SZheng Liu * we need to check next == EXT_MAX_BLOCKS because it is 2082f7fec032SZheng Liu * possible that an extent is with unwritten and delayed 2083f7fec032SZheng Liu * status due to when an extent is delayed allocated and 2084f7fec032SZheng Liu * is allocated by fallocate status tree will track both of 2085f7fec032SZheng Liu * them in a extent. 2086f7fec032SZheng Liu * 2087f7fec032SZheng Liu * So we could return a unwritten and delayed extent, and 2088f7fec032SZheng Liu * its block is equal to 'next'. 2089f7fec032SZheng Liu */ 2090f7fec032SZheng Liu if (next == next_del && next == EXT_MAX_BLOCKS) { 209191dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_LAST; 209291dd8c11SLukas Czerner if (unlikely(next_del != EXT_MAX_BLOCKS || 209391dd8c11SLukas Czerner next != EXT_MAX_BLOCKS)) { 209491dd8c11SLukas Czerner EXT4_ERROR_INODE(inode, 209591dd8c11SLukas Czerner "next extent == %u, next " 209691dd8c11SLukas Czerner "delalloc extent = %u", 209791dd8c11SLukas Czerner next, next_del); 209891dd8c11SLukas Czerner err = -EIO; 209991dd8c11SLukas Czerner break; 210091dd8c11SLukas Czerner } 210191dd8c11SLukas Czerner } 210291dd8c11SLukas Czerner 210391dd8c11SLukas Czerner if (exists) { 210491dd8c11SLukas Czerner err = fiemap_fill_next_extent(fieinfo, 210506348679SLukas Czerner (__u64)newex.ec_block << blksize_bits, 210606348679SLukas Czerner (__u64)newex.ec_start << blksize_bits, 210706348679SLukas Czerner (__u64)newex.ec_len << blksize_bits, 210891dd8c11SLukas Czerner flags); 21096873fa0dSEric Sandeen if (err < 0) 21106873fa0dSEric Sandeen break; 211191dd8c11SLukas Czerner if (err == 1) { 21126873fa0dSEric Sandeen err = 0; 21136873fa0dSEric Sandeen break; 21146873fa0dSEric Sandeen } 21156873fa0dSEric Sandeen } 21166873fa0dSEric Sandeen 211706348679SLukas Czerner block = newex.ec_block + newex.ec_len; 21186873fa0dSEric Sandeen } 21196873fa0dSEric Sandeen 21206873fa0dSEric Sandeen if (path) { 21216873fa0dSEric Sandeen ext4_ext_drop_refs(path); 21226873fa0dSEric Sandeen kfree(path); 21236873fa0dSEric Sandeen } 21246873fa0dSEric Sandeen 21256873fa0dSEric Sandeen return err; 21266873fa0dSEric Sandeen } 21276873fa0dSEric Sandeen 212809b88252SAvantika Mathur static void 2129725d26d3SAneesh Kumar K.V ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, 2130b05e6ae5STheodore Ts'o __u32 len, ext4_fsblk_t start) 2131a86c6181SAlex Tomas { 2132a86c6181SAlex Tomas struct ext4_ext_cache *cex; 2133a86c6181SAlex Tomas BUG_ON(len == 0); 21342ec0ae3aSTheodore Ts'o spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 2135d8990240SAditya Kali trace_ext4_ext_put_in_cache(inode, block, len, start); 2136a86c6181SAlex Tomas 
cex = &EXT4_I(inode)->i_cached_extent; 2137a86c6181SAlex Tomas cex->ec_block = block; 2138a86c6181SAlex Tomas cex->ec_len = len; 2139a86c6181SAlex Tomas cex->ec_start = start; 21402ec0ae3aSTheodore Ts'o spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 2141a86c6181SAlex Tomas } 2142a86c6181SAlex Tomas 2143a86c6181SAlex Tomas /* 2144d0d856e8SRandy Dunlap * ext4_ext_put_gap_in_cache: 2145d0d856e8SRandy Dunlap * calculate boundaries of the gap that the requested block fits into 2146a86c6181SAlex Tomas * and cache this gap 2147a86c6181SAlex Tomas */ 214809b88252SAvantika Mathur static void 2149a86c6181SAlex Tomas ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, 2150725d26d3SAneesh Kumar K.V ext4_lblk_t block) 2151a86c6181SAlex Tomas { 2152a86c6181SAlex Tomas int depth = ext_depth(inode); 2153725d26d3SAneesh Kumar K.V unsigned long len; 2154725d26d3SAneesh Kumar K.V ext4_lblk_t lblock; 2155a86c6181SAlex Tomas struct ext4_extent *ex; 2156a86c6181SAlex Tomas 2157a86c6181SAlex Tomas ex = path[depth].p_ext; 2158a86c6181SAlex Tomas if (ex == NULL) { 2159a86c6181SAlex Tomas /* there is no extent yet, so gap is [0;-] */ 2160a86c6181SAlex Tomas lblock = 0; 2161f17722f9SLukas Czerner len = EXT_MAX_BLOCKS; 2162a86c6181SAlex Tomas ext_debug("cache gap(whole file):"); 2163a86c6181SAlex Tomas } else if (block < le32_to_cpu(ex->ee_block)) { 2164a86c6181SAlex Tomas lblock = block; 2165a86c6181SAlex Tomas len = le32_to_cpu(ex->ee_block) - block; 2166bba90743SEric Sandeen ext_debug("cache gap(before): %u [%u:%u]", 2167bba90743SEric Sandeen block, 2168bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2169bba90743SEric Sandeen ext4_ext_get_actual_len(ex)); 2170*d100eef2SZheng Liu if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2171*d100eef2SZheng Liu ext4_es_insert_extent(inode, lblock, len, ~0, 2172*d100eef2SZheng Liu EXTENT_STATUS_HOLE); 2173a86c6181SAlex Tomas } else if (block >= le32_to_cpu(ex->ee_block) 2174a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex)) { 2175725d26d3SAneesh Kumar K.V ext4_lblk_t next; 2176a86c6181SAlex Tomas lblock = le32_to_cpu(ex->ee_block) 2177a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex); 2178725d26d3SAneesh Kumar K.V 2179725d26d3SAneesh Kumar K.V next = ext4_ext_next_allocated_block(path); 2180bba90743SEric Sandeen ext_debug("cache gap(after): [%u:%u] %u", 2181bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2182bba90743SEric Sandeen ext4_ext_get_actual_len(ex), 2183bba90743SEric Sandeen block); 2184725d26d3SAneesh Kumar K.V BUG_ON(next == lblock); 2185725d26d3SAneesh Kumar K.V len = next - lblock; 2186*d100eef2SZheng Liu if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2187*d100eef2SZheng Liu ext4_es_insert_extent(inode, lblock, len, ~0, 2188*d100eef2SZheng Liu EXTENT_STATUS_HOLE); 2189a86c6181SAlex Tomas } else { 2190a86c6181SAlex Tomas lblock = len = 0; 2191a86c6181SAlex Tomas BUG(); 2192a86c6181SAlex Tomas } 2193a86c6181SAlex Tomas 2194bba90743SEric Sandeen ext_debug(" -> %u:%lu\n", lblock, len); 2195b05e6ae5STheodore Ts'o ext4_ext_put_in_cache(inode, lblock, len, 0); 2196a86c6181SAlex Tomas } 2197a86c6181SAlex Tomas 2198b05e6ae5STheodore Ts'o /* 219963fedaf1SLukas Czerner * ext4_ext_in_cache() 2200a4bb6b64SAllison Henderson * Checks to see if the given block is in the cache. 2201a4bb6b64SAllison Henderson * If it is, the cached extent is stored in the given 220263fedaf1SLukas Czerner * cache extent pointer. 
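 * The single-entry cache lives in EXT4_I(inode)->i_cached_extent and is
 * protected by i_block_reservation_lock, which the lookup takes for the
 * duration of the copy.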
2203a4bb6b64SAllison Henderson * 2204a4bb6b64SAllison Henderson * @inode: The files inode 2205a4bb6b64SAllison Henderson * @block: The block to look for in the cache 2206a4bb6b64SAllison Henderson * @ex: Pointer where the cached extent will be stored 2207a4bb6b64SAllison Henderson * if it contains block 2208a4bb6b64SAllison Henderson * 2209b05e6ae5STheodore Ts'o * Return 0 if cache is invalid; 1 if the cache is valid 2210b05e6ae5STheodore Ts'o */ 221163fedaf1SLukas Czerner static int 221263fedaf1SLukas Czerner ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, 221363fedaf1SLukas Czerner struct ext4_extent *ex) 221463fedaf1SLukas Czerner { 2215a86c6181SAlex Tomas struct ext4_ext_cache *cex; 2216b05e6ae5STheodore Ts'o int ret = 0; 2217a86c6181SAlex Tomas 22182ec0ae3aSTheodore Ts'o /* 22192ec0ae3aSTheodore Ts'o * We borrow i_block_reservation_lock to protect i_cached_extent 22202ec0ae3aSTheodore Ts'o */ 22212ec0ae3aSTheodore Ts'o spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 2222a86c6181SAlex Tomas cex = &EXT4_I(inode)->i_cached_extent; 2223a86c6181SAlex Tomas 2224a86c6181SAlex Tomas /* has cache valid data? */ 2225b05e6ae5STheodore Ts'o if (cex->ec_len == 0) 22262ec0ae3aSTheodore Ts'o goto errout; 2227a86c6181SAlex Tomas 2228731eb1a0SAkinobu Mita if (in_range(block, cex->ec_block, cex->ec_len)) { 222963fedaf1SLukas Czerner ex->ee_block = cpu_to_le32(cex->ec_block); 223063fedaf1SLukas Czerner ext4_ext_store_pblock(ex, cex->ec_start); 223163fedaf1SLukas Czerner ex->ee_len = cpu_to_le16(cex->ec_len); 2232bba90743SEric Sandeen ext_debug("%u cached by %u:%u:%llu\n", 2233bba90743SEric Sandeen block, 2234bba90743SEric Sandeen cex->ec_block, cex->ec_len, cex->ec_start); 2235b05e6ae5STheodore Ts'o ret = 1; 2236a86c6181SAlex Tomas } 22372ec0ae3aSTheodore Ts'o errout: 2238d8990240SAditya Kali trace_ext4_ext_in_cache(inode, block, ret); 22392ec0ae3aSTheodore Ts'o spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 22402ec0ae3aSTheodore Ts'o return ret; 2241a86c6181SAlex Tomas } 2242a86c6181SAlex Tomas 2243a86c6181SAlex Tomas /* 2244d0d856e8SRandy Dunlap * ext4_ext_rm_idx: 2245d0d856e8SRandy Dunlap * removes index from the index block. 
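 *
 * The entry at the target level is cut out with memmove(), eh_entries is
 * decremented, the freed index block is released with
 * EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET, and ei_block in
 * the parent index entries is updated where the removal is visible at
 * the front of a node.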
2246a86c6181SAlex Tomas */ 22471d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2248c36575e6SForrest Liu struct ext4_ext_path *path, int depth) 2249a86c6181SAlex Tomas { 2250a86c6181SAlex Tomas int err; 2251f65e6fbaSAlex Tomas ext4_fsblk_t leaf; 2252a86c6181SAlex Tomas 2253a86c6181SAlex Tomas /* free index block */ 2254c36575e6SForrest Liu depth--; 2255c36575e6SForrest Liu path = path + depth; 2256bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx); 2257273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) { 2258273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2259273df556SFrank Mayhar return -EIO; 2260273df556SFrank Mayhar } 22617e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path); 22627e028976SAvantika Mathur if (err) 2263a86c6181SAlex Tomas return err; 22640e1147b0SRobin Dong 22650e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 22660e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 22670e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx); 22680e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len); 22690e1147b0SRobin Dong } 22700e1147b0SRobin Dong 2271e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1); 22727e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path); 22737e028976SAvantika Mathur if (err) 2274a86c6181SAlex Tomas return err; 22752ae02107SMingming Cao ext_debug("index is empty, remove it, free block %llu\n", leaf); 2276d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf); 2277d8990240SAditya Kali 22787dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1, 2279e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2280c36575e6SForrest Liu 2281c36575e6SForrest Liu while (--depth >= 0) { 2282c36575e6SForrest Liu if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2283c36575e6SForrest Liu break; 2284c36575e6SForrest Liu path--; 2285c36575e6SForrest Liu err = ext4_ext_get_access(handle, inode, path); 2286c36575e6SForrest Liu if (err) 2287c36575e6SForrest Liu break; 2288c36575e6SForrest Liu path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2289c36575e6SForrest Liu err = ext4_ext_dirty(handle, inode, path); 2290c36575e6SForrest Liu if (err) 2291c36575e6SForrest Liu break; 2292c36575e6SForrest Liu } 2293a86c6181SAlex Tomas return err; 2294a86c6181SAlex Tomas } 2295a86c6181SAlex Tomas 2296a86c6181SAlex Tomas /* 2297ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent: 2298ee12b630SMingming Cao * This routine returns max. credits that needed to insert an extent 2299ee12b630SMingming Cao * to the extent tree. 2300ee12b630SMingming Cao * When pass the actual path, the caller should calculate credits 2301ee12b630SMingming Cao * under i_data_sem. 2302a86c6181SAlex Tomas */ 2303525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2304a86c6181SAlex Tomas struct ext4_ext_path *path) 2305a86c6181SAlex Tomas { 2306a86c6181SAlex Tomas if (path) { 2307ee12b630SMingming Cao int depth = ext_depth(inode); 2308f3bd1f3fSMingming Cao int ret = 0; 2309ee12b630SMingming Cao 2310a86c6181SAlex Tomas /* probably there is space in leaf? 
*/ 2311a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2312ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2313ee12b630SMingming Cao 2314ee12b630SMingming Cao /* 2315ee12b630SMingming Cao * There are some space in the leaf tree, no 2316ee12b630SMingming Cao * need to account for leaf block credit 2317ee12b630SMingming Cao * 2318ee12b630SMingming Cao * bitmaps and block group descriptor blocks 2319df3ab170STao Ma * and other metadata blocks still need to be 2320ee12b630SMingming Cao * accounted. 2321ee12b630SMingming Cao */ 2322525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */ 2323ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 23245887e98bSAneesh Kumar K.V return ret; 2325ee12b630SMingming Cao } 2326ee12b630SMingming Cao } 2327ee12b630SMingming Cao 2328525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks); 2329a86c6181SAlex Tomas } 2330a86c6181SAlex Tomas 2331a86c6181SAlex Tomas /* 2332ee12b630SMingming Cao * How many index/leaf blocks need to change/allocate to modify nrblocks? 2333ee12b630SMingming Cao * 2334ee12b630SMingming Cao * if nrblocks are fit in a single extent (chunk flag is 1), then 2335ee12b630SMingming Cao * in the worse case, each tree level index/leaf need to be changed 2336ee12b630SMingming Cao * if the tree split due to insert a new extent, then the old tree 2337ee12b630SMingming Cao * index/leaf need to be updated too 2338ee12b630SMingming Cao * 2339ee12b630SMingming Cao * If the nrblocks are discontiguous, they could cause 2340ee12b630SMingming Cao * the whole tree split more than once, but this is really rare. 2341a86c6181SAlex Tomas */ 2342525f4ed8SMingming Cao int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 2343ee12b630SMingming Cao { 2344ee12b630SMingming Cao int index; 2345f19d5870STao Ma int depth; 2346f19d5870STao Ma 2347f19d5870STao Ma /* If we are converting the inline data, only one is needed here. */ 2348f19d5870STao Ma if (ext4_has_inline_data(inode)) 2349f19d5870STao Ma return 1; 2350f19d5870STao Ma 2351f19d5870STao Ma depth = ext_depth(inode); 2352a86c6181SAlex Tomas 2353ee12b630SMingming Cao if (chunk) 2354ee12b630SMingming Cao index = depth * 2; 2355ee12b630SMingming Cao else 2356ee12b630SMingming Cao index = depth * 3; 2357a86c6181SAlex Tomas 2358ee12b630SMingming Cao return index; 2359a86c6181SAlex Tomas } 2360a86c6181SAlex Tomas 2361a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2362a86c6181SAlex Tomas struct ext4_extent *ex, 23630aa06000STheodore Ts'o ext4_fsblk_t *partial_cluster, 2364725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to) 2365a86c6181SAlex Tomas { 23660aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2367a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex); 23680aa06000STheodore Ts'o ext4_fsblk_t pblk; 236918888cf0SAndrey Sidorov int flags = 0; 2370a86c6181SAlex Tomas 2371c9de560dSAlex Tomas if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 237218888cf0SAndrey Sidorov flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 237318888cf0SAndrey Sidorov else if (ext4_should_journal_data(inode)) 237418888cf0SAndrey Sidorov flags |= EXT4_FREE_BLOCKS_FORGET; 237518888cf0SAndrey Sidorov 23760aa06000STheodore Ts'o /* 23770aa06000STheodore Ts'o * For bigalloc file systems, we never free a partial cluster 23780aa06000STheodore Ts'o * at the beginning of the extent. 
Instead, we make a note 23790aa06000STheodore Ts'o * that we tried freeing the cluster, and check to see if we 23800aa06000STheodore Ts'o * need to free it on a subsequent call to ext4_remove_blocks, 23810aa06000STheodore Ts'o * or at the end of the ext4_truncate() operation. 23820aa06000STheodore Ts'o */ 23830aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 23840aa06000STheodore Ts'o 2385d8990240SAditya Kali trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); 23860aa06000STheodore Ts'o /* 23870aa06000STheodore Ts'o * If we have a partial cluster, and it's different from the 23880aa06000STheodore Ts'o * cluster of the last block, we need to explicitly free the 23890aa06000STheodore Ts'o * partial cluster here. 23900aa06000STheodore Ts'o */ 23910aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - 1; 23920aa06000STheodore Ts'o if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) { 23930aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 23940aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 23950aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 23960aa06000STheodore Ts'o *partial_cluster = 0; 23970aa06000STheodore Ts'o } 23980aa06000STheodore Ts'o 2399a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2400a86c6181SAlex Tomas { 2401a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2402a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock); 2403a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len; 2404a86c6181SAlex Tomas sbi->s_ext_extents++; 2405a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min) 2406a86c6181SAlex Tomas sbi->s_ext_min = ee_len; 2407a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max) 2408a86c6181SAlex Tomas sbi->s_ext_max = ee_len; 2409a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max) 2410a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode); 2411a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock); 2412a86c6181SAlex Tomas } 2413a86c6181SAlex Tomas #endif 2414a86c6181SAlex Tomas if (from >= le32_to_cpu(ex->ee_block) 2415a2df2a63SAmit Arora && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2416a86c6181SAlex Tomas /* tail removal */ 2417725d26d3SAneesh Kumar K.V ext4_lblk_t num; 2418725d26d3SAneesh Kumar K.V 2419a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from; 24200aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num; 24210aa06000STheodore Ts'o ext_debug("free last %u blocks starting %llu\n", num, pblk); 24220aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 24230aa06000STheodore Ts'o /* 24240aa06000STheodore Ts'o * If the block range to be freed didn't start at the 24250aa06000STheodore Ts'o * beginning of a cluster, and we removed the entire 24260aa06000STheodore Ts'o * extent, save the partial cluster here, since we 24270aa06000STheodore Ts'o * might need to delete if we determine that the 24280aa06000STheodore Ts'o * truncate operation has removed all of the blocks in 24290aa06000STheodore Ts'o * the cluster. 
24300aa06000STheodore Ts'o */ 24310aa06000STheodore Ts'o if (pblk & (sbi->s_cluster_ratio - 1) && 24320aa06000STheodore Ts'o (ee_len == num)) 24330aa06000STheodore Ts'o *partial_cluster = EXT4_B2C(sbi, pblk); 24340aa06000STheodore Ts'o else 24350aa06000STheodore Ts'o *partial_cluster = 0; 2436a86c6181SAlex Tomas } else if (from == le32_to_cpu(ex->ee_block) 2437a2df2a63SAmit Arora && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2438d583fb87SAllison Henderson /* head removal */ 2439d583fb87SAllison Henderson ext4_lblk_t num; 2440d583fb87SAllison Henderson ext4_fsblk_t start; 2441d583fb87SAllison Henderson 2442d583fb87SAllison Henderson num = to - from; 2443d583fb87SAllison Henderson start = ext4_ext_pblock(ex); 2444d583fb87SAllison Henderson 2445d583fb87SAllison Henderson ext_debug("free first %u blocks starting %llu\n", num, start); 2446ee90d57eSH Hartley Sweeten ext4_free_blocks(handle, inode, NULL, start, num, flags); 2447d583fb87SAllison Henderson 2448a86c6181SAlex Tomas } else { 2449725d26d3SAneesh Kumar K.V printk(KERN_INFO "strange request: removal(2) " 2450725d26d3SAneesh Kumar K.V "%u-%u from %u:%u\n", 2451a2df2a63SAmit Arora from, to, le32_to_cpu(ex->ee_block), ee_len); 2452a86c6181SAlex Tomas } 2453a86c6181SAlex Tomas return 0; 2454a86c6181SAlex Tomas } 2455a86c6181SAlex Tomas 2456d583fb87SAllison Henderson 2457d583fb87SAllison Henderson /* 2458d583fb87SAllison Henderson * ext4_ext_rm_leaf() Removes the extents associated with the 2459d583fb87SAllison Henderson * blocks appearing between "start" and "end", and splits the extents 2460d583fb87SAllison Henderson * if "start" and "end" appear in the same extent 2461d583fb87SAllison Henderson * 2462d583fb87SAllison Henderson * @handle: The journal handle 2463d583fb87SAllison Henderson * @inode: The files inode 2464d583fb87SAllison Henderson * @path: The path to the leaf 2465d583fb87SAllison Henderson * @start: The first block to remove 2466d583fb87SAllison Henderson * @end: The last block to remove 2467d583fb87SAllison Henderson */ 2468a86c6181SAlex Tomas static int 2469a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 24700aa06000STheodore Ts'o struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster, 24710aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end) 2472a86c6181SAlex Tomas { 24730aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2474a86c6181SAlex Tomas int err = 0, correct_index = 0; 2475a86c6181SAlex Tomas int depth = ext_depth(inode), credits; 2476a86c6181SAlex Tomas struct ext4_extent_header *eh; 2477750c9c47SDmitry Monakhov ext4_lblk_t a, b; 2478725d26d3SAneesh Kumar K.V unsigned num; 2479725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block; 2480a86c6181SAlex Tomas unsigned short ex_ee_len; 2481a2df2a63SAmit Arora unsigned uninitialized = 0; 2482a86c6181SAlex Tomas struct ext4_extent *ex; 2483a86c6181SAlex Tomas 2484c29c0ae7SAlex Tomas /* the header must be checked already in ext4_ext_remove_space() */ 24855f95d21fSLukas Czerner ext_debug("truncate since %u in leaf to %u\n", start, end); 2486a86c6181SAlex Tomas if (!path[depth].p_hdr) 2487a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2488a86c6181SAlex Tomas eh = path[depth].p_hdr; 2489273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 2490273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2491273df556SFrank Mayhar return -EIO; 2492273df556SFrank Mayhar } 2493a86c6181SAlex Tomas /* find where to start removing */ 2494a86c6181SAlex Tomas ex = 
EXT_LAST_EXTENT(eh); 2495a86c6181SAlex Tomas 2496a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2497a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2498a86c6181SAlex Tomas 2499d8990240SAditya Kali trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); 2500d8990240SAditya Kali 2501a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) && 2502a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) { 2503a41f2071SAneesh Kumar K.V 2504a41f2071SAneesh Kumar K.V if (ext4_ext_is_uninitialized(ex)) 2505a41f2071SAneesh Kumar K.V uninitialized = 1; 2506a41f2071SAneesh Kumar K.V else 2507a41f2071SAneesh Kumar K.V uninitialized = 0; 2508a41f2071SAneesh Kumar K.V 2509553f9008SMingming ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2510553f9008SMingming uninitialized, ex_ee_len); 2511a86c6181SAlex Tomas path[depth].p_ext = ex; 2512a86c6181SAlex Tomas 2513a86c6181SAlex Tomas a = ex_ee_block > start ? ex_ee_block : start; 2514d583fb87SAllison Henderson b = ex_ee_block+ex_ee_len - 1 < end ? 2515d583fb87SAllison Henderson ex_ee_block+ex_ee_len - 1 : end; 2516a86c6181SAlex Tomas 2517a86c6181SAlex Tomas ext_debug(" border %u:%u\n", a, b); 2518a86c6181SAlex Tomas 2519d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */ 25205f95d21fSLukas Czerner if (end < ex_ee_block) { 2521d583fb87SAllison Henderson ex--; 2522d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block); 2523d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex); 2524d583fb87SAllison Henderson continue; 2525750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) { 2526dc1841d6SLukas Czerner EXT4_ERROR_INODE(inode, 2527dc1841d6SLukas Czerner "can not handle truncate %u:%u " 2528dc1841d6SLukas Czerner "on extent %u:%u", 2529dc1841d6SLukas Czerner start, end, ex_ee_block, 2530dc1841d6SLukas Czerner ex_ee_block + ex_ee_len - 1); 2531d583fb87SAllison Henderson err = -EIO; 2532d583fb87SAllison Henderson goto out; 2533a86c6181SAlex Tomas } else if (a != ex_ee_block) { 2534a86c6181SAlex Tomas /* remove tail of the extent */ 2535750c9c47SDmitry Monakhov num = a - ex_ee_block; 2536a86c6181SAlex Tomas } else { 2537a86c6181SAlex Tomas /* remove whole extent: excellent! 
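 * (reached when a == ex_ee_block and b == ex_ee_block + ex_ee_len - 1,
 * so num becomes 0 and the now-empty slot is cleared further down)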
*/ 2538a86c6181SAlex Tomas num = 0; 2539d583fb87SAllison Henderson } 254034071da7STheodore Ts'o /* 254134071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group 254234071da7STheodore Ts'o * descriptor) for each block group; assume two block 254334071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for 254434071da7STheodore Ts'o * the worst case 254534071da7STheodore Ts'o */ 254634071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2547a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) { 2548a86c6181SAlex Tomas correct_index = 1; 2549a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1; 2550a86c6181SAlex Tomas } 25515aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2552a86c6181SAlex Tomas 2553487caeefSJan Kara err = ext4_ext_truncate_extend_restart(handle, inode, credits); 25549102e4faSShen Feng if (err) 2555a86c6181SAlex Tomas goto out; 2556a86c6181SAlex Tomas 2557a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth); 2558a86c6181SAlex Tomas if (err) 2559a86c6181SAlex Tomas goto out; 2560a86c6181SAlex Tomas 25610aa06000STheodore Ts'o err = ext4_remove_blocks(handle, inode, ex, partial_cluster, 25620aa06000STheodore Ts'o a, b); 2563a86c6181SAlex Tomas if (err) 2564a86c6181SAlex Tomas goto out; 2565a86c6181SAlex Tomas 2566750c9c47SDmitry Monakhov if (num == 0) 2567d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */ 2568f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0); 2569a86c6181SAlex Tomas 2570a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num); 2571749269faSAmit Arora /* 2572749269faSAmit Arora * Do not mark uninitialized if all the blocks in the 2573749269faSAmit Arora * extent have been removed. 2574749269faSAmit Arora */ 2575749269faSAmit Arora if (uninitialized && num) 2576a2df2a63SAmit Arora ext4_ext_mark_uninitialized(ex); 2577d583fb87SAllison Henderson /* 2578d583fb87SAllison Henderson * If the extent was completely released, 2579d583fb87SAllison Henderson * we need to remove it from the leaf 2580d583fb87SAllison Henderson */ 2581d583fb87SAllison Henderson if (num == 0) { 2582f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) { 2583d583fb87SAllison Henderson /* 2584d583fb87SAllison Henderson * For hole punching, we need to scoot all the 2585d583fb87SAllison Henderson * extents up when an extent is removed so that 2586d583fb87SAllison Henderson * we dont have blank extents in the middle 2587d583fb87SAllison Henderson */ 2588d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2589d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2590d583fb87SAllison Henderson 2591d583fb87SAllison Henderson /* Now get rid of the one at the end */ 2592d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0, 2593d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2594d583fb87SAllison Henderson } 2595d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1); 25960aa06000STheodore Ts'o } else 25970aa06000STheodore Ts'o *partial_cluster = 0; 2598d583fb87SAllison Henderson 2599750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth); 2600750c9c47SDmitry Monakhov if (err) 2601750c9c47SDmitry Monakhov goto out; 2602750c9c47SDmitry Monakhov 2603bf52c6f7SYongqiang Yang ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2604bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 2605a86c6181SAlex Tomas ex--; 2606a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2607a2df2a63SAmit Arora ex_ee_len = 
ext4_ext_get_actual_len(ex); 2608a86c6181SAlex Tomas } 2609a86c6181SAlex Tomas 2610a86c6181SAlex Tomas if (correct_index && eh->eh_entries) 2611a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2612a86c6181SAlex Tomas 26130aa06000STheodore Ts'o /* 26140aa06000STheodore Ts'o * If there is still a entry in the leaf node, check to see if 26150aa06000STheodore Ts'o * it references the partial cluster. This is the only place 26160aa06000STheodore Ts'o * where it could; if it doesn't, we can free the cluster. 26170aa06000STheodore Ts'o */ 26180aa06000STheodore Ts'o if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) && 26190aa06000STheodore Ts'o (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != 26200aa06000STheodore Ts'o *partial_cluster)) { 26210aa06000STheodore Ts'o int flags = EXT4_FREE_BLOCKS_FORGET; 26220aa06000STheodore Ts'o 26230aa06000STheodore Ts'o if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 26240aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_METADATA; 26250aa06000STheodore Ts'o 26260aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 26270aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 26280aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 26290aa06000STheodore Ts'o *partial_cluster = 0; 26300aa06000STheodore Ts'o } 26310aa06000STheodore Ts'o 2632a86c6181SAlex Tomas /* if this leaf is free, then we should 2633a86c6181SAlex Tomas * remove it from index block above */ 2634a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2635c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, depth); 2636a86c6181SAlex Tomas 2637a86c6181SAlex Tomas out: 2638a86c6181SAlex Tomas return err; 2639a86c6181SAlex Tomas } 2640a86c6181SAlex Tomas 2641a86c6181SAlex Tomas /* 2642d0d856e8SRandy Dunlap * ext4_ext_more_to_rm: 2643d0d856e8SRandy Dunlap * returns 1 if current index has to be freed (even partial) 2644a86c6181SAlex Tomas */ 264509b88252SAvantika Mathur static int 2646a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path) 2647a86c6181SAlex Tomas { 2648a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL); 2649a86c6181SAlex Tomas 2650a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2651a86c6181SAlex Tomas return 0; 2652a86c6181SAlex Tomas 2653a86c6181SAlex Tomas /* 2654d0d856e8SRandy Dunlap * if truncate on deeper level happened, it wasn't partial, 2655a86c6181SAlex Tomas * so we have to consider current index for truncation 2656a86c6181SAlex Tomas */ 2657a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2658a86c6181SAlex Tomas return 0; 2659a86c6181SAlex Tomas return 1; 2660a86c6181SAlex Tomas } 2661a86c6181SAlex Tomas 26625f95d21fSLukas Czerner static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 26635f95d21fSLukas Czerner ext4_lblk_t end) 2664a86c6181SAlex Tomas { 2665a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 2666a86c6181SAlex Tomas int depth = ext_depth(inode); 2667968dee77SAshish Sangwan struct ext4_ext_path *path = NULL; 26680aa06000STheodore Ts'o ext4_fsblk_t partial_cluster = 0; 2669a86c6181SAlex Tomas handle_t *handle; 26706f2080e6SDmitry Monakhov int i = 0, err = 0; 2671a86c6181SAlex Tomas 26725f95d21fSLukas Czerner ext_debug("truncate since %u to %u\n", start, end); 2673a86c6181SAlex Tomas 2674a86c6181SAlex Tomas /* probably first extent we're gonna free will be last in block */ 26759924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1); 2676a86c6181SAlex Tomas if 
(IS_ERR(handle)) 2677a86c6181SAlex Tomas return PTR_ERR(handle); 2678a86c6181SAlex Tomas 26790617b83fSDmitry Monakhov again: 2680a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 2681a86c6181SAlex Tomas 2682d8990240SAditya Kali trace_ext4_ext_remove_space(inode, start, depth); 2683d8990240SAditya Kali 2684a86c6181SAlex Tomas /* 26855f95d21fSLukas Czerner * Check if we are removing extents inside the extent tree. If that 26865f95d21fSLukas Czerner * is the case, we are going to punch a hole inside the extent tree 26875f95d21fSLukas Czerner * so we have to check whether we need to split the extent covering 26885f95d21fSLukas Czerner * the last block to remove so we can easily remove the part of it 26895f95d21fSLukas Czerner * in ext4_ext_rm_leaf(). 26905f95d21fSLukas Czerner */ 26915f95d21fSLukas Czerner if (end < EXT_MAX_BLOCKS - 1) { 26925f95d21fSLukas Czerner struct ext4_extent *ex; 26935f95d21fSLukas Czerner ext4_lblk_t ee_block; 26945f95d21fSLukas Czerner 26955f95d21fSLukas Czerner /* find extent for this block */ 26965f95d21fSLukas Czerner path = ext4_ext_find_extent(inode, end, NULL); 26975f95d21fSLukas Czerner if (IS_ERR(path)) { 26985f95d21fSLukas Czerner ext4_journal_stop(handle); 26995f95d21fSLukas Czerner return PTR_ERR(path); 27005f95d21fSLukas Czerner } 27015f95d21fSLukas Czerner depth = ext_depth(inode); 27026f2080e6SDmitry Monakhov /* Leaf not may not exist only if inode has no blocks at all */ 27035f95d21fSLukas Czerner ex = path[depth].p_ext; 2704968dee77SAshish Sangwan if (!ex) { 27056f2080e6SDmitry Monakhov if (depth) { 27066f2080e6SDmitry Monakhov EXT4_ERROR_INODE(inode, 27076f2080e6SDmitry Monakhov "path[%d].p_hdr == NULL", 27086f2080e6SDmitry Monakhov depth); 27096f2080e6SDmitry Monakhov err = -EIO; 27106f2080e6SDmitry Monakhov } 27116f2080e6SDmitry Monakhov goto out; 2712968dee77SAshish Sangwan } 27135f95d21fSLukas Czerner 27145f95d21fSLukas Czerner ee_block = le32_to_cpu(ex->ee_block); 27155f95d21fSLukas Czerner 27165f95d21fSLukas Czerner /* 27175f95d21fSLukas Czerner * See if the last block is inside the extent, if so split 27185f95d21fSLukas Czerner * the extent at 'end' block so we can easily remove the 27195f95d21fSLukas Czerner * tail of the first part of the split extent in 27205f95d21fSLukas Czerner * ext4_ext_rm_leaf(). 27215f95d21fSLukas Czerner */ 27225f95d21fSLukas Czerner if (end >= ee_block && 27235f95d21fSLukas Czerner end < ee_block + ext4_ext_get_actual_len(ex) - 1) { 27245f95d21fSLukas Czerner int split_flag = 0; 27255f95d21fSLukas Czerner 27265f95d21fSLukas Czerner if (ext4_ext_is_uninitialized(ex)) 27275f95d21fSLukas Czerner split_flag = EXT4_EXT_MARK_UNINIT1 | 27285f95d21fSLukas Czerner EXT4_EXT_MARK_UNINIT2; 27295f95d21fSLukas Czerner 27305f95d21fSLukas Czerner /* 27315f95d21fSLukas Czerner * Split the extent in two so that 'end' is the last 27325f95d21fSLukas Czerner * block in the first new extent 27335f95d21fSLukas Czerner */ 27345f95d21fSLukas Czerner err = ext4_split_extent_at(handle, inode, path, 27355f95d21fSLukas Czerner end + 1, split_flag, 27365f95d21fSLukas Czerner EXT4_GET_BLOCKS_PRE_IO | 27375f95d21fSLukas Czerner EXT4_GET_BLOCKS_PUNCH_OUT_EXT); 27385f95d21fSLukas Czerner 27395f95d21fSLukas Czerner if (err < 0) 27405f95d21fSLukas Czerner goto out; 27415f95d21fSLukas Czerner } 27425f95d21fSLukas Czerner } 27435f95d21fSLukas Czerner /* 2744d0d856e8SRandy Dunlap * We start scanning from right side, freeing all the blocks 2745d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise. 
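 *
 * With a tree of depth 2, for instance, the loop below starts at the
 * root (i == 0), follows the last index entry down to a leaf (i == 2),
 * calls ext4_ext_rm_leaf() there, then moves back up one level, steps
 * p_idx one entry to the left and descends again, until the root
 * itself has nothing more to remove.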
2746a86c6181SAlex Tomas */ 27470617b83fSDmitry Monakhov depth = ext_depth(inode); 2748968dee77SAshish Sangwan if (path) { 2749968dee77SAshish Sangwan int k = i = depth; 2750968dee77SAshish Sangwan while (--k > 0) 2751968dee77SAshish Sangwan path[k].p_block = 2752968dee77SAshish Sangwan le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2753968dee77SAshish Sangwan } else { 2754968dee77SAshish Sangwan path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), 2755968dee77SAshish Sangwan GFP_NOFS); 2756a86c6181SAlex Tomas if (path == NULL) { 2757a86c6181SAlex Tomas ext4_journal_stop(handle); 2758a86c6181SAlex Tomas return -ENOMEM; 2759a86c6181SAlex Tomas } 27600617b83fSDmitry Monakhov path[0].p_depth = depth; 2761a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode); 276289a4e48fSTheodore Ts'o i = 0; 27635f95d21fSLukas Czerner 276456b19868SAneesh Kumar K.V if (ext4_ext_check(inode, path[0].p_hdr, depth)) { 2765a86c6181SAlex Tomas err = -EIO; 2766a86c6181SAlex Tomas goto out; 2767a86c6181SAlex Tomas } 2768968dee77SAshish Sangwan } 2769968dee77SAshish Sangwan err = 0; 2770a86c6181SAlex Tomas 2771a86c6181SAlex Tomas while (i >= 0 && err == 0) { 2772a86c6181SAlex Tomas if (i == depth) { 2773a86c6181SAlex Tomas /* this is leaf block */ 2774d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 27750aa06000STheodore Ts'o &partial_cluster, start, 27765f95d21fSLukas Czerner end); 2777d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2778a86c6181SAlex Tomas brelse(path[i].p_bh); 2779a86c6181SAlex Tomas path[i].p_bh = NULL; 2780a86c6181SAlex Tomas i--; 2781a86c6181SAlex Tomas continue; 2782a86c6181SAlex Tomas } 2783a86c6181SAlex Tomas 2784a86c6181SAlex Tomas /* this is index block */ 2785a86c6181SAlex Tomas if (!path[i].p_hdr) { 2786a86c6181SAlex Tomas ext_debug("initialize header\n"); 2787a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2788a86c6181SAlex Tomas } 2789a86c6181SAlex Tomas 2790a86c6181SAlex Tomas if (!path[i].p_idx) { 2791d0d856e8SRandy Dunlap /* this level hasn't been touched yet */ 2792a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2793a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2794a86c6181SAlex Tomas ext_debug("init index ptr: hdr 0x%p, num %d\n", 2795a86c6181SAlex Tomas path[i].p_hdr, 2796a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries)); 2797a86c6181SAlex Tomas } else { 2798d0d856e8SRandy Dunlap /* we were already here, see at next index */ 2799a86c6181SAlex Tomas path[i].p_idx--; 2800a86c6181SAlex Tomas } 2801a86c6181SAlex Tomas 2802a86c6181SAlex Tomas ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2803a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr), 2804a86c6181SAlex Tomas path[i].p_idx); 2805a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) { 2806c29c0ae7SAlex Tomas struct buffer_head *bh; 2807a86c6181SAlex Tomas /* go to the next level */ 28082ae02107SMingming Cao ext_debug("move to level %d (block %llu)\n", 2809bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx)); 2810a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path)); 2811bf89d16fSTheodore Ts'o bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); 2812c29c0ae7SAlex Tomas if (!bh) { 2813a86c6181SAlex Tomas /* should we reset i_size? 
*/ 2814a86c6181SAlex Tomas err = -EIO; 2815a86c6181SAlex Tomas break; 2816a86c6181SAlex Tomas } 2817c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) { 2818c29c0ae7SAlex Tomas err = -EIO; 2819c29c0ae7SAlex Tomas break; 2820c29c0ae7SAlex Tomas } 2821f8489128SDarrick J. Wong if (ext4_ext_check_block(inode, ext_block_hdr(bh), 2822f8489128SDarrick J. Wong depth - i - 1, bh)) { 2823c29c0ae7SAlex Tomas err = -EIO; 2824c29c0ae7SAlex Tomas break; 2825c29c0ae7SAlex Tomas } 2826c29c0ae7SAlex Tomas path[i + 1].p_bh = bh; 2827a86c6181SAlex Tomas 2828d0d856e8SRandy Dunlap /* save actual number of indexes since this 2829d0d856e8SRandy Dunlap * number is changed at the next iteration */ 2830a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2831a86c6181SAlex Tomas i++; 2832a86c6181SAlex Tomas } else { 2833d0d856e8SRandy Dunlap /* we finished processing this index, go up */ 2834a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2835d0d856e8SRandy Dunlap /* index is empty, remove it; 2836a86c6181SAlex Tomas * handle must be already prepared by the 2837a86c6181SAlex Tomas * truncatei_leaf() */ 2838c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, i); 2839a86c6181SAlex Tomas } 2840d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2841a86c6181SAlex Tomas brelse(path[i].p_bh); 2842a86c6181SAlex Tomas path[i].p_bh = NULL; 2843a86c6181SAlex Tomas i--; 2844a86c6181SAlex Tomas ext_debug("return to level %d\n", i); 2845a86c6181SAlex Tomas } 2846a86c6181SAlex Tomas } 2847a86c6181SAlex Tomas 2848d8990240SAditya Kali trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster, 2849d8990240SAditya Kali path->p_hdr->eh_entries); 2850d8990240SAditya Kali 28517b415bf6SAditya Kali /* If we still have something in the partial cluster and we have removed 28527b415bf6SAditya Kali * even the first extent, then we should free the blocks in the partial 28537b415bf6SAditya Kali * cluster as well. 
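 *
 * For instance, if a truncate to zero removed every extent but the
 * file's first extent started in the middle of a cluster,
 * partial_cluster still names that cluster; with no extents left to
 * share it, the whole cluster is released here.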
*/ 28547b415bf6SAditya Kali if (partial_cluster && path->p_hdr->eh_entries == 0) { 28557b415bf6SAditya Kali int flags = EXT4_FREE_BLOCKS_FORGET; 28567b415bf6SAditya Kali 28577b415bf6SAditya Kali if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 28587b415bf6SAditya Kali flags |= EXT4_FREE_BLOCKS_METADATA; 28597b415bf6SAditya Kali 28607b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL, 28617b415bf6SAditya Kali EXT4_C2B(EXT4_SB(sb), partial_cluster), 28627b415bf6SAditya Kali EXT4_SB(sb)->s_cluster_ratio, flags); 28637b415bf6SAditya Kali partial_cluster = 0; 28647b415bf6SAditya Kali } 28657b415bf6SAditya Kali 2866a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */ 2867a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) { 2868a86c6181SAlex Tomas /* 2869d0d856e8SRandy Dunlap * truncate to zero freed all the tree, 2870d0d856e8SRandy Dunlap * so we need to correct eh_depth 2871a86c6181SAlex Tomas */ 2872a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path); 2873a86c6181SAlex Tomas if (err == 0) { 2874a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0; 2875a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max = 287655ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0)); 2877a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path); 2878a86c6181SAlex Tomas } 2879a86c6181SAlex Tomas } 2880a86c6181SAlex Tomas out: 2881a86c6181SAlex Tomas ext4_ext_drop_refs(path); 2882a86c6181SAlex Tomas kfree(path); 2883968dee77SAshish Sangwan if (err == -EAGAIN) { 2884968dee77SAshish Sangwan path = NULL; 28850617b83fSDmitry Monakhov goto again; 2886968dee77SAshish Sangwan } 2887a86c6181SAlex Tomas ext4_journal_stop(handle); 2888a86c6181SAlex Tomas 2889a86c6181SAlex Tomas return err; 2890a86c6181SAlex Tomas } 2891a86c6181SAlex Tomas 2892a86c6181SAlex Tomas /* 2893a86c6181SAlex Tomas * called at mount time 2894a86c6181SAlex Tomas */ 2895a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb) 2896a86c6181SAlex Tomas { 2897a86c6181SAlex Tomas /* 2898a86c6181SAlex Tomas * possible initialization would be here 2899a86c6181SAlex Tomas */ 2900a86c6181SAlex Tomas 290183982b6fSTheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 290290576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 290392b97816STheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled" 2904bbf2f9fbSRobert P. J. 
Day #ifdef AGGRESSIVE_TEST 290592b97816STheodore Ts'o ", aggressive tests" 2906a86c6181SAlex Tomas #endif 2907a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 290892b97816STheodore Ts'o ", check binsearch" 2909a86c6181SAlex Tomas #endif 2910a86c6181SAlex Tomas #ifdef EXTENTS_STATS 291192b97816STheodore Ts'o ", stats" 2912a86c6181SAlex Tomas #endif 291392b97816STheodore Ts'o "\n"); 291490576c0bSTheodore Ts'o #endif 2915a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2916a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 2917a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30; 2918a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0; 2919a86c6181SAlex Tomas #endif 2920a86c6181SAlex Tomas } 2921a86c6181SAlex Tomas } 2922a86c6181SAlex Tomas 2923a86c6181SAlex Tomas /* 2924a86c6181SAlex Tomas * called at umount time 2925a86c6181SAlex Tomas */ 2926a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb) 2927a86c6181SAlex Tomas { 292883982b6fSTheodore Ts'o if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 2929a86c6181SAlex Tomas return; 2930a86c6181SAlex Tomas 2931a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2932a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 2933a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 2934a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 2935a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents, 2936a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents); 2937a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 2938a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 2939a86c6181SAlex Tomas } 2940a86c6181SAlex Tomas #endif 2941a86c6181SAlex Tomas } 2942a86c6181SAlex Tomas 2943093a088bSAneesh Kumar K.V /* FIXME!! we need to try to merge to left or right after zero-out */ 2944093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 2945093a088bSAneesh Kumar K.V { 29462407518dSLukas Czerner ext4_fsblk_t ee_pblock; 29472407518dSLukas Czerner unsigned int ee_len; 2948b720303dSJing Zhang int ret; 2949093a088bSAneesh Kumar K.V 2950093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 2951bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex); 2952093a088bSAneesh Kumar K.V 2953a107e5a3STheodore Ts'o ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 29542407518dSLukas Czerner if (ret > 0) 29552407518dSLukas Czerner ret = 0; 2956093a088bSAneesh Kumar K.V 29572407518dSLukas Czerner return ret; 2958093a088bSAneesh Kumar K.V } 2959093a088bSAneesh Kumar K.V 296047ea3bb5SYongqiang Yang /* 296147ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at given block. 296247ea3bb5SYongqiang Yang * 296347ea3bb5SYongqiang Yang * @handle: the journal handle 296447ea3bb5SYongqiang Yang * @inode: the file inode 296547ea3bb5SYongqiang Yang * @path: the path to the extent 296647ea3bb5SYongqiang Yang * @split: the logical block where the extent is splitted. 296747ea3bb5SYongqiang Yang * @split_flags: indicates if the extent could be zeroout if split fails, and 296847ea3bb5SYongqiang Yang * the states(init or uninit) of new extents. 296947ea3bb5SYongqiang Yang * @flags: flags used to insert new extent to extent tree. 297047ea3bb5SYongqiang Yang * 297147ea3bb5SYongqiang Yang * 297247ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], states 297347ea3bb5SYongqiang Yang * of which are deterimined by split_flag. 
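 *
 * For example, splitting an extent that maps logical blocks
 * [100, 200) to physical blocks starting at 5000 at @split == 150
 * produces [100, 150) -> 5000 and [150, 200) -> 5050, i.e.
 * newblock = split - ee_block + ext4_ext_pblock(ex).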
297447ea3bb5SYongqiang Yang * 297547ea3bb5SYongqiang Yang * There are two cases: 297647ea3bb5SYongqiang Yang * a> the extent is split into two extents. 297747ea3bb5SYongqiang Yang * b> split is not needed, and just mark the extent. 297847ea3bb5SYongqiang Yang * 297947ea3bb5SYongqiang Yang * return 0 on success. 298047ea3bb5SYongqiang Yang */ 298147ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle, 298247ea3bb5SYongqiang Yang struct inode *inode, 298347ea3bb5SYongqiang Yang struct ext4_ext_path *path, 298447ea3bb5SYongqiang Yang ext4_lblk_t split, 298547ea3bb5SYongqiang Yang int split_flag, 298647ea3bb5SYongqiang Yang int flags) 298747ea3bb5SYongqiang Yang { 298847ea3bb5SYongqiang Yang ext4_fsblk_t newblock; 298947ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 299047ea3bb5SYongqiang Yang struct ext4_extent *ex, newex, orig_ex; 299147ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL; 299247ea3bb5SYongqiang Yang unsigned int ee_len, depth; 299347ea3bb5SYongqiang Yang int err = 0; 299447ea3bb5SYongqiang Yang 2995dee1f973SDmitry Monakhov BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 2996dee1f973SDmitry Monakhov (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 2997dee1f973SDmitry Monakhov 299847ea3bb5SYongqiang Yang ext_debug("ext4_split_extents_at: inode %lu, logical" 299947ea3bb5SYongqiang Yang "block %llu\n", inode->i_ino, (unsigned long long)split); 300047ea3bb5SYongqiang Yang 300147ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 300247ea3bb5SYongqiang Yang 300347ea3bb5SYongqiang Yang depth = ext_depth(inode); 300447ea3bb5SYongqiang Yang ex = path[depth].p_ext; 300547ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 300647ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 300747ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex); 300847ea3bb5SYongqiang Yang 300947ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 301047ea3bb5SYongqiang Yang 301147ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth); 301247ea3bb5SYongqiang Yang if (err) 301347ea3bb5SYongqiang Yang goto out; 301447ea3bb5SYongqiang Yang 301547ea3bb5SYongqiang Yang if (split == ee_block) { 301647ea3bb5SYongqiang Yang /* 301747ea3bb5SYongqiang Yang * case b: block @split is the block that the extent begins with, 301847ea3bb5SYongqiang Yang * then we just change the state of the extent, and splitting 301947ea3bb5SYongqiang Yang * is not needed.
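 * (e.g. if @split == ee_block and EXT4_EXT_MARK_UNINIT2 is set, the
 * extent is simply marked uninitialized in place and no new extent
 * is inserted)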
302047ea3bb5SYongqiang Yang */ 302147ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 302247ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex); 302347ea3bb5SYongqiang Yang else 302447ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex); 302547ea3bb5SYongqiang Yang 302647ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3027ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 302847ea3bb5SYongqiang Yang 3029ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 303047ea3bb5SYongqiang Yang goto out; 303147ea3bb5SYongqiang Yang } 303247ea3bb5SYongqiang Yang 303347ea3bb5SYongqiang Yang /* case a */ 303447ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex)); 303547ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block); 303647ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT1) 303747ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex); 303847ea3bb5SYongqiang Yang 303947ea3bb5SYongqiang Yang /* 304047ea3bb5SYongqiang Yang * path may lead to new leaf, not to original leaf any more 304147ea3bb5SYongqiang Yang * after ext4_ext_insert_extent() returns, 304247ea3bb5SYongqiang Yang */ 304347ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 304447ea3bb5SYongqiang Yang if (err) 304547ea3bb5SYongqiang Yang goto fix_extent_len; 304647ea3bb5SYongqiang Yang 304747ea3bb5SYongqiang Yang ex2 = &newex; 304847ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split); 304947ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 305047ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock); 305147ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 305247ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex2); 305347ea3bb5SYongqiang Yang 305447ea3bb5SYongqiang Yang err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 305547ea3bb5SYongqiang Yang if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3056dee1f973SDmitry Monakhov if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3057dee1f973SDmitry Monakhov if (split_flag & EXT4_EXT_DATA_VALID1) 3058dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex2); 3059dee1f973SDmitry Monakhov else 3060dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex); 3061dee1f973SDmitry Monakhov } else 306247ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex); 3063dee1f973SDmitry Monakhov 306447ea3bb5SYongqiang Yang if (err) 306547ea3bb5SYongqiang Yang goto fix_extent_len; 306647ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */ 3067af1584f5SAl Viro ex->ee_len = cpu_to_le16(ee_len); 3068ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3069ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 307047ea3bb5SYongqiang Yang goto out; 307147ea3bb5SYongqiang Yang } else if (err) 307247ea3bb5SYongqiang Yang goto fix_extent_len; 307347ea3bb5SYongqiang Yang 307447ea3bb5SYongqiang Yang out: 307547ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 307647ea3bb5SYongqiang Yang return err; 307747ea3bb5SYongqiang Yang 307847ea3bb5SYongqiang Yang fix_extent_len: 307947ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len; 308047ea3bb5SYongqiang Yang ext4_ext_dirty(handle, inode, path + depth); 308147ea3bb5SYongqiang Yang return err; 308247ea3bb5SYongqiang Yang } 308347ea3bb5SYongqiang Yang 308447ea3bb5SYongqiang Yang /* 308547ea3bb5SYongqiang Yang * ext4_split_extents() splits an extent and mark extent which is covered 
308647ea3bb5SYongqiang Yang * by @map as split_flags indicates 308747ea3bb5SYongqiang Yang * 308847ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (upto three) 308947ea3bb5SYongqiang Yang * There are three possibilities: 309047ea3bb5SYongqiang Yang * a> There is no split required 309147ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the extent 309247ea3bb5SYongqiang Yang * c> Splits in three extents: Somone is splitting in middle of the extent 309347ea3bb5SYongqiang Yang * 309447ea3bb5SYongqiang Yang */ 309547ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle, 309647ea3bb5SYongqiang Yang struct inode *inode, 309747ea3bb5SYongqiang Yang struct ext4_ext_path *path, 309847ea3bb5SYongqiang Yang struct ext4_map_blocks *map, 309947ea3bb5SYongqiang Yang int split_flag, 310047ea3bb5SYongqiang Yang int flags) 310147ea3bb5SYongqiang Yang { 310247ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 310347ea3bb5SYongqiang Yang struct ext4_extent *ex; 310447ea3bb5SYongqiang Yang unsigned int ee_len, depth; 310547ea3bb5SYongqiang Yang int err = 0; 310647ea3bb5SYongqiang Yang int uninitialized; 310747ea3bb5SYongqiang Yang int split_flag1, flags1; 310847ea3bb5SYongqiang Yang 310947ea3bb5SYongqiang Yang depth = ext_depth(inode); 311047ea3bb5SYongqiang Yang ex = path[depth].p_ext; 311147ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 311247ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 311347ea3bb5SYongqiang Yang uninitialized = ext4_ext_is_uninitialized(ex); 311447ea3bb5SYongqiang Yang 311547ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) { 3116dee1f973SDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 311747ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 311847ea3bb5SYongqiang Yang if (uninitialized) 311947ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1 | 312047ea3bb5SYongqiang Yang EXT4_EXT_MARK_UNINIT2; 3121dee1f973SDmitry Monakhov if (split_flag & EXT4_EXT_DATA_VALID2) 3122dee1f973SDmitry Monakhov split_flag1 |= EXT4_EXT_DATA_VALID1; 312347ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 312447ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1); 312593917411SYongqiang Yang if (err) 312693917411SYongqiang Yang goto out; 312747ea3bb5SYongqiang Yang } 312847ea3bb5SYongqiang Yang 312947ea3bb5SYongqiang Yang ext4_ext_drop_refs(path); 313047ea3bb5SYongqiang Yang path = ext4_ext_find_extent(inode, map->m_lblk, path); 313147ea3bb5SYongqiang Yang if (IS_ERR(path)) 313247ea3bb5SYongqiang Yang return PTR_ERR(path); 313347ea3bb5SYongqiang Yang 313447ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) { 3135dee1f973SDmitry Monakhov split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT | 3136dee1f973SDmitry Monakhov EXT4_EXT_DATA_VALID2); 313747ea3bb5SYongqiang Yang if (uninitialized) 313847ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1; 313947ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 314047ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT2; 314147ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 314247ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags); 314347ea3bb5SYongqiang Yang if (err) 314447ea3bb5SYongqiang Yang goto out; 314547ea3bb5SYongqiang Yang } 314647ea3bb5SYongqiang Yang 314747ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 314847ea3bb5SYongqiang Yang out: 314947ea3bb5SYongqiang Yang return err ? 
err : map->m_len; 315047ea3bb5SYongqiang Yang } 315147ea3bb5SYongqiang Yang 315256055d3aSAmit Arora /* 3153e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write 315456055d3aSAmit Arora * to an uninitialized extent. It may result in splitting the uninitialized 315556055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two 315656055d3aSAmit Arora * uninitialized). 315756055d3aSAmit Arora * There are three possibilities: 315856055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized 315956055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent 316056055d3aSAmit Arora * c> Splits in three extents: Somone is writing in middle of the extent 31616f91bc5fSEric Gouriou * 31626f91bc5fSEric Gouriou * Pre-conditions: 31636f91bc5fSEric Gouriou * - The extent pointed to by 'path' is uninitialized. 31646f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset 31656f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 31666f91bc5fSEric Gouriou * 31676f91bc5fSEric Gouriou * Post-conditions on success: 31686f91bc5fSEric Gouriou * - the returned value is the number of blocks beyond map->l_lblk 31696f91bc5fSEric Gouriou * that are allocated and initialized. 31706f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len. 317156055d3aSAmit Arora */ 3172725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle, 3173725d26d3SAneesh Kumar K.V struct inode *inode, 3174e35fd660STheodore Ts'o struct ext4_map_blocks *map, 3175e35fd660STheodore Ts'o struct ext4_ext_path *path) 317656055d3aSAmit Arora { 317767a5da56SZheng Liu struct ext4_sb_info *sbi; 31786f91bc5fSEric Gouriou struct ext4_extent_header *eh; 3179667eff35SYongqiang Yang struct ext4_map_blocks split_map; 3180667eff35SYongqiang Yang struct ext4_extent zero_ex; 3181667eff35SYongqiang Yang struct ext4_extent *ex; 318221ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block; 3183f85b287aSDan Carpenter unsigned int ee_len, depth; 318467a5da56SZheng Liu int allocated, max_zeroout = 0; 318556055d3aSAmit Arora int err = 0; 3186667eff35SYongqiang Yang int split_flag = 0; 318721ca087aSDmitry Monakhov 318821ca087aSDmitry Monakhov ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 318921ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3190e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len); 319121ca087aSDmitry Monakhov 319267a5da56SZheng Liu sbi = EXT4_SB(inode->i_sb); 319321ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 319421ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3195e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len) 3196e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len; 319756055d3aSAmit Arora 319856055d3aSAmit Arora depth = ext_depth(inode); 31996f91bc5fSEric Gouriou eh = path[depth].p_hdr; 320056055d3aSAmit Arora ex = path[depth].p_ext; 320156055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block); 320256055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex); 3203e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 320421ca087aSDmitry Monakhov 32056f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 32066f91bc5fSEric Gouriou 32076f91bc5fSEric Gouriou /* Pre-conditions */ 32086f91bc5fSEric Gouriou BUG_ON(!ext4_ext_is_uninitialized(ex)); 32096f91bc5fSEric Gouriou 
BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 32106f91bc5fSEric Gouriou 32116f91bc5fSEric Gouriou /* 32126f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently 32136f91bc5fSEric Gouriou * uninitialized extent to its left neighbor. This is much cheaper 32146f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly 32156f91bc5fSEric Gouriou * memmove() calls. This is the common case in steady state for 32166f91bc5fSEric Gouriou * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append 32176f91bc5fSEric Gouriou * writes. 32186f91bc5fSEric Gouriou * 32196f91bc5fSEric Gouriou * Limitations of the current logic: 32206f91bc5fSEric Gouriou * - L1: we only deal with writes at the start of the extent. 32216f91bc5fSEric Gouriou * The approach could be extended to writes at the end 32226f91bc5fSEric Gouriou * of the extent but this scenario was deemed less common. 32236f91bc5fSEric Gouriou * - L2: we do not deal with writes covering the whole extent. 32246f91bc5fSEric Gouriou * This would require removing the extent if the transfer 32256f91bc5fSEric Gouriou * is possible. 32266f91bc5fSEric Gouriou * - L3: we only attempt to merge with an extent stored in the 32276f91bc5fSEric Gouriou * same extent tree node. 32286f91bc5fSEric Gouriou */ 32296f91bc5fSEric Gouriou if ((map->m_lblk == ee_block) && /*L1*/ 32306f91bc5fSEric Gouriou (map->m_len < ee_len) && /*L2*/ 32316f91bc5fSEric Gouriou (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/ 32326f91bc5fSEric Gouriou struct ext4_extent *prev_ex; 32336f91bc5fSEric Gouriou ext4_lblk_t prev_lblk; 32346f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk; 32356f91bc5fSEric Gouriou unsigned int prev_len, write_len; 32366f91bc5fSEric Gouriou 32376f91bc5fSEric Gouriou prev_ex = ex - 1; 32386f91bc5fSEric Gouriou prev_lblk = le32_to_cpu(prev_ex->ee_block); 32396f91bc5fSEric Gouriou prev_len = ext4_ext_get_actual_len(prev_ex); 32406f91bc5fSEric Gouriou prev_pblk = ext4_ext_pblock(prev_ex); 32416f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex); 32426f91bc5fSEric Gouriou write_len = map->m_len; 32436f91bc5fSEric Gouriou 32446f91bc5fSEric Gouriou /* 32456f91bc5fSEric Gouriou * A transfer of blocks from 'ex' to 'prev_ex' is allowed 32466f91bc5fSEric Gouriou * upon those conditions: 32476f91bc5fSEric Gouriou * - C1: prev_ex is initialized, 32486f91bc5fSEric Gouriou * - C2: prev_ex is logically abutting ex, 32496f91bc5fSEric Gouriou * - C3: prev_ex is physically abutting ex, 32506f91bc5fSEric Gouriou * - C4: prev_ex can receive the additional blocks without 32516f91bc5fSEric Gouriou * overflowing the (initialized) length limit. 
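 *
 * A worked example, with made-up numbers: prev_ex is an initialized
 * extent [0, 16) at physical block 8000, ex is the uninitialized
 * extent [16, 48) at physical block 8016, and the write covers
 * blocks [16, 20).  C1-C4 all hold, so prev_ex grows to [0, 20)
 * while ex shrinks to [20, 48) at physical block 8020, avoiding
 * both a split and a merge; 'allocated' becomes 4 (== write_len).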
32526f91bc5fSEric Gouriou */ 32536f91bc5fSEric Gouriou if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/ 32546f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/ 32556f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 32566f91bc5fSEric Gouriou (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/ 32576f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth); 32586f91bc5fSEric Gouriou if (err) 32596f91bc5fSEric Gouriou goto out; 32606f91bc5fSEric Gouriou 32616f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode, 32626f91bc5fSEric Gouriou map, ex, prev_ex); 32636f91bc5fSEric Gouriou 32646f91bc5fSEric Gouriou /* Shift the start of ex by 'write_len' blocks */ 32656f91bc5fSEric Gouriou ex->ee_block = cpu_to_le32(ee_block + write_len); 32666f91bc5fSEric Gouriou ext4_ext_store_pblock(ex, ee_pblk + write_len); 32676f91bc5fSEric Gouriou ex->ee_len = cpu_to_le16(ee_len - write_len); 32686f91bc5fSEric Gouriou ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 32696f91bc5fSEric Gouriou 32706f91bc5fSEric Gouriou /* Extend prev_ex by 'write_len' blocks */ 32716f91bc5fSEric Gouriou prev_ex->ee_len = cpu_to_le16(prev_len + write_len); 32726f91bc5fSEric Gouriou 32736f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */ 32746f91bc5fSEric Gouriou ext4_ext_dirty(handle, inode, path + depth); 32756f91bc5fSEric Gouriou 32766f91bc5fSEric Gouriou /* Update path to point to the right extent */ 32776f91bc5fSEric Gouriou path[depth].p_ext = prev_ex; 32786f91bc5fSEric Gouriou 32796f91bc5fSEric Gouriou /* Result: number of initialized blocks past m_lblk */ 32806f91bc5fSEric Gouriou allocated = write_len; 32816f91bc5fSEric Gouriou goto out; 32826f91bc5fSEric Gouriou } 32836f91bc5fSEric Gouriou } 32846f91bc5fSEric Gouriou 3285667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block); 328621ca087aSDmitry Monakhov /* 328721ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 328821ca087aSDmitry Monakhov * zeroout only if extent is fully insde i_size or new_size. 328921ca087aSDmitry Monakhov */ 3290667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 329121ca087aSDmitry Monakhov 329267a5da56SZheng Liu if (EXT4_EXT_MAY_ZEROOUT & split_flag) 329367a5da56SZheng Liu max_zeroout = sbi->s_extent_max_zeroout_kb >> 329467a5da56SZheng Liu inode->i_sb->s_blocksize_bits; 329567a5da56SZheng Liu 329667a5da56SZheng Liu /* If extent is less than s_max_zeroout_kb, zeroout directly */ 329767a5da56SZheng Liu if (max_zeroout && (ee_len <= max_zeroout)) { 3298667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, ex); 32993977c965SAneesh Kumar K.V if (err) 330056055d3aSAmit Arora goto out; 33019df5643aSAneesh Kumar K.V 33029df5643aSAneesh Kumar K.V err = ext4_ext_get_access(handle, inode, path + depth); 33039df5643aSAneesh Kumar K.V if (err) 33049df5643aSAneesh Kumar K.V goto out; 3305667eff35SYongqiang Yang ext4_ext_mark_initialized(ex); 3306ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3307ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 330856055d3aSAmit Arora goto out; 3309667eff35SYongqiang Yang } 3310093a088bSAneesh Kumar K.V 3311667eff35SYongqiang Yang /* 3312667eff35SYongqiang Yang * four cases: 3313667eff35SYongqiang Yang * 1. split the extent into three extents. 3314667eff35SYongqiang Yang * 2. split the extent into two extents, zeroout the first half. 3315667eff35SYongqiang Yang * 3. 
split the extent into two extents, zeroout the second half. 3316667eff35SYongqiang Yang * 4. split the extent into two extents with out zeroout. 3317667eff35SYongqiang Yang */ 3318667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3319667eff35SYongqiang Yang split_map.m_len = map->m_len; 3320667eff35SYongqiang Yang 332167a5da56SZheng Liu if (max_zeroout && (allocated > map->m_len)) { 332267a5da56SZheng Liu if (allocated <= max_zeroout) { 3323667eff35SYongqiang Yang /* case 3 */ 3324667eff35SYongqiang Yang zero_ex.ee_block = 33259b940f8eSAllison Henderson cpu_to_le32(map->m_lblk); 33269b940f8eSAllison Henderson zero_ex.ee_len = cpu_to_le16(allocated); 3327667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3328667eff35SYongqiang Yang ext4_ext_pblock(ex) + map->m_lblk - ee_block); 3329667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3330667eff35SYongqiang Yang if (err) 3331667eff35SYongqiang Yang goto out; 3332667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3333667eff35SYongqiang Yang split_map.m_len = allocated; 333467a5da56SZheng Liu } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { 3335667eff35SYongqiang Yang /* case 2 */ 3336667eff35SYongqiang Yang if (map->m_lblk != ee_block) { 3337667eff35SYongqiang Yang zero_ex.ee_block = ex->ee_block; 3338667eff35SYongqiang Yang zero_ex.ee_len = cpu_to_le16(map->m_lblk - 3339667eff35SYongqiang Yang ee_block); 3340667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3341667eff35SYongqiang Yang ext4_ext_pblock(ex)); 3342667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3343667eff35SYongqiang Yang if (err) 3344667eff35SYongqiang Yang goto out; 3345667eff35SYongqiang Yang } 3346667eff35SYongqiang Yang 3347667eff35SYongqiang Yang split_map.m_lblk = ee_block; 33489b940f8eSAllison Henderson split_map.m_len = map->m_lblk - ee_block + map->m_len; 33499b940f8eSAllison Henderson allocated = map->m_len; 3350667eff35SYongqiang Yang } 3351667eff35SYongqiang Yang } 3352667eff35SYongqiang Yang 3353667eff35SYongqiang Yang allocated = ext4_split_extent(handle, inode, path, 3354667eff35SYongqiang Yang &split_map, split_flag, 0); 3355667eff35SYongqiang Yang if (allocated < 0) 3356667eff35SYongqiang Yang err = allocated; 3357667eff35SYongqiang Yang 3358667eff35SYongqiang Yang out: 3359667eff35SYongqiang Yang return err ? err : allocated; 336056055d3aSAmit Arora } 336156055d3aSAmit Arora 3362c278bfecSAneesh Kumar K.V /* 3363e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() from 33640031462bSMingming Cao * ext4_get_blocks_dio_write() when DIO to write 33650031462bSMingming Cao * to an uninitialized extent. 33660031462bSMingming Cao * 3367fd018fe8SPaul Bolle * Writing to an uninitialized extent may result in splitting the uninitialized 336830cb27d6SWang Sheng-Hui * extent into multiple initialized/uninitialized extents (up to three) 33690031462bSMingming Cao * There are three possibilities: 33700031462bSMingming Cao * a> There is no split required: Entire extent should be uninitialized 33710031462bSMingming Cao * b> Splits in two extents: Write is happening at either end of the extent 33720031462bSMingming Cao * c> Splits in three extents: Somone is writing in middle of the extent 33730031462bSMingming Cao * 33740031462bSMingming Cao * One of more index blocks maybe needed if the extent tree grow after 3375b595076aSUwe Kleine-König * the uninitialized extent split. 
To prevent ENOSPC occur at the IO 33760031462bSMingming Cao * complete, we need to split the uninitialized extent before DIO submit 3377421f91d2SUwe Kleine-König * the IO. The uninitialized extent called at this time will be split 33780031462bSMingming Cao * into three uninitialized extent(at most). After IO complete, the part 33790031462bSMingming Cao * being filled will be convert to initialized by the end_io callback function 33800031462bSMingming Cao * via ext4_convert_unwritten_extents(). 3381ba230c3fSMingming * 3382ba230c3fSMingming * Returns the size of uninitialized extent to be written on success. 33830031462bSMingming Cao */ 33840031462bSMingming Cao static int ext4_split_unwritten_extents(handle_t *handle, 33850031462bSMingming Cao struct inode *inode, 3386e35fd660STheodore Ts'o struct ext4_map_blocks *map, 33870031462bSMingming Cao struct ext4_ext_path *path, 33880031462bSMingming Cao int flags) 33890031462bSMingming Cao { 3390667eff35SYongqiang Yang ext4_lblk_t eof_block; 3391667eff35SYongqiang Yang ext4_lblk_t ee_block; 3392667eff35SYongqiang Yang struct ext4_extent *ex; 3393667eff35SYongqiang Yang unsigned int ee_len; 3394667eff35SYongqiang Yang int split_flag = 0, depth; 33950031462bSMingming Cao 339621ca087aSDmitry Monakhov ext_debug("ext4_split_unwritten_extents: inode %lu, logical" 339721ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3398e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len); 339921ca087aSDmitry Monakhov 340021ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 340121ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3402e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len) 3403e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len; 34040031462bSMingming Cao /* 340521ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 340621ca087aSDmitry Monakhov * zeroout only if extent is fully insde i_size or new_size. 340721ca087aSDmitry Monakhov */ 3408667eff35SYongqiang Yang depth = ext_depth(inode); 34090031462bSMingming Cao ex = path[depth].p_ext; 3410667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 3411667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 34120031462bSMingming Cao 3413667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? 
EXT4_EXT_MAY_ZEROOUT : 0; 3414667eff35SYongqiang Yang split_flag |= EXT4_EXT_MARK_UNINIT2; 3415dee1f973SDmitry Monakhov if (flags & EXT4_GET_BLOCKS_CONVERT) 3416dee1f973SDmitry Monakhov split_flag |= EXT4_EXT_DATA_VALID2; 3417667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO; 3418667eff35SYongqiang Yang return ext4_split_extent(handle, inode, path, map, split_flag, flags); 34190031462bSMingming Cao } 3420197217a5SYongqiang Yang 3421c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle, 34220031462bSMingming Cao struct inode *inode, 3423dee1f973SDmitry Monakhov struct ext4_map_blocks *map, 34240031462bSMingming Cao struct ext4_ext_path *path) 34250031462bSMingming Cao { 34260031462bSMingming Cao struct ext4_extent *ex; 3427dee1f973SDmitry Monakhov ext4_lblk_t ee_block; 3428dee1f973SDmitry Monakhov unsigned int ee_len; 34290031462bSMingming Cao int depth; 34300031462bSMingming Cao int err = 0; 34310031462bSMingming Cao 34320031462bSMingming Cao depth = ext_depth(inode); 34330031462bSMingming Cao ex = path[depth].p_ext; 3434dee1f973SDmitry Monakhov ee_block = le32_to_cpu(ex->ee_block); 3435dee1f973SDmitry Monakhov ee_len = ext4_ext_get_actual_len(ex); 34360031462bSMingming Cao 3437197217a5SYongqiang Yang ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3438197217a5SYongqiang Yang "block %llu, max_blocks %u\n", inode->i_ino, 3439dee1f973SDmitry Monakhov (unsigned long long)ee_block, ee_len); 3440dee1f973SDmitry Monakhov 3441dee1f973SDmitry Monakhov /* If extent is larger than requested then split is required */ 3442dee1f973SDmitry Monakhov if (ee_block != map->m_lblk || ee_len > map->m_len) { 3443dee1f973SDmitry Monakhov err = ext4_split_unwritten_extents(handle, inode, map, path, 3444dee1f973SDmitry Monakhov EXT4_GET_BLOCKS_CONVERT); 3445dee1f973SDmitry Monakhov if (err < 0) 3446dee1f973SDmitry Monakhov goto out; 3447dee1f973SDmitry Monakhov ext4_ext_drop_refs(path); 3448dee1f973SDmitry Monakhov path = ext4_ext_find_extent(inode, map->m_lblk, path); 3449dee1f973SDmitry Monakhov if (IS_ERR(path)) { 3450dee1f973SDmitry Monakhov err = PTR_ERR(path); 3451dee1f973SDmitry Monakhov goto out; 3452dee1f973SDmitry Monakhov } 3453dee1f973SDmitry Monakhov depth = ext_depth(inode); 3454dee1f973SDmitry Monakhov ex = path[depth].p_ext; 3455dee1f973SDmitry Monakhov } 3456197217a5SYongqiang Yang 34570031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth); 34580031462bSMingming Cao if (err) 34590031462bSMingming Cao goto out; 34600031462bSMingming Cao /* first mark the extent as initialized */ 34610031462bSMingming Cao ext4_ext_mark_initialized(ex); 34620031462bSMingming Cao 3463197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because 3464197217a5SYongqiang Yang * borders are not changed 34650031462bSMingming Cao */ 3466ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3467197217a5SYongqiang Yang 34680031462bSMingming Cao /* Mark modified extent as dirty */ 3469ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 34700031462bSMingming Cao out: 34710031462bSMingming Cao ext4_ext_show_leaf(inode, path); 34720031462bSMingming Cao return err; 34730031462bSMingming Cao } 34740031462bSMingming Cao 3475515f41c3SAneesh Kumar K.V static void unmap_underlying_metadata_blocks(struct block_device *bdev, 3476515f41c3SAneesh Kumar K.V sector_t block, int count) 3477515f41c3SAneesh Kumar K.V { 3478515f41c3SAneesh Kumar K.V int i; 3479515f41c3SAneesh Kumar K.V for (i = 
0; i < count; i++) 3480515f41c3SAneesh Kumar K.V unmap_underlying_metadata(bdev, block + i); 3481515f41c3SAneesh Kumar K.V } 3482515f41c3SAneesh Kumar K.V 348358590b06STheodore Ts'o /* 348458590b06STheodore Ts'o * Handle EOFBLOCKS_FL flag, clearing it if necessary 348558590b06STheodore Ts'o */ 348658590b06STheodore Ts'o static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3487d002ebf1SEric Sandeen ext4_lblk_t lblk, 348858590b06STheodore Ts'o struct ext4_ext_path *path, 348958590b06STheodore Ts'o unsigned int len) 349058590b06STheodore Ts'o { 349158590b06STheodore Ts'o int i, depth; 349258590b06STheodore Ts'o struct ext4_extent_header *eh; 349365922cb5SSergey Senozhatsky struct ext4_extent *last_ex; 349458590b06STheodore Ts'o 349558590b06STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 349658590b06STheodore Ts'o return 0; 349758590b06STheodore Ts'o 349858590b06STheodore Ts'o depth = ext_depth(inode); 349958590b06STheodore Ts'o eh = path[depth].p_hdr; 350058590b06STheodore Ts'o 3501afcff5d8SLukas Czerner /* 3502afcff5d8SLukas Czerner * We're going to remove EOFBLOCKS_FL entirely in future so we 3503afcff5d8SLukas Czerner * do not care for this case anymore. Simply remove the flag 3504afcff5d8SLukas Czerner * if there are no extents. 3505afcff5d8SLukas Czerner */ 3506afcff5d8SLukas Czerner if (unlikely(!eh->eh_entries)) 3507afcff5d8SLukas Czerner goto out; 350858590b06STheodore Ts'o last_ex = EXT_LAST_EXTENT(eh); 350958590b06STheodore Ts'o /* 351058590b06STheodore Ts'o * We should clear the EOFBLOCKS_FL flag if we are writing the 351158590b06STheodore Ts'o * last block in the last extent in the file. We test this by 351258590b06STheodore Ts'o * first checking to see if the caller to 351358590b06STheodore Ts'o * ext4_ext_get_blocks() was interested in the last block (or 351458590b06STheodore Ts'o * a block beyond the last block) in the current extent. If 351558590b06STheodore Ts'o * this turns out to be false, we can bail out from this 351658590b06STheodore Ts'o * function immediately. 351758590b06STheodore Ts'o */ 3518d002ebf1SEric Sandeen if (lblk + len < le32_to_cpu(last_ex->ee_block) + 351958590b06STheodore Ts'o ext4_ext_get_actual_len(last_ex)) 352058590b06STheodore Ts'o return 0; 352158590b06STheodore Ts'o /* 352258590b06STheodore Ts'o * If the caller does appear to be planning to write at or 352358590b06STheodore Ts'o * beyond the end of the current extent, we then test to see 352458590b06STheodore Ts'o * if the current extent is the last extent in the file, by 352558590b06STheodore Ts'o * checking to make sure it was reached via the rightmost node 352658590b06STheodore Ts'o * at each level of the tree. 352758590b06STheodore Ts'o */ 352858590b06STheodore Ts'o for (i = depth-1; i >= 0; i--) 352958590b06STheodore Ts'o if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 353058590b06STheodore Ts'o return 0; 3531afcff5d8SLukas Czerner out: 353258590b06STheodore Ts'o ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 353358590b06STheodore Ts'o return ext4_mark_inode_dirty(handle, inode); 353458590b06STheodore Ts'o } 353558590b06STheodore Ts'o 35367b415bf6SAditya Kali /** 35377b415bf6SAditya Kali * ext4_find_delalloc_range: find delayed allocated block in the given range. 35387b415bf6SAditya Kali * 35397d1b1fbcSZheng Liu * Return 1 if there is a delalloc block in the range, otherwise 0. 
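 *
 * Illustrative note, not part of the original source: with a hypothetical
 * range lblk_start = 100, lblk_end = 103 and a delayed extent recorded as
 * es_lblk = 102, es_len = 4, the second test below
 * (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end) fires and 1 is
 * returned; if the delayed extent started at block 98 instead, the first
 * test (es.es_lblk <= lblk_start < es.es_lblk + es.es_len) would catch it.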
35407b415bf6SAditya Kali */
3541f7fec032SZheng Liu int ext4_find_delalloc_range(struct inode *inode,
35427b415bf6SAditya Kali ext4_lblk_t lblk_start,
35437d1b1fbcSZheng Liu ext4_lblk_t lblk_end)
35447b415bf6SAditya Kali {
35457d1b1fbcSZheng Liu struct extent_status es;
35467b415bf6SAditya Kali
3547be401363SZheng Liu ext4_es_find_delayed_extent(inode, lblk_start, &es);
354806b0c886SZheng Liu if (es.es_len == 0)
35497d1b1fbcSZheng Liu return 0; /* there is no delay extent in this tree */
355006b0c886SZheng Liu else if (es.es_lblk <= lblk_start &&
355106b0c886SZheng Liu lblk_start < es.es_lblk + es.es_len)
35527b415bf6SAditya Kali return 1;
355306b0c886SZheng Liu else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
35547d1b1fbcSZheng Liu return 1;
35557b415bf6SAditya Kali else
35567b415bf6SAditya Kali return 0;
35577b415bf6SAditya Kali }
35587b415bf6SAditya Kali
35597d1b1fbcSZheng Liu int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
35607b415bf6SAditya Kali {
35617b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
35627b415bf6SAditya Kali ext4_lblk_t lblk_start, lblk_end;
35637b415bf6SAditya Kali lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
35647b415bf6SAditya Kali lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
35657b415bf6SAditya Kali
35667d1b1fbcSZheng Liu return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
35677b415bf6SAditya Kali }
35687b415bf6SAditya Kali
35697b415bf6SAditya Kali /**
35707b415bf6SAditya Kali * Determines how many complete clusters (out of those specified by the 'map')
35717b415bf6SAditya Kali * are under delalloc and for which quota was reserved.
35727b415bf6SAditya Kali * This function is called when we are writing out the blocks that were
35737b415bf6SAditya Kali * originally written with their allocation delayed, but then the space was
35747b415bf6SAditya Kali * allocated using fallocate() before the delayed allocation could be resolved.
35757b415bf6SAditya Kali * The cases to look for are:
35767b415bf6SAditya Kali * ('=' indicates delayed allocated blocks
35777b415bf6SAditya Kali * '-' indicates non-delayed allocated blocks)
35787b415bf6SAditya Kali * (a) partial clusters towards beginning and/or end outside of allocated range
35797b415bf6SAditya Kali * are not delalloc'ed.
35807b415bf6SAditya Kali * Ex:
35817b415bf6SAditya Kali * |----c---=|====c====|====c====|===-c----|
35827b415bf6SAditya Kali * |++++++ allocated ++++++|
35837b415bf6SAditya Kali * ==> 4 complete clusters in above example
35847b415bf6SAditya Kali *
35857b415bf6SAditya Kali * (b) partial cluster (outside of allocated range) towards either end is
35867b415bf6SAditya Kali * marked for delayed allocation. In this case, we will exclude that
35877b415bf6SAditya Kali * cluster.
35887b415bf6SAditya Kali * Ex:
35897b415bf6SAditya Kali * |----====c========|========c========|
35907b415bf6SAditya Kali * |++++++ allocated ++++++|
35917b415bf6SAditya Kali * ==> 1 complete cluster in above example
35927b415bf6SAditya Kali *
35937b415bf6SAditya Kali * Ex:
35947b415bf6SAditya Kali * |================c================|
35957b415bf6SAditya Kali * |++++++ allocated ++++++|
35967b415bf6SAditya Kali * ==> 0 complete clusters in above example
35977b415bf6SAditya Kali *
35987b415bf6SAditya Kali * The ext4_da_update_reserve_space will be called only if we
35997b415bf6SAditya Kali * determine here that there were some "entire" clusters that span
36007b415bf6SAditya Kali * this 'allocated' range.
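 *
 * Worked example (illustrative, not in the original source): with
 * s_cluster_ratio = 4, lblk_start = 6 and num_blks = 8 the allocated range
 * covers blocks 6..13, i.e. clusters 1..3, so we start with
 * allocated_clusters = 3.  The left probe then checks blocks 4..5 and the
 * right probe checks blocks 14..15; each probe that finds delayed-allocated
 * blocks excludes one partial cluster, so the result ends up as 3, 2 or 1
 * depending on what is delalloc'ed around the range.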
36017b415bf6SAditya Kali * In the non-bigalloc case, this function will just end up returning num_blks 36027b415bf6SAditya Kali * without ever calling ext4_find_delalloc_range. 36037b415bf6SAditya Kali */ 36047b415bf6SAditya Kali static unsigned int 36057b415bf6SAditya Kali get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, 36067b415bf6SAditya Kali unsigned int num_blks) 36077b415bf6SAditya Kali { 36087b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 36097b415bf6SAditya Kali ext4_lblk_t alloc_cluster_start, alloc_cluster_end; 36107b415bf6SAditya Kali ext4_lblk_t lblk_from, lblk_to, c_offset; 36117b415bf6SAditya Kali unsigned int allocated_clusters = 0; 36127b415bf6SAditya Kali 36137b415bf6SAditya Kali alloc_cluster_start = EXT4_B2C(sbi, lblk_start); 36147b415bf6SAditya Kali alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); 36157b415bf6SAditya Kali 36167b415bf6SAditya Kali /* max possible clusters for this allocation */ 36177b415bf6SAditya Kali allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; 36187b415bf6SAditya Kali 3619d8990240SAditya Kali trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); 3620d8990240SAditya Kali 36217b415bf6SAditya Kali /* Check towards left side */ 36227b415bf6SAditya Kali c_offset = lblk_start & (sbi->s_cluster_ratio - 1); 36237b415bf6SAditya Kali if (c_offset) { 36247b415bf6SAditya Kali lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); 36257b415bf6SAditya Kali lblk_to = lblk_from + c_offset - 1; 36267b415bf6SAditya Kali 36277d1b1fbcSZheng Liu if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 36287b415bf6SAditya Kali allocated_clusters--; 36297b415bf6SAditya Kali } 36307b415bf6SAditya Kali 36317b415bf6SAditya Kali /* Now check towards right. 
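 * Here c_offset is the offset, within its cluster, of the first block just
 * past the allocated range, so the probe below runs from that block to the
 * end of the same cluster.  (Descriptive note added for clarity; not in
 * the original source.)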
*/ 36327b415bf6SAditya Kali c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); 36337b415bf6SAditya Kali if (allocated_clusters && c_offset) { 36347b415bf6SAditya Kali lblk_from = lblk_start + num_blks; 36357b415bf6SAditya Kali lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; 36367b415bf6SAditya Kali 36377d1b1fbcSZheng Liu if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 36387b415bf6SAditya Kali allocated_clusters--; 36397b415bf6SAditya Kali } 36407b415bf6SAditya Kali 36417b415bf6SAditya Kali return allocated_clusters; 36427b415bf6SAditya Kali } 36437b415bf6SAditya Kali 36440031462bSMingming Cao static int 36450031462bSMingming Cao ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, 3646e35fd660STheodore Ts'o struct ext4_map_blocks *map, 36470031462bSMingming Cao struct ext4_ext_path *path, int flags, 3648e35fd660STheodore Ts'o unsigned int allocated, ext4_fsblk_t newblock) 36490031462bSMingming Cao { 36500031462bSMingming Cao int ret = 0; 36510031462bSMingming Cao int err = 0; 3652f45ee3a1SDmitry Monakhov ext4_io_end_t *io = ext4_inode_aio(inode); 36530031462bSMingming Cao 36540031462bSMingming Cao ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical " 365588635ca2SZheng Liu "block %llu, max_blocks %u, flags %x, allocated %u\n", 3656e35fd660STheodore Ts'o inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 36570031462bSMingming Cao flags, allocated); 36580031462bSMingming Cao ext4_ext_show_leaf(inode, path); 36590031462bSMingming Cao 3660b5645534SZheng Liu trace_ext4_ext_handle_uninitialized_extents(inode, map, flags, 3661b5645534SZheng Liu allocated, newblock); 3662d8990240SAditya Kali 3663c7064ef1SJiaying Zhang /* get_block() before submit the IO, split the extent */ 3664744692dcSJiaying Zhang if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3665e35fd660STheodore Ts'o ret = ext4_split_unwritten_extents(handle, inode, map, 3666e35fd660STheodore Ts'o path, flags); 366782e54229SDmitry Monakhov if (ret <= 0) 366882e54229SDmitry Monakhov goto out; 36695f524950SMingming /* 36705f524950SMingming * Flag the inode(non aio case) or end_io struct (aio case) 367125985edcSLucas De Marchi * that this IO needs to conversion to written when IO is 36725f524950SMingming * completed 36735f524950SMingming */ 36740edeb71dSTao Ma if (io) 36750edeb71dSTao Ma ext4_set_io_unwritten_flag(inode, io); 36760edeb71dSTao Ma else 367719f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3678a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 3679744692dcSJiaying Zhang if (ext4_should_dioread_nolock(inode)) 3680e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNINIT; 36810031462bSMingming Cao goto out; 36820031462bSMingming Cao } 3683c7064ef1SJiaying Zhang /* IO end_io complete, convert the filled extent to written */ 3684744692dcSJiaying Zhang if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 3685dee1f973SDmitry Monakhov ret = ext4_convert_unwritten_extents_endio(handle, inode, map, 36860031462bSMingming Cao path); 368758590b06STheodore Ts'o if (ret >= 0) { 3688b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 3689d002ebf1SEric Sandeen err = check_eofblocks_fl(handle, inode, map->m_lblk, 3690d002ebf1SEric Sandeen path, map->m_len); 369158590b06STheodore Ts'o } else 369258590b06STheodore Ts'o err = ret; 36930031462bSMingming Cao goto out2; 36940031462bSMingming Cao } 36950031462bSMingming Cao /* buffered IO case */ 36960031462bSMingming Cao /* 36970031462bSMingming Cao * repeat fallocate creation request 
36980031462bSMingming Cao * we already have an unwritten extent 36990031462bSMingming Cao */ 3700a25a4e1aSZheng Liu if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) { 3701a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 37020031462bSMingming Cao goto map_out; 3703a25a4e1aSZheng Liu } 37040031462bSMingming Cao 37050031462bSMingming Cao /* buffered READ or buffered write_begin() lookup */ 37060031462bSMingming Cao if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 37070031462bSMingming Cao /* 37080031462bSMingming Cao * We have blocks reserved already. We 37090031462bSMingming Cao * return allocated blocks so that delalloc 37100031462bSMingming Cao * won't do block reservation for us. But 37110031462bSMingming Cao * the buffer head will be unmapped so that 37120031462bSMingming Cao * a read from the block returns 0s. 37130031462bSMingming Cao */ 3714e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNWRITTEN; 37150031462bSMingming Cao goto out1; 37160031462bSMingming Cao } 37170031462bSMingming Cao 37180031462bSMingming Cao /* buffered write, writepage time, convert*/ 3719e35fd660STheodore Ts'o ret = ext4_ext_convert_to_initialized(handle, inode, map, path); 3720a4e5d88bSDmitry Monakhov if (ret >= 0) 3721b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 37220031462bSMingming Cao out: 37230031462bSMingming Cao if (ret <= 0) { 37240031462bSMingming Cao err = ret; 37250031462bSMingming Cao goto out2; 37260031462bSMingming Cao } else 37270031462bSMingming Cao allocated = ret; 3728e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 3729515f41c3SAneesh Kumar K.V /* 3730515f41c3SAneesh Kumar K.V * if we allocated more blocks than requested 3731515f41c3SAneesh Kumar K.V * we need to make sure we unmap the extra block 3732515f41c3SAneesh Kumar K.V * allocated. The actual needed block will get 3733515f41c3SAneesh Kumar K.V * unmapped later when we find the buffer_head marked 3734515f41c3SAneesh Kumar K.V * new. 3735515f41c3SAneesh Kumar K.V */ 3736e35fd660STheodore Ts'o if (allocated > map->m_len) { 3737515f41c3SAneesh Kumar K.V unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 3738e35fd660STheodore Ts'o newblock + map->m_len, 3739e35fd660STheodore Ts'o allocated - map->m_len); 3740e35fd660STheodore Ts'o allocated = map->m_len; 3741515f41c3SAneesh Kumar K.V } 37425f634d06SAneesh Kumar K.V 37435f634d06SAneesh Kumar K.V /* 37445f634d06SAneesh Kumar K.V * If we have done fallocate with the offset that is already 37455f634d06SAneesh Kumar K.V * delayed allocated, we would have block reservation 37465f634d06SAneesh Kumar K.V * and quota reservation done in the delayed write path. 37475f634d06SAneesh Kumar K.V * But fallocate would have already updated quota and block 37485f634d06SAneesh Kumar K.V * count for this offset. 
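 * (For instance, a delayed buffered write may have reserved a cluster for
 * this range while a later fallocate() over the same blocks has already
 * been charged for the space, leaving the earlier reservation redundant.
 * Illustrative note, not part of the original source.)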
So cancel these reservation 37495f634d06SAneesh Kumar K.V */ 37507b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 37517b415bf6SAditya Kali unsigned int reserved_clusters; 37527b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode, 37537b415bf6SAditya Kali map->m_lblk, map->m_len); 37547b415bf6SAditya Kali if (reserved_clusters) 37557b415bf6SAditya Kali ext4_da_update_reserve_space(inode, 37567b415bf6SAditya Kali reserved_clusters, 37577b415bf6SAditya Kali 0); 37587b415bf6SAditya Kali } 37595f634d06SAneesh Kumar K.V 37600031462bSMingming Cao map_out: 3761e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 3762a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { 3763a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, path, 3764a4e5d88bSDmitry Monakhov map->m_len); 3765a4e5d88bSDmitry Monakhov if (err < 0) 3766a4e5d88bSDmitry Monakhov goto out2; 3767a4e5d88bSDmitry Monakhov } 37680031462bSMingming Cao out1: 3769e35fd660STheodore Ts'o if (allocated > map->m_len) 3770e35fd660STheodore Ts'o allocated = map->m_len; 37710031462bSMingming Cao ext4_ext_show_leaf(inode, path); 3772e35fd660STheodore Ts'o map->m_pblk = newblock; 3773e35fd660STheodore Ts'o map->m_len = allocated; 37740031462bSMingming Cao out2: 37750031462bSMingming Cao if (path) { 37760031462bSMingming Cao ext4_ext_drop_refs(path); 37770031462bSMingming Cao kfree(path); 37780031462bSMingming Cao } 37790031462bSMingming Cao return err ? err : allocated; 37800031462bSMingming Cao } 378158590b06STheodore Ts'o 37820031462bSMingming Cao /* 37834d33b1efSTheodore Ts'o * get_implied_cluster_alloc - check to see if the requested 37844d33b1efSTheodore Ts'o * allocation (in the map structure) overlaps with a cluster already 37854d33b1efSTheodore Ts'o * allocated in an extent. 3786d8990240SAditya Kali * @sb The filesystem superblock structure 37874d33b1efSTheodore Ts'o * @map The requested lblk->pblk mapping 37884d33b1efSTheodore Ts'o * @ex The extent structure which might contain an implied 37894d33b1efSTheodore Ts'o * cluster allocation 37904d33b1efSTheodore Ts'o * 37914d33b1efSTheodore Ts'o * This function is called by ext4_ext_map_blocks() after we failed to 37924d33b1efSTheodore Ts'o * find blocks that were already in the inode's extent tree. Hence, 37934d33b1efSTheodore Ts'o * we know that the beginning of the requested region cannot overlap 37944d33b1efSTheodore Ts'o * the extent from the inode's extent tree. There are three cases we 37954d33b1efSTheodore Ts'o * want to catch. 
The first is this case: 37964d33b1efSTheodore Ts'o * 37974d33b1efSTheodore Ts'o * |--- cluster # N--| 37984d33b1efSTheodore Ts'o * |--- extent ---| |---- requested region ---| 37994d33b1efSTheodore Ts'o * |==========| 38004d33b1efSTheodore Ts'o * 38014d33b1efSTheodore Ts'o * The second case that we need to test for is this one: 38024d33b1efSTheodore Ts'o * 38034d33b1efSTheodore Ts'o * |--------- cluster # N ----------------| 38044d33b1efSTheodore Ts'o * |--- requested region --| |------- extent ----| 38054d33b1efSTheodore Ts'o * |=======================| 38064d33b1efSTheodore Ts'o * 38074d33b1efSTheodore Ts'o * The third case is when the requested region lies between two extents 38084d33b1efSTheodore Ts'o * within the same cluster: 38094d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 38104d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 38114d33b1efSTheodore Ts'o * |------ requested region ------| 38124d33b1efSTheodore Ts'o * |================| 38134d33b1efSTheodore Ts'o * 38144d33b1efSTheodore Ts'o * In each of the above cases, we need to set the map->m_pblk and 38154d33b1efSTheodore Ts'o * map->m_len so it corresponds to the return the extent labelled as 38164d33b1efSTheodore Ts'o * "|====|" from cluster #N, since it is already in use for data in 38174d33b1efSTheodore Ts'o * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 38184d33b1efSTheodore Ts'o * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 38194d33b1efSTheodore Ts'o * as a new "allocated" block region. Otherwise, we will return 0 and 38204d33b1efSTheodore Ts'o * ext4_ext_map_blocks() will then allocate one or more new clusters 38214d33b1efSTheodore Ts'o * by calling ext4_mb_new_blocks(). 38224d33b1efSTheodore Ts'o */ 3823d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb, 38244d33b1efSTheodore Ts'o struct ext4_map_blocks *map, 38254d33b1efSTheodore Ts'o struct ext4_extent *ex, 38264d33b1efSTheodore Ts'o struct ext4_ext_path *path) 38274d33b1efSTheodore Ts'o { 3828d8990240SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(sb); 38294d33b1efSTheodore Ts'o ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 38304d33b1efSTheodore Ts'o ext4_lblk_t ex_cluster_start, ex_cluster_end; 383114d7f3efSCurt Wohlgemuth ext4_lblk_t rr_cluster_start; 38324d33b1efSTheodore Ts'o ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 38334d33b1efSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 38344d33b1efSTheodore Ts'o unsigned short ee_len = ext4_ext_get_actual_len(ex); 38354d33b1efSTheodore Ts'o 38364d33b1efSTheodore Ts'o /* The extent passed in that we are trying to match */ 38374d33b1efSTheodore Ts'o ex_cluster_start = EXT4_B2C(sbi, ee_block); 38384d33b1efSTheodore Ts'o ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 38394d33b1efSTheodore Ts'o 38404d33b1efSTheodore Ts'o /* The requested region passed into ext4_map_blocks() */ 38414d33b1efSTheodore Ts'o rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 38424d33b1efSTheodore Ts'o 38434d33b1efSTheodore Ts'o if ((rr_cluster_start == ex_cluster_end) || 38444d33b1efSTheodore Ts'o (rr_cluster_start == ex_cluster_start)) { 38454d33b1efSTheodore Ts'o if (rr_cluster_start == ex_cluster_end) 38464d33b1efSTheodore Ts'o ee_start += ee_len - 1; 38474d33b1efSTheodore Ts'o map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + 38484d33b1efSTheodore Ts'o c_offset; 38494d33b1efSTheodore Ts'o map->m_len = min(map->m_len, 38504d33b1efSTheodore Ts'o (unsigned) sbi->s_cluster_ratio - c_offset); 
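		/*
		 * Worked example (illustrative, not part of the original
		 * source): with s_cluster_ratio = 4, an extent ee_block = 8,
		 * ee_len = 2 mapped at physical block 640, and a request for
		 * m_lblk = 10: c_offset = 2 and rr_cluster_start equals
		 * ex_cluster_end (both are cluster 2), so ee_start is bumped
		 * to 641, m_pblk becomes (641 & ~3) + 2 = 642, and m_len is
		 * capped at 4 - 2 = 2 blocks, i.e. the free tail of the
		 * physical cluster that the extent already occupies.
		 */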
38514d33b1efSTheodore Ts'o /* 38524d33b1efSTheodore Ts'o * Check for and handle this case: 38534d33b1efSTheodore Ts'o * 38544d33b1efSTheodore Ts'o * |--------- cluster # N-------------| 38554d33b1efSTheodore Ts'o * |------- extent ----| 38564d33b1efSTheodore Ts'o * |--- requested region ---| 38574d33b1efSTheodore Ts'o * |===========| 38584d33b1efSTheodore Ts'o */ 38594d33b1efSTheodore Ts'o 38604d33b1efSTheodore Ts'o if (map->m_lblk < ee_block) 38614d33b1efSTheodore Ts'o map->m_len = min(map->m_len, ee_block - map->m_lblk); 38624d33b1efSTheodore Ts'o 38634d33b1efSTheodore Ts'o /* 38644d33b1efSTheodore Ts'o * Check for the case where there is already another allocated 38654d33b1efSTheodore Ts'o * block to the right of 'ex' but before the end of the cluster. 38664d33b1efSTheodore Ts'o * 38674d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 38684d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 38694d33b1efSTheodore Ts'o * |------ requested region ------| 38704d33b1efSTheodore Ts'o * |================| 38714d33b1efSTheodore Ts'o */ 38724d33b1efSTheodore Ts'o if (map->m_lblk > ee_block) { 38734d33b1efSTheodore Ts'o ext4_lblk_t next = ext4_ext_next_allocated_block(path); 38744d33b1efSTheodore Ts'o map->m_len = min(map->m_len, next - map->m_lblk); 38754d33b1efSTheodore Ts'o } 3876d8990240SAditya Kali 3877d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 38784d33b1efSTheodore Ts'o return 1; 38794d33b1efSTheodore Ts'o } 3880d8990240SAditya Kali 3881d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 38824d33b1efSTheodore Ts'o return 0; 38834d33b1efSTheodore Ts'o } 38844d33b1efSTheodore Ts'o 38854d33b1efSTheodore Ts'o 38864d33b1efSTheodore Ts'o /* 3887f5ab0d1fSMingming Cao * Block allocation/map/preallocation routine for extents based files 3888f5ab0d1fSMingming Cao * 3889f5ab0d1fSMingming Cao * 3890c278bfecSAneesh Kumar K.V * Need to be called with 38910e855ac8SAneesh Kumar K.V * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 38920e855ac8SAneesh Kumar K.V * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 3893f5ab0d1fSMingming Cao * 3894f5ab0d1fSMingming Cao * return > 0, number of of blocks already mapped/allocated 3895f5ab0d1fSMingming Cao * if create == 0 and these are pre-allocated blocks 3896f5ab0d1fSMingming Cao * buffer head is unmapped 3897f5ab0d1fSMingming Cao * otherwise blocks are mapped 3898f5ab0d1fSMingming Cao * 3899f5ab0d1fSMingming Cao * return = 0, if plain look up failed (blocks have not been allocated) 3900f5ab0d1fSMingming Cao * buffer head is unmapped 3901f5ab0d1fSMingming Cao * 3902f5ab0d1fSMingming Cao * return < 0, error case. 
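 *
 * A minimal, hypothetical lookup-only call (sketch, not part of the
 * original source) would look like:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	ret = ext4_ext_map_blocks(handle, inode, &map, 0);
 *
 * where ret > 0 means map.m_pblk/map.m_len describe an existing mapping
 * and ret == 0 means the block has not been allocated.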
3903c278bfecSAneesh Kumar K.V */ 3904e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 3905e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 3906a86c6181SAlex Tomas { 3907a86c6181SAlex Tomas struct ext4_ext_path *path = NULL; 39084d33b1efSTheodore Ts'o struct ext4_extent newex, *ex, *ex2; 39094d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 39100562e0baSJiaying Zhang ext4_fsblk_t newblock = 0; 391137794732SZheng Liu int free_on_err = 0, err = 0, depth; 39124d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0; 391381fdbb4aSYongqiang Yang unsigned int allocated_clusters = 0; 3914c9de560dSAlex Tomas struct ext4_allocation_request ar; 3915f45ee3a1SDmitry Monakhov ext4_io_end_t *io = ext4_inode_aio(inode); 39164d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset; 391782e54229SDmitry Monakhov int set_unwritten = 0; 3918a86c6181SAlex Tomas 391984fe3befSMingming ext_debug("blocks %u/%u requested for inode %lu\n", 3920e35fd660STheodore Ts'o map->m_lblk, map->m_len, inode->i_ino); 39210562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 3922a86c6181SAlex Tomas 3923a86c6181SAlex Tomas /* check in cache */ 39247877191cSLukas Czerner if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { 3925b05e6ae5STheodore Ts'o if (!newex.ee_start_lo && !newex.ee_start_hi) { 39267b415bf6SAditya Kali if ((sbi->s_cluster_ratio > 1) && 39277d1b1fbcSZheng Liu ext4_find_delalloc_cluster(inode, map->m_lblk)) 39287b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 39297b415bf6SAditya Kali 3930c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 393156055d3aSAmit Arora /* 393256055d3aSAmit Arora * block isn't allocated yet and 393356055d3aSAmit Arora * user doesn't want to allocate it 393456055d3aSAmit Arora */ 3935a86c6181SAlex Tomas goto out2; 3936a86c6181SAlex Tomas } 3937a86c6181SAlex Tomas /* we should allocate requested block */ 3938b05e6ae5STheodore Ts'o } else { 3939a86c6181SAlex Tomas /* block is already allocated */ 39407b415bf6SAditya Kali if (sbi->s_cluster_ratio > 1) 39417b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 3942e35fd660STheodore Ts'o newblock = map->m_lblk 3943a86c6181SAlex Tomas - le32_to_cpu(newex.ee_block) 3944bf89d16fSTheodore Ts'o + ext4_ext_pblock(&newex); 3945d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 3946b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex) - 3947e35fd660STheodore Ts'o (map->m_lblk - le32_to_cpu(newex.ee_block)); 3948a86c6181SAlex Tomas goto out; 3949a86c6181SAlex Tomas } 3950a86c6181SAlex Tomas } 3951a86c6181SAlex Tomas 3952a86c6181SAlex Tomas /* find extent for this block */ 3953e35fd660STheodore Ts'o path = ext4_ext_find_extent(inode, map->m_lblk, NULL); 3954a86c6181SAlex Tomas if (IS_ERR(path)) { 3955a86c6181SAlex Tomas err = PTR_ERR(path); 3956a86c6181SAlex Tomas path = NULL; 3957a86c6181SAlex Tomas goto out2; 3958a86c6181SAlex Tomas } 3959a86c6181SAlex Tomas 3960a86c6181SAlex Tomas depth = ext_depth(inode); 3961a86c6181SAlex Tomas 3962a86c6181SAlex Tomas /* 3963d0d856e8SRandy Dunlap * consistent leaf must not be empty; 3964d0d856e8SRandy Dunlap * this situation is possible, though, _during_ tree modification; 3965a86c6181SAlex Tomas * this is why assert can't be put in ext4_ext_find_extent() 3966a86c6181SAlex Tomas */ 3967273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 3968273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address " 
3969f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld", 3970f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth, 3971f70f362bSTheodore Ts'o path[depth].p_block); 3972034fb4c9SSurbhi Palande err = -EIO; 3973034fb4c9SSurbhi Palande goto out2; 3974034fb4c9SSurbhi Palande } 3975a86c6181SAlex Tomas 39767e028976SAvantika Mathur ex = path[depth].p_ext; 39777e028976SAvantika Mathur if (ex) { 3978725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 3979bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 3980a2df2a63SAmit Arora unsigned short ee_len; 3981471d4011SSuparna Bhattacharya 3982471d4011SSuparna Bhattacharya /* 3983471d4011SSuparna Bhattacharya * Uninitialized extents are treated as holes, except that 398456055d3aSAmit Arora * we split out initialized portions during a write. 3985471d4011SSuparna Bhattacharya */ 3986a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex); 3987d8990240SAditya Kali 3988d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 3989d8990240SAditya Kali 3990d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */ 3991e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) { 3992e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start; 3993d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 3994e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 3995e35fd660STheodore Ts'o ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 3996a86c6181SAlex Tomas ee_block, ee_len, newblock); 399756055d3aSAmit Arora 3998e861304bSAllison Henderson /* 3999e861304bSAllison Henderson * Do not put uninitialized extent 4000e861304bSAllison Henderson * in the cache 4001e861304bSAllison Henderson */ 400256055d3aSAmit Arora if (!ext4_ext_is_uninitialized(ex)) { 4003a2df2a63SAmit Arora ext4_ext_put_in_cache(inode, ee_block, 4004b05e6ae5STheodore Ts'o ee_len, ee_start); 4005a86c6181SAlex Tomas goto out; 4006a86c6181SAlex Tomas } 400737794732SZheng Liu allocated = ext4_ext_handle_uninitialized_extents( 4008e861304bSAllison Henderson handle, inode, map, path, flags, 4009e861304bSAllison Henderson allocated, newblock); 401037794732SZheng Liu goto out3; 401156055d3aSAmit Arora } 4012a86c6181SAlex Tomas } 4013a86c6181SAlex Tomas 40147b415bf6SAditya Kali if ((sbi->s_cluster_ratio > 1) && 40157d1b1fbcSZheng Liu ext4_find_delalloc_cluster(inode, map->m_lblk)) 40167b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 40177b415bf6SAditya Kali 4018a86c6181SAlex Tomas /* 4019d0d856e8SRandy Dunlap * requested block isn't allocated yet; 4020a86c6181SAlex Tomas * we couldn't try to create block if create flag is zero 4021a86c6181SAlex Tomas */ 4022c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 402356055d3aSAmit Arora /* 402456055d3aSAmit Arora * put just found gap into cache to speed up 402556055d3aSAmit Arora * subsequent requests 402656055d3aSAmit Arora */ 4027*d100eef2SZheng Liu if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0) 4028e35fd660STheodore Ts'o ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); 4029a86c6181SAlex Tomas goto out2; 4030a86c6181SAlex Tomas } 40314d33b1efSTheodore Ts'o 4032a86c6181SAlex Tomas /* 4033c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation. 
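 * (The steps that follow: check whether an already-allocated cluster can
 * satisfy the request, look up the allocated neighbours as hints, pick a
 * goal block, cap the request at the maximum extent length, align it to
 * cluster boundaries, call ext4_mb_new_blocks(), and insert the resulting
 * extent, marked uninitialized when the caller asked for an unwritten
 * allocation.  Summary added for clarity; not part of the original source.)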
4034a86c6181SAlex Tomas */ 40357b415bf6SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 40364d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk); 40374d33b1efSTheodore Ts'o cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 40384d33b1efSTheodore Ts'o 40394d33b1efSTheodore Ts'o /* 40404d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned 40414d33b1efSTheodore Ts'o * by ext4_ext_find_extent() implies a cluster we can use. 40424d33b1efSTheodore Ts'o */ 40434d33b1efSTheodore Ts'o if (cluster_offset && ex && 4044d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 40454d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 40464d33b1efSTheodore Ts'o newblock = map->m_pblk; 40477b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 40484d33b1efSTheodore Ts'o goto got_allocated_blocks; 40494d33b1efSTheodore Ts'o } 4050a86c6181SAlex Tomas 4051c9de560dSAlex Tomas /* find neighbour allocated blocks */ 4052e35fd660STheodore Ts'o ar.lleft = map->m_lblk; 4053c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4054c9de560dSAlex Tomas if (err) 4055c9de560dSAlex Tomas goto out2; 4056e35fd660STheodore Ts'o ar.lright = map->m_lblk; 40574d33b1efSTheodore Ts'o ex2 = NULL; 40584d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4059c9de560dSAlex Tomas if (err) 4060c9de560dSAlex Tomas goto out2; 406125d14f98SAmit Arora 40624d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a 40634d33b1efSTheodore Ts'o * cluster we can use. */ 40644d33b1efSTheodore Ts'o if ((sbi->s_cluster_ratio > 1) && ex2 && 4065d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 40664d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 40674d33b1efSTheodore Ts'o newblock = map->m_pblk; 40687b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 40694d33b1efSTheodore Ts'o goto got_allocated_blocks; 40704d33b1efSTheodore Ts'o } 40714d33b1efSTheodore Ts'o 4072749269faSAmit Arora /* 4073749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in 4074749269faSAmit Arora * a single extent. For an initialized extent this limit is 4075749269faSAmit Arora * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 4076749269faSAmit Arora * EXT_UNINIT_MAX_LEN. 
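 * (For reference: EXT_INIT_MAX_LEN is 1 << 15 = 32768 blocks, since the
 * top bit of ee_len marks an extent as uninitialized, and
 * EXT_UNINIT_MAX_LEN is one block less; with 4 KiB blocks that is 128 MiB
 * per extent.  Note added for clarity, not part of the original source.)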
4077749269faSAmit Arora */ 4078e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN && 4079c2177057STheodore Ts'o !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4080e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN; 4081e35fd660STheodore Ts'o else if (map->m_len > EXT_UNINIT_MAX_LEN && 4082c2177057STheodore Ts'o (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4083e35fd660STheodore Ts'o map->m_len = EXT_UNINIT_MAX_LEN; 4084749269faSAmit Arora 4085e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4086e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len); 40874d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path); 408825d14f98SAmit Arora if (err) 4089b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 409025d14f98SAmit Arora else 4091e35fd660STheodore Ts'o allocated = map->m_len; 4092c9de560dSAlex Tomas 4093c9de560dSAlex Tomas /* allocate new block */ 4094c9de560dSAlex Tomas ar.inode = inode; 4095e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4096e35fd660STheodore Ts'o ar.logical = map->m_lblk; 40974d33b1efSTheodore Ts'o /* 40984d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster 40994d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a 41004d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the 41014d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is 41024d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc() 41034d33b1efSTheodore Ts'o * work correctly. 41044d33b1efSTheodore Ts'o */ 41054d33b1efSTheodore Ts'o offset = map->m_lblk & (sbi->s_cluster_ratio - 1); 41064d33b1efSTheodore Ts'o ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 41074d33b1efSTheodore Ts'o ar.goal -= offset; 41084d33b1efSTheodore Ts'o ar.logical -= offset; 4109c9de560dSAlex Tomas if (S_ISREG(inode->i_mode)) 4110c9de560dSAlex Tomas ar.flags = EXT4_MB_HINT_DATA; 4111c9de560dSAlex Tomas else 4112c9de560dSAlex Tomas /* disable in-core preallocation for non-regular files */ 4113c9de560dSAlex Tomas ar.flags = 0; 4114556b27abSVivek Haldar if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4115556b27abSVivek Haldar ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4116c9de560dSAlex Tomas newblock = ext4_mb_new_blocks(handle, &ar, &err); 4117a86c6181SAlex Tomas if (!newblock) 4118a86c6181SAlex Tomas goto out2; 411984fe3befSMingming ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4120498e5f24STheodore Ts'o ar.goal, newblock, allocated); 41214d33b1efSTheodore Ts'o free_on_err = 1; 41227b415bf6SAditya Kali allocated_clusters = ar.len; 41234d33b1efSTheodore Ts'o ar.len = EXT4_C2B(sbi, ar.len) - offset; 41244d33b1efSTheodore Ts'o if (ar.len > allocated) 41254d33b1efSTheodore Ts'o ar.len = allocated; 4126a86c6181SAlex Tomas 41274d33b1efSTheodore Ts'o got_allocated_blocks: 4128a86c6181SAlex Tomas /* try to insert new extent into found leaf and return */ 41294d33b1efSTheodore Ts'o ext4_ext_store_pblock(&newex, newblock + offset); 4130c9de560dSAlex Tomas newex.ee_len = cpu_to_le16(ar.len); 41318d5d02e6SMingming Cao /* Mark uninitialized */ 41328d5d02e6SMingming Cao if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ 4133a2df2a63SAmit Arora ext4_ext_mark_uninitialized(&newex); 4134a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 41358d5d02e6SMingming Cao /* 4136744692dcSJiaying Zhang * io_end structure was created for every IO write to an 413725985edcSLucas De Marchi * uninitialized extent. 
To avoid unnecessary conversion, 4138744692dcSJiaying Zhang * here we flag the IO that really needs the conversion. 41395f524950SMingming * For non asycn direct IO case, flag the inode state 414025985edcSLucas De Marchi * that we need to perform conversion when IO is done. 41418d5d02e6SMingming Cao */ 414282e54229SDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_PRE_IO)) 414382e54229SDmitry Monakhov set_unwritten = 1; 4144744692dcSJiaying Zhang if (ext4_should_dioread_nolock(inode)) 4145e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNINIT; 41468d5d02e6SMingming Cao } 4147c8d46e41SJiaying Zhang 4148a4e5d88bSDmitry Monakhov err = 0; 4149a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) 4150a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, 4151a4e5d88bSDmitry Monakhov path, ar.len); 4152575a1d4bSJiaying Zhang if (!err) 4153575a1d4bSJiaying Zhang err = ext4_ext_insert_extent(handle, inode, path, 4154575a1d4bSJiaying Zhang &newex, flags); 415582e54229SDmitry Monakhov 415682e54229SDmitry Monakhov if (!err && set_unwritten) { 415782e54229SDmitry Monakhov if (io) 415882e54229SDmitry Monakhov ext4_set_io_unwritten_flag(inode, io); 415982e54229SDmitry Monakhov else 416082e54229SDmitry Monakhov ext4_set_inode_state(inode, 416182e54229SDmitry Monakhov EXT4_STATE_DIO_UNWRITTEN); 416282e54229SDmitry Monakhov } 416382e54229SDmitry Monakhov 41644d33b1efSTheodore Ts'o if (err && free_on_err) { 41657132de74SMaxim Patlasov int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? 41667132de74SMaxim Patlasov EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; 4167315054f0SAlex Tomas /* free data blocks we just allocated */ 4168c9de560dSAlex Tomas /* not a good idea to call discard here directly, 4169c9de560dSAlex Tomas * but otherwise we'd need to call it every free() */ 4170c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 41717dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex), 41727132de74SMaxim Patlasov ext4_ext_get_actual_len(&newex), fb_flags); 4173a86c6181SAlex Tomas goto out2; 4174315054f0SAlex Tomas } 4175a86c6181SAlex Tomas 4176a86c6181SAlex Tomas /* previous routine could use block we allocated */ 4177bf89d16fSTheodore Ts'o newblock = ext4_ext_pblock(&newex); 4178b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 4179e35fd660STheodore Ts'o if (allocated > map->m_len) 4180e35fd660STheodore Ts'o allocated = map->m_len; 4181e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 4182a86c6181SAlex Tomas 4183b436b9beSJan Kara /* 41845f634d06SAneesh Kumar K.V * Update reserved blocks/metadata blocks after successful 41855f634d06SAneesh Kumar K.V * block allocation which had been deferred till now. 41865f634d06SAneesh Kumar K.V */ 41877b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 418881fdbb4aSYongqiang Yang unsigned int reserved_clusters; 41897b415bf6SAditya Kali /* 419081fdbb4aSYongqiang Yang * Check how many clusters we had reserved this allocated range 41917b415bf6SAditya Kali */ 41927b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode, 41937b415bf6SAditya Kali map->m_lblk, allocated); 41947b415bf6SAditya Kali if (map->m_flags & EXT4_MAP_FROM_CLUSTER) { 41957b415bf6SAditya Kali if (reserved_clusters) { 41967b415bf6SAditya Kali /* 41977b415bf6SAditya Kali * We have clusters reserved for this range. 
41987b415bf6SAditya Kali * But since we are not doing actual allocation 41997b415bf6SAditya Kali * and are simply using blocks from previously 42007b415bf6SAditya Kali * allocated cluster, we should release the 42017b415bf6SAditya Kali * reservation and not claim quota. 42027b415bf6SAditya Kali */ 42037b415bf6SAditya Kali ext4_da_update_reserve_space(inode, 42047b415bf6SAditya Kali reserved_clusters, 0); 42057b415bf6SAditya Kali } 42067b415bf6SAditya Kali } else { 42077b415bf6SAditya Kali BUG_ON(allocated_clusters < reserved_clusters); 42087b415bf6SAditya Kali /* We will claim quota for all newly allocated blocks.*/ 42097b415bf6SAditya Kali ext4_da_update_reserve_space(inode, allocated_clusters, 42107b415bf6SAditya Kali 1); 42117b415bf6SAditya Kali if (reserved_clusters < allocated_clusters) { 42125356f261SAditya Kali struct ext4_inode_info *ei = EXT4_I(inode); 42137b415bf6SAditya Kali int reservation = allocated_clusters - 42147b415bf6SAditya Kali reserved_clusters; 42157b415bf6SAditya Kali /* 42167b415bf6SAditya Kali * It seems we claimed few clusters outside of 42177b415bf6SAditya Kali * the range of this allocation. We should give 42187b415bf6SAditya Kali * it back to the reservation pool. This can 42197b415bf6SAditya Kali * happen in the following case: 42207b415bf6SAditya Kali * 42217b415bf6SAditya Kali * * Suppose s_cluster_ratio is 4 (i.e., each 42227b415bf6SAditya Kali * cluster has 4 blocks. Thus, the clusters 42237b415bf6SAditya Kali * are [0-3],[4-7],[8-11]... 42247b415bf6SAditya Kali * * First comes delayed allocation write for 42257b415bf6SAditya Kali * logical blocks 10 & 11. Since there were no 42267b415bf6SAditya Kali * previous delayed allocated blocks in the 42277b415bf6SAditya Kali * range [8-11], we would reserve 1 cluster 42287b415bf6SAditya Kali * for this write. 42297b415bf6SAditya Kali * * Next comes write for logical blocks 3 to 8. 42307b415bf6SAditya Kali * In this case, we will reserve 2 clusters 42317b415bf6SAditya Kali * (for [0-3] and [4-7]; and not for [8-11] as 42327b415bf6SAditya Kali * that range has a delayed allocated blocks. 42337b415bf6SAditya Kali * Thus total reserved clusters now becomes 3. 42347b415bf6SAditya Kali * * Now, during the delayed allocation writeout 42357b415bf6SAditya Kali * time, we will first write blocks [3-8] and 42367b415bf6SAditya Kali * allocate 3 clusters for writing these 42377b415bf6SAditya Kali * blocks. Also, we would claim all these 42387b415bf6SAditya Kali * three clusters above. 42397b415bf6SAditya Kali * * Now when we come here to writeout the 42407b415bf6SAditya Kali * blocks [10-11], we would expect to claim 42417b415bf6SAditya Kali * the reservation of 1 cluster we had made 42427b415bf6SAditya Kali * (and we would claim it since there are no 42437b415bf6SAditya Kali * more delayed allocated blocks in the range 42447b415bf6SAditya Kali * [8-11]. But our reserved cluster count had 42457b415bf6SAditya Kali * already gone to 0. 42467b415bf6SAditya Kali * 42477b415bf6SAditya Kali * Thus, at the step 4 above when we determine 42487b415bf6SAditya Kali * that there are still some unwritten delayed 42497b415bf6SAditya Kali * allocated blocks outside of our current 42507b415bf6SAditya Kali * block range, we should increment the 42517b415bf6SAditya Kali * reserved clusters count so that when the 42527b415bf6SAditya Kali * remaining blocks finally gets written, we 42537b415bf6SAditya Kali * could claim them. 
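 * In concrete numbers for that example (recap added for clarity, not in
 * the original source): after both delayed writes the reserved cluster
 * count is 3; the writeout of blocks [3-8] allocates and claims 3
 * clusters, but the third claim consumed the reservation that still
 * belongs to the pending blocks [10-11], so this code re-reserves one
 * cluster (allocated_clusters - reserved_clusters = 3 - 2) for them to
 * claim later.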
42547b415bf6SAditya Kali */ 42555356f261SAditya Kali dquot_reserve_block(inode, 42565356f261SAditya Kali EXT4_C2B(sbi, reservation)); 42575356f261SAditya Kali spin_lock(&ei->i_block_reservation_lock); 42585356f261SAditya Kali ei->i_reserved_data_blocks += reservation; 42595356f261SAditya Kali spin_unlock(&ei->i_block_reservation_lock); 42607b415bf6SAditya Kali } 42617b415bf6SAditya Kali } 42627b415bf6SAditya Kali } 42635f634d06SAneesh Kumar K.V 42645f634d06SAneesh Kumar K.V /* 4265b436b9beSJan Kara * Cache the extent and update transaction to commit on fdatasync only 4266b436b9beSJan Kara * when it is _not_ an uninitialized extent. 4267b436b9beSJan Kara */ 4268b436b9beSJan Kara if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { 4269b05e6ae5STheodore Ts'o ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock); 4270b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 4271b436b9beSJan Kara } else 4272b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0); 4273a86c6181SAlex Tomas out: 4274e35fd660STheodore Ts'o if (allocated > map->m_len) 4275e35fd660STheodore Ts'o allocated = map->m_len; 4276a86c6181SAlex Tomas ext4_ext_show_leaf(inode, path); 4277e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 4278e35fd660STheodore Ts'o map->m_pblk = newblock; 4279e35fd660STheodore Ts'o map->m_len = allocated; 4280a86c6181SAlex Tomas out2: 4281a86c6181SAlex Tomas if (path) { 4282a86c6181SAlex Tomas ext4_ext_drop_refs(path); 4283a86c6181SAlex Tomas kfree(path); 4284a86c6181SAlex Tomas } 4285e861304bSAllison Henderson 428637794732SZheng Liu out3: 428719b303d8SZheng Liu trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated); 4288e7b319e3SYongqiang Yang 42897877191cSLukas Czerner return err ? err : allocated; 4290a86c6181SAlex Tomas } 4291a86c6181SAlex Tomas 4292cf108bcaSJan Kara void ext4_ext_truncate(struct inode *inode) 4293a86c6181SAlex Tomas { 4294a86c6181SAlex Tomas struct address_space *mapping = inode->i_mapping; 4295a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 4296725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 4297a86c6181SAlex Tomas handle_t *handle; 4298189e868fSAllison Henderson loff_t page_len; 4299a86c6181SAlex Tomas int err = 0; 4300a86c6181SAlex Tomas 4301a86c6181SAlex Tomas /* 43023889fd57SJiaying Zhang * finish any pending end_io work so we won't run the risk of 43033889fd57SJiaying Zhang * converting any truncated blocks to initialized later 43043889fd57SJiaying Zhang */ 4305c278531dSDmitry Monakhov ext4_flush_unwritten_io(inode); 43063889fd57SJiaying Zhang 43073889fd57SJiaying Zhang /* 4308a86c6181SAlex Tomas * probably first extent we're gonna free will be last in block 4309a86c6181SAlex Tomas */ 4310f3bd1f3fSMingming Cao err = ext4_writepage_trans_blocks(inode); 43119924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, err); 4312cf108bcaSJan Kara if (IS_ERR(handle)) 4313a86c6181SAlex Tomas return; 4314a86c6181SAlex Tomas 4315189e868fSAllison Henderson if (inode->i_size % PAGE_CACHE_SIZE != 0) { 4316189e868fSAllison Henderson page_len = PAGE_CACHE_SIZE - 4317189e868fSAllison Henderson (inode->i_size & (PAGE_CACHE_SIZE - 1)); 4318189e868fSAllison Henderson 4319189e868fSAllison Henderson err = ext4_discard_partial_page_buffers(handle, 4320189e868fSAllison Henderson mapping, inode->i_size, page_len, 0); 4321189e868fSAllison Henderson 4322189e868fSAllison Henderson if (err) 4323189e868fSAllison Henderson goto out_stop; 4324189e868fSAllison Henderson } 4325a86c6181SAlex Tomas 43269ddfc3dcSJan Kara if 
(ext4_orphan_add(handle, inode)) 43279ddfc3dcSJan Kara goto out_stop; 43289ddfc3dcSJan Kara 43290e855ac8SAneesh Kumar K.V down_write(&EXT4_I(inode)->i_data_sem); 4330a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 4331a86c6181SAlex Tomas 4332c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 4333c9de560dSAlex Tomas 4334a86c6181SAlex Tomas /* 4335d0d856e8SRandy Dunlap * TODO: optimization is possible here. 4336d0d856e8SRandy Dunlap * Probably we need not scan at all, 4337d0d856e8SRandy Dunlap * because page truncation is enough. 4338a86c6181SAlex Tomas */ 4339a86c6181SAlex Tomas 4340a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */ 4341a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size; 4342a86c6181SAlex Tomas ext4_mark_inode_dirty(handle, inode); 4343a86c6181SAlex Tomas 4344a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1) 4345a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb); 434651865fdaSZheng Liu err = ext4_es_remove_extent(inode, last_block, 434751865fdaSZheng Liu EXT_MAX_BLOCKS - last_block); 43485f95d21fSLukas Czerner err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4349a86c6181SAlex Tomas 4350a86c6181SAlex Tomas /* In a multi-transaction truncate, we only make the final 435156055d3aSAmit Arora * transaction synchronous. 435256055d3aSAmit Arora */ 4353a86c6181SAlex Tomas if (IS_SYNC(inode)) 43540390131bSFrank Mayhar ext4_handle_sync(handle); 4355a86c6181SAlex Tomas 43569ddfc3dcSJan Kara up_write(&EXT4_I(inode)->i_data_sem); 4357f6d2f6b3SEric Gouriou 4358f6d2f6b3SEric Gouriou out_stop: 4359a86c6181SAlex Tomas /* 4360d0d856e8SRandy Dunlap * If this was a simple ftruncate() and the file will remain alive, 4361a86c6181SAlex Tomas * then we need to clear up the orphan record which we created above. 4362a86c6181SAlex Tomas * However, if this was a real unlink then we were called by 4363a86c6181SAlex Tomas * ext4_delete_inode(), and we allow that function to clean up the 4364a86c6181SAlex Tomas * orphan info for us. 4365a86c6181SAlex Tomas */ 4366a86c6181SAlex Tomas if (inode->i_nlink) 4367a86c6181SAlex Tomas ext4_orphan_del(handle, inode); 4368a86c6181SAlex Tomas 4369ef737728SSolofo Ramangalahy inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4370ef737728SSolofo Ramangalahy ext4_mark_inode_dirty(handle, inode); 4371a86c6181SAlex Tomas ext4_journal_stop(handle); 4372a86c6181SAlex Tomas } 4373a86c6181SAlex Tomas 4374fd28784aSAneesh Kumar K.V static void ext4_falloc_update_inode(struct inode *inode, 4375fd28784aSAneesh Kumar K.V int mode, loff_t new_size, int update_ctime) 4376fd28784aSAneesh Kumar K.V { 4377fd28784aSAneesh Kumar K.V struct timespec now; 4378fd28784aSAneesh Kumar K.V 4379fd28784aSAneesh Kumar K.V if (update_ctime) { 4380fd28784aSAneesh Kumar K.V now = current_fs_time(inode->i_sb); 4381fd28784aSAneesh Kumar K.V if (!timespec_equal(&inode->i_ctime, &now)) 4382fd28784aSAneesh Kumar K.V inode->i_ctime = now; 4383fd28784aSAneesh Kumar K.V } 4384fd28784aSAneesh Kumar K.V /* 4385fd28784aSAneesh Kumar K.V * Update only when preallocation was requested beyond 4386fd28784aSAneesh Kumar K.V * the file size. 
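	 * (That is: without FALLOC_FL_KEEP_SIZE, i_size and i_disksize are
	 * grown when needed to cover the preallocated range; with
	 * FALLOC_FL_KEEP_SIZE the sizes are left alone and only
	 * EXT4_INODE_EOFBLOCKS is set when blocks land past EOF.  Note
	 * added for clarity, not part of the original source.)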
4387fd28784aSAneesh Kumar K.V */ 4388cf17fea6SAneesh Kumar K.V if (!(mode & FALLOC_FL_KEEP_SIZE)) { 4389cf17fea6SAneesh Kumar K.V if (new_size > i_size_read(inode)) 4390fd28784aSAneesh Kumar K.V i_size_write(inode, new_size); 4391cf17fea6SAneesh Kumar K.V if (new_size > EXT4_I(inode)->i_disksize) 4392cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_size); 4393c8d46e41SJiaying Zhang } else { 4394c8d46e41SJiaying Zhang /* 4395c8d46e41SJiaying Zhang * Mark that we allocate beyond EOF so the subsequent truncate 4396c8d46e41SJiaying Zhang * can proceed even if the new size is the same as i_size. 4397c8d46e41SJiaying Zhang */ 4398c8d46e41SJiaying Zhang if (new_size > i_size_read(inode)) 439912e9b892SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4400fd28784aSAneesh Kumar K.V } 4401fd28784aSAneesh Kumar K.V 4402fd28784aSAneesh Kumar K.V } 4403fd28784aSAneesh Kumar K.V 4404a2df2a63SAmit Arora /* 44052fe17c10SChristoph Hellwig * preallocate space for a file. This implements ext4's fallocate file 4406a2df2a63SAmit Arora * operation, which gets called from sys_fallocate system call. 4407a2df2a63SAmit Arora * For block-mapped files, posix_fallocate should fall back to the method 4408a2df2a63SAmit Arora * of writing zeroes to the required new blocks (the same behavior which is 4409a2df2a63SAmit Arora * expected for file systems which do not support fallocate() system call). 4410a2df2a63SAmit Arora */ 44112fe17c10SChristoph Hellwig long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4412a2df2a63SAmit Arora { 44132fe17c10SChristoph Hellwig struct inode *inode = file->f_path.dentry->d_inode; 4414a2df2a63SAmit Arora handle_t *handle; 4415fd28784aSAneesh Kumar K.V loff_t new_size; 4416498e5f24STheodore Ts'o unsigned int max_blocks; 4417a2df2a63SAmit Arora int ret = 0; 4418a2df2a63SAmit Arora int ret2 = 0; 4419a2df2a63SAmit Arora int retries = 0; 4420a4e5d88bSDmitry Monakhov int flags; 44212ed88685STheodore Ts'o struct ext4_map_blocks map; 4422a2df2a63SAmit Arora unsigned int credits, blkbits = inode->i_blkbits; 4423a2df2a63SAmit Arora 4424a4bb6b64SAllison Henderson /* Return error if mode is not supported */ 4425a4bb6b64SAllison Henderson if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 4426a4bb6b64SAllison Henderson return -EOPNOTSUPP; 4427a4bb6b64SAllison Henderson 4428a4bb6b64SAllison Henderson if (mode & FALLOC_FL_PUNCH_HOLE) 4429a4bb6b64SAllison Henderson return ext4_punch_hole(file, offset, len); 4430a4bb6b64SAllison Henderson 44310c8d414fSTao Ma ret = ext4_convert_inline_data(inode); 44320c8d414fSTao Ma if (ret) 44330c8d414fSTao Ma return ret; 44340c8d414fSTao Ma 44358bad6fc8SZheng Liu /* 44368bad6fc8SZheng Liu * currently supporting (pre)allocate mode for extent-based 44378bad6fc8SZheng Liu * files _only_ 44388bad6fc8SZheng Liu */ 44398bad6fc8SZheng Liu if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 44408bad6fc8SZheng Liu return -EOPNOTSUPP; 44418bad6fc8SZheng Liu 44420562e0baSJiaying Zhang trace_ext4_fallocate_enter(inode, offset, len, mode); 44432ed88685STheodore Ts'o map.m_lblk = offset >> blkbits; 4444fd28784aSAneesh Kumar K.V /* 4445fd28784aSAneesh Kumar K.V * We can't just convert len to max_blocks because 4446fd28784aSAneesh Kumar K.V * If blocksize = 4096 offset = 3072 and len = 2048 4447fd28784aSAneesh Kumar K.V */ 4448a2df2a63SAmit Arora max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 44492ed88685STheodore Ts'o - map.m_lblk; 4450a2df2a63SAmit Arora /* 4451f3bd1f3fSMingming Cao * credits to insert 1 
extent into extent tree 4452a2df2a63SAmit Arora */ 4453f3bd1f3fSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks); 445455bd725aSAneesh Kumar K.V mutex_lock(&inode->i_mutex); 44556d19c42bSNikanth Karthikesan ret = inode_newsize_ok(inode, (len + offset)); 44566d19c42bSNikanth Karthikesan if (ret) { 44576d19c42bSNikanth Karthikesan mutex_unlock(&inode->i_mutex); 44580562e0baSJiaying Zhang trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 44596d19c42bSNikanth Karthikesan return ret; 44606d19c42bSNikanth Karthikesan } 44613c6fe770SGreg Harm flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT; 4462a4e5d88bSDmitry Monakhov if (mode & FALLOC_FL_KEEP_SIZE) 4463a4e5d88bSDmitry Monakhov flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 44643c6fe770SGreg Harm /* 44653c6fe770SGreg Harm * Don't normalize the request if it can fit in one extent so 44663c6fe770SGreg Harm * that it doesn't get unnecessarily split into multiple 44673c6fe770SGreg Harm * extents. 44683c6fe770SGreg Harm */ 44693c6fe770SGreg Harm if (len <= EXT_UNINIT_MAX_LEN << blkbits) 44703c6fe770SGreg Harm flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 447160d4616fSDmitry Monakhov 447260d4616fSDmitry Monakhov /* Prevent race condition between unwritten */ 447360d4616fSDmitry Monakhov ext4_flush_unwritten_io(inode); 4474a2df2a63SAmit Arora retry: 4475a2df2a63SAmit Arora while (ret >= 0 && ret < max_blocks) { 44762ed88685STheodore Ts'o map.m_lblk = map.m_lblk + ret; 44772ed88685STheodore Ts'o map.m_len = max_blocks = max_blocks - ret; 44789924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 44799924a92aSTheodore Ts'o credits); 4480a2df2a63SAmit Arora if (IS_ERR(handle)) { 4481a2df2a63SAmit Arora ret = PTR_ERR(handle); 4482a2df2a63SAmit Arora break; 4483a2df2a63SAmit Arora } 4484a4e5d88bSDmitry Monakhov ret = ext4_map_blocks(handle, inode, &map, flags); 4485221879c9SAneesh Kumar K.V if (ret <= 0) { 44862c98615dSAneesh Kumar K.V #ifdef EXT4FS_DEBUG 4487b06acd38SLukas Czerner ext4_warning(inode->i_sb, 4488b06acd38SLukas Czerner "inode #%lu: block %u: len %u: " 4489b06acd38SLukas Czerner "ext4_ext_map_blocks returned %d", 4490b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 4491b06acd38SLukas Czerner map.m_len, ret); 44922c98615dSAneesh Kumar K.V #endif 4493a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode); 4494a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4495a2df2a63SAmit Arora break; 4496a2df2a63SAmit Arora } 44972ed88685STheodore Ts'o if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len, 4498fd28784aSAneesh Kumar K.V blkbits) >> blkbits)) 4499fd28784aSAneesh Kumar K.V new_size = offset + len; 4500fd28784aSAneesh Kumar K.V else 450129ae07b7SUtako Kusaka new_size = ((loff_t) map.m_lblk + ret) << blkbits; 4502a2df2a63SAmit Arora 4503fd28784aSAneesh Kumar K.V ext4_falloc_update_inode(inode, mode, new_size, 45042ed88685STheodore Ts'o (map.m_flags & EXT4_MAP_NEW)); 4505a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode); 4506f4e95b33SZheng Liu if ((file->f_flags & O_SYNC) && ret >= max_blocks) 4507f4e95b33SZheng Liu ext4_handle_sync(handle); 4508a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4509a2df2a63SAmit Arora if (ret2) 4510a2df2a63SAmit Arora break; 4511a2df2a63SAmit Arora } 4512fd28784aSAneesh Kumar K.V if (ret == -ENOSPC && 4513fd28784aSAneesh Kumar K.V ext4_should_retry_alloc(inode->i_sb, &retries)) { 4514fd28784aSAneesh Kumar K.V ret = 0; 4515a2df2a63SAmit Arora goto retry; 4516a2df2a63SAmit Arora } 451755bd725aSAneesh Kumar K.V mutex_unlock(&inode->i_mutex); 45180562e0baSJiaying Zhang 
45226873fa0dSEric Sandeen
45236873fa0dSEric Sandeen /*
45240031462bSMingming Cao * This function converts a range of blocks to written extents.
45250031462bSMingming Cao * The caller of this function will pass the start offset and the size.
45260031462bSMingming Cao * All unwritten extents within this range will be converted to
45270031462bSMingming Cao * written extents.
45280031462bSMingming Cao *
45290031462bSMingming Cao * This function is called from the direct IO end io call back
45300031462bSMingming Cao * function, to convert the fallocated extents after IO is completed.
4531109f5565SMingming * Returns 0 on success.
45320031462bSMingming Cao */
45330031462bSMingming Cao int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4534a1de02dcSEric Sandeen ssize_t len)
45350031462bSMingming Cao {
45360031462bSMingming Cao handle_t *handle;
45370031462bSMingming Cao unsigned int max_blocks;
45380031462bSMingming Cao int ret = 0;
45390031462bSMingming Cao int ret2 = 0;
45402ed88685STheodore Ts'o struct ext4_map_blocks map;
45410031462bSMingming Cao unsigned int credits, blkbits = inode->i_blkbits;
45420031462bSMingming Cao
45432ed88685STheodore Ts'o map.m_lblk = offset >> blkbits;
45440031462bSMingming Cao /*
45450031462bSMingming Cao * We can't just convert len to max_blocks because,
45460031462bSMingming Cao * if blocksize = 4096, offset = 3072 and len = 2048, one extra block is needed.
45470031462bSMingming Cao */
45482ed88685STheodore Ts'o max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
45492ed88685STheodore Ts'o map.m_lblk);
45500031462bSMingming Cao /*
45510031462bSMingming Cao * credits to insert 1 extent into extent tree
45520031462bSMingming Cao */
45530031462bSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks);
45540031462bSMingming Cao while (ret >= 0 && ret < max_blocks) {
45552ed88685STheodore Ts'o map.m_lblk += ret;
45562ed88685STheodore Ts'o map.m_len = (max_blocks -= ret);
45579924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
45580031462bSMingming Cao if (IS_ERR(handle)) {
45590031462bSMingming Cao ret = PTR_ERR(handle);
45600031462bSMingming Cao break;
45610031462bSMingming Cao }
45622ed88685STheodore Ts'o ret = ext4_map_blocks(handle, inode, &map,
4563c7064ef1SJiaying Zhang EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4564b06acd38SLukas Czerner if (ret <= 0)
4565b06acd38SLukas Czerner ext4_warning(inode->i_sb,
4566b06acd38SLukas Czerner "inode #%lu: block %u: len %u: "
456792b97816STheodore Ts'o "ext4_ext_map_blocks returned %d",
4568b06acd38SLukas Czerner inode->i_ino, map.m_lblk,
456992b97816STheodore Ts'o map.m_len, ret);
45700031462bSMingming Cao ext4_mark_inode_dirty(handle, inode);
45710031462bSMingming Cao ret2 = ext4_journal_stop(handle);
45720031462bSMingming Cao if (ret <= 0 || ret2)
45730031462bSMingming Cao break;
45740031462bSMingming Cao }
45750031462bSMingming Cao return ret > 0 ? ret2 : ret;
45760031462bSMingming Cao }
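/*
 * Illustrative userspace sequence that exercises this path (path, sizes
 * and fill pattern are made up): a direct IO write into a preallocated
 * region completes in the DIO end-io callback, which, per the comment
 * above, calls ext4_convert_unwritten_extents() so that later reads see
 * the written data instead of zeroes:
 *
 *     int fd = open("/mnt/ext4/data", O_WRONLY | O_DIRECT);
 *     void *buf;
 *     posix_memalign(&buf, 4096, 4096);
 *     memset(buf, 0xab, 4096);
 *     fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *     pwrite(fd, buf, 4096, 0);   // unwritten -> written once the IO completes
 */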
45776d9c85ebSYongqiang Yang
45780031462bSMingming Cao /*
457991dd8c11SLukas Czerner * If newex is not an existing extent (newex->ec_start equals zero), find
458091dd8c11SLukas Czerner * the delayed extent at the start of newex, update newex accordingly and
458191dd8c11SLukas Czerner * return the start of the next delayed extent.
458291dd8c11SLukas Czerner *
458391dd8c11SLukas Czerner * If newex is an existing extent (newex->ec_start is non-zero),
458491dd8c11SLukas Czerner * return the start of the next delayed extent or EXT_MAX_BLOCKS if no delayed
458591dd8c11SLukas Czerner * extent is found. Leave newex unmodified.
45866873fa0dSEric Sandeen */
458791dd8c11SLukas Czerner static int ext4_find_delayed_extent(struct inode *inode,
458891dd8c11SLukas Czerner struct ext4_ext_cache *newex)
45896873fa0dSEric Sandeen {
4590b3aff3e3SZheng Liu struct extent_status es;
4591be401363SZheng Liu ext4_lblk_t block, next_del;
45926873fa0dSEric Sandeen
4593be401363SZheng Liu ext4_es_find_delayed_extent(inode, newex->ec_block, &es);
45946873fa0dSEric Sandeen
4595b05e6ae5STheodore Ts'o if (newex->ec_start == 0) {
45966d9c85ebSYongqiang Yang /*
45976d9c85ebSYongqiang Yang * No extent in the extent tree contains block @newex->ec_block,
45986d9c85ebSYongqiang Yang * so the block may lie in 1) a hole or 2) a delayed extent.
45996d9c85ebSYongqiang Yang */
460006b0c886SZheng Liu if (es.es_len == 0)
4601b3aff3e3SZheng Liu /* A hole found. */
460291dd8c11SLukas Czerner return 0;
46036d9c85ebSYongqiang Yang
460406b0c886SZheng Liu if (es.es_lblk > newex->ec_block) {
4605b3aff3e3SZheng Liu /* A hole found. */
460606b0c886SZheng Liu newex->ec_len = min(es.es_lblk - newex->ec_block,
4607b3aff3e3SZheng Liu newex->ec_len);
460891dd8c11SLukas Czerner return 0;
46096873fa0dSEric Sandeen }
46106d9c85ebSYongqiang Yang
461106b0c886SZheng Liu newex->ec_len = es.es_lblk + es.es_len - newex->ec_block;
46126d9c85ebSYongqiang Yang }
46136873fa0dSEric Sandeen
4614be401363SZheng Liu block = newex->ec_block + newex->ec_len;
4615be401363SZheng Liu ext4_es_find_delayed_extent(inode, block, &es);
4616be401363SZheng Liu if (es.es_len == 0)
4617be401363SZheng Liu next_del = EXT_MAX_BLOCKS;
4618be401363SZheng Liu else
4619be401363SZheng Liu next_del = es.es_lblk;
4620be401363SZheng Liu
462191dd8c11SLukas Czerner return next_del;
46226873fa0dSEric Sandeen }
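/*
 * A small worked example of the logic above (block numbers invented):
 * suppose newex->ec_block = 100 and newex->ec_start = 0, i.e. no on-disk
 * mapping was found at block 100.
 *
 *     - No delayed extent at or after block 100 (es.es_len == 0):
 *       block 100 is a hole, 0 is returned.
 *     - Nearest delayed extent starts at es_lblk = 104: blocks 100-103
 *       form a hole, ec_len is trimmed to min(4, ec_len) and 0 is returned.
 *     - A delayed extent covers block 100, say es_lblk = 96, es_len = 16:
 *       ec_len becomes 96 + 16 - 100 = 12 and the function goes on to
 *       report the start of the next delayed extent (or EXT_MAX_BLOCKS).
 */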
46236873fa0dSEric Sandeen /* fiemap flags we can handle specified here */
46246873fa0dSEric Sandeen #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
46256873fa0dSEric Sandeen
46263a06d778SAneesh Kumar K.V static int ext4_xattr_fiemap(struct inode *inode,
46273a06d778SAneesh Kumar K.V struct fiemap_extent_info *fieinfo)
46286873fa0dSEric Sandeen {
46296873fa0dSEric Sandeen __u64 physical = 0;
46306873fa0dSEric Sandeen __u64 length;
46316873fa0dSEric Sandeen __u32 flags = FIEMAP_EXTENT_LAST;
46326873fa0dSEric Sandeen int blockbits = inode->i_sb->s_blocksize_bits;
46336873fa0dSEric Sandeen int error = 0;
46346873fa0dSEric Sandeen
46356873fa0dSEric Sandeen /* in-inode? */
463619f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
46376873fa0dSEric Sandeen struct ext4_iloc iloc;
46386873fa0dSEric Sandeen int offset; /* offset of xattr in inode */
46396873fa0dSEric Sandeen
46406873fa0dSEric Sandeen error = ext4_get_inode_loc(inode, &iloc);
46416873fa0dSEric Sandeen if (error)
46426873fa0dSEric Sandeen return error;
46436873fa0dSEric Sandeen physical = iloc.bh->b_blocknr << blockbits;
46446873fa0dSEric Sandeen offset = EXT4_GOOD_OLD_INODE_SIZE +
46456873fa0dSEric Sandeen EXT4_I(inode)->i_extra_isize;
46466873fa0dSEric Sandeen physical += offset;
46476873fa0dSEric Sandeen length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
46486873fa0dSEric Sandeen flags |= FIEMAP_EXTENT_DATA_INLINE;
4649fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh);
46506873fa0dSEric Sandeen } else { /* external block */
46516873fa0dSEric Sandeen physical = EXT4_I(inode)->i_file_acl << blockbits;
46526873fa0dSEric Sandeen length = inode->i_sb->s_blocksize;
46536873fa0dSEric Sandeen }
46546873fa0dSEric Sandeen
46556873fa0dSEric Sandeen if (physical)
46566873fa0dSEric Sandeen error = fiemap_fill_next_extent(fieinfo, 0, physical,
46576873fa0dSEric Sandeen length, flags);
46586873fa0dSEric Sandeen return (error < 0 ? error : 0);
46596873fa0dSEric Sandeen }
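/*
 * Example of the in-inode case above, with invented numbers: if the
 * inode lives in inode-table block 5000, the block size is 4096
 * (blockbits = 12), s_inode_size = 256 and i_extra_isize = 32, then
 *
 *     physical  = 5000 << 12 = 20480000
 *     offset    = 128 + 32   = 160   (EXT4_GOOD_OLD_INODE_SIZE is 128)
 *     physical += 160                -> byte address of the xattr area
 *     length    = 256 - 160  = 96 bytes reported to fiemap
 */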
46606873fa0dSEric Sandeen
4661a4bb6b64SAllison Henderson /*
4662a4bb6b64SAllison Henderson * ext4_ext_punch_hole
4663a4bb6b64SAllison Henderson *
4664a4bb6b64SAllison Henderson * Punches a hole of "length" bytes in a file starting
4665a4bb6b64SAllison Henderson * at byte "offset"
4666a4bb6b64SAllison Henderson *
4667a4bb6b64SAllison Henderson * @inode: The inode of the file to punch a hole in
4668a4bb6b64SAllison Henderson * @offset: The starting byte offset of the hole
4669a4bb6b64SAllison Henderson * @length: The length of the hole
4670a4bb6b64SAllison Henderson *
4671a4bb6b64SAllison Henderson * Returns 0 on success or a negative error code on failure
4672a4bb6b64SAllison Henderson */
4673a4bb6b64SAllison Henderson int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4674a4bb6b64SAllison Henderson {
4675a4bb6b64SAllison Henderson struct inode *inode = file->f_path.dentry->d_inode;
4676a4bb6b64SAllison Henderson struct super_block *sb = inode->i_sb;
46775f95d21fSLukas Czerner ext4_lblk_t first_block, stop_block;
4678a4bb6b64SAllison Henderson struct address_space *mapping = inode->i_mapping;
4679a4bb6b64SAllison Henderson handle_t *handle;
4680ba06208aSAllison Henderson loff_t first_page, last_page, page_len;
4681ba06208aSAllison Henderson loff_t first_page_offset, last_page_offset;
46825f95d21fSLukas Czerner int credits, err = 0;
4683a4bb6b64SAllison Henderson
468402d262dfSDmitry Monakhov /*
468502d262dfSDmitry Monakhov * Write out all dirty pages to avoid race conditions,
468602d262dfSDmitry Monakhov * then release them.
468702d262dfSDmitry Monakhov */
468802d262dfSDmitry Monakhov if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
468902d262dfSDmitry Monakhov err = filemap_write_and_wait_range(mapping,
469002d262dfSDmitry Monakhov offset, offset + length - 1);
469102d262dfSDmitry Monakhov
469202d262dfSDmitry Monakhov if (err)
469302d262dfSDmitry Monakhov return err;
469402d262dfSDmitry Monakhov }
469502d262dfSDmitry Monakhov
469602d262dfSDmitry Monakhov mutex_lock(&inode->i_mutex);
469702d262dfSDmitry Monakhov /* It's not possible to punch a hole in an append-only file */
469802d262dfSDmitry Monakhov if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
469902d262dfSDmitry Monakhov err = -EPERM;
470002d262dfSDmitry Monakhov goto out_mutex;
470102d262dfSDmitry Monakhov }
470202d262dfSDmitry Monakhov if (IS_SWAPFILE(inode)) {
470302d262dfSDmitry Monakhov err = -ETXTBSY;
470402d262dfSDmitry Monakhov goto out_mutex;
470502d262dfSDmitry Monakhov }
470602d262dfSDmitry Monakhov
47072be4751bSAllison Henderson /* No need to punch hole beyond i_size */
47082be4751bSAllison Henderson if (offset >= inode->i_size)
470902d262dfSDmitry Monakhov goto out_mutex;
47102be4751bSAllison Henderson
47112be4751bSAllison Henderson /*
47122be4751bSAllison Henderson * If the hole extends beyond i_size, set the hole
47132be4751bSAllison Henderson * to end after the page that contains i_size
47142be4751bSAllison Henderson */
47152be4751bSAllison Henderson if (offset + length > inode->i_size) {
47162be4751bSAllison Henderson length = inode->i_size +
47172be4751bSAllison Henderson PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
47182be4751bSAllison Henderson offset;
47192be4751bSAllison Henderson }
47202be4751bSAllison Henderson
4721a4bb6b64SAllison Henderson first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
4722a4bb6b64SAllison Henderson last_page = (offset + length) >> PAGE_CACHE_SHIFT;
4723a4bb6b64SAllison Henderson
4724a4bb6b64SAllison Henderson first_page_offset = first_page << PAGE_CACHE_SHIFT;
4725a4bb6b64SAllison Henderson last_page_offset = last_page << PAGE_CACHE_SHIFT;
4726a4bb6b64SAllison Henderson
4727a4bb6b64SAllison Henderson /* Now release the pages */
4728a4bb6b64SAllison Henderson if (last_page_offset > first_page_offset) {
47295e44f8c3SHugh Dickins truncate_pagecache_range(inode, first_page_offset,
4730a4bb6b64SAllison Henderson last_page_offset - 1);
4731a4bb6b64SAllison Henderson }
4732a4bb6b64SAllison Henderson
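/*
 * Worked example of the page-cache range computed above, assuming
 * PAGE_CACHE_SIZE = 4096 and a hole with offset = 1000, length = 12000
 * (so the hole covers bytes 1000..12999):
 *
 *     first_page        = (1000 + 4095) >> 12 = 1
 *     last_page         = 13000 >> 12         = 3
 *     first_page_offset = 4096, last_page_offset = 12288
 *
 * Only whole pages 1 and 2 (bytes 4096..12287) are dropped from the page
 * cache here; the partial pages at either end are zeroed further down via
 * ext4_discard_partial_page_buffers().
 */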
473302d262dfSDmitry Monakhov /* Wait for all existing dio workers; newcomers will block on i_mutex */
473402d262dfSDmitry Monakhov ext4_inode_block_unlocked_dio(inode);
4735c278531dSDmitry Monakhov err = ext4_flush_unwritten_io(inode);
473628a535f9SDmitry Monakhov if (err)
473702d262dfSDmitry Monakhov goto out_dio;
4738c278531dSDmitry Monakhov inode_dio_wait(inode);
4739a4bb6b64SAllison Henderson
4740a4bb6b64SAllison Henderson credits = ext4_writepage_trans_blocks(inode);
47419924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
474202d262dfSDmitry Monakhov if (IS_ERR(handle)) {
474302d262dfSDmitry Monakhov err = PTR_ERR(handle);
474402d262dfSDmitry Monakhov goto out_dio;
474502d262dfSDmitry Monakhov }
4746a4bb6b64SAllison Henderson
4747a4bb6b64SAllison Henderson
4748a4bb6b64SAllison Henderson /*
4749ba06208aSAllison Henderson * Now we need to zero out the non-page-aligned data in the
4750ba06208aSAllison Henderson * pages at the start and tail of the hole, and unmap the buffer
4751ba06208aSAllison Henderson * heads for the block-aligned regions of those pages that were
4752ba06208aSAllison Henderson * completely zeroed.
4753a4bb6b64SAllison Henderson */
4754ba06208aSAllison Henderson if (first_page > last_page) {
4755ba06208aSAllison Henderson /*
4756ba06208aSAllison Henderson * If the file space being truncated is contained within a page,
4757ba06208aSAllison Henderson * just zero out and unmap the middle of that page
4758ba06208aSAllison Henderson */
4759ba06208aSAllison Henderson err = ext4_discard_partial_page_buffers(handle,
4760ba06208aSAllison Henderson mapping, offset, length, 0);
4761a4bb6b64SAllison Henderson
4762ba06208aSAllison Henderson if (err)
4763ba06208aSAllison Henderson goto out;
4764ba06208aSAllison Henderson } else {
4765ba06208aSAllison Henderson /*
4766ba06208aSAllison Henderson * zero out and unmap the partial page that contains
4767ba06208aSAllison Henderson * the start of the hole
4768ba06208aSAllison Henderson */
4769ba06208aSAllison Henderson page_len = first_page_offset - offset;
4770ba06208aSAllison Henderson if (page_len > 0) {
4771ba06208aSAllison Henderson err = ext4_discard_partial_page_buffers(handle, mapping,
4772ba06208aSAllison Henderson offset, page_len, 0);
4773ba06208aSAllison Henderson if (err)
4774ba06208aSAllison Henderson goto out;
4775ba06208aSAllison Henderson }
4776ba06208aSAllison Henderson
4777ba06208aSAllison Henderson /*
4778ba06208aSAllison Henderson * zero out and unmap the partial page that contains
4779ba06208aSAllison Henderson * the end of the hole
4780ba06208aSAllison Henderson */
4781ba06208aSAllison Henderson page_len = offset + length - last_page_offset;
4782ba06208aSAllison Henderson if (page_len > 0) {
4783ba06208aSAllison Henderson err = ext4_discard_partial_page_buffers(handle, mapping,
4784ba06208aSAllison Henderson last_page_offset, page_len, 0);
4785ba06208aSAllison Henderson if (err)
4786ba06208aSAllison Henderson goto out;
4787a4bb6b64SAllison Henderson }
4788a4bb6b64SAllison Henderson }
4789a4bb6b64SAllison Henderson
47902be4751bSAllison Henderson /*
47912be4751bSAllison Henderson * If i_size is contained in the last page, we need to
47922be4751bSAllison Henderson * unmap and zero the partial page after i_size
47932be4751bSAllison Henderson */
47942be4751bSAllison Henderson if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
47952be4751bSAllison Henderson inode->i_size % PAGE_CACHE_SIZE != 0) {
47962be4751bSAllison Henderson
47972be4751bSAllison Henderson page_len = PAGE_CACHE_SIZE -
47982be4751bSAllison Henderson (inode->i_size & (PAGE_CACHE_SIZE - 1));
47992be4751bSAllison Henderson
48002be4751bSAllison Henderson if (page_len > 0) {
48012be4751bSAllison Henderson err = ext4_discard_partial_page_buffers(handle,
48022be4751bSAllison Henderson mapping, inode->i_size, page_len, 0);
48032be4751bSAllison Henderson
48042be4751bSAllison Henderson if (err)
48052be4751bSAllison Henderson goto out;
48062be4751bSAllison Henderson }
48072be4751bSAllison Henderson }
48082be4751bSAllison Henderson
48095f95d21fSLukas Czerner first_block = (offset + sb->s_blocksize - 1) >>
48105f95d21fSLukas Czerner EXT4_BLOCK_SIZE_BITS(sb);
48115f95d21fSLukas Czerner stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
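/*
 * Continuing the example above (block size 4096, offset = 1000,
 * length = 12000): only blocks that lie entirely inside the hole are
 * released from the extent tree:
 *
 *     first_block = (1000 + 4095) >> 12 = 1
 *     stop_block  = 13000 >> 12         = 3
 *
 * so blocks 1 and 2 are removed below, while the partial blocks at
 * either end were only zeroed, not deallocated.
 */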
48125f95d21fSLukas Czerner
4813a4bb6b64SAllison Henderson /* If there are no blocks to remove, return now */
48145f95d21fSLukas Czerner if (first_block >= stop_block)
4815a4bb6b64SAllison Henderson goto out;
4816a4bb6b64SAllison Henderson
4817a4bb6b64SAllison Henderson down_write(&EXT4_I(inode)->i_data_sem);
4818a4bb6b64SAllison Henderson ext4_ext_invalidate_cache(inode);
4819a4bb6b64SAllison Henderson ext4_discard_preallocations(inode);
4820a4bb6b64SAllison Henderson
482151865fdaSZheng Liu err = ext4_es_remove_extent(inode, first_block,
482251865fdaSZheng Liu stop_block - first_block);
48235f95d21fSLukas Czerner err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
4824a4bb6b64SAllison Henderson
4825a4bb6b64SAllison Henderson ext4_ext_invalidate_cache(inode);
4826a4bb6b64SAllison Henderson ext4_discard_preallocations(inode);
4827a4bb6b64SAllison Henderson
4828a4bb6b64SAllison Henderson if (IS_SYNC(inode))
4829a4bb6b64SAllison Henderson ext4_handle_sync(handle);
4830a4bb6b64SAllison Henderson
4831a4bb6b64SAllison Henderson up_write(&EXT4_I(inode)->i_data_sem);
4832a4bb6b64SAllison Henderson
4833a4bb6b64SAllison Henderson out:
4834a4bb6b64SAllison Henderson inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4835a4bb6b64SAllison Henderson ext4_mark_inode_dirty(handle, inode);
4836a4bb6b64SAllison Henderson ext4_journal_stop(handle);
483702d262dfSDmitry Monakhov out_dio:
483802d262dfSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode);
483902d262dfSDmitry Monakhov out_mutex:
484002d262dfSDmitry Monakhov mutex_unlock(&inode->i_mutex);
4841a4bb6b64SAllison Henderson return err;
4842a4bb6b64SAllison Henderson }
484391dd8c11SLukas Czerner
48446873fa0dSEric Sandeen int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
48456873fa0dSEric Sandeen __u64 start, __u64 len)
48466873fa0dSEric Sandeen {
48476873fa0dSEric Sandeen ext4_lblk_t start_blk;
48486873fa0dSEric Sandeen int error = 0;
48496873fa0dSEric Sandeen
485094191985STao Ma if (ext4_has_inline_data(inode)) {
485194191985STao Ma int has_inline = 1;
485294191985STao Ma
485394191985STao Ma error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
485494191985STao Ma
485594191985STao Ma if (has_inline)
485694191985STao Ma return error;
485794191985STao Ma }
485894191985STao Ma
48596873fa0dSEric Sandeen /* fallback to generic here if not in extents fmt */
486012e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
48616873fa0dSEric Sandeen return generic_block_fiemap(inode, fieinfo, start, len,
48626873fa0dSEric Sandeen ext4_get_block);
48636873fa0dSEric Sandeen
48646873fa0dSEric Sandeen if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
48656873fa0dSEric Sandeen return -EBADR;
48666873fa0dSEric Sandeen
48676873fa0dSEric Sandeen if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
48686873fa0dSEric Sandeen error = ext4_xattr_fiemap(inode, fieinfo);
48696873fa0dSEric Sandeen } else {
4870aca92ff6SLeonard Michlmayr ext4_lblk_t len_blks;
4871aca92ff6SLeonard Michlmayr __u64 last_blk;
4872aca92ff6SLeonard Michlmayr
48736873fa0dSEric Sandeen start_blk = start >> inode->i_sb->s_blocksize_bits;
4874aca92ff6SLeonard Michlmayr last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4875f17722f9SLukas Czerner if (last_blk >= EXT_MAX_BLOCKS)
4876f17722f9SLukas Czerner last_blk = EXT_MAX_BLOCKS-1;
4877aca92ff6SLeonard Michlmayr len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
48786873fa0dSEric Sandeen
48796873fa0dSEric Sandeen /*
488091dd8c11SLukas Czerner * Walk the extent tree gathering extent information
488191dd8c11SLukas Czerner * and pushing extents back to the user.
48826873fa0dSEric Sandeen */
488391dd8c11SLukas Czerner error = ext4_fill_fiemap_extents(inode, start_blk,
488491dd8c11SLukas Czerner len_blks, fieinfo);
48856873fa0dSEric Sandeen }
48866873fa0dSEric Sandeen
48876873fa0dSEric Sandeen return error;
48886873fa0dSEric Sandeen }
4889
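/*
 * Sketch of how userspace drives ext4_fiemap() through the FIEMAP ioctl
 * (buffer sizing and error handling trimmed; the extent count is
 * illustrative):
 *
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/fs.h>
 *     #include <linux/fiemap.h>
 *
 *     char buf[sizeof(struct fiemap) + 32 * sizeof(struct fiemap_extent)];
 *     struct fiemap *fm = (struct fiemap *)buf;
 *
 *     memset(buf, 0, sizeof(buf));
 *     fm->fm_start        = 0;
 *     fm->fm_length       = FIEMAP_MAX_OFFSET;   // whole file
 *     fm->fm_flags        = FIEMAP_FLAG_SYNC;
 *     fm->fm_extent_count = 32;
 *     ioctl(fd, FS_IOC_FIEMAP, fm);
 *     // fm->fm_mapped_extents entries are now valid in fm->fm_extents[]
 */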