/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
		     struct inode *inode, struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to the file system,
		 * especially if the latter case turns out to be
		 * common.
199ad4fb9caSKazuya Mio */ 2007e028976SAvantika Mathur ex = path[depth].p_ext; 201ad4fb9caSKazuya Mio if (ex) { 202ad4fb9caSKazuya Mio ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); 203ad4fb9caSKazuya Mio ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); 204ad4fb9caSKazuya Mio 205ad4fb9caSKazuya Mio if (block > ext_block) 206ad4fb9caSKazuya Mio return ext_pblk + (block - ext_block); 207ad4fb9caSKazuya Mio else 208ad4fb9caSKazuya Mio return ext_pblk - (ext_block - block); 209ad4fb9caSKazuya Mio } 210a86c6181SAlex Tomas 211d0d856e8SRandy Dunlap /* it looks like index is empty; 212d0d856e8SRandy Dunlap * try to find starting block from index itself */ 213a86c6181SAlex Tomas if (path[depth].p_bh) 214a86c6181SAlex Tomas return path[depth].p_bh->b_blocknr; 215a86c6181SAlex Tomas } 216a86c6181SAlex Tomas 217a86c6181SAlex Tomas /* OK. use inode's group */ 218f86186b4SEric Sandeen return ext4_inode_to_goal_block(inode); 219a86c6181SAlex Tomas } 220a86c6181SAlex Tomas 221654b4908SAneesh Kumar K.V /* 222654b4908SAneesh Kumar K.V * Allocation for a meta data block 223654b4908SAneesh Kumar K.V */ 224f65e6fbaSAlex Tomas static ext4_fsblk_t 225654b4908SAneesh Kumar K.V ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, 226a86c6181SAlex Tomas struct ext4_ext_path *path, 22755f020dbSAllison Henderson struct ext4_extent *ex, int *err, unsigned int flags) 228a86c6181SAlex Tomas { 229f65e6fbaSAlex Tomas ext4_fsblk_t goal, newblock; 230a86c6181SAlex Tomas 231a86c6181SAlex Tomas goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); 23255f020dbSAllison Henderson newblock = ext4_new_meta_blocks(handle, inode, goal, flags, 23355f020dbSAllison Henderson NULL, err); 234a86c6181SAlex Tomas return newblock; 235a86c6181SAlex Tomas } 236a86c6181SAlex Tomas 23755ad63bfSTheodore Ts'o static inline int ext4_ext_space_block(struct inode *inode, int check) 238a86c6181SAlex Tomas { 239a86c6181SAlex Tomas int size; 240a86c6181SAlex Tomas 241a86c6181SAlex Tomas size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 242a86c6181SAlex Tomas / sizeof(struct ext4_extent); 243bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 24402dc62fbSYongqiang Yang if (!check && size > 6) 245a86c6181SAlex Tomas size = 6; 246a86c6181SAlex Tomas #endif 247a86c6181SAlex Tomas return size; 248a86c6181SAlex Tomas } 249a86c6181SAlex Tomas 25055ad63bfSTheodore Ts'o static inline int ext4_ext_space_block_idx(struct inode *inode, int check) 251a86c6181SAlex Tomas { 252a86c6181SAlex Tomas int size; 253a86c6181SAlex Tomas 254a86c6181SAlex Tomas size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 255a86c6181SAlex Tomas / sizeof(struct ext4_extent_idx); 256bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 25702dc62fbSYongqiang Yang if (!check && size > 5) 258a86c6181SAlex Tomas size = 5; 259a86c6181SAlex Tomas #endif 260a86c6181SAlex Tomas return size; 261a86c6181SAlex Tomas } 262a86c6181SAlex Tomas 26355ad63bfSTheodore Ts'o static inline int ext4_ext_space_root(struct inode *inode, int check) 264a86c6181SAlex Tomas { 265a86c6181SAlex Tomas int size; 266a86c6181SAlex Tomas 267a86c6181SAlex Tomas size = sizeof(EXT4_I(inode)->i_data); 268a86c6181SAlex Tomas size -= sizeof(struct ext4_extent_header); 269a86c6181SAlex Tomas size /= sizeof(struct ext4_extent); 270bbf2f9fbSRobert P. J. 
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
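	 *
	 * For example, with a 4k block size the formula above gives
	 * idxs = (4096 - 12) / 12 = 340 (12-byte header and index
	 * entries), so a run of contiguous delalloc blocks is charged
	 * one extra index block every 340 leaf blocks, another at
	 * every 340*340 blocks, and so on, while a block that is not
	 * contiguous with the previous one is charged a full path of
	 * ext_depth() + 1 new metadata blocks.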
3329d0be502STheodore Ts'o */ 3339d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 1; 3349d0be502STheodore Ts'o ei->i_da_metadata_calc_last_lblock = lblock; 3359d0be502STheodore Ts'o return ext_depth(inode) + 1; 3369d0be502STheodore Ts'o } 3379d0be502STheodore Ts'o 338c29c0ae7SAlex Tomas static int 339c29c0ae7SAlex Tomas ext4_ext_max_entries(struct inode *inode, int depth) 340c29c0ae7SAlex Tomas { 341c29c0ae7SAlex Tomas int max; 342c29c0ae7SAlex Tomas 343c29c0ae7SAlex Tomas if (depth == ext_depth(inode)) { 344c29c0ae7SAlex Tomas if (depth == 0) 34555ad63bfSTheodore Ts'o max = ext4_ext_space_root(inode, 1); 346c29c0ae7SAlex Tomas else 34755ad63bfSTheodore Ts'o max = ext4_ext_space_root_idx(inode, 1); 348c29c0ae7SAlex Tomas } else { 349c29c0ae7SAlex Tomas if (depth == 0) 35055ad63bfSTheodore Ts'o max = ext4_ext_space_block(inode, 1); 351c29c0ae7SAlex Tomas else 35255ad63bfSTheodore Ts'o max = ext4_ext_space_block_idx(inode, 1); 353c29c0ae7SAlex Tomas } 354c29c0ae7SAlex Tomas 355c29c0ae7SAlex Tomas return max; 356c29c0ae7SAlex Tomas } 357c29c0ae7SAlex Tomas 35856b19868SAneesh Kumar K.V static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) 35956b19868SAneesh Kumar K.V { 360bf89d16fSTheodore Ts'o ext4_fsblk_t block = ext4_ext_pblock(ext); 36156b19868SAneesh Kumar K.V int len = ext4_ext_get_actual_len(ext); 3625946d089SEryu Guan ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); 3635946d089SEryu Guan ext4_lblk_t last = lblock + len - 1; 364e84a26ceSTheodore Ts'o 3655946d089SEryu Guan if (lblock > last) 36631d4f3a2STheodore Ts'o return 0; 3676fd058f7STheodore Ts'o return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); 36856b19868SAneesh Kumar K.V } 36956b19868SAneesh Kumar K.V 37056b19868SAneesh Kumar K.V static int ext4_valid_extent_idx(struct inode *inode, 37156b19868SAneesh Kumar K.V struct ext4_extent_idx *ext_idx) 37256b19868SAneesh Kumar K.V { 373bf89d16fSTheodore Ts'o ext4_fsblk_t block = ext4_idx_pblock(ext_idx); 374e84a26ceSTheodore Ts'o 3756fd058f7STheodore Ts'o return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); 37656b19868SAneesh Kumar K.V } 37756b19868SAneesh Kumar K.V 37856b19868SAneesh Kumar K.V static int ext4_valid_extent_entries(struct inode *inode, 37956b19868SAneesh Kumar K.V struct ext4_extent_header *eh, 38056b19868SAneesh Kumar K.V int depth) 38156b19868SAneesh Kumar K.V { 38256b19868SAneesh Kumar K.V unsigned short entries; 38356b19868SAneesh Kumar K.V if (eh->eh_entries == 0) 38456b19868SAneesh Kumar K.V return 1; 38556b19868SAneesh Kumar K.V 38656b19868SAneesh Kumar K.V entries = le16_to_cpu(eh->eh_entries); 38756b19868SAneesh Kumar K.V 38856b19868SAneesh Kumar K.V if (depth == 0) { 38956b19868SAneesh Kumar K.V /* leaf entries */ 39081fdbb4aSYongqiang Yang struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); 3915946d089SEryu Guan struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 3925946d089SEryu Guan ext4_fsblk_t pblock = 0; 3935946d089SEryu Guan ext4_lblk_t lblock = 0; 3945946d089SEryu Guan ext4_lblk_t prev = 0; 3955946d089SEryu Guan int len = 0; 39656b19868SAneesh Kumar K.V while (entries) { 39756b19868SAneesh Kumar K.V if (!ext4_valid_extent(inode, ext)) 39856b19868SAneesh Kumar K.V return 0; 3995946d089SEryu Guan 4005946d089SEryu Guan /* Check for overlapping extents */ 4015946d089SEryu Guan lblock = le32_to_cpu(ext->ee_block); 4025946d089SEryu Guan len = ext4_ext_get_actual_len(ext); 4035946d089SEryu Guan if ((lblock <= prev) && prev) { 4045946d089SEryu Guan pblock = ext4_ext_pblock(ext); 4055946d089SEryu Guan 
es->s_last_error_block = cpu_to_le64(pblock); 4065946d089SEryu Guan return 0; 4075946d089SEryu Guan } 40856b19868SAneesh Kumar K.V ext++; 40956b19868SAneesh Kumar K.V entries--; 4105946d089SEryu Guan prev = lblock + len - 1; 41156b19868SAneesh Kumar K.V } 41256b19868SAneesh Kumar K.V } else { 41381fdbb4aSYongqiang Yang struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); 41456b19868SAneesh Kumar K.V while (entries) { 41556b19868SAneesh Kumar K.V if (!ext4_valid_extent_idx(inode, ext_idx)) 41656b19868SAneesh Kumar K.V return 0; 41756b19868SAneesh Kumar K.V ext_idx++; 41856b19868SAneesh Kumar K.V entries--; 41956b19868SAneesh Kumar K.V } 42056b19868SAneesh Kumar K.V } 42156b19868SAneesh Kumar K.V return 1; 42256b19868SAneesh Kumar K.V } 42356b19868SAneesh Kumar K.V 424c398eda0STheodore Ts'o static int __ext4_ext_check(const char *function, unsigned int line, 425c398eda0STheodore Ts'o struct inode *inode, struct ext4_extent_header *eh, 426c349179bSTheodore Ts'o int depth, ext4_fsblk_t pblk) 427c29c0ae7SAlex Tomas { 428c29c0ae7SAlex Tomas const char *error_msg; 429c29c0ae7SAlex Tomas int max = 0; 430c29c0ae7SAlex Tomas 431c29c0ae7SAlex Tomas if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { 432c29c0ae7SAlex Tomas error_msg = "invalid magic"; 433c29c0ae7SAlex Tomas goto corrupted; 434c29c0ae7SAlex Tomas } 435c29c0ae7SAlex Tomas if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { 436c29c0ae7SAlex Tomas error_msg = "unexpected eh_depth"; 437c29c0ae7SAlex Tomas goto corrupted; 438c29c0ae7SAlex Tomas } 439c29c0ae7SAlex Tomas if (unlikely(eh->eh_max == 0)) { 440c29c0ae7SAlex Tomas error_msg = "invalid eh_max"; 441c29c0ae7SAlex Tomas goto corrupted; 442c29c0ae7SAlex Tomas } 443c29c0ae7SAlex Tomas max = ext4_ext_max_entries(inode, depth); 444c29c0ae7SAlex Tomas if (unlikely(le16_to_cpu(eh->eh_max) > max)) { 445c29c0ae7SAlex Tomas error_msg = "too large eh_max"; 446c29c0ae7SAlex Tomas goto corrupted; 447c29c0ae7SAlex Tomas } 448c29c0ae7SAlex Tomas if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { 449c29c0ae7SAlex Tomas error_msg = "invalid eh_entries"; 450c29c0ae7SAlex Tomas goto corrupted; 451c29c0ae7SAlex Tomas } 45256b19868SAneesh Kumar K.V if (!ext4_valid_extent_entries(inode, eh, depth)) { 45356b19868SAneesh Kumar K.V error_msg = "invalid extent entries"; 45456b19868SAneesh Kumar K.V goto corrupted; 45556b19868SAneesh Kumar K.V } 4567ac5990dSDarrick J. Wong /* Verify checksum on non-root extent tree nodes */ 4577ac5990dSDarrick J. Wong if (ext_depth(inode) != depth && 4587ac5990dSDarrick J. Wong !ext4_extent_block_csum_verify(inode, eh)) { 4597ac5990dSDarrick J. Wong error_msg = "extent tree corrupted"; 4607ac5990dSDarrick J. Wong goto corrupted; 4617ac5990dSDarrick J. 
Wong } 462c29c0ae7SAlex Tomas return 0; 463c29c0ae7SAlex Tomas 464c29c0ae7SAlex Tomas corrupted: 465c398eda0STheodore Ts'o ext4_error_inode(inode, function, line, 0, 466c349179bSTheodore Ts'o "pblk %llu bad header/extent: %s - magic %x, " 467c29c0ae7SAlex Tomas "entries %u, max %u(%u), depth %u(%u)", 468c349179bSTheodore Ts'o (unsigned long long) pblk, error_msg, 469c349179bSTheodore Ts'o le16_to_cpu(eh->eh_magic), 470c29c0ae7SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), 471c29c0ae7SAlex Tomas max, le16_to_cpu(eh->eh_depth), depth); 472c29c0ae7SAlex Tomas return -EIO; 473c29c0ae7SAlex Tomas } 474c29c0ae7SAlex Tomas 475c349179bSTheodore Ts'o #define ext4_ext_check(inode, eh, depth, pblk) \ 476c349179bSTheodore Ts'o __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk)) 477c29c0ae7SAlex Tomas 4787a262f7cSAneesh Kumar K.V int ext4_ext_check_inode(struct inode *inode) 4797a262f7cSAneesh Kumar K.V { 480c349179bSTheodore Ts'o return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0); 4817a262f7cSAneesh Kumar K.V } 4827a262f7cSAneesh Kumar K.V 4837d7ea89eSTheodore Ts'o static struct buffer_head * 4847d7ea89eSTheodore Ts'o __read_extent_tree_block(const char *function, unsigned int line, 485107a7bd3STheodore Ts'o struct inode *inode, ext4_fsblk_t pblk, int depth, 486107a7bd3STheodore Ts'o int flags) 487f8489128SDarrick J. Wong { 4887d7ea89eSTheodore Ts'o struct buffer_head *bh; 4897d7ea89eSTheodore Ts'o int err; 490f8489128SDarrick J. Wong 4917d7ea89eSTheodore Ts'o bh = sb_getblk(inode->i_sb, pblk); 4927d7ea89eSTheodore Ts'o if (unlikely(!bh)) 4937d7ea89eSTheodore Ts'o return ERR_PTR(-ENOMEM); 4947d7ea89eSTheodore Ts'o 4957d7ea89eSTheodore Ts'o if (!bh_uptodate_or_lock(bh)) { 4967d7ea89eSTheodore Ts'o trace_ext4_ext_load_extent(inode, pblk, _RET_IP_); 4977d7ea89eSTheodore Ts'o err = bh_submit_read(bh); 4987d7ea89eSTheodore Ts'o if (err < 0) 4997d7ea89eSTheodore Ts'o goto errout; 5007d7ea89eSTheodore Ts'o } 5017869a4a6STheodore Ts'o if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE)) 5027d7ea89eSTheodore Ts'o return bh; 5037d7ea89eSTheodore Ts'o err = __ext4_ext_check(function, line, inode, 504c349179bSTheodore Ts'o ext_block_hdr(bh), depth, pblk); 5057d7ea89eSTheodore Ts'o if (err) 5067d7ea89eSTheodore Ts'o goto errout; 507f8489128SDarrick J. 
Wong set_buffer_verified(bh); 508107a7bd3STheodore Ts'o /* 509107a7bd3STheodore Ts'o * If this is a leaf block, cache all of its entries 510107a7bd3STheodore Ts'o */ 511107a7bd3STheodore Ts'o if (!(flags & EXT4_EX_NOCACHE) && depth == 0) { 512107a7bd3STheodore Ts'o struct ext4_extent_header *eh = ext_block_hdr(bh); 513107a7bd3STheodore Ts'o struct ext4_extent *ex = EXT_FIRST_EXTENT(eh); 514107a7bd3STheodore Ts'o ext4_lblk_t prev = 0; 515107a7bd3STheodore Ts'o int i; 516107a7bd3STheodore Ts'o 517107a7bd3STheodore Ts'o for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) { 518107a7bd3STheodore Ts'o unsigned int status = EXTENT_STATUS_WRITTEN; 519107a7bd3STheodore Ts'o ext4_lblk_t lblk = le32_to_cpu(ex->ee_block); 520107a7bd3STheodore Ts'o int len = ext4_ext_get_actual_len(ex); 521107a7bd3STheodore Ts'o 522107a7bd3STheodore Ts'o if (prev && (prev != lblk)) 523107a7bd3STheodore Ts'o ext4_es_cache_extent(inode, prev, 524107a7bd3STheodore Ts'o lblk - prev, ~0, 525107a7bd3STheodore Ts'o EXTENT_STATUS_HOLE); 526107a7bd3STheodore Ts'o 527107a7bd3STheodore Ts'o if (ext4_ext_is_uninitialized(ex)) 528107a7bd3STheodore Ts'o status = EXTENT_STATUS_UNWRITTEN; 529107a7bd3STheodore Ts'o ext4_es_cache_extent(inode, lblk, len, 530107a7bd3STheodore Ts'o ext4_ext_pblock(ex), status); 531107a7bd3STheodore Ts'o prev = lblk + len; 532107a7bd3STheodore Ts'o } 533107a7bd3STheodore Ts'o } 5347d7ea89eSTheodore Ts'o return bh; 5357d7ea89eSTheodore Ts'o errout: 5367d7ea89eSTheodore Ts'o put_bh(bh); 5377d7ea89eSTheodore Ts'o return ERR_PTR(err); 5387d7ea89eSTheodore Ts'o 539f8489128SDarrick J. Wong } 540f8489128SDarrick J. Wong 541107a7bd3STheodore Ts'o #define read_extent_tree_block(inode, pblk, depth, flags) \ 542107a7bd3STheodore Ts'o __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \ 543107a7bd3STheodore Ts'o (depth), (flags)) 544f8489128SDarrick J. 
Wong 5457869a4a6STheodore Ts'o /* 5467869a4a6STheodore Ts'o * This function is called to cache a file's extent information in the 5477869a4a6STheodore Ts'o * extent status tree 5487869a4a6STheodore Ts'o */ 5497869a4a6STheodore Ts'o int ext4_ext_precache(struct inode *inode) 5507869a4a6STheodore Ts'o { 5517869a4a6STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 5527869a4a6STheodore Ts'o struct ext4_ext_path *path = NULL; 5537869a4a6STheodore Ts'o struct buffer_head *bh; 5547869a4a6STheodore Ts'o int i = 0, depth, ret = 0; 5557869a4a6STheodore Ts'o 5567869a4a6STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 5577869a4a6STheodore Ts'o return 0; /* not an extent-mapped inode */ 5587869a4a6STheodore Ts'o 5597869a4a6STheodore Ts'o down_read(&ei->i_data_sem); 5607869a4a6STheodore Ts'o depth = ext_depth(inode); 5617869a4a6STheodore Ts'o 5627869a4a6STheodore Ts'o path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), 5637869a4a6STheodore Ts'o GFP_NOFS); 5647869a4a6STheodore Ts'o if (path == NULL) { 5657869a4a6STheodore Ts'o up_read(&ei->i_data_sem); 5667869a4a6STheodore Ts'o return -ENOMEM; 5677869a4a6STheodore Ts'o } 5687869a4a6STheodore Ts'o 5697869a4a6STheodore Ts'o /* Don't cache anything if there are no external extent blocks */ 5707869a4a6STheodore Ts'o if (depth == 0) 5717869a4a6STheodore Ts'o goto out; 5727869a4a6STheodore Ts'o path[0].p_hdr = ext_inode_hdr(inode); 5737869a4a6STheodore Ts'o ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0); 5747869a4a6STheodore Ts'o if (ret) 5757869a4a6STheodore Ts'o goto out; 5767869a4a6STheodore Ts'o path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr); 5777869a4a6STheodore Ts'o while (i >= 0) { 5787869a4a6STheodore Ts'o /* 5797869a4a6STheodore Ts'o * If this is a leaf block or we've reached the end of 5807869a4a6STheodore Ts'o * the index block, go up 5817869a4a6STheodore Ts'o */ 5827869a4a6STheodore Ts'o if ((i == depth) || 5837869a4a6STheodore Ts'o path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) { 5847869a4a6STheodore Ts'o brelse(path[i].p_bh); 5857869a4a6STheodore Ts'o path[i].p_bh = NULL; 5867869a4a6STheodore Ts'o i--; 5877869a4a6STheodore Ts'o continue; 5887869a4a6STheodore Ts'o } 5897869a4a6STheodore Ts'o bh = read_extent_tree_block(inode, 5907869a4a6STheodore Ts'o ext4_idx_pblock(path[i].p_idx++), 5917869a4a6STheodore Ts'o depth - i - 1, 5927869a4a6STheodore Ts'o EXT4_EX_FORCE_CACHE); 5937869a4a6STheodore Ts'o if (IS_ERR(bh)) { 5947869a4a6STheodore Ts'o ret = PTR_ERR(bh); 5957869a4a6STheodore Ts'o break; 5967869a4a6STheodore Ts'o } 5977869a4a6STheodore Ts'o i++; 5987869a4a6STheodore Ts'o path[i].p_bh = bh; 5997869a4a6STheodore Ts'o path[i].p_hdr = ext_block_hdr(bh); 6007869a4a6STheodore Ts'o path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr); 6017869a4a6STheodore Ts'o } 6027869a4a6STheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED); 6037869a4a6STheodore Ts'o out: 6047869a4a6STheodore Ts'o up_read(&ei->i_data_sem); 6057869a4a6STheodore Ts'o ext4_ext_drop_refs(path); 6067869a4a6STheodore Ts'o kfree(path); 6077869a4a6STheodore Ts'o return ret; 6087869a4a6STheodore Ts'o } 6097869a4a6STheodore Ts'o 610a86c6181SAlex Tomas #ifdef EXT_DEBUG 611a86c6181SAlex Tomas static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) 612a86c6181SAlex Tomas { 613a86c6181SAlex Tomas int k, l = path->p_depth; 614a86c6181SAlex Tomas 615a86c6181SAlex Tomas ext_debug("path:"); 616a86c6181SAlex Tomas for (k = 0; k <= l; k++, path++) { 617a86c6181SAlex Tomas if (path->p_idx) { 6182ae02107SMingming Cao ext_debug(" 
%d->%llu", le32_to_cpu(path->p_idx->ei_block), 619bf89d16fSTheodore Ts'o ext4_idx_pblock(path->p_idx)); 620a86c6181SAlex Tomas } else if (path->p_ext) { 621553f9008SMingming ext_debug(" %d:[%d]%d:%llu ", 622a86c6181SAlex Tomas le32_to_cpu(path->p_ext->ee_block), 623553f9008SMingming ext4_ext_is_uninitialized(path->p_ext), 624a2df2a63SAmit Arora ext4_ext_get_actual_len(path->p_ext), 625bf89d16fSTheodore Ts'o ext4_ext_pblock(path->p_ext)); 626a86c6181SAlex Tomas } else 627a86c6181SAlex Tomas ext_debug(" []"); 628a86c6181SAlex Tomas } 629a86c6181SAlex Tomas ext_debug("\n"); 630a86c6181SAlex Tomas } 631a86c6181SAlex Tomas 632a86c6181SAlex Tomas static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) 633a86c6181SAlex Tomas { 634a86c6181SAlex Tomas int depth = ext_depth(inode); 635a86c6181SAlex Tomas struct ext4_extent_header *eh; 636a86c6181SAlex Tomas struct ext4_extent *ex; 637a86c6181SAlex Tomas int i; 638a86c6181SAlex Tomas 639a86c6181SAlex Tomas if (!path) 640a86c6181SAlex Tomas return; 641a86c6181SAlex Tomas 642a86c6181SAlex Tomas eh = path[depth].p_hdr; 643a86c6181SAlex Tomas ex = EXT_FIRST_EXTENT(eh); 644a86c6181SAlex Tomas 645553f9008SMingming ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); 646553f9008SMingming 647a86c6181SAlex Tomas for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { 648553f9008SMingming ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), 649553f9008SMingming ext4_ext_is_uninitialized(ex), 650bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); 651a86c6181SAlex Tomas } 652a86c6181SAlex Tomas ext_debug("\n"); 653a86c6181SAlex Tomas } 6541b16da77SYongqiang Yang 6551b16da77SYongqiang Yang static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, 6561b16da77SYongqiang Yang ext4_fsblk_t newblock, int level) 6571b16da77SYongqiang Yang { 6581b16da77SYongqiang Yang int depth = ext_depth(inode); 6591b16da77SYongqiang Yang struct ext4_extent *ex; 6601b16da77SYongqiang Yang 6611b16da77SYongqiang Yang if (depth != level) { 6621b16da77SYongqiang Yang struct ext4_extent_idx *idx; 6631b16da77SYongqiang Yang idx = path[level].p_idx; 6641b16da77SYongqiang Yang while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { 6651b16da77SYongqiang Yang ext_debug("%d: move %d:%llu in new index %llu\n", level, 6661b16da77SYongqiang Yang le32_to_cpu(idx->ei_block), 6671b16da77SYongqiang Yang ext4_idx_pblock(idx), 6681b16da77SYongqiang Yang newblock); 6691b16da77SYongqiang Yang idx++; 6701b16da77SYongqiang Yang } 6711b16da77SYongqiang Yang 6721b16da77SYongqiang Yang return; 6731b16da77SYongqiang Yang } 6741b16da77SYongqiang Yang 6751b16da77SYongqiang Yang ex = path[depth].p_ext; 6761b16da77SYongqiang Yang while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { 6771b16da77SYongqiang Yang ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", 6781b16da77SYongqiang Yang le32_to_cpu(ex->ee_block), 6791b16da77SYongqiang Yang ext4_ext_pblock(ex), 6801b16da77SYongqiang Yang ext4_ext_is_uninitialized(ex), 6811b16da77SYongqiang Yang ext4_ext_get_actual_len(ex), 6821b16da77SYongqiang Yang newblock); 6831b16da77SYongqiang Yang ex++; 6841b16da77SYongqiang Yang } 6851b16da77SYongqiang Yang } 6861b16da77SYongqiang Yang 687a86c6181SAlex Tomas #else 688a86c6181SAlex Tomas #define ext4_ext_show_path(inode, path) 689a86c6181SAlex Tomas #define ext4_ext_show_leaf(inode, path) 6901b16da77SYongqiang Yang #define ext4_ext_show_move(inode, path, newblock, level) 691a86c6181SAlex Tomas #endif 692a86c6181SAlex Tomas 693b35905c1SAneesh Kumar 
K.V void ext4_ext_drop_refs(struct ext4_ext_path *path) 694a86c6181SAlex Tomas { 695a86c6181SAlex Tomas int depth = path->p_depth; 696a86c6181SAlex Tomas int i; 697a86c6181SAlex Tomas 698a86c6181SAlex Tomas for (i = 0; i <= depth; i++, path++) 699a86c6181SAlex Tomas if (path->p_bh) { 700a86c6181SAlex Tomas brelse(path->p_bh); 701a86c6181SAlex Tomas path->p_bh = NULL; 702a86c6181SAlex Tomas } 703a86c6181SAlex Tomas } 704a86c6181SAlex Tomas 705a86c6181SAlex Tomas /* 706d0d856e8SRandy Dunlap * ext4_ext_binsearch_idx: 707d0d856e8SRandy Dunlap * binary search for the closest index of the given block 708c29c0ae7SAlex Tomas * the header must be checked before calling this 709a86c6181SAlex Tomas */ 710a86c6181SAlex Tomas static void 711725d26d3SAneesh Kumar K.V ext4_ext_binsearch_idx(struct inode *inode, 712725d26d3SAneesh Kumar K.V struct ext4_ext_path *path, ext4_lblk_t block) 713a86c6181SAlex Tomas { 714a86c6181SAlex Tomas struct ext4_extent_header *eh = path->p_hdr; 715a86c6181SAlex Tomas struct ext4_extent_idx *r, *l, *m; 716a86c6181SAlex Tomas 717a86c6181SAlex Tomas 718bba90743SEric Sandeen ext_debug("binsearch for %u(idx): ", block); 719a86c6181SAlex Tomas 720a86c6181SAlex Tomas l = EXT_FIRST_INDEX(eh) + 1; 721e9f410b1SDmitry Monakhov r = EXT_LAST_INDEX(eh); 722a86c6181SAlex Tomas while (l <= r) { 723a86c6181SAlex Tomas m = l + (r - l) / 2; 724a86c6181SAlex Tomas if (block < le32_to_cpu(m->ei_block)) 725a86c6181SAlex Tomas r = m - 1; 726a86c6181SAlex Tomas else 727a86c6181SAlex Tomas l = m + 1; 72826d535edSDmitry Monakhov ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), 72926d535edSDmitry Monakhov m, le32_to_cpu(m->ei_block), 73026d535edSDmitry Monakhov r, le32_to_cpu(r->ei_block)); 731a86c6181SAlex Tomas } 732a86c6181SAlex Tomas 733a86c6181SAlex Tomas path->p_idx = l - 1; 7344a3c3a51SZheng Liu ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), 735bf89d16fSTheodore Ts'o ext4_idx_pblock(path->p_idx)); 736a86c6181SAlex Tomas 737a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 738a86c6181SAlex Tomas { 739a86c6181SAlex Tomas struct ext4_extent_idx *chix, *ix; 740a86c6181SAlex Tomas int k; 741a86c6181SAlex Tomas 742a86c6181SAlex Tomas chix = ix = EXT_FIRST_INDEX(eh); 743a86c6181SAlex Tomas for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { 744a86c6181SAlex Tomas if (k != 0 && 745a86c6181SAlex Tomas le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { 7464776004fSTheodore Ts'o printk(KERN_DEBUG "k=%d, ix=0x%p, " 7474776004fSTheodore Ts'o "first=0x%p\n", k, 748a86c6181SAlex Tomas ix, EXT_FIRST_INDEX(eh)); 7494776004fSTheodore Ts'o printk(KERN_DEBUG "%u <= %u\n", 750a86c6181SAlex Tomas le32_to_cpu(ix->ei_block), 751a86c6181SAlex Tomas le32_to_cpu(ix[-1].ei_block)); 752a86c6181SAlex Tomas } 753a86c6181SAlex Tomas BUG_ON(k && le32_to_cpu(ix->ei_block) 754a86c6181SAlex Tomas <= le32_to_cpu(ix[-1].ei_block)); 755a86c6181SAlex Tomas if (block < le32_to_cpu(ix->ei_block)) 756a86c6181SAlex Tomas break; 757a86c6181SAlex Tomas chix = ix; 758a86c6181SAlex Tomas } 759a86c6181SAlex Tomas BUG_ON(chix != path->p_idx); 760a86c6181SAlex Tomas } 761a86c6181SAlex Tomas #endif 762a86c6181SAlex Tomas 763a86c6181SAlex Tomas } 764a86c6181SAlex Tomas 765a86c6181SAlex Tomas /* 766d0d856e8SRandy Dunlap * ext4_ext_binsearch: 767d0d856e8SRandy Dunlap * binary search for closest extent of the given block 768c29c0ae7SAlex Tomas * the header must be checked before calling this 769a86c6181SAlex Tomas */ 770a86c6181SAlex Tomas static void 771725d26d3SAneesh Kumar K.V ext4_ext_binsearch(struct 
inode *inode, 772725d26d3SAneesh Kumar K.V struct ext4_ext_path *path, ext4_lblk_t block) 773a86c6181SAlex Tomas { 774a86c6181SAlex Tomas struct ext4_extent_header *eh = path->p_hdr; 775a86c6181SAlex Tomas struct ext4_extent *r, *l, *m; 776a86c6181SAlex Tomas 777a86c6181SAlex Tomas if (eh->eh_entries == 0) { 778a86c6181SAlex Tomas /* 779d0d856e8SRandy Dunlap * this leaf is empty: 780a86c6181SAlex Tomas * we get such a leaf in split/add case 781a86c6181SAlex Tomas */ 782a86c6181SAlex Tomas return; 783a86c6181SAlex Tomas } 784a86c6181SAlex Tomas 785bba90743SEric Sandeen ext_debug("binsearch for %u: ", block); 786a86c6181SAlex Tomas 787a86c6181SAlex Tomas l = EXT_FIRST_EXTENT(eh) + 1; 788e9f410b1SDmitry Monakhov r = EXT_LAST_EXTENT(eh); 789a86c6181SAlex Tomas 790a86c6181SAlex Tomas while (l <= r) { 791a86c6181SAlex Tomas m = l + (r - l) / 2; 792a86c6181SAlex Tomas if (block < le32_to_cpu(m->ee_block)) 793a86c6181SAlex Tomas r = m - 1; 794a86c6181SAlex Tomas else 795a86c6181SAlex Tomas l = m + 1; 79626d535edSDmitry Monakhov ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), 79726d535edSDmitry Monakhov m, le32_to_cpu(m->ee_block), 79826d535edSDmitry Monakhov r, le32_to_cpu(r->ee_block)); 799a86c6181SAlex Tomas } 800a86c6181SAlex Tomas 801a86c6181SAlex Tomas path->p_ext = l - 1; 802553f9008SMingming ext_debug(" -> %d:%llu:[%d]%d ", 803a86c6181SAlex Tomas le32_to_cpu(path->p_ext->ee_block), 804bf89d16fSTheodore Ts'o ext4_ext_pblock(path->p_ext), 805553f9008SMingming ext4_ext_is_uninitialized(path->p_ext), 806a2df2a63SAmit Arora ext4_ext_get_actual_len(path->p_ext)); 807a86c6181SAlex Tomas 808a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 809a86c6181SAlex Tomas { 810a86c6181SAlex Tomas struct ext4_extent *chex, *ex; 811a86c6181SAlex Tomas int k; 812a86c6181SAlex Tomas 813a86c6181SAlex Tomas chex = ex = EXT_FIRST_EXTENT(eh); 814a86c6181SAlex Tomas for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { 815a86c6181SAlex Tomas BUG_ON(k && le32_to_cpu(ex->ee_block) 816a86c6181SAlex Tomas <= le32_to_cpu(ex[-1].ee_block)); 817a86c6181SAlex Tomas if (block < le32_to_cpu(ex->ee_block)) 818a86c6181SAlex Tomas break; 819a86c6181SAlex Tomas chex = ex; 820a86c6181SAlex Tomas } 821a86c6181SAlex Tomas BUG_ON(chex != path->p_ext); 822a86c6181SAlex Tomas } 823a86c6181SAlex Tomas #endif 824a86c6181SAlex Tomas 825a86c6181SAlex Tomas } 826a86c6181SAlex Tomas 827a86c6181SAlex Tomas int ext4_ext_tree_init(handle_t *handle, struct inode *inode) 828a86c6181SAlex Tomas { 829a86c6181SAlex Tomas struct ext4_extent_header *eh; 830a86c6181SAlex Tomas 831a86c6181SAlex Tomas eh = ext_inode_hdr(inode); 832a86c6181SAlex Tomas eh->eh_depth = 0; 833a86c6181SAlex Tomas eh->eh_entries = 0; 834a86c6181SAlex Tomas eh->eh_magic = EXT4_EXT_MAGIC; 83555ad63bfSTheodore Ts'o eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); 836a86c6181SAlex Tomas ext4_mark_inode_dirty(handle, inode); 837a86c6181SAlex Tomas return 0; 838a86c6181SAlex Tomas } 839a86c6181SAlex Tomas 840a86c6181SAlex Tomas struct ext4_ext_path * 841725d26d3SAneesh Kumar K.V ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, 842107a7bd3STheodore Ts'o struct ext4_ext_path *path, int flags) 843a86c6181SAlex Tomas { 844a86c6181SAlex Tomas struct ext4_extent_header *eh; 845a86c6181SAlex Tomas struct buffer_head *bh; 846a86c6181SAlex Tomas short int depth, i, ppos = 0, alloc = 0; 847860d21e2STheodore Ts'o int ret; 848a86c6181SAlex Tomas 849a86c6181SAlex Tomas eh = ext_inode_hdr(inode); 850c29c0ae7SAlex Tomas depth = ext_depth(inode); 851a86c6181SAlex 
Tomas 852a86c6181SAlex Tomas /* account possible depth increase */ 853a86c6181SAlex Tomas if (!path) { 8545d4958f9SAvantika Mathur path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), 855a86c6181SAlex Tomas GFP_NOFS); 856a86c6181SAlex Tomas if (!path) 857a86c6181SAlex Tomas return ERR_PTR(-ENOMEM); 858a86c6181SAlex Tomas alloc = 1; 859a86c6181SAlex Tomas } 860a86c6181SAlex Tomas path[0].p_hdr = eh; 8611973adcbSShen Feng path[0].p_bh = NULL; 862a86c6181SAlex Tomas 863c29c0ae7SAlex Tomas i = depth; 864a86c6181SAlex Tomas /* walk through the tree */ 865a86c6181SAlex Tomas while (i) { 866a86c6181SAlex Tomas ext_debug("depth %d: num %d, max %d\n", 867a86c6181SAlex Tomas ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 868c29c0ae7SAlex Tomas 869a86c6181SAlex Tomas ext4_ext_binsearch_idx(inode, path + ppos, block); 870bf89d16fSTheodore Ts'o path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); 871a86c6181SAlex Tomas path[ppos].p_depth = i; 872a86c6181SAlex Tomas path[ppos].p_ext = NULL; 873a86c6181SAlex Tomas 874107a7bd3STheodore Ts'o bh = read_extent_tree_block(inode, path[ppos].p_block, --i, 875107a7bd3STheodore Ts'o flags); 8767d7ea89eSTheodore Ts'o if (IS_ERR(bh)) { 8777d7ea89eSTheodore Ts'o ret = PTR_ERR(bh); 878a86c6181SAlex Tomas goto err; 879860d21e2STheodore Ts'o } 8807d7ea89eSTheodore Ts'o 881a86c6181SAlex Tomas eh = ext_block_hdr(bh); 882a86c6181SAlex Tomas ppos++; 883273df556SFrank Mayhar if (unlikely(ppos > depth)) { 884273df556SFrank Mayhar put_bh(bh); 885273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 886273df556SFrank Mayhar "ppos %d > depth %d", ppos, depth); 887860d21e2STheodore Ts'o ret = -EIO; 888273df556SFrank Mayhar goto err; 889273df556SFrank Mayhar } 890a86c6181SAlex Tomas path[ppos].p_bh = bh; 891a86c6181SAlex Tomas path[ppos].p_hdr = eh; 892a86c6181SAlex Tomas } 893a86c6181SAlex Tomas 894a86c6181SAlex Tomas path[ppos].p_depth = i; 895a86c6181SAlex Tomas path[ppos].p_ext = NULL; 896a86c6181SAlex Tomas path[ppos].p_idx = NULL; 897a86c6181SAlex Tomas 898a86c6181SAlex Tomas /* find extent */ 899a86c6181SAlex Tomas ext4_ext_binsearch(inode, path + ppos, block); 9001973adcbSShen Feng /* if not an empty leaf */ 9011973adcbSShen Feng if (path[ppos].p_ext) 902bf89d16fSTheodore Ts'o path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); 903a86c6181SAlex Tomas 904a86c6181SAlex Tomas ext4_ext_show_path(inode, path); 905a86c6181SAlex Tomas 906a86c6181SAlex Tomas return path; 907a86c6181SAlex Tomas 908a86c6181SAlex Tomas err: 909a86c6181SAlex Tomas ext4_ext_drop_refs(path); 910a86c6181SAlex Tomas if (alloc) 911a86c6181SAlex Tomas kfree(path); 912860d21e2STheodore Ts'o return ERR_PTR(ret); 913a86c6181SAlex Tomas } 914a86c6181SAlex Tomas 915a86c6181SAlex Tomas /* 916d0d856e8SRandy Dunlap * ext4_ext_insert_index: 917d0d856e8SRandy Dunlap * insert new index [@logical;@ptr] into the block at @curp; 918d0d856e8SRandy Dunlap * check where to insert: before @curp or after @curp 919a86c6181SAlex Tomas */ 9201f109d5aSTheodore Ts'o static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, 921a86c6181SAlex Tomas struct ext4_ext_path *curp, 922f65e6fbaSAlex Tomas int logical, ext4_fsblk_t ptr) 923a86c6181SAlex Tomas { 924a86c6181SAlex Tomas struct ext4_extent_idx *ix; 925a86c6181SAlex Tomas int len, err; 926a86c6181SAlex Tomas 9277e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, curp); 9287e028976SAvantika Mathur if (err) 929a86c6181SAlex Tomas return err; 930a86c6181SAlex Tomas 931273df556SFrank Mayhar if (unlikely(logical == 
le32_to_cpu(curp->p_idx->ei_block))) { 932273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 933273df556SFrank Mayhar "logical %d == ei_block %d!", 934273df556SFrank Mayhar logical, le32_to_cpu(curp->p_idx->ei_block)); 935273df556SFrank Mayhar return -EIO; 936273df556SFrank Mayhar } 937d4620315SRobin Dong 938d4620315SRobin Dong if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) 939d4620315SRobin Dong >= le16_to_cpu(curp->p_hdr->eh_max))) { 940d4620315SRobin Dong EXT4_ERROR_INODE(inode, 941d4620315SRobin Dong "eh_entries %d >= eh_max %d!", 942d4620315SRobin Dong le16_to_cpu(curp->p_hdr->eh_entries), 943d4620315SRobin Dong le16_to_cpu(curp->p_hdr->eh_max)); 944d4620315SRobin Dong return -EIO; 945d4620315SRobin Dong } 946d4620315SRobin Dong 947a86c6181SAlex Tomas if (logical > le32_to_cpu(curp->p_idx->ei_block)) { 948a86c6181SAlex Tomas /* insert after */ 94980e675f9SEric Gouriou ext_debug("insert new index %d after: %llu\n", logical, ptr); 950a86c6181SAlex Tomas ix = curp->p_idx + 1; 951a86c6181SAlex Tomas } else { 952a86c6181SAlex Tomas /* insert before */ 95380e675f9SEric Gouriou ext_debug("insert new index %d before: %llu\n", logical, ptr); 954a86c6181SAlex Tomas ix = curp->p_idx; 955a86c6181SAlex Tomas } 956a86c6181SAlex Tomas 95780e675f9SEric Gouriou len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; 95880e675f9SEric Gouriou BUG_ON(len < 0); 95980e675f9SEric Gouriou if (len > 0) { 96080e675f9SEric Gouriou ext_debug("insert new index %d: " 96180e675f9SEric Gouriou "move %d indices from 0x%p to 0x%p\n", 96280e675f9SEric Gouriou logical, len, ix, ix + 1); 96380e675f9SEric Gouriou memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); 96480e675f9SEric Gouriou } 96580e675f9SEric Gouriou 966f472e026STao Ma if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { 967f472e026STao Ma EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); 968f472e026STao Ma return -EIO; 969f472e026STao Ma } 970f472e026STao Ma 971a86c6181SAlex Tomas ix->ei_block = cpu_to_le32(logical); 972f65e6fbaSAlex Tomas ext4_idx_store_pblock(ix, ptr); 973e8546d06SMarcin Slusarz le16_add_cpu(&curp->p_hdr->eh_entries, 1); 974a86c6181SAlex Tomas 975273df556SFrank Mayhar if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { 976273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); 977273df556SFrank Mayhar return -EIO; 978273df556SFrank Mayhar } 979a86c6181SAlex Tomas 980a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, curp); 981a86c6181SAlex Tomas ext4_std_error(inode->i_sb, err); 982a86c6181SAlex Tomas 983a86c6181SAlex Tomas return err; 984a86c6181SAlex Tomas } 985a86c6181SAlex Tomas 986a86c6181SAlex Tomas /* 987d0d856e8SRandy Dunlap * ext4_ext_split: 988d0d856e8SRandy Dunlap * inserts new subtree into the path, using free index entry 989d0d856e8SRandy Dunlap * at depth @at: 990a86c6181SAlex Tomas * - allocates all needed blocks (new leaf and all intermediate index blocks) 991a86c6181SAlex Tomas * - makes decision where to split 992d0d856e8SRandy Dunlap * - moves remaining extents and index entries (right to the split point) 993a86c6181SAlex Tomas * into the newly allocated blocks 994d0d856e8SRandy Dunlap * - initializes subtree 995a86c6181SAlex Tomas */ 996a86c6181SAlex Tomas static int ext4_ext_split(handle_t *handle, struct inode *inode, 99755f020dbSAllison Henderson unsigned int flags, 998a86c6181SAlex Tomas struct ext4_ext_path *path, 999a86c6181SAlex Tomas struct ext4_extent *newext, int at) 1000a86c6181SAlex Tomas { 1001a86c6181SAlex Tomas struct buffer_head *bh = NULL; 1002a86c6181SAlex Tomas int depth = ext_depth(inode); 
1003a86c6181SAlex Tomas struct ext4_extent_header *neh; 1004a86c6181SAlex Tomas struct ext4_extent_idx *fidx; 1005a86c6181SAlex Tomas int i = at, k, m, a; 1006f65e6fbaSAlex Tomas ext4_fsblk_t newblock, oldblock; 1007a86c6181SAlex Tomas __le32 border; 1008f65e6fbaSAlex Tomas ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ 1009a86c6181SAlex Tomas int err = 0; 1010a86c6181SAlex Tomas 1011a86c6181SAlex Tomas /* make decision: where to split? */ 1012d0d856e8SRandy Dunlap /* FIXME: now decision is simplest: at current extent */ 1013a86c6181SAlex Tomas 1014d0d856e8SRandy Dunlap /* if current leaf will be split, then we should use 1015a86c6181SAlex Tomas * border from split point */ 1016273df556SFrank Mayhar if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { 1017273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); 1018273df556SFrank Mayhar return -EIO; 1019273df556SFrank Mayhar } 1020a86c6181SAlex Tomas if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { 1021a86c6181SAlex Tomas border = path[depth].p_ext[1].ee_block; 1022d0d856e8SRandy Dunlap ext_debug("leaf will be split." 1023a86c6181SAlex Tomas " next leaf starts at %d\n", 1024a86c6181SAlex Tomas le32_to_cpu(border)); 1025a86c6181SAlex Tomas } else { 1026a86c6181SAlex Tomas border = newext->ee_block; 1027a86c6181SAlex Tomas ext_debug("leaf will be added." 1028a86c6181SAlex Tomas " next leaf starts at %d\n", 1029a86c6181SAlex Tomas le32_to_cpu(border)); 1030a86c6181SAlex Tomas } 1031a86c6181SAlex Tomas 1032a86c6181SAlex Tomas /* 1033d0d856e8SRandy Dunlap * If error occurs, then we break processing 1034d0d856e8SRandy Dunlap * and mark filesystem read-only. index won't 1035a86c6181SAlex Tomas * be inserted and tree will be in consistent 1036d0d856e8SRandy Dunlap * state. Next mount will repair buffers too. 1037a86c6181SAlex Tomas */ 1038a86c6181SAlex Tomas 1039a86c6181SAlex Tomas /* 1040d0d856e8SRandy Dunlap * Get array to track all allocated blocks. 1041d0d856e8SRandy Dunlap * We need this to handle errors and free blocks 1042d0d856e8SRandy Dunlap * upon them. 
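	 * If an error occurs before the new index is inserted, the
	 * cleanup path at the end of this function walks this array
	 * and frees every block recorded in it.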
1043a86c6181SAlex Tomas */ 10445d4958f9SAvantika Mathur ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); 1045a86c6181SAlex Tomas if (!ablocks) 1046a86c6181SAlex Tomas return -ENOMEM; 1047a86c6181SAlex Tomas 1048a86c6181SAlex Tomas /* allocate all needed blocks */ 1049a86c6181SAlex Tomas ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); 1050a86c6181SAlex Tomas for (a = 0; a < depth - at; a++) { 1051654b4908SAneesh Kumar K.V newblock = ext4_ext_new_meta_block(handle, inode, path, 105255f020dbSAllison Henderson newext, &err, flags); 1053a86c6181SAlex Tomas if (newblock == 0) 1054a86c6181SAlex Tomas goto cleanup; 1055a86c6181SAlex Tomas ablocks[a] = newblock; 1056a86c6181SAlex Tomas } 1057a86c6181SAlex Tomas 1058a86c6181SAlex Tomas /* initialize new leaf */ 1059a86c6181SAlex Tomas newblock = ablocks[--a]; 1060273df556SFrank Mayhar if (unlikely(newblock == 0)) { 1061273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "newblock == 0!"); 1062273df556SFrank Mayhar err = -EIO; 1063273df556SFrank Mayhar goto cleanup; 1064273df556SFrank Mayhar } 1065a86c6181SAlex Tomas bh = sb_getblk(inode->i_sb, newblock); 1066aebf0243SWang Shilong if (unlikely(!bh)) { 1067860d21e2STheodore Ts'o err = -ENOMEM; 1068a86c6181SAlex Tomas goto cleanup; 1069a86c6181SAlex Tomas } 1070a86c6181SAlex Tomas lock_buffer(bh); 1071a86c6181SAlex Tomas 10727e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 10737e028976SAvantika Mathur if (err) 1074a86c6181SAlex Tomas goto cleanup; 1075a86c6181SAlex Tomas 1076a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1077a86c6181SAlex Tomas neh->eh_entries = 0; 107855ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1079a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 1080a86c6181SAlex Tomas neh->eh_depth = 0; 1081a86c6181SAlex Tomas 1082d0d856e8SRandy Dunlap /* move remainder of path[depth] to the new leaf */ 1083273df556SFrank Mayhar if (unlikely(path[depth].p_hdr->eh_entries != 1084273df556SFrank Mayhar path[depth].p_hdr->eh_max)) { 1085273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", 1086273df556SFrank Mayhar path[depth].p_hdr->eh_entries, 1087273df556SFrank Mayhar path[depth].p_hdr->eh_max); 1088273df556SFrank Mayhar err = -EIO; 1089273df556SFrank Mayhar goto cleanup; 1090273df556SFrank Mayhar } 1091a86c6181SAlex Tomas /* start copy from next extent */ 10921b16da77SYongqiang Yang m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; 10931b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, depth); 1094a86c6181SAlex Tomas if (m) { 10951b16da77SYongqiang Yang struct ext4_extent *ex; 10961b16da77SYongqiang Yang ex = EXT_FIRST_EXTENT(neh); 10971b16da77SYongqiang Yang memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); 1098e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 1099a86c6181SAlex Tomas } 1100a86c6181SAlex Tomas 11017ac5990dSDarrick J. 
Wong ext4_extent_block_csum_set(inode, neh); 1102a86c6181SAlex Tomas set_buffer_uptodate(bh); 1103a86c6181SAlex Tomas unlock_buffer(bh); 1104a86c6181SAlex Tomas 11050390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 11067e028976SAvantika Mathur if (err) 1107a86c6181SAlex Tomas goto cleanup; 1108a86c6181SAlex Tomas brelse(bh); 1109a86c6181SAlex Tomas bh = NULL; 1110a86c6181SAlex Tomas 1111a86c6181SAlex Tomas /* correct old leaf */ 1112a86c6181SAlex Tomas if (m) { 11137e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 11147e028976SAvantika Mathur if (err) 1115a86c6181SAlex Tomas goto cleanup; 1116e8546d06SMarcin Slusarz le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); 11177e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + depth); 11187e028976SAvantika Mathur if (err) 1119a86c6181SAlex Tomas goto cleanup; 1120a86c6181SAlex Tomas 1121a86c6181SAlex Tomas } 1122a86c6181SAlex Tomas 1123a86c6181SAlex Tomas /* create intermediate indexes */ 1124a86c6181SAlex Tomas k = depth - at - 1; 1125273df556SFrank Mayhar if (unlikely(k < 0)) { 1126273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "k %d < 0!", k); 1127273df556SFrank Mayhar err = -EIO; 1128273df556SFrank Mayhar goto cleanup; 1129273df556SFrank Mayhar } 1130a86c6181SAlex Tomas if (k) 1131a86c6181SAlex Tomas ext_debug("create %d intermediate indices\n", k); 1132a86c6181SAlex Tomas /* insert new index into current index block */ 1133a86c6181SAlex Tomas /* current depth stored in i var */ 1134a86c6181SAlex Tomas i = depth - 1; 1135a86c6181SAlex Tomas while (k--) { 1136a86c6181SAlex Tomas oldblock = newblock; 1137a86c6181SAlex Tomas newblock = ablocks[--a]; 1138bba90743SEric Sandeen bh = sb_getblk(inode->i_sb, newblock); 1139aebf0243SWang Shilong if (unlikely(!bh)) { 1140860d21e2STheodore Ts'o err = -ENOMEM; 1141a86c6181SAlex Tomas goto cleanup; 1142a86c6181SAlex Tomas } 1143a86c6181SAlex Tomas lock_buffer(bh); 1144a86c6181SAlex Tomas 11457e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 11467e028976SAvantika Mathur if (err) 1147a86c6181SAlex Tomas goto cleanup; 1148a86c6181SAlex Tomas 1149a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1150a86c6181SAlex Tomas neh->eh_entries = cpu_to_le16(1); 1151a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 115255ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1153a86c6181SAlex Tomas neh->eh_depth = cpu_to_le16(depth - i); 1154a86c6181SAlex Tomas fidx = EXT_FIRST_INDEX(neh); 1155a86c6181SAlex Tomas fidx->ei_block = border; 1156f65e6fbaSAlex Tomas ext4_idx_store_pblock(fidx, oldblock); 1157a86c6181SAlex Tomas 1158bba90743SEric Sandeen ext_debug("int.index at %d (block %llu): %u -> %llu\n", 1159bba90743SEric Sandeen i, newblock, le32_to_cpu(border), oldblock); 1160a86c6181SAlex Tomas 11611b16da77SYongqiang Yang /* move remainder of path[i] to the new index block */ 1162273df556SFrank Mayhar if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != 1163273df556SFrank Mayhar EXT_LAST_INDEX(path[i].p_hdr))) { 1164273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1165273df556SFrank Mayhar "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", 1166273df556SFrank Mayhar le32_to_cpu(path[i].p_ext->ee_block)); 1167273df556SFrank Mayhar err = -EIO; 1168273df556SFrank Mayhar goto cleanup; 1169273df556SFrank Mayhar } 11701b16da77SYongqiang Yang /* start copy indexes */ 11711b16da77SYongqiang Yang m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; 11721b16da77SYongqiang Yang ext_debug("cur 0x%p, last 0x%p\n", 
path[i].p_idx, 11731b16da77SYongqiang Yang EXT_MAX_INDEX(path[i].p_hdr)); 11741b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, i); 1175a86c6181SAlex Tomas if (m) { 11761b16da77SYongqiang Yang memmove(++fidx, path[i].p_idx, 1177a86c6181SAlex Tomas sizeof(struct ext4_extent_idx) * m); 1178e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 1179a86c6181SAlex Tomas } 11807ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, neh); 1181a86c6181SAlex Tomas set_buffer_uptodate(bh); 1182a86c6181SAlex Tomas unlock_buffer(bh); 1183a86c6181SAlex Tomas 11840390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 11857e028976SAvantika Mathur if (err) 1186a86c6181SAlex Tomas goto cleanup; 1187a86c6181SAlex Tomas brelse(bh); 1188a86c6181SAlex Tomas bh = NULL; 1189a86c6181SAlex Tomas 1190a86c6181SAlex Tomas /* correct old index */ 1191a86c6181SAlex Tomas if (m) { 1192a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + i); 1193a86c6181SAlex Tomas if (err) 1194a86c6181SAlex Tomas goto cleanup; 1195e8546d06SMarcin Slusarz le16_add_cpu(&path[i].p_hdr->eh_entries, -m); 1196a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path + i); 1197a86c6181SAlex Tomas if (err) 1198a86c6181SAlex Tomas goto cleanup; 1199a86c6181SAlex Tomas } 1200a86c6181SAlex Tomas 1201a86c6181SAlex Tomas i--; 1202a86c6181SAlex Tomas } 1203a86c6181SAlex Tomas 1204a86c6181SAlex Tomas /* insert new index */ 1205a86c6181SAlex Tomas err = ext4_ext_insert_index(handle, inode, path + at, 1206a86c6181SAlex Tomas le32_to_cpu(border), newblock); 1207a86c6181SAlex Tomas 1208a86c6181SAlex Tomas cleanup: 1209a86c6181SAlex Tomas if (bh) { 1210a86c6181SAlex Tomas if (buffer_locked(bh)) 1211a86c6181SAlex Tomas unlock_buffer(bh); 1212a86c6181SAlex Tomas brelse(bh); 1213a86c6181SAlex Tomas } 1214a86c6181SAlex Tomas 1215a86c6181SAlex Tomas if (err) { 1216a86c6181SAlex Tomas /* free all allocated blocks in error case */ 1217a86c6181SAlex Tomas for (i = 0; i < depth; i++) { 1218a86c6181SAlex Tomas if (!ablocks[i]) 1219a86c6181SAlex Tomas continue; 12207dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, 1221e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA); 1222a86c6181SAlex Tomas } 1223a86c6181SAlex Tomas } 1224a86c6181SAlex Tomas kfree(ablocks); 1225a86c6181SAlex Tomas 1226a86c6181SAlex Tomas return err; 1227a86c6181SAlex Tomas } 1228a86c6181SAlex Tomas 1229a86c6181SAlex Tomas /* 1230d0d856e8SRandy Dunlap * ext4_ext_grow_indepth: 1231d0d856e8SRandy Dunlap * implements tree growing procedure: 1232a86c6181SAlex Tomas * - allocates new block 1233a86c6181SAlex Tomas * - moves top-level data (index block or leaf) into the new block 1234d0d856e8SRandy Dunlap * - initializes new top-level, creating index that points to the 1235a86c6181SAlex Tomas * just created block 1236a86c6181SAlex Tomas */ 1237a86c6181SAlex Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 123855f020dbSAllison Henderson unsigned int flags, 1239a86c6181SAlex Tomas struct ext4_extent *newext) 1240a86c6181SAlex Tomas { 1241a86c6181SAlex Tomas struct ext4_extent_header *neh; 1242a86c6181SAlex Tomas struct buffer_head *bh; 1243f65e6fbaSAlex Tomas ext4_fsblk_t newblock; 1244a86c6181SAlex Tomas int err = 0; 1245a86c6181SAlex Tomas 12461939dd84SDmitry Monakhov newblock = ext4_ext_new_meta_block(handle, inode, NULL, 124755f020dbSAllison Henderson newext, &err, flags); 1248a86c6181SAlex Tomas if (newblock == 0) 1249a86c6181SAlex Tomas return err; 1250a86c6181SAlex Tomas 1251a86c6181SAlex 
Tomas bh = sb_getblk(inode->i_sb, newblock); 1252aebf0243SWang Shilong if (unlikely(!bh)) 1253860d21e2STheodore Ts'o return -ENOMEM; 1254a86c6181SAlex Tomas lock_buffer(bh); 1255a86c6181SAlex Tomas 12567e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 12577e028976SAvantika Mathur if (err) { 1258a86c6181SAlex Tomas unlock_buffer(bh); 1259a86c6181SAlex Tomas goto out; 1260a86c6181SAlex Tomas } 1261a86c6181SAlex Tomas 1262a86c6181SAlex Tomas /* move top-level index/leaf into new block */ 12631939dd84SDmitry Monakhov memmove(bh->b_data, EXT4_I(inode)->i_data, 12641939dd84SDmitry Monakhov sizeof(EXT4_I(inode)->i_data)); 1265a86c6181SAlex Tomas 1266a86c6181SAlex Tomas /* set size of new block */ 1267a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1268a86c6181SAlex Tomas /* old root could have indexes or leaves 1269a86c6181SAlex Tomas * so calculate e_max right way */ 1270a86c6181SAlex Tomas if (ext_depth(inode)) 127155ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1272a86c6181SAlex Tomas else 127355ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1274a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 12757ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, neh); 1276a86c6181SAlex Tomas set_buffer_uptodate(bh); 1277a86c6181SAlex Tomas unlock_buffer(bh); 1278a86c6181SAlex Tomas 12790390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 12807e028976SAvantika Mathur if (err) 1281a86c6181SAlex Tomas goto out; 1282a86c6181SAlex Tomas 12831939dd84SDmitry Monakhov /* Update top-level index: num,max,pointer */ 1284a86c6181SAlex Tomas neh = ext_inode_hdr(inode); 12851939dd84SDmitry Monakhov neh->eh_entries = cpu_to_le16(1); 12861939dd84SDmitry Monakhov ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); 12871939dd84SDmitry Monakhov if (neh->eh_depth == 0) { 12881939dd84SDmitry Monakhov /* Root extent block becomes index block */ 12891939dd84SDmitry Monakhov neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); 12901939dd84SDmitry Monakhov EXT_FIRST_INDEX(neh)->ei_block = 12911939dd84SDmitry Monakhov EXT_FIRST_EXTENT(neh)->ee_block; 12921939dd84SDmitry Monakhov } 12932ae02107SMingming Cao ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", 1294a86c6181SAlex Tomas le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 12955a0790c2SAndi Kleen le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1296bf89d16fSTheodore Ts'o ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1297a86c6181SAlex Tomas 1298ba39ebb6SWei Yongjun le16_add_cpu(&neh->eh_depth, 1); 12991939dd84SDmitry Monakhov ext4_mark_inode_dirty(handle, inode); 1300a86c6181SAlex Tomas out: 1301a86c6181SAlex Tomas brelse(bh); 1302a86c6181SAlex Tomas 1303a86c6181SAlex Tomas return err; 1304a86c6181SAlex Tomas } 1305a86c6181SAlex Tomas 1306a86c6181SAlex Tomas /* 1307d0d856e8SRandy Dunlap * ext4_ext_create_new_leaf: 1308d0d856e8SRandy Dunlap * finds empty index and adds new leaf. 1309d0d856e8SRandy Dunlap * if no free index is found, then it requests in-depth growing. 
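 * Two cases are handled below: if some level of the path still has a free
 * index slot, the tree is split starting at that level; otherwise the tree
 * is grown one level deeper via ext4_ext_grow_indepth().  Only a depth
 * 0 -> 1 grow is guaranteed to expose a free slot, so deeper trees may loop
 * back and split again after growing.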
1310a86c6181SAlex Tomas */ 1311a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 1312107a7bd3STheodore Ts'o unsigned int mb_flags, 1313107a7bd3STheodore Ts'o unsigned int gb_flags, 1314a86c6181SAlex Tomas struct ext4_ext_path *path, 1315a86c6181SAlex Tomas struct ext4_extent *newext) 1316a86c6181SAlex Tomas { 1317a86c6181SAlex Tomas struct ext4_ext_path *curp; 1318a86c6181SAlex Tomas int depth, i, err = 0; 1319a86c6181SAlex Tomas 1320a86c6181SAlex Tomas repeat: 1321a86c6181SAlex Tomas i = depth = ext_depth(inode); 1322a86c6181SAlex Tomas 1323a86c6181SAlex Tomas /* walk up to the tree and look for free index entry */ 1324a86c6181SAlex Tomas curp = path + depth; 1325a86c6181SAlex Tomas while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1326a86c6181SAlex Tomas i--; 1327a86c6181SAlex Tomas curp--; 1328a86c6181SAlex Tomas } 1329a86c6181SAlex Tomas 1330d0d856e8SRandy Dunlap /* we use already allocated block for index block, 1331d0d856e8SRandy Dunlap * so subsequent data blocks should be contiguous */ 1332a86c6181SAlex Tomas if (EXT_HAS_FREE_INDEX(curp)) { 1333a86c6181SAlex Tomas /* if we found index with free entry, then use that 1334a86c6181SAlex Tomas * entry: create all needed subtree and add new leaf */ 1335107a7bd3STheodore Ts'o err = ext4_ext_split(handle, inode, mb_flags, path, newext, i); 1336787e0981SShen Feng if (err) 1337787e0981SShen Feng goto out; 1338a86c6181SAlex Tomas 1339a86c6181SAlex Tomas /* refill path */ 1340a86c6181SAlex Tomas ext4_ext_drop_refs(path); 1341a86c6181SAlex Tomas path = ext4_ext_find_extent(inode, 1342725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1343107a7bd3STheodore Ts'o path, gb_flags); 1344a86c6181SAlex Tomas if (IS_ERR(path)) 1345a86c6181SAlex Tomas err = PTR_ERR(path); 1346a86c6181SAlex Tomas } else { 1347a86c6181SAlex Tomas /* tree is full, time to grow in depth */ 1348107a7bd3STheodore Ts'o err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext); 1349a86c6181SAlex Tomas if (err) 1350a86c6181SAlex Tomas goto out; 1351a86c6181SAlex Tomas 1352a86c6181SAlex Tomas /* refill path */ 1353a86c6181SAlex Tomas ext4_ext_drop_refs(path); 1354a86c6181SAlex Tomas path = ext4_ext_find_extent(inode, 1355725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1356107a7bd3STheodore Ts'o path, gb_flags); 1357a86c6181SAlex Tomas if (IS_ERR(path)) { 1358a86c6181SAlex Tomas err = PTR_ERR(path); 1359a86c6181SAlex Tomas goto out; 1360a86c6181SAlex Tomas } 1361a86c6181SAlex Tomas 1362a86c6181SAlex Tomas /* 1363d0d856e8SRandy Dunlap * only first (depth 0 -> 1) produces free space; 1364d0d856e8SRandy Dunlap * in all other cases we have to split the grown tree 1365a86c6181SAlex Tomas */ 1366a86c6181SAlex Tomas depth = ext_depth(inode); 1367a86c6181SAlex Tomas if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1368d0d856e8SRandy Dunlap /* now we need to split */ 1369a86c6181SAlex Tomas goto repeat; 1370a86c6181SAlex Tomas } 1371a86c6181SAlex Tomas } 1372a86c6181SAlex Tomas 1373a86c6181SAlex Tomas out: 1374a86c6181SAlex Tomas return err; 1375a86c6181SAlex Tomas } 1376a86c6181SAlex Tomas 1377a86c6181SAlex Tomas /* 13781988b51eSAlex Tomas * search the closest allocated block to the left for *logical 13791988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 13801988b51eSAlex Tomas * if *logical is the smallest allocated block, the function 13811988b51eSAlex Tomas * returns 0 at @phys 13821988b51eSAlex Tomas * return value contains 0 (success) or error code 
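 * Example: with an extent covering logical blocks 400..499 mapped at
 * physical blocks 8192..8291, a call with *logical == 500 rewrites
 * *logical to 499 and *phys to 8291.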
13831988b51eSAlex Tomas */ 13841f109d5aSTheodore Ts'o static int ext4_ext_search_left(struct inode *inode, 13851f109d5aSTheodore Ts'o struct ext4_ext_path *path, 13861988b51eSAlex Tomas ext4_lblk_t *logical, ext4_fsblk_t *phys) 13871988b51eSAlex Tomas { 13881988b51eSAlex Tomas struct ext4_extent_idx *ix; 13891988b51eSAlex Tomas struct ext4_extent *ex; 1390b939e376SAneesh Kumar K.V int depth, ee_len; 13911988b51eSAlex Tomas 1392273df556SFrank Mayhar if (unlikely(path == NULL)) { 1393273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1394273df556SFrank Mayhar return -EIO; 1395273df556SFrank Mayhar } 13961988b51eSAlex Tomas depth = path->p_depth; 13971988b51eSAlex Tomas *phys = 0; 13981988b51eSAlex Tomas 13991988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 14001988b51eSAlex Tomas return 0; 14011988b51eSAlex Tomas 14021988b51eSAlex Tomas /* usually extent in the path covers blocks smaller 14031988b51eSAlex Tomas * then *logical, but it can be that extent is the 14041988b51eSAlex Tomas * first one in the file */ 14051988b51eSAlex Tomas 14061988b51eSAlex Tomas ex = path[depth].p_ext; 1407b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 14081988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1409273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1410273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1411273df556SFrank Mayhar "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", 1412273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block)); 1413273df556SFrank Mayhar return -EIO; 1414273df556SFrank Mayhar } 14151988b51eSAlex Tomas while (--depth >= 0) { 14161988b51eSAlex Tomas ix = path[depth].p_idx; 1417273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1418273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1419273df556SFrank Mayhar "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", 14206ee3b212STao Ma ix != NULL ? le32_to_cpu(ix->ei_block) : 0, 1421273df556SFrank Mayhar EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? 
14226ee3b212STao Ma le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, 1423273df556SFrank Mayhar depth); 1424273df556SFrank Mayhar return -EIO; 1425273df556SFrank Mayhar } 14261988b51eSAlex Tomas } 14271988b51eSAlex Tomas return 0; 14281988b51eSAlex Tomas } 14291988b51eSAlex Tomas 1430273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1431273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1432273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1433273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1434273df556SFrank Mayhar return -EIO; 1435273df556SFrank Mayhar } 14361988b51eSAlex Tomas 1437b939e376SAneesh Kumar K.V *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1438bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex) + ee_len - 1; 14391988b51eSAlex Tomas return 0; 14401988b51eSAlex Tomas } 14411988b51eSAlex Tomas 14421988b51eSAlex Tomas /* 14431988b51eSAlex Tomas * search the closest allocated block to the right for *logical 14441988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 1445df3ab170STao Ma * if *logical is the largest allocated block, the function 14461988b51eSAlex Tomas * returns 0 at @phys 14471988b51eSAlex Tomas * return value contains 0 (success) or error code 14481988b51eSAlex Tomas */ 14491f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode, 14501f109d5aSTheodore Ts'o struct ext4_ext_path *path, 14514d33b1efSTheodore Ts'o ext4_lblk_t *logical, ext4_fsblk_t *phys, 14524d33b1efSTheodore Ts'o struct ext4_extent **ret_ex) 14531988b51eSAlex Tomas { 14541988b51eSAlex Tomas struct buffer_head *bh = NULL; 14551988b51eSAlex Tomas struct ext4_extent_header *eh; 14561988b51eSAlex Tomas struct ext4_extent_idx *ix; 14571988b51eSAlex Tomas struct ext4_extent *ex; 14581988b51eSAlex Tomas ext4_fsblk_t block; 1459395a87bfSEric Sandeen int depth; /* Note, NOT eh_depth; depth from top of tree */ 1460395a87bfSEric Sandeen int ee_len; 14611988b51eSAlex Tomas 1462273df556SFrank Mayhar if (unlikely(path == NULL)) { 1463273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1464273df556SFrank Mayhar return -EIO; 1465273df556SFrank Mayhar } 14661988b51eSAlex Tomas depth = path->p_depth; 14671988b51eSAlex Tomas *phys = 0; 14681988b51eSAlex Tomas 14691988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 14701988b51eSAlex Tomas return 0; 14711988b51eSAlex Tomas 14721988b51eSAlex Tomas /* usually extent in the path covers blocks smaller 14731988b51eSAlex Tomas * then *logical, but it can be that extent is the 14741988b51eSAlex Tomas * first one in the file */ 14751988b51eSAlex Tomas 14761988b51eSAlex Tomas ex = path[depth].p_ext; 1477b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 14781988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1479273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1480273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1481273df556SFrank Mayhar "first_extent(path[%d].p_hdr) != ex", 1482273df556SFrank Mayhar depth); 1483273df556SFrank Mayhar return -EIO; 1484273df556SFrank Mayhar } 14851988b51eSAlex Tomas while (--depth >= 0) { 14861988b51eSAlex Tomas ix = path[depth].p_idx; 1487273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1488273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1489273df556SFrank Mayhar "ix != EXT_FIRST_INDEX *logical %d!", 1490273df556SFrank Mayhar *logical); 1491273df556SFrank Mayhar return -EIO; 1492273df556SFrank 
Mayhar } 14931988b51eSAlex Tomas } 14944d33b1efSTheodore Ts'o goto found_extent; 14951988b51eSAlex Tomas } 14961988b51eSAlex Tomas 1497273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1498273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1499273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1500273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1501273df556SFrank Mayhar return -EIO; 1502273df556SFrank Mayhar } 15031988b51eSAlex Tomas 15041988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 15051988b51eSAlex Tomas /* next allocated block in this leaf */ 15061988b51eSAlex Tomas ex++; 15074d33b1efSTheodore Ts'o goto found_extent; 15081988b51eSAlex Tomas } 15091988b51eSAlex Tomas 15101988b51eSAlex Tomas /* go up and search for index to the right */ 15111988b51eSAlex Tomas while (--depth >= 0) { 15121988b51eSAlex Tomas ix = path[depth].p_idx; 15131988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 151425f1ee3aSWu Fengguang goto got_index; 15151988b51eSAlex Tomas } 15161988b51eSAlex Tomas 151725f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */ 15181988b51eSAlex Tomas return 0; 15191988b51eSAlex Tomas 152025f1ee3aSWu Fengguang got_index: 15211988b51eSAlex Tomas /* we've found index to the right, let's 15221988b51eSAlex Tomas * follow it and find the closest allocated 15231988b51eSAlex Tomas * block to the right */ 15241988b51eSAlex Tomas ix++; 1525bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 15261988b51eSAlex Tomas while (++depth < path->p_depth) { 1527395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */ 15287d7ea89eSTheodore Ts'o bh = read_extent_tree_block(inode, block, 1529107a7bd3STheodore Ts'o path->p_depth - depth, 0); 15307d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 15317d7ea89eSTheodore Ts'o return PTR_ERR(bh); 15327d7ea89eSTheodore Ts'o eh = ext_block_hdr(bh); 15331988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh); 1534bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 15351988b51eSAlex Tomas put_bh(bh); 15361988b51eSAlex Tomas } 15371988b51eSAlex Tomas 1538107a7bd3STheodore Ts'o bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0); 15397d7ea89eSTheodore Ts'o if (IS_ERR(bh)) 15407d7ea89eSTheodore Ts'o return PTR_ERR(bh); 15411988b51eSAlex Tomas eh = ext_block_hdr(bh); 15421988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh); 15434d33b1efSTheodore Ts'o found_extent: 15441988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block); 1545bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex); 15464d33b1efSTheodore Ts'o *ret_ex = ex; 15474d33b1efSTheodore Ts'o if (bh) 15481988b51eSAlex Tomas put_bh(bh); 15491988b51eSAlex Tomas return 0; 15501988b51eSAlex Tomas } 15511988b51eSAlex Tomas 15521988b51eSAlex Tomas /* 1553d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block: 1554f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1555d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as 1556d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent 1557d0d856e8SRandy Dunlap * with leaves. 
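 * Walking up from the leaf, the first level that still has an entry to the
 * right of the current position supplies the answer: the next extent's
 * ee_block at the leaf level, or the next index entry's ei_block above it.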
1558a86c6181SAlex Tomas */ 1559725d26d3SAneesh Kumar K.V static ext4_lblk_t 1560a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1561a86c6181SAlex Tomas { 1562a86c6181SAlex Tomas int depth; 1563a86c6181SAlex Tomas 1564a86c6181SAlex Tomas BUG_ON(path == NULL); 1565a86c6181SAlex Tomas depth = path->p_depth; 1566a86c6181SAlex Tomas 1567a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL) 1568f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1569a86c6181SAlex Tomas 1570a86c6181SAlex Tomas while (depth >= 0) { 1571a86c6181SAlex Tomas if (depth == path->p_depth) { 1572a86c6181SAlex Tomas /* leaf */ 15736f8ff537SCurt Wohlgemuth if (path[depth].p_ext && 15746f8ff537SCurt Wohlgemuth path[depth].p_ext != 1575a86c6181SAlex Tomas EXT_LAST_EXTENT(path[depth].p_hdr)) 1576a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_ext[1].ee_block); 1577a86c6181SAlex Tomas } else { 1578a86c6181SAlex Tomas /* index */ 1579a86c6181SAlex Tomas if (path[depth].p_idx != 1580a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1581a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_idx[1].ei_block); 1582a86c6181SAlex Tomas } 1583a86c6181SAlex Tomas depth--; 1584a86c6181SAlex Tomas } 1585a86c6181SAlex Tomas 1586f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1587a86c6181SAlex Tomas } 1588a86c6181SAlex Tomas 1589a86c6181SAlex Tomas /* 1590d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block: 1591f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1592a86c6181SAlex Tomas */ 15935718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1594a86c6181SAlex Tomas { 1595a86c6181SAlex Tomas int depth; 1596a86c6181SAlex Tomas 1597a86c6181SAlex Tomas BUG_ON(path == NULL); 1598a86c6181SAlex Tomas depth = path->p_depth; 1599a86c6181SAlex Tomas 1600a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */ 1601a86c6181SAlex Tomas if (depth == 0) 1602f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1603a86c6181SAlex Tomas 1604a86c6181SAlex Tomas /* go to index block */ 1605a86c6181SAlex Tomas depth--; 1606a86c6181SAlex Tomas 1607a86c6181SAlex Tomas while (depth >= 0) { 1608a86c6181SAlex Tomas if (path[depth].p_idx != 1609a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1610725d26d3SAneesh Kumar K.V return (ext4_lblk_t) 1611725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block); 1612a86c6181SAlex Tomas depth--; 1613a86c6181SAlex Tomas } 1614a86c6181SAlex Tomas 1615f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1616a86c6181SAlex Tomas } 1617a86c6181SAlex Tomas 1618a86c6181SAlex Tomas /* 1619d0d856e8SRandy Dunlap * ext4_ext_correct_indexes: 1620d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf, 1621d0d856e8SRandy Dunlap * then we have to correct all indexes above. 1622a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases? 
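 * Example: if the first extent of a leaf used to start at logical block 100
 * and now starts at 96, the border value 96 is copied into the parent index
 * and then propagated upwards for as long as the entry at each lower level
 * is still the first one in its block.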
1623a86c6181SAlex Tomas */ 16241d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1625a86c6181SAlex Tomas struct ext4_ext_path *path) 1626a86c6181SAlex Tomas { 1627a86c6181SAlex Tomas struct ext4_extent_header *eh; 1628a86c6181SAlex Tomas int depth = ext_depth(inode); 1629a86c6181SAlex Tomas struct ext4_extent *ex; 1630a86c6181SAlex Tomas __le32 border; 1631a86c6181SAlex Tomas int k, err = 0; 1632a86c6181SAlex Tomas 1633a86c6181SAlex Tomas eh = path[depth].p_hdr; 1634a86c6181SAlex Tomas ex = path[depth].p_ext; 1635273df556SFrank Mayhar 1636273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) { 1637273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1638273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh); 1639273df556SFrank Mayhar return -EIO; 1640273df556SFrank Mayhar } 1641a86c6181SAlex Tomas 1642a86c6181SAlex Tomas if (depth == 0) { 1643a86c6181SAlex Tomas /* there is no tree at all */ 1644a86c6181SAlex Tomas return 0; 1645a86c6181SAlex Tomas } 1646a86c6181SAlex Tomas 1647a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) { 1648a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */ 1649a86c6181SAlex Tomas return 0; 1650a86c6181SAlex Tomas } 1651a86c6181SAlex Tomas 1652a86c6181SAlex Tomas /* 1653d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one 1654a86c6181SAlex Tomas */ 1655a86c6181SAlex Tomas k = depth - 1; 1656a86c6181SAlex Tomas border = path[depth].p_ext->ee_block; 16577e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 16587e028976SAvantika Mathur if (err) 1659a86c6181SAlex Tomas return err; 1660a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 16617e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 16627e028976SAvantika Mathur if (err) 1663a86c6181SAlex Tomas return err; 1664a86c6181SAlex Tomas 1665a86c6181SAlex Tomas while (k--) { 1666a86c6181SAlex Tomas /* change all left-side indexes */ 1667a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1668a86c6181SAlex Tomas break; 16697e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 16707e028976SAvantika Mathur if (err) 1671a86c6181SAlex Tomas break; 1672a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 16737e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 16747e028976SAvantika Mathur if (err) 1675a86c6181SAlex Tomas break; 1676a86c6181SAlex Tomas } 1677a86c6181SAlex Tomas 1678a86c6181SAlex Tomas return err; 1679a86c6181SAlex Tomas } 1680a86c6181SAlex Tomas 1681748de673SAkira Fujita int 1682a86c6181SAlex Tomas ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, 1683a86c6181SAlex Tomas struct ext4_extent *ex2) 1684a86c6181SAlex Tomas { 1685da0169b3SEric Sandeen unsigned short ext1_ee_len, ext2_ee_len; 1686a2df2a63SAmit Arora 1687a2df2a63SAmit Arora /* 1688ec22ba8eSDmitry Monakhov * Make sure that both extents are initialized. We don't merge 1689ec22ba8eSDmitry Monakhov * uninitialized extents so that we can be sure that end_io code has 1690ec22ba8eSDmitry Monakhov * the extent that was written properly split out and conversion to 1691ec22ba8eSDmitry Monakhov * initialized is trivial. 1692a2df2a63SAmit Arora */ 1693a9b82415SDarrick J. 
Wong if (ext4_ext_is_uninitialized(ex1) != ext4_ext_is_uninitialized(ex2)) 1694a2df2a63SAmit Arora return 0; 1695a2df2a63SAmit Arora 1696a2df2a63SAmit Arora ext1_ee_len = ext4_ext_get_actual_len(ex1); 1697a2df2a63SAmit Arora ext2_ee_len = ext4_ext_get_actual_len(ex2); 1698a2df2a63SAmit Arora 1699a2df2a63SAmit Arora if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 170063f57933SAndrew Morton le32_to_cpu(ex2->ee_block)) 1701a86c6181SAlex Tomas return 0; 1702a86c6181SAlex Tomas 1703471d4011SSuparna Bhattacharya /* 1704471d4011SSuparna Bhattacharya * To allow future support for preallocated extents to be added 1705471d4011SSuparna Bhattacharya * as an RO_COMPAT feature, refuse to merge to extents if 1706d0d856e8SRandy Dunlap * this can result in the top bit of ee_len being set. 1707471d4011SSuparna Bhattacharya */ 1708da0169b3SEric Sandeen if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN) 1709471d4011SSuparna Bhattacharya return 0; 1710a9b82415SDarrick J. Wong if (ext4_ext_is_uninitialized(ex1) && 1711a9b82415SDarrick J. Wong (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) || 1712a9b82415SDarrick J. Wong atomic_read(&EXT4_I(inode)->i_unwritten) || 1713a9b82415SDarrick J. Wong (ext1_ee_len + ext2_ee_len > EXT_UNINIT_MAX_LEN))) 1714a9b82415SDarrick J. Wong return 0; 1715bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 1716b939e376SAneesh Kumar K.V if (ext1_ee_len >= 4) 1717a86c6181SAlex Tomas return 0; 1718a86c6181SAlex Tomas #endif 1719a86c6181SAlex Tomas 1720bf89d16fSTheodore Ts'o if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1721a86c6181SAlex Tomas return 1; 1722a86c6181SAlex Tomas return 0; 1723a86c6181SAlex Tomas } 1724a86c6181SAlex Tomas 1725a86c6181SAlex Tomas /* 172656055d3aSAmit Arora * This function tries to merge the "ex" extent to the next extent in the tree. 172756055d3aSAmit Arora * It always tries to merge towards right. If you want to merge towards 172856055d3aSAmit Arora * left, pass "ex - 1" as argument instead of "ex". 172956055d3aSAmit Arora * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 173056055d3aSAmit Arora * 1 if they got merged. 173156055d3aSAmit Arora */ 1732197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode, 173356055d3aSAmit Arora struct ext4_ext_path *path, 173456055d3aSAmit Arora struct ext4_extent *ex) 173556055d3aSAmit Arora { 173656055d3aSAmit Arora struct ext4_extent_header *eh; 173756055d3aSAmit Arora unsigned int depth, len; 1738a9b82415SDarrick J. Wong int merge_done = 0, uninit; 173956055d3aSAmit Arora 174056055d3aSAmit Arora depth = ext_depth(inode); 174156055d3aSAmit Arora BUG_ON(path[depth].p_hdr == NULL); 174256055d3aSAmit Arora eh = path[depth].p_hdr; 174356055d3aSAmit Arora 174456055d3aSAmit Arora while (ex < EXT_LAST_EXTENT(eh)) { 174556055d3aSAmit Arora if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 174656055d3aSAmit Arora break; 174756055d3aSAmit Arora /* merge with next extent! */ 1748a9b82415SDarrick J. Wong uninit = ext4_ext_is_uninitialized(ex); 174956055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 175056055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1)); 1751a9b82415SDarrick J. Wong if (uninit) 1752a9b82415SDarrick J. 
Wong ext4_ext_mark_uninitialized(ex); 175356055d3aSAmit Arora 175456055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) { 175556055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1) 175656055d3aSAmit Arora * sizeof(struct ext4_extent); 175756055d3aSAmit Arora memmove(ex + 1, ex + 2, len); 175856055d3aSAmit Arora } 1759e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1); 176056055d3aSAmit Arora merge_done = 1; 176156055d3aSAmit Arora WARN_ON(eh->eh_entries == 0); 176256055d3aSAmit Arora if (!eh->eh_entries) 176324676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 176456055d3aSAmit Arora } 176556055d3aSAmit Arora 176656055d3aSAmit Arora return merge_done; 176756055d3aSAmit Arora } 176856055d3aSAmit Arora 176956055d3aSAmit Arora /* 1770ecb94f5fSTheodore Ts'o * This function does a very simple check to see if we can collapse 1771ecb94f5fSTheodore Ts'o * an extent tree with a single extent tree leaf block into the inode. 1772ecb94f5fSTheodore Ts'o */ 1773ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle, 1774ecb94f5fSTheodore Ts'o struct inode *inode, 1775ecb94f5fSTheodore Ts'o struct ext4_ext_path *path) 1776ecb94f5fSTheodore Ts'o { 1777ecb94f5fSTheodore Ts'o size_t s; 1778ecb94f5fSTheodore Ts'o unsigned max_root = ext4_ext_space_root(inode, 0); 1779ecb94f5fSTheodore Ts'o ext4_fsblk_t blk; 1780ecb94f5fSTheodore Ts'o 1781ecb94f5fSTheodore Ts'o if ((path[0].p_depth != 1) || 1782ecb94f5fSTheodore Ts'o (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || 1783ecb94f5fSTheodore Ts'o (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) 1784ecb94f5fSTheodore Ts'o return; 1785ecb94f5fSTheodore Ts'o 1786ecb94f5fSTheodore Ts'o /* 1787ecb94f5fSTheodore Ts'o * We need to modify the block allocation bitmap and the block 1788ecb94f5fSTheodore Ts'o * group descriptor to release the extent tree block. If we 1789ecb94f5fSTheodore Ts'o * can't get the journal credits, give up. 1790ecb94f5fSTheodore Ts'o */ 1791ecb94f5fSTheodore Ts'o if (ext4_journal_extend(handle, 2)) 1792ecb94f5fSTheodore Ts'o return; 1793ecb94f5fSTheodore Ts'o 1794ecb94f5fSTheodore Ts'o /* 1795ecb94f5fSTheodore Ts'o * Copy the extent data up to the inode 1796ecb94f5fSTheodore Ts'o */ 1797ecb94f5fSTheodore Ts'o blk = ext4_idx_pblock(path[0].p_idx); 1798ecb94f5fSTheodore Ts'o s = le16_to_cpu(path[1].p_hdr->eh_entries) * 1799ecb94f5fSTheodore Ts'o sizeof(struct ext4_extent_idx); 1800ecb94f5fSTheodore Ts'o s += sizeof(struct ext4_extent_header); 1801ecb94f5fSTheodore Ts'o 1802ecb94f5fSTheodore Ts'o memcpy(path[0].p_hdr, path[1].p_hdr, s); 1803ecb94f5fSTheodore Ts'o path[0].p_depth = 0; 1804ecb94f5fSTheodore Ts'o path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + 1805ecb94f5fSTheodore Ts'o (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); 1806ecb94f5fSTheodore Ts'o path[0].p_hdr->eh_max = cpu_to_le16(max_root); 1807ecb94f5fSTheodore Ts'o 1808ecb94f5fSTheodore Ts'o brelse(path[1].p_bh); 1809ecb94f5fSTheodore Ts'o ext4_free_blocks(handle, inode, NULL, blk, 1, 18107d734532SJan Kara EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET | 18117d734532SJan Kara EXT4_FREE_BLOCKS_RESERVE); 1812ecb94f5fSTheodore Ts'o } 1813ecb94f5fSTheodore Ts'o 1814ecb94f5fSTheodore Ts'o /* 1815197217a5SYongqiang Yang * This function tries to merge the @ex extent to neighbours in the tree. 1816197217a5SYongqiang Yang * return 1 if merge left else 0. 
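 * (The "return 1 ... else 0" above describes ext4_ext_try_to_merge_right();
 * this wrapper itself returns nothing: it tries to merge @ex with its left
 * neighbour first, then with its right one, and finally attempts to collapse
 * a single-leaf tree into the inode via ext4_ext_try_to_merge_up().)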
1817197217a5SYongqiang Yang */ 1818ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle, 1819ecb94f5fSTheodore Ts'o struct inode *inode, 1820197217a5SYongqiang Yang struct ext4_ext_path *path, 1821197217a5SYongqiang Yang struct ext4_extent *ex) { 1822197217a5SYongqiang Yang struct ext4_extent_header *eh; 1823197217a5SYongqiang Yang unsigned int depth; 1824197217a5SYongqiang Yang int merge_done = 0; 1825197217a5SYongqiang Yang 1826197217a5SYongqiang Yang depth = ext_depth(inode); 1827197217a5SYongqiang Yang BUG_ON(path[depth].p_hdr == NULL); 1828197217a5SYongqiang Yang eh = path[depth].p_hdr; 1829197217a5SYongqiang Yang 1830197217a5SYongqiang Yang if (ex > EXT_FIRST_EXTENT(eh)) 1831197217a5SYongqiang Yang merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1832197217a5SYongqiang Yang 1833197217a5SYongqiang Yang if (!merge_done) 1834ecb94f5fSTheodore Ts'o (void) ext4_ext_try_to_merge_right(inode, path, ex); 1835197217a5SYongqiang Yang 1836ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge_up(handle, inode, path); 1837197217a5SYongqiang Yang } 1838197217a5SYongqiang Yang 1839197217a5SYongqiang Yang /* 184025d14f98SAmit Arora * check if a portion of the "newext" extent overlaps with an 184125d14f98SAmit Arora * existing extent. 184225d14f98SAmit Arora * 184325d14f98SAmit Arora * If there is an overlap discovered, it updates the length of the newext 184425d14f98SAmit Arora * such that there will be no overlap, and then returns 1. 184525d14f98SAmit Arora * If there is no overlap found, it returns 0. 184625d14f98SAmit Arora */ 18474d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 18484d33b1efSTheodore Ts'o struct inode *inode, 184925d14f98SAmit Arora struct ext4_extent *newext, 185025d14f98SAmit Arora struct ext4_ext_path *path) 185125d14f98SAmit Arora { 1852725d26d3SAneesh Kumar K.V ext4_lblk_t b1, b2; 185325d14f98SAmit Arora unsigned int depth, len1; 185425d14f98SAmit Arora unsigned int ret = 0; 185525d14f98SAmit Arora 185625d14f98SAmit Arora b1 = le32_to_cpu(newext->ee_block); 1857a2df2a63SAmit Arora len1 = ext4_ext_get_actual_len(newext); 185825d14f98SAmit Arora depth = ext_depth(inode); 185925d14f98SAmit Arora if (!path[depth].p_ext) 186025d14f98SAmit Arora goto out; 1861f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); 186225d14f98SAmit Arora 186325d14f98SAmit Arora /* 186425d14f98SAmit Arora * get the next allocated block if the extent in the path 186525d14f98SAmit Arora * is before the requested block(s) 186625d14f98SAmit Arora */ 186725d14f98SAmit Arora if (b2 < b1) { 186825d14f98SAmit Arora b2 = ext4_ext_next_allocated_block(path); 1869f17722f9SLukas Czerner if (b2 == EXT_MAX_BLOCKS) 187025d14f98SAmit Arora goto out; 1871f5a44db5STheodore Ts'o b2 = EXT4_LBLK_CMASK(sbi, b2); 187225d14f98SAmit Arora } 187325d14f98SAmit Arora 1874725d26d3SAneesh Kumar K.V /* check for wrap through zero on extent logical start block*/ 187525d14f98SAmit Arora if (b1 + len1 < b1) { 1876f17722f9SLukas Czerner len1 = EXT_MAX_BLOCKS - b1; 187725d14f98SAmit Arora newext->ee_len = cpu_to_le16(len1); 187825d14f98SAmit Arora ret = 1; 187925d14f98SAmit Arora } 188025d14f98SAmit Arora 188125d14f98SAmit Arora /* check for overlap */ 188225d14f98SAmit Arora if (b1 + len1 > b2) { 188325d14f98SAmit Arora newext->ee_len = cpu_to_le16(b2 - b1); 188425d14f98SAmit Arora ret = 1; 188525d14f98SAmit Arora } 188625d14f98SAmit Arora out: 188725d14f98SAmit Arora return ret; 188825d14f98SAmit Arora } 188925d14f98SAmit Arora 
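/*
 * Illustrative sketch only, not part of ext4: a self-contained model of the
 * trimming that ext4_ext_check_overlap() performs above.  The name
 * demo_trim_new_extent, the plain unsigned types and the max_blocks
 * parameter are hypothetical; the real code works on little-endian extent
 * fields, rounds the neighbour's start down to a cluster boundary on
 * bigalloc file systems, and finds the next allocated block via the path.
 */
static unsigned int demo_trim_new_extent(unsigned int b1, unsigned int *len1,
					 unsigned int b2,
					 unsigned int max_blocks)
{
	unsigned int ret = 0;

	/*
	 * Assumption: b2 is the start of the closest allocated extent at or
	 * after b1, or max_blocks if there is none.
	 */

	/* check for wrap through zero on the logical start block */
	if (b1 + *len1 < b1) {
		*len1 = max_blocks - b1;
		ret = 1;
	}

	/* the new range would run into the existing extent: shorten it */
	if (b1 + *len1 > b2) {
		*len1 = b2 - b1;
		ret = 1;
	}

	return ret;
}
/*
 * For instance, with b1 = 100, *len1 = 50 and an existing extent starting
 * at b2 = 120, *len1 is trimmed to 20 and 1 is returned, which is how the
 * caller of ext4_ext_check_overlap() learns that newext had to be shortened.
 */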
189025d14f98SAmit Arora /* 1891d0d856e8SRandy Dunlap * ext4_ext_insert_extent: 1892d0d856e8SRandy Dunlap * tries to merge requsted extent into the existing extent or 1893d0d856e8SRandy Dunlap * inserts requested extent as new one into the tree, 1894d0d856e8SRandy Dunlap * creating new leaf in the no-space case. 1895a86c6181SAlex Tomas */ 1896a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1897a86c6181SAlex Tomas struct ext4_ext_path *path, 1898107a7bd3STheodore Ts'o struct ext4_extent *newext, int gb_flags) 1899a86c6181SAlex Tomas { 1900a86c6181SAlex Tomas struct ext4_extent_header *eh; 1901a86c6181SAlex Tomas struct ext4_extent *ex, *fex; 1902a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */ 1903a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL; 1904725d26d3SAneesh Kumar K.V int depth, len, err; 1905725d26d3SAneesh Kumar K.V ext4_lblk_t next; 1906a9b82415SDarrick J. Wong int mb_flags = 0, uninit; 1907a86c6181SAlex Tomas 1908273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1909273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1910273df556SFrank Mayhar return -EIO; 1911273df556SFrank Mayhar } 1912a86c6181SAlex Tomas depth = ext_depth(inode); 1913a86c6181SAlex Tomas ex = path[depth].p_ext; 1914be8981beSLukas Czerner eh = path[depth].p_hdr; 1915273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1916273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1917273df556SFrank Mayhar return -EIO; 1918273df556SFrank Mayhar } 1919a86c6181SAlex Tomas 1920a86c6181SAlex Tomas /* try to insert block into found extent and return */ 1921107a7bd3STheodore Ts'o if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) { 1922be8981beSLukas Czerner 1923be8981beSLukas Czerner /* 1924be8981beSLukas Czerner * Try to see whether we should rather test the extent on 1925be8981beSLukas Czerner * right from ex, or from the left of ex. This is because 1926be8981beSLukas Czerner * ext4_ext_find_extent() can return either extent on the 1927be8981beSLukas Czerner * left, or on the right from the searched position. This 1928be8981beSLukas Czerner * will make merging more effective. 
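 * For example, when inserting an extent for blocks 150..159, the lookup may
 * have returned either the extent ending at block 148 (step one entry right
 * and try to prepend newext to the following extent) or the extent starting
 * at block 161 (step one entry left and try to append newext to the
 * preceding one).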
1929be8981beSLukas Czerner */ 1930be8981beSLukas Czerner if (ex < EXT_LAST_EXTENT(eh) && 1931be8981beSLukas Czerner (le32_to_cpu(ex->ee_block) + 1932be8981beSLukas Czerner ext4_ext_get_actual_len(ex) < 1933be8981beSLukas Czerner le32_to_cpu(newext->ee_block))) { 1934be8981beSLukas Czerner ex += 1; 1935be8981beSLukas Czerner goto prepend; 1936be8981beSLukas Czerner } else if ((ex > EXT_FIRST_EXTENT(eh)) && 1937be8981beSLukas Czerner (le32_to_cpu(newext->ee_block) + 1938be8981beSLukas Czerner ext4_ext_get_actual_len(newext) < 1939be8981beSLukas Czerner le32_to_cpu(ex->ee_block))) 1940be8981beSLukas Czerner ex -= 1; 1941be8981beSLukas Czerner 1942be8981beSLukas Czerner /* Try to append newex to the ex */ 1943be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, ex, newext)) { 1944be8981beSLukas Czerner ext_debug("append [%d]%d block to %u:[%d]%d" 1945be8981beSLukas Czerner "(from %llu)\n", 1946553f9008SMingming ext4_ext_is_uninitialized(newext), 1947a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 1948a86c6181SAlex Tomas le32_to_cpu(ex->ee_block), 1949553f9008SMingming ext4_ext_is_uninitialized(ex), 1950bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), 1951bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 1952be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 1953be8981beSLukas Czerner path + depth); 19547e028976SAvantika Mathur if (err) 1955a86c6181SAlex Tomas return err; 1956a9b82415SDarrick J. Wong uninit = ext4_ext_is_uninitialized(ex); 1957a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1958a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext)); 1959a9b82415SDarrick J. Wong if (uninit) 1960a9b82415SDarrick J. Wong ext4_ext_mark_uninitialized(ex); 1961a86c6181SAlex Tomas eh = path[depth].p_hdr; 1962a86c6181SAlex Tomas nearex = ex; 1963a86c6181SAlex Tomas goto merge; 1964a86c6181SAlex Tomas } 1965a86c6181SAlex Tomas 1966be8981beSLukas Czerner prepend: 1967be8981beSLukas Czerner /* Try to prepend newex to the ex */ 1968be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, newext, ex)) { 1969be8981beSLukas Czerner ext_debug("prepend %u[%d]%d block to %u:[%d]%d" 1970be8981beSLukas Czerner "(from %llu)\n", 1971be8981beSLukas Czerner le32_to_cpu(newext->ee_block), 1972be8981beSLukas Czerner ext4_ext_is_uninitialized(newext), 1973be8981beSLukas Czerner ext4_ext_get_actual_len(newext), 1974be8981beSLukas Czerner le32_to_cpu(ex->ee_block), 1975be8981beSLukas Czerner ext4_ext_is_uninitialized(ex), 1976be8981beSLukas Czerner ext4_ext_get_actual_len(ex), 1977be8981beSLukas Czerner ext4_ext_pblock(ex)); 1978be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 1979be8981beSLukas Czerner path + depth); 1980be8981beSLukas Czerner if (err) 1981be8981beSLukas Czerner return err; 1982be8981beSLukas Czerner 1983a9b82415SDarrick J. Wong uninit = ext4_ext_is_uninitialized(ex); 1984be8981beSLukas Czerner ex->ee_block = newext->ee_block; 1985be8981beSLukas Czerner ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 1986be8981beSLukas Czerner ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1987be8981beSLukas Czerner + ext4_ext_get_actual_len(newext)); 1988a9b82415SDarrick J. Wong if (uninit) 1989a9b82415SDarrick J. 
Wong ext4_ext_mark_uninitialized(ex); 1990be8981beSLukas Czerner eh = path[depth].p_hdr; 1991be8981beSLukas Czerner nearex = ex; 1992be8981beSLukas Czerner goto merge; 1993be8981beSLukas Czerner } 1994be8981beSLukas Czerner } 1995be8981beSLukas Czerner 1996a86c6181SAlex Tomas depth = ext_depth(inode); 1997a86c6181SAlex Tomas eh = path[depth].p_hdr; 1998a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 1999a86c6181SAlex Tomas goto has_space; 2000a86c6181SAlex Tomas 2001a86c6181SAlex Tomas /* probably next leaf has space for us? */ 2002a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh); 2003598dbdf2SRobin Dong next = EXT_MAX_BLOCKS; 2004598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 20055718789dSRobin Dong next = ext4_ext_next_leaf_block(path); 2006598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) { 200732de6756SYongqiang Yang ext_debug("next leaf block - %u\n", next); 2008a86c6181SAlex Tomas BUG_ON(npath != NULL); 2009107a7bd3STheodore Ts'o npath = ext4_ext_find_extent(inode, next, NULL, 0); 2010a86c6181SAlex Tomas if (IS_ERR(npath)) 2011a86c6181SAlex Tomas return PTR_ERR(npath); 2012a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth); 2013a86c6181SAlex Tomas eh = npath[depth].p_hdr; 2014a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 201525985edcSLucas De Marchi ext_debug("next leaf isn't full(%d)\n", 2016a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries)); 2017a86c6181SAlex Tomas path = npath; 2018ffb505ffSRobin Dong goto has_space; 2019a86c6181SAlex Tomas } 2020a86c6181SAlex Tomas ext_debug("next leaf has no free space(%d,%d)\n", 2021a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 2022a86c6181SAlex Tomas } 2023a86c6181SAlex Tomas 2024a86c6181SAlex Tomas /* 2025d0d856e8SRandy Dunlap * There is no free space in the found leaf. 2026d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree. 
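 * If the caller passed EXT4_GET_BLOCKS_METADATA_NOFAIL, the block for the
 * new leaf may be taken from the reserved pool (EXT4_MB_USE_RESERVED below),
 * which makes a late ENOSPC failure here less likely.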
2027a86c6181SAlex Tomas */ 2028107a7bd3STheodore Ts'o if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2029107a7bd3STheodore Ts'o mb_flags = EXT4_MB_USE_RESERVED; 2030107a7bd3STheodore Ts'o err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2031107a7bd3STheodore Ts'o path, newext); 2032a86c6181SAlex Tomas if (err) 2033a86c6181SAlex Tomas goto cleanup; 2034a86c6181SAlex Tomas depth = ext_depth(inode); 2035a86c6181SAlex Tomas eh = path[depth].p_hdr; 2036a86c6181SAlex Tomas 2037a86c6181SAlex Tomas has_space: 2038a86c6181SAlex Tomas nearex = path[depth].p_ext; 2039a86c6181SAlex Tomas 20407e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 20417e028976SAvantika Mathur if (err) 2042a86c6181SAlex Tomas goto cleanup; 2043a86c6181SAlex Tomas 2044a86c6181SAlex Tomas if (!nearex) { 2045a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */ 204632de6756SYongqiang Yang ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", 2047a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2048bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2049553f9008SMingming ext4_ext_is_uninitialized(newext), 2050a2df2a63SAmit Arora ext4_ext_get_actual_len(newext)); 205180e675f9SEric Gouriou nearex = EXT_FIRST_EXTENT(eh); 2052a86c6181SAlex Tomas } else { 205380e675f9SEric Gouriou if (le32_to_cpu(newext->ee_block) 205480e675f9SEric Gouriou > le32_to_cpu(nearex->ee_block)) { 205580e675f9SEric Gouriou /* Insert after */ 205632de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d before: " 205732de6756SYongqiang Yang "nearest %p\n", 2058a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 2059bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 2060553f9008SMingming ext4_ext_is_uninitialized(newext), 2061a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 206280e675f9SEric Gouriou nearex); 206380e675f9SEric Gouriou nearex++; 206480e675f9SEric Gouriou } else { 206580e675f9SEric Gouriou /* Insert before */ 206680e675f9SEric Gouriou BUG_ON(newext->ee_block == nearex->ee_block); 206732de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d after: " 206832de6756SYongqiang Yang "nearest %p\n", 206980e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 207080e675f9SEric Gouriou ext4_ext_pblock(newext), 207180e675f9SEric Gouriou ext4_ext_is_uninitialized(newext), 207280e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 207380e675f9SEric Gouriou nearex); 207480e675f9SEric Gouriou } 207580e675f9SEric Gouriou len = EXT_LAST_EXTENT(eh) - nearex + 1; 207680e675f9SEric Gouriou if (len > 0) { 207732de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d: " 207880e675f9SEric Gouriou "move %d extents from 0x%p to 0x%p\n", 207980e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 208080e675f9SEric Gouriou ext4_ext_pblock(newext), 208180e675f9SEric Gouriou ext4_ext_is_uninitialized(newext), 208280e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 208380e675f9SEric Gouriou len, nearex, nearex + 1); 208480e675f9SEric Gouriou memmove(nearex + 1, nearex, 208580e675f9SEric Gouriou len * sizeof(struct ext4_extent)); 208680e675f9SEric Gouriou } 2087a86c6181SAlex Tomas } 2088a86c6181SAlex Tomas 2089e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1); 209080e675f9SEric Gouriou path[depth].p_ext = nearex; 2091a86c6181SAlex Tomas nearex->ee_block = newext->ee_block; 2092bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2093a86c6181SAlex Tomas nearex->ee_len = newext->ee_len; 2094a86c6181SAlex Tomas 2095a86c6181SAlex Tomas merge: 2096e7bcf823SHaiboLiu /* try to merge 
extents */ 2097107a7bd3STheodore Ts'o if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) 2098ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, nearex); 2099a86c6181SAlex Tomas 2100a86c6181SAlex Tomas 2101a86c6181SAlex Tomas /* time to correct all indexes above */ 2102a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2103a86c6181SAlex Tomas if (err) 2104a86c6181SAlex Tomas goto cleanup; 2105a86c6181SAlex Tomas 2106ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2107a86c6181SAlex Tomas 2108a86c6181SAlex Tomas cleanup: 2109a86c6181SAlex Tomas if (npath) { 2110a86c6181SAlex Tomas ext4_ext_drop_refs(npath); 2111a86c6181SAlex Tomas kfree(npath); 2112a86c6181SAlex Tomas } 2113a86c6181SAlex Tomas return err; 2114a86c6181SAlex Tomas } 2115a86c6181SAlex Tomas 211691dd8c11SLukas Czerner static int ext4_fill_fiemap_extents(struct inode *inode, 211791dd8c11SLukas Czerner ext4_lblk_t block, ext4_lblk_t num, 211891dd8c11SLukas Czerner struct fiemap_extent_info *fieinfo) 21196873fa0dSEric Sandeen { 21206873fa0dSEric Sandeen struct ext4_ext_path *path = NULL; 21216873fa0dSEric Sandeen struct ext4_extent *ex; 212269eb33dcSZheng Liu struct extent_status es; 212391dd8c11SLukas Czerner ext4_lblk_t next, next_del, start = 0, end = 0; 21246873fa0dSEric Sandeen ext4_lblk_t last = block + num; 212591dd8c11SLukas Czerner int exists, depth = 0, err = 0; 212691dd8c11SLukas Czerner unsigned int flags = 0; 212791dd8c11SLukas Czerner unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 21286873fa0dSEric Sandeen 2129f17722f9SLukas Czerner while (block < last && block != EXT_MAX_BLOCKS) { 21306873fa0dSEric Sandeen num = last - block; 21316873fa0dSEric Sandeen /* find extent for this block */ 2132fab3a549STheodore Ts'o down_read(&EXT4_I(inode)->i_data_sem); 213391dd8c11SLukas Czerner 213491dd8c11SLukas Czerner if (path && ext_depth(inode) != depth) { 213591dd8c11SLukas Czerner /* depth was changed. 
we have to realloc path */ 213691dd8c11SLukas Czerner kfree(path); 213791dd8c11SLukas Czerner path = NULL; 213891dd8c11SLukas Czerner } 213991dd8c11SLukas Czerner 2140107a7bd3STheodore Ts'o path = ext4_ext_find_extent(inode, block, path, 0); 21416873fa0dSEric Sandeen if (IS_ERR(path)) { 214291dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 21436873fa0dSEric Sandeen err = PTR_ERR(path); 21446873fa0dSEric Sandeen path = NULL; 21456873fa0dSEric Sandeen break; 21466873fa0dSEric Sandeen } 21476873fa0dSEric Sandeen 21486873fa0dSEric Sandeen depth = ext_depth(inode); 2149273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 215091dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 2151273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2152273df556SFrank Mayhar err = -EIO; 2153273df556SFrank Mayhar break; 2154273df556SFrank Mayhar } 21556873fa0dSEric Sandeen ex = path[depth].p_ext; 21566873fa0dSEric Sandeen next = ext4_ext_next_allocated_block(path); 215791dd8c11SLukas Czerner ext4_ext_drop_refs(path); 21586873fa0dSEric Sandeen 215991dd8c11SLukas Czerner flags = 0; 21606873fa0dSEric Sandeen exists = 0; 21616873fa0dSEric Sandeen if (!ex) { 21626873fa0dSEric Sandeen /* there is no extent yet, so try to allocate 21636873fa0dSEric Sandeen * all requested space */ 21646873fa0dSEric Sandeen start = block; 21656873fa0dSEric Sandeen end = block + num; 21666873fa0dSEric Sandeen } else if (le32_to_cpu(ex->ee_block) > block) { 21676873fa0dSEric Sandeen /* need to allocate space before found extent */ 21686873fa0dSEric Sandeen start = block; 21696873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block); 21706873fa0dSEric Sandeen if (block + num < end) 21716873fa0dSEric Sandeen end = block + num; 21726873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block) 21736873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex)) { 21746873fa0dSEric Sandeen /* need to allocate space after found extent */ 21756873fa0dSEric Sandeen start = block; 21766873fa0dSEric Sandeen end = block + num; 21776873fa0dSEric Sandeen if (end >= next) 21786873fa0dSEric Sandeen end = next; 21796873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block)) { 21806873fa0dSEric Sandeen /* 21816873fa0dSEric Sandeen * some part of requested space is covered 21826873fa0dSEric Sandeen * by found extent 21836873fa0dSEric Sandeen */ 21846873fa0dSEric Sandeen start = block; 21856873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block) 21866873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex); 21876873fa0dSEric Sandeen if (block + num < end) 21886873fa0dSEric Sandeen end = block + num; 21896873fa0dSEric Sandeen exists = 1; 21906873fa0dSEric Sandeen } else { 21916873fa0dSEric Sandeen BUG(); 21926873fa0dSEric Sandeen } 21936873fa0dSEric Sandeen BUG_ON(end <= start); 21946873fa0dSEric Sandeen 21956873fa0dSEric Sandeen if (!exists) { 219669eb33dcSZheng Liu es.es_lblk = start; 219769eb33dcSZheng Liu es.es_len = end - start; 219869eb33dcSZheng Liu es.es_pblk = 0; 21996873fa0dSEric Sandeen } else { 220069eb33dcSZheng Liu es.es_lblk = le32_to_cpu(ex->ee_block); 220169eb33dcSZheng Liu es.es_len = ext4_ext_get_actual_len(ex); 220269eb33dcSZheng Liu es.es_pblk = ext4_ext_pblock(ex); 220391dd8c11SLukas Czerner if (ext4_ext_is_uninitialized(ex)) 220491dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_UNWRITTEN; 22056873fa0dSEric Sandeen } 22066873fa0dSEric Sandeen 220791dd8c11SLukas Czerner /* 220869eb33dcSZheng Liu * Find delayed extent and update es accordingly. 
We call 220969eb33dcSZheng Liu * it even in !exists case to find out whether es is the 221091dd8c11SLukas Czerner * last existing extent or not. 221191dd8c11SLukas Czerner */ 221269eb33dcSZheng Liu next_del = ext4_find_delayed_extent(inode, &es); 221391dd8c11SLukas Czerner if (!exists && next_del) { 221491dd8c11SLukas Czerner exists = 1; 221572dac95dSJie Liu flags |= (FIEMAP_EXTENT_DELALLOC | 221672dac95dSJie Liu FIEMAP_EXTENT_UNKNOWN); 221791dd8c11SLukas Czerner } 221891dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 221991dd8c11SLukas Czerner 222069eb33dcSZheng Liu if (unlikely(es.es_len == 0)) { 222169eb33dcSZheng Liu EXT4_ERROR_INODE(inode, "es.es_len == 0"); 2222273df556SFrank Mayhar err = -EIO; 2223273df556SFrank Mayhar break; 2224273df556SFrank Mayhar } 22256873fa0dSEric Sandeen 2226f7fec032SZheng Liu /* 2227f7fec032SZheng Liu * This is possible iff next == next_del == EXT_MAX_BLOCKS. 2228f7fec032SZheng Liu * we need to check next == EXT_MAX_BLOCKS because it is 2229f7fec032SZheng Liu * possible that an extent is with unwritten and delayed 2230f7fec032SZheng Liu * status due to when an extent is delayed allocated and 2231f7fec032SZheng Liu * is allocated by fallocate status tree will track both of 2232f7fec032SZheng Liu * them in a extent. 2233f7fec032SZheng Liu * 2234f7fec032SZheng Liu * So we could return a unwritten and delayed extent, and 2235f7fec032SZheng Liu * its block is equal to 'next'. 2236f7fec032SZheng Liu */ 2237f7fec032SZheng Liu if (next == next_del && next == EXT_MAX_BLOCKS) { 223891dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_LAST; 223991dd8c11SLukas Czerner if (unlikely(next_del != EXT_MAX_BLOCKS || 224091dd8c11SLukas Czerner next != EXT_MAX_BLOCKS)) { 224191dd8c11SLukas Czerner EXT4_ERROR_INODE(inode, 224291dd8c11SLukas Czerner "next extent == %u, next " 224391dd8c11SLukas Czerner "delalloc extent = %u", 224491dd8c11SLukas Czerner next, next_del); 224591dd8c11SLukas Czerner err = -EIO; 224691dd8c11SLukas Czerner break; 224791dd8c11SLukas Czerner } 224891dd8c11SLukas Czerner } 224991dd8c11SLukas Czerner 225091dd8c11SLukas Czerner if (exists) { 225191dd8c11SLukas Czerner err = fiemap_fill_next_extent(fieinfo, 225269eb33dcSZheng Liu (__u64)es.es_lblk << blksize_bits, 225369eb33dcSZheng Liu (__u64)es.es_pblk << blksize_bits, 225469eb33dcSZheng Liu (__u64)es.es_len << blksize_bits, 225591dd8c11SLukas Czerner flags); 22566873fa0dSEric Sandeen if (err < 0) 22576873fa0dSEric Sandeen break; 225891dd8c11SLukas Czerner if (err == 1) { 22596873fa0dSEric Sandeen err = 0; 22606873fa0dSEric Sandeen break; 22616873fa0dSEric Sandeen } 22626873fa0dSEric Sandeen } 22636873fa0dSEric Sandeen 226469eb33dcSZheng Liu block = es.es_lblk + es.es_len; 22656873fa0dSEric Sandeen } 22666873fa0dSEric Sandeen 22676873fa0dSEric Sandeen if (path) { 22686873fa0dSEric Sandeen ext4_ext_drop_refs(path); 22696873fa0dSEric Sandeen kfree(path); 22706873fa0dSEric Sandeen } 22716873fa0dSEric Sandeen 22726873fa0dSEric Sandeen return err; 22736873fa0dSEric Sandeen } 22746873fa0dSEric Sandeen 2275a86c6181SAlex Tomas /* 2276d0d856e8SRandy Dunlap * ext4_ext_put_gap_in_cache: 2277d0d856e8SRandy Dunlap * calculate boundaries of the gap that the requested block fits into 2278a86c6181SAlex Tomas * and cache this gap 2279a86c6181SAlex Tomas */ 228009b88252SAvantika Mathur static void 2281a86c6181SAlex Tomas ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, 2282725d26d3SAneesh Kumar K.V ext4_lblk_t block) 2283a86c6181SAlex Tomas { 2284a86c6181SAlex Tomas int depth = 
ext_depth(inode); 228527b1b228SAndi Shyti unsigned long len = 0; 228627b1b228SAndi Shyti ext4_lblk_t lblock = 0; 2287a86c6181SAlex Tomas struct ext4_extent *ex; 2288a86c6181SAlex Tomas 2289a86c6181SAlex Tomas ex = path[depth].p_ext; 2290a86c6181SAlex Tomas if (ex == NULL) { 229169eb33dcSZheng Liu /* 229269eb33dcSZheng Liu * there is no extent yet, so gap is [0;-] and we 229369eb33dcSZheng Liu * don't cache it 229469eb33dcSZheng Liu */ 2295a86c6181SAlex Tomas ext_debug("cache gap(whole file):"); 2296a86c6181SAlex Tomas } else if (block < le32_to_cpu(ex->ee_block)) { 2297a86c6181SAlex Tomas lblock = block; 2298a86c6181SAlex Tomas len = le32_to_cpu(ex->ee_block) - block; 2299bba90743SEric Sandeen ext_debug("cache gap(before): %u [%u:%u]", 2300bba90743SEric Sandeen block, 2301bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2302bba90743SEric Sandeen ext4_ext_get_actual_len(ex)); 2303d100eef2SZheng Liu if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2304d100eef2SZheng Liu ext4_es_insert_extent(inode, lblock, len, ~0, 2305d100eef2SZheng Liu EXTENT_STATUS_HOLE); 2306a86c6181SAlex Tomas } else if (block >= le32_to_cpu(ex->ee_block) 2307a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex)) { 2308725d26d3SAneesh Kumar K.V ext4_lblk_t next; 2309a86c6181SAlex Tomas lblock = le32_to_cpu(ex->ee_block) 2310a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex); 2311725d26d3SAneesh Kumar K.V 2312725d26d3SAneesh Kumar K.V next = ext4_ext_next_allocated_block(path); 2313bba90743SEric Sandeen ext_debug("cache gap(after): [%u:%u] %u", 2314bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2315bba90743SEric Sandeen ext4_ext_get_actual_len(ex), 2316bba90743SEric Sandeen block); 2317725d26d3SAneesh Kumar K.V BUG_ON(next == lblock); 2318725d26d3SAneesh Kumar K.V len = next - lblock; 2319d100eef2SZheng Liu if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2320d100eef2SZheng Liu ext4_es_insert_extent(inode, lblock, len, ~0, 2321d100eef2SZheng Liu EXTENT_STATUS_HOLE); 2322a86c6181SAlex Tomas } else { 2323a86c6181SAlex Tomas BUG(); 2324a86c6181SAlex Tomas } 2325a86c6181SAlex Tomas 2326bba90743SEric Sandeen ext_debug(" -> %u:%lu\n", lblock, len); 2327a86c6181SAlex Tomas } 2328a86c6181SAlex Tomas 2329a86c6181SAlex Tomas /* 2330d0d856e8SRandy Dunlap * ext4_ext_rm_idx: 2331d0d856e8SRandy Dunlap * removes index from the index block. 
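 * The index entry is removed from its node, the now-empty child block it
 * pointed to is freed, and parent ei_block values are fixed up where the
 * removal affected the first entry of a node.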
2332a86c6181SAlex Tomas */ 23331d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2334c36575e6SForrest Liu struct ext4_ext_path *path, int depth) 2335a86c6181SAlex Tomas { 2336a86c6181SAlex Tomas int err; 2337f65e6fbaSAlex Tomas ext4_fsblk_t leaf; 2338a86c6181SAlex Tomas 2339a86c6181SAlex Tomas /* free index block */ 2340c36575e6SForrest Liu depth--; 2341c36575e6SForrest Liu path = path + depth; 2342bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx); 2343273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) { 2344273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2345273df556SFrank Mayhar return -EIO; 2346273df556SFrank Mayhar } 23477e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path); 23487e028976SAvantika Mathur if (err) 2349a86c6181SAlex Tomas return err; 23500e1147b0SRobin Dong 23510e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 23520e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 23530e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx); 23540e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len); 23550e1147b0SRobin Dong } 23560e1147b0SRobin Dong 2357e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1); 23587e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path); 23597e028976SAvantika Mathur if (err) 2360a86c6181SAlex Tomas return err; 23612ae02107SMingming Cao ext_debug("index is empty, remove it, free block %llu\n", leaf); 2362d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf); 2363d8990240SAditya Kali 23647dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1, 2365e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2366c36575e6SForrest Liu 2367c36575e6SForrest Liu while (--depth >= 0) { 2368c36575e6SForrest Liu if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2369c36575e6SForrest Liu break; 2370c36575e6SForrest Liu path--; 2371c36575e6SForrest Liu err = ext4_ext_get_access(handle, inode, path); 2372c36575e6SForrest Liu if (err) 2373c36575e6SForrest Liu break; 2374c36575e6SForrest Liu path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2375c36575e6SForrest Liu err = ext4_ext_dirty(handle, inode, path); 2376c36575e6SForrest Liu if (err) 2377c36575e6SForrest Liu break; 2378c36575e6SForrest Liu } 2379a86c6181SAlex Tomas return err; 2380a86c6181SAlex Tomas } 2381a86c6181SAlex Tomas 2382a86c6181SAlex Tomas /* 2383ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent: 2384ee12b630SMingming Cao * This routine returns max. credits that needed to insert an extent 2385ee12b630SMingming Cao * to the extent tree. 2386ee12b630SMingming Cao * When pass the actual path, the caller should calculate credits 2387ee12b630SMingming Cao * under i_data_sem. 2388a86c6181SAlex Tomas */ 2389525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2390a86c6181SAlex Tomas struct ext4_ext_path *path) 2391a86c6181SAlex Tomas { 2392a86c6181SAlex Tomas if (path) { 2393ee12b630SMingming Cao int depth = ext_depth(inode); 2394f3bd1f3fSMingming Cao int ret = 0; 2395ee12b630SMingming Cao 2396a86c6181SAlex Tomas /* probably there is space in leaf? 
*/ 2397a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2398ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2399ee12b630SMingming Cao 2400ee12b630SMingming Cao /* 2401ee12b630SMingming Cao * There are some space in the leaf tree, no 2402ee12b630SMingming Cao * need to account for leaf block credit 2403ee12b630SMingming Cao * 2404ee12b630SMingming Cao * bitmaps and block group descriptor blocks 2405df3ab170STao Ma * and other metadata blocks still need to be 2406ee12b630SMingming Cao * accounted. 2407ee12b630SMingming Cao */ 2408525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */ 2409ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 24105887e98bSAneesh Kumar K.V return ret; 2411ee12b630SMingming Cao } 2412ee12b630SMingming Cao } 2413ee12b630SMingming Cao 2414525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks); 2415a86c6181SAlex Tomas } 2416a86c6181SAlex Tomas 2417a86c6181SAlex Tomas /* 2418fffb2739SJan Kara * How many index/leaf blocks need to change/allocate to add @extents extents? 2419ee12b630SMingming Cao * 2420fffb2739SJan Kara * If we add a single extent, then in the worse case, each tree level 2421fffb2739SJan Kara * index/leaf need to be changed in case of the tree split. 2422ee12b630SMingming Cao * 2423fffb2739SJan Kara * If more extents are inserted, they could cause the whole tree split more 2424fffb2739SJan Kara * than once, but this is really rare. 2425a86c6181SAlex Tomas */ 2426fffb2739SJan Kara int ext4_ext_index_trans_blocks(struct inode *inode, int extents) 2427ee12b630SMingming Cao { 2428ee12b630SMingming Cao int index; 2429f19d5870STao Ma int depth; 2430f19d5870STao Ma 2431f19d5870STao Ma /* If we are converting the inline data, only one is needed here. */ 2432f19d5870STao Ma if (ext4_has_inline_data(inode)) 2433f19d5870STao Ma return 1; 2434f19d5870STao Ma 2435f19d5870STao Ma depth = ext_depth(inode); 2436a86c6181SAlex Tomas 2437fffb2739SJan Kara if (extents <= 1) 2438ee12b630SMingming Cao index = depth * 2; 2439ee12b630SMingming Cao else 2440ee12b630SMingming Cao index = depth * 3; 2441a86c6181SAlex Tomas 2442ee12b630SMingming Cao return index; 2443a86c6181SAlex Tomas } 2444a86c6181SAlex Tomas 2445981250caSTheodore Ts'o static inline int get_default_free_blocks_flags(struct inode *inode) 2446981250caSTheodore Ts'o { 2447981250caSTheodore Ts'o if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2448981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 2449981250caSTheodore Ts'o else if (ext4_should_journal_data(inode)) 2450981250caSTheodore Ts'o return EXT4_FREE_BLOCKS_FORGET; 2451981250caSTheodore Ts'o return 0; 2452981250caSTheodore Ts'o } 2453981250caSTheodore Ts'o 2454a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2455a86c6181SAlex Tomas struct ext4_extent *ex, 2456d23142c6SLukas Czerner long long *partial_cluster, 2457725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to) 2458a86c6181SAlex Tomas { 24590aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2460a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex); 24610aa06000STheodore Ts'o ext4_fsblk_t pblk; 2462981250caSTheodore Ts'o int flags = get_default_free_blocks_flags(inode); 246318888cf0SAndrey Sidorov 24640aa06000STheodore Ts'o /* 24650aa06000STheodore Ts'o * For bigalloc file systems, we never free a partial cluster 24660aa06000STheodore Ts'o * at the beginning of the extent. 
Instead, we make a note 24670aa06000STheodore Ts'o * that we tried freeing the cluster, and check to see if we 24680aa06000STheodore Ts'o * need to free it on a subsequent call to ext4_remove_blocks, 24690aa06000STheodore Ts'o * or at the end of the ext4_truncate() operation. 24700aa06000STheodore Ts'o */ 24710aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 24720aa06000STheodore Ts'o 2473d8990240SAditya Kali trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); 24740aa06000STheodore Ts'o /* 24750aa06000STheodore Ts'o * If we have a partial cluster, and it's different from the 24760aa06000STheodore Ts'o * cluster of the last block, we need to explicitly free the 24770aa06000STheodore Ts'o * partial cluster here. 24780aa06000STheodore Ts'o */ 24790aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - 1; 2480d23142c6SLukas Czerner if ((*partial_cluster > 0) && 2481d23142c6SLukas Czerner (EXT4_B2C(sbi, pblk) != *partial_cluster)) { 24820aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 24830aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 24840aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 24850aa06000STheodore Ts'o *partial_cluster = 0; 24860aa06000STheodore Ts'o } 24870aa06000STheodore Ts'o 2488a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2489a86c6181SAlex Tomas { 2490a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2491a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock); 2492a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len; 2493a86c6181SAlex Tomas sbi->s_ext_extents++; 2494a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min) 2495a86c6181SAlex Tomas sbi->s_ext_min = ee_len; 2496a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max) 2497a86c6181SAlex Tomas sbi->s_ext_max = ee_len; 2498a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max) 2499a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode); 2500a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock); 2501a86c6181SAlex Tomas } 2502a86c6181SAlex Tomas #endif 2503a86c6181SAlex Tomas if (from >= le32_to_cpu(ex->ee_block) 2504a2df2a63SAmit Arora && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2505a86c6181SAlex Tomas /* tail removal */ 2506725d26d3SAneesh Kumar K.V ext4_lblk_t num; 2507d23142c6SLukas Czerner unsigned int unaligned; 2508725d26d3SAneesh Kumar K.V 2509a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from; 25100aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num; 2511d23142c6SLukas Czerner /* 2512d23142c6SLukas Czerner * Usually we want to free partial cluster at the end of the 2513d23142c6SLukas Czerner * extent, except for the situation when the cluster is still 2514d23142c6SLukas Czerner * used by any other extent (partial_cluster is negative). 
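 *
 * A hypothetical worked example of that bookkeeping, assuming a bigalloc
 * cluster ratio of 16 blocks per cluster (the numbers are illustrative
 * only): let the extent map logical blocks 100..199 to physical blocks
 * 5000..5099, and use the arithmetic of the tail-removal branch below.
 * - Removing the whole extent (from == 100) gives num == ee_len == 100
 *   and pblk == 5000.  Block 5000 sits at offset 8 inside cluster 312
 *   (EXT4_PBLK_COFF() != 0), so cluster 312 is recorded as a positive
 *   partial_cluster: a candidate to be freed once we know nothing else
 *   uses it (unless that same cluster was already marked as in use).
 * - Removing only the tail (from == 150) gives num == 50 and pblk == 5050,
 *   which lies in cluster 315.  The surviving head 5000..5049 still uses
 *   part of that cluster, so it is recorded negated (-315), meaning the
 *   cluster must not be freed.
 * The blocks themselves are freed right away; partial_cluster only records
 * what must be done later with the cluster in which the freed range
 * starts.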
2515d23142c6SLukas Czerner */ 2516d23142c6SLukas Czerner if (*partial_cluster < 0 && 2517d23142c6SLukas Czerner -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1)) 2518d23142c6SLukas Czerner flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2519d23142c6SLukas Czerner 2520d23142c6SLukas Czerner ext_debug("free last %u blocks starting %llu partial %lld\n", 2521d23142c6SLukas Czerner num, pblk, *partial_cluster); 25220aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 25230aa06000STheodore Ts'o /* 25240aa06000STheodore Ts'o * If the block range to be freed didn't start at the 25250aa06000STheodore Ts'o * beginning of a cluster, and we removed the entire 2526d23142c6SLukas Czerner * extent and the cluster is not used by any other extent, 2527d23142c6SLukas Czerner * save the partial cluster here, since we might need to 2528d23142c6SLukas Czerner * delete if we determine that the truncate operation has 2529d23142c6SLukas Czerner * removed all of the blocks in the cluster. 2530d23142c6SLukas Czerner * 2531d23142c6SLukas Czerner * On the other hand, if we did not manage to free the whole 2532d23142c6SLukas Czerner * extent, we have to mark the cluster as used (store negative 2533d23142c6SLukas Czerner * cluster number in partial_cluster). 25340aa06000STheodore Ts'o */ 2535f5a44db5STheodore Ts'o unaligned = EXT4_PBLK_COFF(sbi, pblk); 2536d23142c6SLukas Czerner if (unaligned && (ee_len == num) && 2537d23142c6SLukas Czerner (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) 25380aa06000STheodore Ts'o *partial_cluster = EXT4_B2C(sbi, pblk); 2539d23142c6SLukas Czerner else if (unaligned) 2540d23142c6SLukas Czerner *partial_cluster = -((long long)EXT4_B2C(sbi, pblk)); 2541d23142c6SLukas Czerner else if (*partial_cluster > 0) 25420aa06000STheodore Ts'o *partial_cluster = 0; 254378fb9cdfSLukas Czerner } else 254478fb9cdfSLukas Czerner ext4_error(sbi->s_sb, "strange request: removal(2) " 2545725d26d3SAneesh Kumar K.V "%u-%u from %u:%u\n", 2546a2df2a63SAmit Arora from, to, le32_to_cpu(ex->ee_block), ee_len); 2547a86c6181SAlex Tomas return 0; 2548a86c6181SAlex Tomas } 2549a86c6181SAlex Tomas 2550d583fb87SAllison Henderson 2551d583fb87SAllison Henderson /* 2552d583fb87SAllison Henderson * ext4_ext_rm_leaf() Removes the extents associated with the 2553d583fb87SAllison Henderson * blocks appearing between "start" and "end", and splits the extents 2554d583fb87SAllison Henderson * if "start" and "end" appear in the same extent 2555d583fb87SAllison Henderson * 2556d583fb87SAllison Henderson * @handle: The journal handle 2557d583fb87SAllison Henderson * @inode: The files inode 2558d583fb87SAllison Henderson * @path: The path to the leaf 2559d23142c6SLukas Czerner * @partial_cluster: The cluster which we'll have to free if all extents 2560d23142c6SLukas Czerner * has been released from it. It gets negative in case 2561d23142c6SLukas Czerner * that the cluster is still used. 
2562d583fb87SAllison Henderson * @start: The first block to remove 2563d583fb87SAllison Henderson * @end: The last block to remove 2564d583fb87SAllison Henderson */ 2565a86c6181SAlex Tomas static int 2566a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2567d23142c6SLukas Czerner struct ext4_ext_path *path, 2568d23142c6SLukas Czerner long long *partial_cluster, 25690aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end) 2570a86c6181SAlex Tomas { 25710aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2572a86c6181SAlex Tomas int err = 0, correct_index = 0; 2573a86c6181SAlex Tomas int depth = ext_depth(inode), credits; 2574a86c6181SAlex Tomas struct ext4_extent_header *eh; 2575750c9c47SDmitry Monakhov ext4_lblk_t a, b; 2576725d26d3SAneesh Kumar K.V unsigned num; 2577725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block; 2578a86c6181SAlex Tomas unsigned short ex_ee_len; 2579a2df2a63SAmit Arora unsigned uninitialized = 0; 2580a86c6181SAlex Tomas struct ext4_extent *ex; 2581d23142c6SLukas Czerner ext4_fsblk_t pblk; 2582a86c6181SAlex Tomas 2583c29c0ae7SAlex Tomas /* the header must be checked already in ext4_ext_remove_space() */ 25845f95d21fSLukas Czerner ext_debug("truncate since %u in leaf to %u\n", start, end); 2585a86c6181SAlex Tomas if (!path[depth].p_hdr) 2586a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2587a86c6181SAlex Tomas eh = path[depth].p_hdr; 2588273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 2589273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2590273df556SFrank Mayhar return -EIO; 2591273df556SFrank Mayhar } 2592a86c6181SAlex Tomas /* find where to start removing */ 25936ae06ff5SAshish Sangwan ex = path[depth].p_ext; 25946ae06ff5SAshish Sangwan if (!ex) 2595a86c6181SAlex Tomas ex = EXT_LAST_EXTENT(eh); 2596a86c6181SAlex Tomas 2597a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2598a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2599a86c6181SAlex Tomas 2600c0634493SEric Whitney /* 2601c0634493SEric Whitney * If we're starting with an extent other than the last one in the 2602c0634493SEric Whitney * node, we need to see if it shares a cluster with the extent to 2603c0634493SEric Whitney * the right (towards the end of the file). If its leftmost cluster 2604c0634493SEric Whitney * is this extent's rightmost cluster and it is not cluster aligned, 2605c0634493SEric Whitney * we'll mark it as a partial that is not to be deallocated. 
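 *
 * A hypothetical example, again assuming 16 blocks per cluster: if the
 * starting extent's last mapped block is physical 8002 and the extent to
 * its right begins at physical 8010, both blocks fall into cluster 500
 * (blocks 8000..8015) and 8010 is not cluster aligned, so partial_cluster
 * is preloaded with -500.  That negative marker keeps the shared cluster
 * from being freed while the blocks of the extents to the left are being
 * removed.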
2606c0634493SEric Whitney */ 2607c0634493SEric Whitney 2608c0634493SEric Whitney if (ex != EXT_LAST_EXTENT(eh)) { 2609c0634493SEric Whitney ext4_fsblk_t current_pblk, right_pblk; 2610c0634493SEric Whitney long long current_cluster, right_cluster; 2611c0634493SEric Whitney 2612c0634493SEric Whitney current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1; 2613c0634493SEric Whitney current_cluster = (long long)EXT4_B2C(sbi, current_pblk); 2614c0634493SEric Whitney right_pblk = ext4_ext_pblock(ex + 1); 2615c0634493SEric Whitney right_cluster = (long long)EXT4_B2C(sbi, right_pblk); 2616c0634493SEric Whitney if (current_cluster == right_cluster && 2617c0634493SEric Whitney EXT4_PBLK_COFF(sbi, right_pblk)) 2618c0634493SEric Whitney *partial_cluster = -right_cluster; 2619c0634493SEric Whitney } 2620c0634493SEric Whitney 2621d8990240SAditya Kali trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); 2622d8990240SAditya Kali 2623a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) && 2624a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) { 2625a41f2071SAneesh Kumar K.V 2626a41f2071SAneesh Kumar K.V if (ext4_ext_is_uninitialized(ex)) 2627a41f2071SAneesh Kumar K.V uninitialized = 1; 2628a41f2071SAneesh Kumar K.V else 2629a41f2071SAneesh Kumar K.V uninitialized = 0; 2630a41f2071SAneesh Kumar K.V 2631553f9008SMingming ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2632553f9008SMingming uninitialized, ex_ee_len); 2633a86c6181SAlex Tomas path[depth].p_ext = ex; 2634a86c6181SAlex Tomas 2635a86c6181SAlex Tomas a = ex_ee_block > start ? ex_ee_block : start; 2636d583fb87SAllison Henderson b = ex_ee_block+ex_ee_len - 1 < end ? 2637d583fb87SAllison Henderson ex_ee_block+ex_ee_len - 1 : end; 2638a86c6181SAlex Tomas 2639a86c6181SAlex Tomas ext_debug(" border %u:%u\n", a, b); 2640a86c6181SAlex Tomas 2641d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */ 26425f95d21fSLukas Czerner if (end < ex_ee_block) { 2643d23142c6SLukas Czerner /* 2644d23142c6SLukas Czerner * We're going to skip this extent and move to another, 2645d23142c6SLukas Czerner * so if this extent is not cluster aligned we have 2646d23142c6SLukas Czerner * to mark the current cluster as used to avoid 2647d23142c6SLukas Czerner * accidentally freeing it later on 2648d23142c6SLukas Czerner */ 2649d23142c6SLukas Czerner pblk = ext4_ext_pblock(ex); 2650f5a44db5STheodore Ts'o if (EXT4_PBLK_COFF(sbi, pblk)) 2651d23142c6SLukas Czerner *partial_cluster = 2652d23142c6SLukas Czerner -((long long)EXT4_B2C(sbi, pblk)); 2653d583fb87SAllison Henderson ex--; 2654d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block); 2655d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex); 2656d583fb87SAllison Henderson continue; 2657750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) { 2658dc1841d6SLukas Czerner EXT4_ERROR_INODE(inode, 2659dc1841d6SLukas Czerner "can not handle truncate %u:%u " 2660dc1841d6SLukas Czerner "on extent %u:%u", 2661dc1841d6SLukas Czerner start, end, ex_ee_block, 2662dc1841d6SLukas Czerner ex_ee_block + ex_ee_len - 1); 2663d583fb87SAllison Henderson err = -EIO; 2664d583fb87SAllison Henderson goto out; 2665a86c6181SAlex Tomas } else if (a != ex_ee_block) { 2666a86c6181SAlex Tomas /* remove tail of the extent */ 2667750c9c47SDmitry Monakhov num = a - ex_ee_block; 2668a86c6181SAlex Tomas } else { 2669a86c6181SAlex Tomas /* remove whole extent: excellent! 
*/ 2670a86c6181SAlex Tomas num = 0; 2671d583fb87SAllison Henderson } 267234071da7STheodore Ts'o /* 267334071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group 267434071da7STheodore Ts'o * descriptor) for each block group; assume two block 267534071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for 267634071da7STheodore Ts'o * the worst case 267734071da7STheodore Ts'o */ 267834071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2679a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) { 2680a86c6181SAlex Tomas correct_index = 1; 2681a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1; 2682a86c6181SAlex Tomas } 26835aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2684a86c6181SAlex Tomas 2685487caeefSJan Kara err = ext4_ext_truncate_extend_restart(handle, inode, credits); 26869102e4faSShen Feng if (err) 2687a86c6181SAlex Tomas goto out; 2688a86c6181SAlex Tomas 2689a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth); 2690a86c6181SAlex Tomas if (err) 2691a86c6181SAlex Tomas goto out; 2692a86c6181SAlex Tomas 26930aa06000STheodore Ts'o err = ext4_remove_blocks(handle, inode, ex, partial_cluster, 26940aa06000STheodore Ts'o a, b); 2695a86c6181SAlex Tomas if (err) 2696a86c6181SAlex Tomas goto out; 2697a86c6181SAlex Tomas 2698750c9c47SDmitry Monakhov if (num == 0) 2699d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */ 2700f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0); 2701a86c6181SAlex Tomas 2702a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num); 2703749269faSAmit Arora /* 2704749269faSAmit Arora * Do not mark uninitialized if all the blocks in the 2705749269faSAmit Arora * extent have been removed. 2706749269faSAmit Arora */ 2707749269faSAmit Arora if (uninitialized && num) 2708a2df2a63SAmit Arora ext4_ext_mark_uninitialized(ex); 2709d583fb87SAllison Henderson /* 2710d583fb87SAllison Henderson * If the extent was completely released, 2711d583fb87SAllison Henderson * we need to remove it from the leaf 2712d583fb87SAllison Henderson */ 2713d583fb87SAllison Henderson if (num == 0) { 2714f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) { 2715d583fb87SAllison Henderson /* 2716d583fb87SAllison Henderson * For hole punching, we need to scoot all the 2717d583fb87SAllison Henderson * extents up when an extent is removed so that 2718d583fb87SAllison Henderson * we dont have blank extents in the middle 2719d583fb87SAllison Henderson */ 2720d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2721d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2722d583fb87SAllison Henderson 2723d583fb87SAllison Henderson /* Now get rid of the one at the end */ 2724d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0, 2725d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2726d583fb87SAllison Henderson } 2727d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1); 2728d23142c6SLukas Czerner } else if (*partial_cluster > 0) 27290aa06000STheodore Ts'o *partial_cluster = 0; 2730d583fb87SAllison Henderson 2731750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth); 2732750c9c47SDmitry Monakhov if (err) 2733750c9c47SDmitry Monakhov goto out; 2734750c9c47SDmitry Monakhov 2735bf52c6f7SYongqiang Yang ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2736bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 2737a86c6181SAlex Tomas ex--; 2738a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2739a2df2a63SAmit Arora 
ex_ee_len = ext4_ext_get_actual_len(ex); 2740a86c6181SAlex Tomas } 2741a86c6181SAlex Tomas 2742a86c6181SAlex Tomas if (correct_index && eh->eh_entries) 2743a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2744a86c6181SAlex Tomas 27450aa06000STheodore Ts'o /* 2746d23142c6SLukas Czerner * Free the partial cluster only if the current extent does not 2747d23142c6SLukas Czerner * reference it. Otherwise we might free used cluster. 27480aa06000STheodore Ts'o */ 2749d23142c6SLukas Czerner if (*partial_cluster > 0 && 27500aa06000STheodore Ts'o (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != 27510aa06000STheodore Ts'o *partial_cluster)) { 2752981250caSTheodore Ts'o int flags = get_default_free_blocks_flags(inode); 27530aa06000STheodore Ts'o 27540aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 27550aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 27560aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 27570aa06000STheodore Ts'o *partial_cluster = 0; 27580aa06000STheodore Ts'o } 27590aa06000STheodore Ts'o 2760a86c6181SAlex Tomas /* if this leaf is free, then we should 2761a86c6181SAlex Tomas * remove it from index block above */ 2762a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2763c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, depth); 2764a86c6181SAlex Tomas 2765a86c6181SAlex Tomas out: 2766a86c6181SAlex Tomas return err; 2767a86c6181SAlex Tomas } 2768a86c6181SAlex Tomas 2769a86c6181SAlex Tomas /* 2770d0d856e8SRandy Dunlap * ext4_ext_more_to_rm: 2771d0d856e8SRandy Dunlap * returns 1 if current index has to be freed (even partial) 2772a86c6181SAlex Tomas */ 277309b88252SAvantika Mathur static int 2774a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path) 2775a86c6181SAlex Tomas { 2776a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL); 2777a86c6181SAlex Tomas 2778a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2779a86c6181SAlex Tomas return 0; 2780a86c6181SAlex Tomas 2781a86c6181SAlex Tomas /* 2782d0d856e8SRandy Dunlap * if truncate on deeper level happened, it wasn't partial, 2783a86c6181SAlex Tomas * so we have to consider current index for truncation 2784a86c6181SAlex Tomas */ 2785a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2786a86c6181SAlex Tomas return 0; 2787a86c6181SAlex Tomas return 1; 2788a86c6181SAlex Tomas } 2789a86c6181SAlex Tomas 279026a4c0c6STheodore Ts'o int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 27915f95d21fSLukas Czerner ext4_lblk_t end) 2792a86c6181SAlex Tomas { 2793a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 2794a86c6181SAlex Tomas int depth = ext_depth(inode); 2795968dee77SAshish Sangwan struct ext4_ext_path *path = NULL; 2796d23142c6SLukas Czerner long long partial_cluster = 0; 2797a86c6181SAlex Tomas handle_t *handle; 27986f2080e6SDmitry Monakhov int i = 0, err = 0; 2799a86c6181SAlex Tomas 28005f95d21fSLukas Czerner ext_debug("truncate since %u to %u\n", start, end); 2801a86c6181SAlex Tomas 2802a86c6181SAlex Tomas /* probably first extent we're gonna free will be last in block */ 28039924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1); 2804a86c6181SAlex Tomas if (IS_ERR(handle)) 2805a86c6181SAlex Tomas return PTR_ERR(handle); 2806a86c6181SAlex Tomas 28070617b83fSDmitry Monakhov again: 280861801325SLukas Czerner trace_ext4_ext_remove_space(inode, start, end, depth); 2809d8990240SAditya Kali 2810a86c6181SAlex Tomas /* 28115f95d21fSLukas 
Czerner * Check if we are removing extents inside the extent tree. If that 28125f95d21fSLukas Czerner * is the case, we are going to punch a hole inside the extent tree 28135f95d21fSLukas Czerner * so we have to check whether we need to split the extent covering 28145f95d21fSLukas Czerner * the last block to remove so we can easily remove the part of it 28155f95d21fSLukas Czerner * in ext4_ext_rm_leaf(). 28165f95d21fSLukas Czerner */ 28175f95d21fSLukas Czerner if (end < EXT_MAX_BLOCKS - 1) { 28185f95d21fSLukas Czerner struct ext4_extent *ex; 28195f95d21fSLukas Czerner ext4_lblk_t ee_block; 28205f95d21fSLukas Czerner 28215f95d21fSLukas Czerner /* find extent for this block */ 2822107a7bd3STheodore Ts'o path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); 28235f95d21fSLukas Czerner if (IS_ERR(path)) { 28245f95d21fSLukas Czerner ext4_journal_stop(handle); 28255f95d21fSLukas Czerner return PTR_ERR(path); 28265f95d21fSLukas Czerner } 28275f95d21fSLukas Czerner depth = ext_depth(inode); 28286f2080e6SDmitry Monakhov /* Leaf not may not exist only if inode has no blocks at all */ 28295f95d21fSLukas Czerner ex = path[depth].p_ext; 2830968dee77SAshish Sangwan if (!ex) { 28316f2080e6SDmitry Monakhov if (depth) { 28326f2080e6SDmitry Monakhov EXT4_ERROR_INODE(inode, 28336f2080e6SDmitry Monakhov "path[%d].p_hdr == NULL", 28346f2080e6SDmitry Monakhov depth); 28356f2080e6SDmitry Monakhov err = -EIO; 28366f2080e6SDmitry Monakhov } 28376f2080e6SDmitry Monakhov goto out; 2838968dee77SAshish Sangwan } 28395f95d21fSLukas Czerner 28405f95d21fSLukas Czerner ee_block = le32_to_cpu(ex->ee_block); 28415f95d21fSLukas Czerner 28425f95d21fSLukas Czerner /* 28435f95d21fSLukas Czerner * See if the last block is inside the extent, if so split 28445f95d21fSLukas Czerner * the extent at 'end' block so we can easily remove the 28455f95d21fSLukas Czerner * tail of the first part of the split extent in 28465f95d21fSLukas Czerner * ext4_ext_rm_leaf(). 28475f95d21fSLukas Czerner */ 28485f95d21fSLukas Czerner if (end >= ee_block && 28495f95d21fSLukas Czerner end < ee_block + ext4_ext_get_actual_len(ex) - 1) { 28505f95d21fSLukas Czerner int split_flag = 0; 28515f95d21fSLukas Czerner 28525f95d21fSLukas Czerner if (ext4_ext_is_uninitialized(ex)) 28535f95d21fSLukas Czerner split_flag = EXT4_EXT_MARK_UNINIT1 | 28545f95d21fSLukas Czerner EXT4_EXT_MARK_UNINIT2; 28555f95d21fSLukas Czerner 28565f95d21fSLukas Czerner /* 28575f95d21fSLukas Czerner * Split the extent in two so that 'end' is the last 285827dd4385SLukas Czerner * block in the first new extent. Also we should not 285927dd4385SLukas Czerner * fail removing space due to ENOSPC so try to use 286027dd4385SLukas Czerner * reserved block if that happens. 28615f95d21fSLukas Czerner */ 28625f95d21fSLukas Czerner err = ext4_split_extent_at(handle, inode, path, 28635f95d21fSLukas Czerner end + 1, split_flag, 2864107a7bd3STheodore Ts'o EXT4_EX_NOCACHE | 28655f95d21fSLukas Czerner EXT4_GET_BLOCKS_PRE_IO | 286627dd4385SLukas Czerner EXT4_GET_BLOCKS_METADATA_NOFAIL); 28675f95d21fSLukas Czerner 28685f95d21fSLukas Czerner if (err < 0) 28695f95d21fSLukas Czerner goto out; 28705f95d21fSLukas Czerner } 28715f95d21fSLukas Czerner } 28725f95d21fSLukas Czerner /* 2873d0d856e8SRandy Dunlap * We start scanning from right side, freeing all the blocks 2874d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise. 
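 *
 * A condensed sketch of that walk (illustrative only, compressing the
 * code below rather than adding a new helper): i is the current level,
 * with 0 at the root and i == depth at a leaf.
 *
 *	i = 0;
 *	while (i >= 0 && err == 0) {
 *		if (i == depth) {
 *			err = ext4_ext_rm_leaf(handle, inode, path,
 *					       &partial_cluster, start, end);
 *			... release path[i].p_bh, i--;
 *		} else if (ext4_ext_more_to_rm(path + i)) {
 *			... read the child with read_extent_tree_block(),
 *			    remember eh_entries in path[i].p_block, i++;
 *		} else {
 *			... remove the index with ext4_ext_rm_idx() if the
 *			    node became empty, release its buffer, i--;
 *		}
 *	}
 *
 * ext4_ext_more_to_rm() compares the saved path[i].p_block against the
 * node's current entry count to decide whether this index still has
 * children left to descend into.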
2875a86c6181SAlex Tomas */ 28760617b83fSDmitry Monakhov depth = ext_depth(inode); 2877968dee77SAshish Sangwan if (path) { 2878968dee77SAshish Sangwan int k = i = depth; 2879968dee77SAshish Sangwan while (--k > 0) 2880968dee77SAshish Sangwan path[k].p_block = 2881968dee77SAshish Sangwan le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2882968dee77SAshish Sangwan } else { 2883968dee77SAshish Sangwan path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), 2884968dee77SAshish Sangwan GFP_NOFS); 2885a86c6181SAlex Tomas if (path == NULL) { 2886a86c6181SAlex Tomas ext4_journal_stop(handle); 2887a86c6181SAlex Tomas return -ENOMEM; 2888a86c6181SAlex Tomas } 28890617b83fSDmitry Monakhov path[0].p_depth = depth; 2890a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode); 289189a4e48fSTheodore Ts'o i = 0; 28925f95d21fSLukas Czerner 2893c349179bSTheodore Ts'o if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) { 2894a86c6181SAlex Tomas err = -EIO; 2895a86c6181SAlex Tomas goto out; 2896a86c6181SAlex Tomas } 2897968dee77SAshish Sangwan } 2898968dee77SAshish Sangwan err = 0; 2899a86c6181SAlex Tomas 2900a86c6181SAlex Tomas while (i >= 0 && err == 0) { 2901a86c6181SAlex Tomas if (i == depth) { 2902a86c6181SAlex Tomas /* this is leaf block */ 2903d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 29040aa06000STheodore Ts'o &partial_cluster, start, 29055f95d21fSLukas Czerner end); 2906d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2907a86c6181SAlex Tomas brelse(path[i].p_bh); 2908a86c6181SAlex Tomas path[i].p_bh = NULL; 2909a86c6181SAlex Tomas i--; 2910a86c6181SAlex Tomas continue; 2911a86c6181SAlex Tomas } 2912a86c6181SAlex Tomas 2913a86c6181SAlex Tomas /* this is index block */ 2914a86c6181SAlex Tomas if (!path[i].p_hdr) { 2915a86c6181SAlex Tomas ext_debug("initialize header\n"); 2916a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2917a86c6181SAlex Tomas } 2918a86c6181SAlex Tomas 2919a86c6181SAlex Tomas if (!path[i].p_idx) { 2920d0d856e8SRandy Dunlap /* this level hasn't been touched yet */ 2921a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2922a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2923a86c6181SAlex Tomas ext_debug("init index ptr: hdr 0x%p, num %d\n", 2924a86c6181SAlex Tomas path[i].p_hdr, 2925a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries)); 2926a86c6181SAlex Tomas } else { 2927d0d856e8SRandy Dunlap /* we were already here, see at next index */ 2928a86c6181SAlex Tomas path[i].p_idx--; 2929a86c6181SAlex Tomas } 2930a86c6181SAlex Tomas 2931a86c6181SAlex Tomas ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2932a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr), 2933a86c6181SAlex Tomas path[i].p_idx); 2934a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) { 2935c29c0ae7SAlex Tomas struct buffer_head *bh; 2936a86c6181SAlex Tomas /* go to the next level */ 29372ae02107SMingming Cao ext_debug("move to level %d (block %llu)\n", 2938bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx)); 2939a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path)); 29407d7ea89eSTheodore Ts'o bh = read_extent_tree_block(inode, 2941107a7bd3STheodore Ts'o ext4_idx_pblock(path[i].p_idx), depth - i - 1, 2942107a7bd3STheodore Ts'o EXT4_EX_NOCACHE); 29437d7ea89eSTheodore Ts'o if (IS_ERR(bh)) { 2944a86c6181SAlex Tomas /* should we reset i_size? 
*/ 29457d7ea89eSTheodore Ts'o err = PTR_ERR(bh); 2946a86c6181SAlex Tomas break; 2947a86c6181SAlex Tomas } 294876828c88STheodore Ts'o /* Yield here to deal with large extent trees. 294976828c88STheodore Ts'o * Should be a no-op if we did IO above. */ 295076828c88STheodore Ts'o cond_resched(); 2951c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) { 2952c29c0ae7SAlex Tomas err = -EIO; 2953c29c0ae7SAlex Tomas break; 2954c29c0ae7SAlex Tomas } 2955c29c0ae7SAlex Tomas path[i + 1].p_bh = bh; 2956a86c6181SAlex Tomas 2957d0d856e8SRandy Dunlap /* save actual number of indexes since this 2958d0d856e8SRandy Dunlap * number is changed at the next iteration */ 2959a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2960a86c6181SAlex Tomas i++; 2961a86c6181SAlex Tomas } else { 2962d0d856e8SRandy Dunlap /* we finished processing this index, go up */ 2963a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2964d0d856e8SRandy Dunlap /* index is empty, remove it; 2965a86c6181SAlex Tomas * handle must be already prepared by the 2966a86c6181SAlex Tomas * truncatei_leaf() */ 2967c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, i); 2968a86c6181SAlex Tomas } 2969d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2970a86c6181SAlex Tomas brelse(path[i].p_bh); 2971a86c6181SAlex Tomas path[i].p_bh = NULL; 2972a86c6181SAlex Tomas i--; 2973a86c6181SAlex Tomas ext_debug("return to level %d\n", i); 2974a86c6181SAlex Tomas } 2975a86c6181SAlex Tomas } 2976a86c6181SAlex Tomas 297761801325SLukas Czerner trace_ext4_ext_remove_space_done(inode, start, end, depth, 297861801325SLukas Czerner partial_cluster, path->p_hdr->eh_entries); 2979d8990240SAditya Kali 29807b415bf6SAditya Kali /* If we still have something in the partial cluster and we have removed 29817b415bf6SAditya Kali * even the first extent, then we should free the blocks in the partial 29827b415bf6SAditya Kali * cluster as well. 
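 *
 * Concretely (with the same illustrative 16-block clusters as above): if
 * partial_cluster is, say, 315 at this point and the tree is now empty,
 * the whole cluster is released in one call -- 16 blocks starting at
 * EXT4_C2B(EXT4_SB(sb), 315) == 5040, i.e. physical blocks 5040..5055 --
 * and partial_cluster is reset to zero.  A negative value would mean the
 * cluster is still referenced and is simply left alone.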
*/ 2983d23142c6SLukas Czerner if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) { 2984981250caSTheodore Ts'o int flags = get_default_free_blocks_flags(inode); 29857b415bf6SAditya Kali 29867b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL, 29877b415bf6SAditya Kali EXT4_C2B(EXT4_SB(sb), partial_cluster), 29887b415bf6SAditya Kali EXT4_SB(sb)->s_cluster_ratio, flags); 29897b415bf6SAditya Kali partial_cluster = 0; 29907b415bf6SAditya Kali } 29917b415bf6SAditya Kali 2992a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */ 2993a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) { 2994a86c6181SAlex Tomas /* 2995d0d856e8SRandy Dunlap * truncate to zero freed all the tree, 2996d0d856e8SRandy Dunlap * so we need to correct eh_depth 2997a86c6181SAlex Tomas */ 2998a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path); 2999a86c6181SAlex Tomas if (err == 0) { 3000a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0; 3001a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max = 300255ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0)); 3003a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path); 3004a86c6181SAlex Tomas } 3005a86c6181SAlex Tomas } 3006a86c6181SAlex Tomas out: 3007a86c6181SAlex Tomas ext4_ext_drop_refs(path); 3008a86c6181SAlex Tomas kfree(path); 3009968dee77SAshish Sangwan if (err == -EAGAIN) { 3010968dee77SAshish Sangwan path = NULL; 30110617b83fSDmitry Monakhov goto again; 3012968dee77SAshish Sangwan } 3013a86c6181SAlex Tomas ext4_journal_stop(handle); 3014a86c6181SAlex Tomas 3015a86c6181SAlex Tomas return err; 3016a86c6181SAlex Tomas } 3017a86c6181SAlex Tomas 3018a86c6181SAlex Tomas /* 3019a86c6181SAlex Tomas * called at mount time 3020a86c6181SAlex Tomas */ 3021a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb) 3022a86c6181SAlex Tomas { 3023a86c6181SAlex Tomas /* 3024a86c6181SAlex Tomas * possible initialization would be here 3025a86c6181SAlex Tomas */ 3026a86c6181SAlex Tomas 302783982b6fSTheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 302890576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 302992b97816STheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled" 3030bbf2f9fbSRobert P. J. 
Day #ifdef AGGRESSIVE_TEST 303192b97816STheodore Ts'o ", aggressive tests" 3032a86c6181SAlex Tomas #endif 3033a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 303492b97816STheodore Ts'o ", check binsearch" 3035a86c6181SAlex Tomas #endif 3036a86c6181SAlex Tomas #ifdef EXTENTS_STATS 303792b97816STheodore Ts'o ", stats" 3038a86c6181SAlex Tomas #endif 303992b97816STheodore Ts'o "\n"); 304090576c0bSTheodore Ts'o #endif 3041a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3042a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 3043a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30; 3044a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0; 3045a86c6181SAlex Tomas #endif 3046a86c6181SAlex Tomas } 3047a86c6181SAlex Tomas } 3048a86c6181SAlex Tomas 3049a86c6181SAlex Tomas /* 3050a86c6181SAlex Tomas * called at umount time 3051a86c6181SAlex Tomas */ 3052a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb) 3053a86c6181SAlex Tomas { 305483982b6fSTheodore Ts'o if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 3055a86c6181SAlex Tomas return; 3056a86c6181SAlex Tomas 3057a86c6181SAlex Tomas #ifdef EXTENTS_STATS 3058a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 3059a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 3060a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 3061a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents, 3062a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents); 3063a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 3064a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 3065a86c6181SAlex Tomas } 3066a86c6181SAlex Tomas #endif 3067a86c6181SAlex Tomas } 3068a86c6181SAlex Tomas 3069d7b2a00cSZheng Liu static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex) 3070d7b2a00cSZheng Liu { 3071d7b2a00cSZheng Liu ext4_lblk_t ee_block; 3072d7b2a00cSZheng Liu ext4_fsblk_t ee_pblock; 3073d7b2a00cSZheng Liu unsigned int ee_len; 3074d7b2a00cSZheng Liu 3075d7b2a00cSZheng Liu ee_block = le32_to_cpu(ex->ee_block); 3076d7b2a00cSZheng Liu ee_len = ext4_ext_get_actual_len(ex); 3077d7b2a00cSZheng Liu ee_pblock = ext4_ext_pblock(ex); 3078d7b2a00cSZheng Liu 3079d7b2a00cSZheng Liu if (ee_len == 0) 3080d7b2a00cSZheng Liu return 0; 3081d7b2a00cSZheng Liu 3082d7b2a00cSZheng Liu return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, 3083d7b2a00cSZheng Liu EXTENT_STATUS_WRITTEN); 3084d7b2a00cSZheng Liu } 3085d7b2a00cSZheng Liu 3086093a088bSAneesh Kumar K.V /* FIXME!! we need to try to merge to left or right after zero-out */ 3087093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 3088093a088bSAneesh Kumar K.V { 30892407518dSLukas Czerner ext4_fsblk_t ee_pblock; 30902407518dSLukas Czerner unsigned int ee_len; 3091b720303dSJing Zhang int ret; 3092093a088bSAneesh Kumar K.V 3093093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 3094bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex); 3095093a088bSAneesh Kumar K.V 3096a107e5a3STheodore Ts'o ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 30972407518dSLukas Czerner if (ret > 0) 30982407518dSLukas Czerner ret = 0; 3099093a088bSAneesh Kumar K.V 31002407518dSLukas Czerner return ret; 3101093a088bSAneesh Kumar K.V } 3102093a088bSAneesh Kumar K.V 310347ea3bb5SYongqiang Yang /* 310447ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at given block. 
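 *
 * A worked example with illustrative numbers: if the extent maps logical
 * blocks 100..149 to physical blocks 5000..5049 and @split is 120, the
 * original extent is trimmed to ee_len = split - ee_block = 20 (logical
 * 100..119, physical 5000..5019) and a new extent is inserted at
 * ee_block = 120 with ee_len = 30, starting at physical block
 * newblock = split - ee_block + ext4_ext_pblock(ex) = 5020.  If @split
 * equals ee_block, nothing is split at all and only the initialized /
 * uninitialized state of the existing extent is changed.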
310547ea3bb5SYongqiang Yang * 310647ea3bb5SYongqiang Yang * @handle: the journal handle 310747ea3bb5SYongqiang Yang * @inode: the file inode 310847ea3bb5SYongqiang Yang * @path: the path to the extent 310947ea3bb5SYongqiang Yang * @split: the logical block where the extent is splitted. 311047ea3bb5SYongqiang Yang * @split_flags: indicates if the extent could be zeroout if split fails, and 311147ea3bb5SYongqiang Yang * the states(init or uninit) of new extents. 311247ea3bb5SYongqiang Yang * @flags: flags used to insert new extent to extent tree. 311347ea3bb5SYongqiang Yang * 311447ea3bb5SYongqiang Yang * 311547ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], states 311647ea3bb5SYongqiang Yang * of which are deterimined by split_flag. 311747ea3bb5SYongqiang Yang * 311847ea3bb5SYongqiang Yang * There are two cases: 311947ea3bb5SYongqiang Yang * a> the extent are splitted into two extent. 312047ea3bb5SYongqiang Yang * b> split is not needed, and just mark the extent. 312147ea3bb5SYongqiang Yang * 312247ea3bb5SYongqiang Yang * return 0 on success. 312347ea3bb5SYongqiang Yang */ 312447ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle, 312547ea3bb5SYongqiang Yang struct inode *inode, 312647ea3bb5SYongqiang Yang struct ext4_ext_path *path, 312747ea3bb5SYongqiang Yang ext4_lblk_t split, 312847ea3bb5SYongqiang Yang int split_flag, 312947ea3bb5SYongqiang Yang int flags) 313047ea3bb5SYongqiang Yang { 313147ea3bb5SYongqiang Yang ext4_fsblk_t newblock; 313247ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 3133adb23551SZheng Liu struct ext4_extent *ex, newex, orig_ex, zero_ex; 313447ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL; 313547ea3bb5SYongqiang Yang unsigned int ee_len, depth; 313647ea3bb5SYongqiang Yang int err = 0; 313747ea3bb5SYongqiang Yang 3138dee1f973SDmitry Monakhov BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 3139dee1f973SDmitry Monakhov (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 3140dee1f973SDmitry Monakhov 314147ea3bb5SYongqiang Yang ext_debug("ext4_split_extents_at: inode %lu, logical" 314247ea3bb5SYongqiang Yang "block %llu\n", inode->i_ino, (unsigned long long)split); 314347ea3bb5SYongqiang Yang 314447ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 314547ea3bb5SYongqiang Yang 314647ea3bb5SYongqiang Yang depth = ext_depth(inode); 314747ea3bb5SYongqiang Yang ex = path[depth].p_ext; 314847ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 314947ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 315047ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex); 315147ea3bb5SYongqiang Yang 315247ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3153357b66fdSDmitry Monakhov BUG_ON(!ext4_ext_is_uninitialized(ex) && 3154357b66fdSDmitry Monakhov split_flag & (EXT4_EXT_MAY_ZEROOUT | 3155357b66fdSDmitry Monakhov EXT4_EXT_MARK_UNINIT1 | 3156357b66fdSDmitry Monakhov EXT4_EXT_MARK_UNINIT2)); 315747ea3bb5SYongqiang Yang 315847ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth); 315947ea3bb5SYongqiang Yang if (err) 316047ea3bb5SYongqiang Yang goto out; 316147ea3bb5SYongqiang Yang 316247ea3bb5SYongqiang Yang if (split == ee_block) { 316347ea3bb5SYongqiang Yang /* 316447ea3bb5SYongqiang Yang * case b: block @split is the block that the extent begins with 316547ea3bb5SYongqiang Yang * then we just change the state of the extent, and splitting 316647ea3bb5SYongqiang Yang * is not needed. 
316747ea3bb5SYongqiang Yang */ 316847ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 316947ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex); 317047ea3bb5SYongqiang Yang else 317147ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex); 317247ea3bb5SYongqiang Yang 317347ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3174ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 317547ea3bb5SYongqiang Yang 3176ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 317747ea3bb5SYongqiang Yang goto out; 317847ea3bb5SYongqiang Yang } 317947ea3bb5SYongqiang Yang 318047ea3bb5SYongqiang Yang /* case a */ 318147ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex)); 318247ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block); 318347ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT1) 318447ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex); 318547ea3bb5SYongqiang Yang 318647ea3bb5SYongqiang Yang /* 318747ea3bb5SYongqiang Yang * path may lead to new leaf, not to original leaf any more 318847ea3bb5SYongqiang Yang * after ext4_ext_insert_extent() returns, 318947ea3bb5SYongqiang Yang */ 319047ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 319147ea3bb5SYongqiang Yang if (err) 319247ea3bb5SYongqiang Yang goto fix_extent_len; 319347ea3bb5SYongqiang Yang 319447ea3bb5SYongqiang Yang ex2 = &newex; 319547ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split); 319647ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 319747ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock); 319847ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 319947ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex2); 320047ea3bb5SYongqiang Yang 320147ea3bb5SYongqiang Yang err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 320247ea3bb5SYongqiang Yang if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3203dee1f973SDmitry Monakhov if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3204adb23551SZheng Liu if (split_flag & EXT4_EXT_DATA_VALID1) { 3205dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex2); 3206adb23551SZheng Liu zero_ex.ee_block = ex2->ee_block; 32078cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32088cde7ad1SZheng Liu ext4_ext_get_actual_len(ex2)); 3209adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3210adb23551SZheng Liu ext4_ext_pblock(ex2)); 3211adb23551SZheng Liu } else { 3212dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex); 3213adb23551SZheng Liu zero_ex.ee_block = ex->ee_block; 32148cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32158cde7ad1SZheng Liu ext4_ext_get_actual_len(ex)); 3216adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3217adb23551SZheng Liu ext4_ext_pblock(ex)); 3218adb23551SZheng Liu } 3219adb23551SZheng Liu } else { 322047ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex); 3221adb23551SZheng Liu zero_ex.ee_block = orig_ex.ee_block; 32228cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 32238cde7ad1SZheng Liu ext4_ext_get_actual_len(&orig_ex)); 3224adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3225adb23551SZheng Liu ext4_ext_pblock(&orig_ex)); 3226adb23551SZheng Liu } 3227dee1f973SDmitry Monakhov 322847ea3bb5SYongqiang Yang if (err) 322947ea3bb5SYongqiang Yang goto fix_extent_len; 323047ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */ 3231af1584f5SAl Viro ex->ee_len = cpu_to_le16(ee_len); 3232ecb94f5fSTheodore Ts'o 
ext4_ext_try_to_merge(handle, inode, path, ex); 3233ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3234adb23551SZheng Liu if (err) 3235adb23551SZheng Liu goto fix_extent_len; 3236adb23551SZheng Liu 3237adb23551SZheng Liu /* update extent status tree */ 3238d7b2a00cSZheng Liu err = ext4_zeroout_es(inode, &zero_ex); 3239adb23551SZheng Liu 324047ea3bb5SYongqiang Yang goto out; 324147ea3bb5SYongqiang Yang } else if (err) 324247ea3bb5SYongqiang Yang goto fix_extent_len; 324347ea3bb5SYongqiang Yang 324447ea3bb5SYongqiang Yang out: 324547ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 324647ea3bb5SYongqiang Yang return err; 324747ea3bb5SYongqiang Yang 324847ea3bb5SYongqiang Yang fix_extent_len: 324947ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len; 325047ea3bb5SYongqiang Yang ext4_ext_dirty(handle, inode, path + depth); 325147ea3bb5SYongqiang Yang return err; 325247ea3bb5SYongqiang Yang } 325347ea3bb5SYongqiang Yang 325447ea3bb5SYongqiang Yang /* 325547ea3bb5SYongqiang Yang * ext4_split_extents() splits an extent and mark extent which is covered 325647ea3bb5SYongqiang Yang * by @map as split_flags indicates 325747ea3bb5SYongqiang Yang * 325847ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (up to three) 325947ea3bb5SYongqiang Yang * There are three possibilities: 326047ea3bb5SYongqiang Yang * a> There is no split required 326147ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the extent 326247ea3bb5SYongqiang Yang * c> Splits in three extents: Somone is splitting in middle of the extent 326347ea3bb5SYongqiang Yang * 326447ea3bb5SYongqiang Yang */ 326547ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle, 326647ea3bb5SYongqiang Yang struct inode *inode, 326747ea3bb5SYongqiang Yang struct ext4_ext_path *path, 326847ea3bb5SYongqiang Yang struct ext4_map_blocks *map, 326947ea3bb5SYongqiang Yang int split_flag, 327047ea3bb5SYongqiang Yang int flags) 327147ea3bb5SYongqiang Yang { 327247ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 327347ea3bb5SYongqiang Yang struct ext4_extent *ex; 327447ea3bb5SYongqiang Yang unsigned int ee_len, depth; 327547ea3bb5SYongqiang Yang int err = 0; 327647ea3bb5SYongqiang Yang int uninitialized; 327747ea3bb5SYongqiang Yang int split_flag1, flags1; 32783a225670SZheng Liu int allocated = map->m_len; 327947ea3bb5SYongqiang Yang 328047ea3bb5SYongqiang Yang depth = ext_depth(inode); 328147ea3bb5SYongqiang Yang ex = path[depth].p_ext; 328247ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 328347ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 328447ea3bb5SYongqiang Yang uninitialized = ext4_ext_is_uninitialized(ex); 328547ea3bb5SYongqiang Yang 328647ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) { 3287dee1f973SDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 328847ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 328947ea3bb5SYongqiang Yang if (uninitialized) 329047ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1 | 329147ea3bb5SYongqiang Yang EXT4_EXT_MARK_UNINIT2; 3292dee1f973SDmitry Monakhov if (split_flag & EXT4_EXT_DATA_VALID2) 3293dee1f973SDmitry Monakhov split_flag1 |= EXT4_EXT_DATA_VALID1; 329447ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 329547ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1); 329693917411SYongqiang Yang if (err) 329793917411SYongqiang Yang goto out; 32983a225670SZheng Liu } else { 
32993a225670SZheng Liu allocated = ee_len - (map->m_lblk - ee_block); 330047ea3bb5SYongqiang Yang } 3301357b66fdSDmitry Monakhov /* 3302357b66fdSDmitry Monakhov * Update path is required because previous ext4_split_extent_at() may 3303357b66fdSDmitry Monakhov * result in split of original leaf or extent zeroout. 3304357b66fdSDmitry Monakhov */ 330547ea3bb5SYongqiang Yang ext4_ext_drop_refs(path); 3306107a7bd3STheodore Ts'o path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); 330747ea3bb5SYongqiang Yang if (IS_ERR(path)) 330847ea3bb5SYongqiang Yang return PTR_ERR(path); 3309357b66fdSDmitry Monakhov depth = ext_depth(inode); 3310357b66fdSDmitry Monakhov ex = path[depth].p_ext; 3311357b66fdSDmitry Monakhov uninitialized = ext4_ext_is_uninitialized(ex); 3312357b66fdSDmitry Monakhov split_flag1 = 0; 331347ea3bb5SYongqiang Yang 331447ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) { 3315357b66fdSDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3316357b66fdSDmitry Monakhov if (uninitialized) { 331747ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1; 3318357b66fdSDmitry Monakhov split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3319357b66fdSDmitry Monakhov EXT4_EXT_MARK_UNINIT2); 3320357b66fdSDmitry Monakhov } 332147ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 332247ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags); 332347ea3bb5SYongqiang Yang if (err) 332447ea3bb5SYongqiang Yang goto out; 332547ea3bb5SYongqiang Yang } 332647ea3bb5SYongqiang Yang 332747ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 332847ea3bb5SYongqiang Yang out: 33293a225670SZheng Liu return err ? err : allocated; 333047ea3bb5SYongqiang Yang } 333147ea3bb5SYongqiang Yang 333256055d3aSAmit Arora /* 3333e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write 333456055d3aSAmit Arora * to an uninitialized extent. It may result in splitting the uninitialized 333556055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two 333656055d3aSAmit Arora * uninitialized). 333756055d3aSAmit Arora * There are three possibilities: 333856055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized 333956055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent 334056055d3aSAmit Arora * c> Splits in three extents: Somone is writing in middle of the extent 33416f91bc5fSEric Gouriou * 33426f91bc5fSEric Gouriou * Pre-conditions: 33436f91bc5fSEric Gouriou * - The extent pointed to by 'path' is uninitialized. 33446f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset 33456f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 33466f91bc5fSEric Gouriou * 33476f91bc5fSEric Gouriou * Post-conditions on success: 33486f91bc5fSEric Gouriou * - the returned value is the number of blocks beyond map->l_lblk 33496f91bc5fSEric Gouriou * that are allocated and initialized. 33506f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len. 
335156055d3aSAmit Arora */ 3352725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle, 3353725d26d3SAneesh Kumar K.V struct inode *inode, 3354e35fd660STheodore Ts'o struct ext4_map_blocks *map, 335527dd4385SLukas Czerner struct ext4_ext_path *path, 335627dd4385SLukas Czerner int flags) 335756055d3aSAmit Arora { 335867a5da56SZheng Liu struct ext4_sb_info *sbi; 33596f91bc5fSEric Gouriou struct ext4_extent_header *eh; 3360667eff35SYongqiang Yang struct ext4_map_blocks split_map; 3361667eff35SYongqiang Yang struct ext4_extent zero_ex; 3362bc2d9db4SLukas Czerner struct ext4_extent *ex, *abut_ex; 336321ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block; 3364bc2d9db4SLukas Czerner unsigned int ee_len, depth, map_len = map->m_len; 3365bc2d9db4SLukas Czerner int allocated = 0, max_zeroout = 0; 336656055d3aSAmit Arora int err = 0; 3367667eff35SYongqiang Yang int split_flag = 0; 336821ca087aSDmitry Monakhov 336921ca087aSDmitry Monakhov ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 337021ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3371bc2d9db4SLukas Czerner (unsigned long long)map->m_lblk, map_len); 337221ca087aSDmitry Monakhov 337367a5da56SZheng Liu sbi = EXT4_SB(inode->i_sb); 337421ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 337521ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3376bc2d9db4SLukas Czerner if (eof_block < map->m_lblk + map_len) 3377bc2d9db4SLukas Czerner eof_block = map->m_lblk + map_len; 337856055d3aSAmit Arora 337956055d3aSAmit Arora depth = ext_depth(inode); 33806f91bc5fSEric Gouriou eh = path[depth].p_hdr; 338156055d3aSAmit Arora ex = path[depth].p_ext; 338256055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block); 338356055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex); 3384adb23551SZheng Liu zero_ex.ee_len = 0; 338521ca087aSDmitry Monakhov 33866f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 33876f91bc5fSEric Gouriou 33886f91bc5fSEric Gouriou /* Pre-conditions */ 33896f91bc5fSEric Gouriou BUG_ON(!ext4_ext_is_uninitialized(ex)); 33906f91bc5fSEric Gouriou BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 33916f91bc5fSEric Gouriou 33926f91bc5fSEric Gouriou /* 33936f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently 3394bc2d9db4SLukas Czerner * uninitialized extent to its neighbor. This is much cheaper 33956f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly 3396bc2d9db4SLukas Czerner * memmove() calls. Transferring to the left is the common case in 3397bc2d9db4SLukas Czerner * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3398bc2d9db4SLukas Czerner * followed by append writes. 33996f91bc5fSEric Gouriou * 34006f91bc5fSEric Gouriou * Limitations of the current logic: 3401bc2d9db4SLukas Czerner * - L1: we do not deal with writes covering the whole extent. 34026f91bc5fSEric Gouriou * This would require removing the extent if the transfer 34036f91bc5fSEric Gouriou * is possible. 3404bc2d9db4SLukas Czerner * - L2: we only attempt to merge with an extent stored in the 34056f91bc5fSEric Gouriou * same extent tree node. 
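 *
 * A hypothetical example of the left-merge fast path (numbers are
 * illustrative only): the initialized neighbor abut_ex covers logical
 * 90..99 at physical 4990..4999, ex is uninitialized and covers logical
 * 100..149 at physical 5000..5049, and the write is m_lblk = 100,
 * m_len = 8.  Conditions C1-C4 below hold, so no new extent is inserted:
 * abut_ex simply grows to logical 90..107 (ee_len 18) while ex shrinks to
 * logical 108..149 starting at physical 5008 (ee_len 42) and keeps its
 * uninitialized flag.  The function then reports allocated = 8
 * initialized blocks past m_lblk.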
34066f91bc5fSEric Gouriou */ 3407bc2d9db4SLukas Czerner if ((map->m_lblk == ee_block) && 3408bc2d9db4SLukas Czerner /* See if we can merge left */ 3409bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3410bc2d9db4SLukas Czerner (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 34116f91bc5fSEric Gouriou ext4_lblk_t prev_lblk; 34126f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk; 3413bc2d9db4SLukas Czerner unsigned int prev_len; 34146f91bc5fSEric Gouriou 3415bc2d9db4SLukas Czerner abut_ex = ex - 1; 3416bc2d9db4SLukas Czerner prev_lblk = le32_to_cpu(abut_ex->ee_block); 3417bc2d9db4SLukas Czerner prev_len = ext4_ext_get_actual_len(abut_ex); 3418bc2d9db4SLukas Czerner prev_pblk = ext4_ext_pblock(abut_ex); 34196f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex); 34206f91bc5fSEric Gouriou 34216f91bc5fSEric Gouriou /* 3422bc2d9db4SLukas Czerner * A transfer of blocks from 'ex' to 'abut_ex' is allowed 34236f91bc5fSEric Gouriou * upon those conditions: 3424bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3425bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3426bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3427bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 34286f91bc5fSEric Gouriou * overflowing the (initialized) length limit. 34296f91bc5fSEric Gouriou */ 3430bc2d9db4SLukas Czerner if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/ 34316f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/ 34326f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3433bc2d9db4SLukas Czerner (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 34346f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth); 34356f91bc5fSEric Gouriou if (err) 34366f91bc5fSEric Gouriou goto out; 34376f91bc5fSEric Gouriou 34386f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode, 3439bc2d9db4SLukas Czerner map, ex, abut_ex); 34406f91bc5fSEric Gouriou 3441bc2d9db4SLukas Czerner /* Shift the start of ex by 'map_len' blocks */ 3442bc2d9db4SLukas Czerner ex->ee_block = cpu_to_le32(ee_block + map_len); 3443bc2d9db4SLukas Czerner ext4_ext_store_pblock(ex, ee_pblk + map_len); 3444bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 34456f91bc5fSEric Gouriou ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 34466f91bc5fSEric Gouriou 3447bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3448bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 34496f91bc5fSEric Gouriou 3450bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3451bc2d9db4SLukas Czerner allocated = map_len; 3452bc2d9db4SLukas Czerner } 3453bc2d9db4SLukas Czerner } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3454bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3455bc2d9db4SLukas Czerner ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3456bc2d9db4SLukas Czerner /* See if we can merge right */ 3457bc2d9db4SLukas Czerner ext4_lblk_t next_lblk; 3458bc2d9db4SLukas Czerner ext4_fsblk_t next_pblk, ee_pblk; 3459bc2d9db4SLukas Czerner unsigned int next_len; 3460bc2d9db4SLukas Czerner 3461bc2d9db4SLukas Czerner abut_ex = ex + 1; 3462bc2d9db4SLukas Czerner next_lblk = le32_to_cpu(abut_ex->ee_block); 3463bc2d9db4SLukas Czerner next_len = ext4_ext_get_actual_len(abut_ex); 3464bc2d9db4SLukas Czerner next_pblk = ext4_ext_pblock(abut_ex); 3465bc2d9db4SLukas Czerner ee_pblk = ext4_ext_pblock(ex); 3466bc2d9db4SLukas Czerner 3467bc2d9db4SLukas Czerner /* 3468bc2d9db4SLukas Czerner * 
A transfer of blocks from 'ex' to 'abut_ex' is allowed 3469bc2d9db4SLukas Czerner * upon those conditions: 3470bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3471bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3472bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3473bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 3474bc2d9db4SLukas Czerner * overflowing the (initialized) length limit. 3475bc2d9db4SLukas Czerner */ 3476bc2d9db4SLukas Czerner if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/ 3477bc2d9db4SLukas Czerner ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3478bc2d9db4SLukas Czerner ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3479bc2d9db4SLukas Czerner (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3480bc2d9db4SLukas Czerner err = ext4_ext_get_access(handle, inode, path + depth); 3481bc2d9db4SLukas Czerner if (err) 3482bc2d9db4SLukas Czerner goto out; 3483bc2d9db4SLukas Czerner 3484bc2d9db4SLukas Czerner trace_ext4_ext_convert_to_initialized_fastpath(inode, 3485bc2d9db4SLukas Czerner map, ex, abut_ex); 3486bc2d9db4SLukas Czerner 3487bc2d9db4SLukas Czerner /* Shift the start of abut_ex by 'map_len' blocks */ 3488bc2d9db4SLukas Czerner abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3489bc2d9db4SLukas Czerner ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3490bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3491bc2d9db4SLukas Czerner ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 3492bc2d9db4SLukas Czerner 3493bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3494bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3495bc2d9db4SLukas Czerner 3496bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3497bc2d9db4SLukas Czerner allocated = map_len; 3498bc2d9db4SLukas Czerner } 3499bc2d9db4SLukas Czerner } 3500bc2d9db4SLukas Czerner if (allocated) { 35016f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */ 35026f91bc5fSEric Gouriou ext4_ext_dirty(handle, inode, path + depth); 35036f91bc5fSEric Gouriou 35046f91bc5fSEric Gouriou /* Update path to point to the right extent */ 3505bc2d9db4SLukas Czerner path[depth].p_ext = abut_ex; 35066f91bc5fSEric Gouriou goto out; 3507bc2d9db4SLukas Czerner } else 3508bc2d9db4SLukas Czerner allocated = ee_len - (map->m_lblk - ee_block); 35096f91bc5fSEric Gouriou 3510667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block); 351121ca087aSDmitry Monakhov /* 351221ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 35139e740568SYongqiang Yang * zeroout only if extent is fully inside i_size or new_size. 351421ca087aSDmitry Monakhov */ 3515667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? 
EXT4_EXT_MAY_ZEROOUT : 0; 351621ca087aSDmitry Monakhov 351767a5da56SZheng Liu if (EXT4_EXT_MAY_ZEROOUT & split_flag) 351867a5da56SZheng Liu max_zeroout = sbi->s_extent_max_zeroout_kb >> 35194f42f80aSLukas Czerner (inode->i_sb->s_blocksize_bits - 10); 352067a5da56SZheng Liu 352167a5da56SZheng Liu /* If extent is less than s_max_zeroout_kb, zeroout directly */ 352267a5da56SZheng Liu if (max_zeroout && (ee_len <= max_zeroout)) { 3523667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, ex); 35243977c965SAneesh Kumar K.V if (err) 352556055d3aSAmit Arora goto out; 3526adb23551SZheng Liu zero_ex.ee_block = ex->ee_block; 35278cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); 3528adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); 35299df5643aSAneesh Kumar K.V 35309df5643aSAneesh Kumar K.V err = ext4_ext_get_access(handle, inode, path + depth); 35319df5643aSAneesh Kumar K.V if (err) 35329df5643aSAneesh Kumar K.V goto out; 3533667eff35SYongqiang Yang ext4_ext_mark_initialized(ex); 3534ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3535ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 353656055d3aSAmit Arora goto out; 3537667eff35SYongqiang Yang } 3538093a088bSAneesh Kumar K.V 3539667eff35SYongqiang Yang /* 3540667eff35SYongqiang Yang * four cases: 3541667eff35SYongqiang Yang * 1. split the extent into three extents. 3542667eff35SYongqiang Yang * 2. split the extent into two extents, zeroout the first half. 3543667eff35SYongqiang Yang * 3. split the extent into two extents, zeroout the second half. 3544667eff35SYongqiang Yang * 4. split the extent into two extents with out zeroout. 3545667eff35SYongqiang Yang */ 3546667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3547667eff35SYongqiang Yang split_map.m_len = map->m_len; 3548667eff35SYongqiang Yang 354967a5da56SZheng Liu if (max_zeroout && (allocated > map->m_len)) { 355067a5da56SZheng Liu if (allocated <= max_zeroout) { 3551667eff35SYongqiang Yang /* case 3 */ 3552667eff35SYongqiang Yang zero_ex.ee_block = 35539b940f8eSAllison Henderson cpu_to_le32(map->m_lblk); 35549b940f8eSAllison Henderson zero_ex.ee_len = cpu_to_le16(allocated); 3555667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3556667eff35SYongqiang Yang ext4_ext_pblock(ex) + map->m_lblk - ee_block); 3557667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3558667eff35SYongqiang Yang if (err) 3559667eff35SYongqiang Yang goto out; 3560667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3561667eff35SYongqiang Yang split_map.m_len = allocated; 356267a5da56SZheng Liu } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { 3563667eff35SYongqiang Yang /* case 2 */ 3564667eff35SYongqiang Yang if (map->m_lblk != ee_block) { 3565667eff35SYongqiang Yang zero_ex.ee_block = ex->ee_block; 3566667eff35SYongqiang Yang zero_ex.ee_len = cpu_to_le16(map->m_lblk - 3567667eff35SYongqiang Yang ee_block); 3568667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3569667eff35SYongqiang Yang ext4_ext_pblock(ex)); 3570667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3571667eff35SYongqiang Yang if (err) 3572667eff35SYongqiang Yang goto out; 3573667eff35SYongqiang Yang } 3574667eff35SYongqiang Yang 3575667eff35SYongqiang Yang split_map.m_lblk = ee_block; 35769b940f8eSAllison Henderson split_map.m_len = map->m_lblk - ee_block + map->m_len; 35779b940f8eSAllison Henderson allocated = map->m_len; 3578667eff35SYongqiang Yang } 3579667eff35SYongqiang Yang } 
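	/*
	 * At this point split_map describes the region that ext4_split_extent()
	 * below has to carve out of 'ex' and mark initialized.  In case 3 it
	 * was widened to reach the end of the extent (whose tail was just
	 * zeroed out); in case 2 it was widened to start at ee_block (whose
	 * head was just zeroed out); either way only one split point is left.
	 * Otherwise it is exactly the caller's range and the extent is split
	 * into up to three pieces.
	 */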
3580667eff35SYongqiang Yang 3581667eff35SYongqiang Yang allocated = ext4_split_extent(handle, inode, path, 358227dd4385SLukas Czerner &split_map, split_flag, flags); 3583667eff35SYongqiang Yang if (allocated < 0) 3584667eff35SYongqiang Yang err = allocated; 3585667eff35SYongqiang Yang 3586667eff35SYongqiang Yang out: 3587adb23551SZheng Liu /* If we have gotten a failure, don't zero out status tree */ 3588adb23551SZheng Liu if (!err) 3589d7b2a00cSZheng Liu err = ext4_zeroout_es(inode, &zero_ex); 3590667eff35SYongqiang Yang return err ? err : allocated; 359156055d3aSAmit Arora } 359256055d3aSAmit Arora 3593c278bfecSAneesh Kumar K.V /* 3594e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() from 35950031462bSMingming Cao * ext4_get_blocks_dio_write() when DIO is used to write 35960031462bSMingming Cao * to an uninitialized extent. 35970031462bSMingming Cao * 3598fd018fe8SPaul Bolle * Writing to an uninitialized extent may result in splitting the uninitialized 359930cb27d6SWang Sheng-Hui * extent into multiple initialized/uninitialized extents (up to three). 36000031462bSMingming Cao * There are three possibilities: 36010031462bSMingming Cao * a> There is no split required: Entire extent should be uninitialized 36020031462bSMingming Cao * b> Splits in two extents: Write is happening at either end of the extent 36030031462bSMingming Cao * c> Splits in three extents: Someone is writing in the middle of the extent 36040031462bSMingming Cao * 36050031462bSMingming Cao * One or more index blocks may be needed if the extent tree grows after 3606b595076aSUwe Kleine-König * the uninitialized extent split. To prevent ENOSPC from occurring when the IO 36070031462bSMingming Cao * completes, we need to split the uninitialized extent before DIO submits 3608421f91d2SUwe Kleine-König * the IO. The uninitialized extent handled at this time will be split 36090031462bSMingming Cao * into three uninitialized extents (at most). After IO completes, the part 36100031462bSMingming Cao * being filled will be converted to initialized by the end_io callback function 36110031462bSMingming Cao * via ext4_convert_unwritten_extents(). 3612ba230c3fSMingming * 3613ba230c3fSMingming * Returns the size of the uninitialized extent to be written on success.
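 *
 * Illustrative example (hypothetical numbers): if an uninitialized extent
 * covers logical blocks [100, 120) and a DIO write targets blocks
 * [104, 110), case c> applies and the extent is split into three
 * uninitialized extents [100, 104), [104, 110) and [110, 120); only the
 * middle one is converted to initialized by the end_io callback once the
 * write completes.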
36140031462bSMingming Cao */ 36150031462bSMingming Cao static int ext4_split_unwritten_extents(handle_t *handle, 36160031462bSMingming Cao struct inode *inode, 3617e35fd660STheodore Ts'o struct ext4_map_blocks *map, 36180031462bSMingming Cao struct ext4_ext_path *path, 36190031462bSMingming Cao int flags) 36200031462bSMingming Cao { 3621667eff35SYongqiang Yang ext4_lblk_t eof_block; 3622667eff35SYongqiang Yang ext4_lblk_t ee_block; 3623667eff35SYongqiang Yang struct ext4_extent *ex; 3624667eff35SYongqiang Yang unsigned int ee_len; 3625667eff35SYongqiang Yang int split_flag = 0, depth; 36260031462bSMingming Cao 362721ca087aSDmitry Monakhov ext_debug("ext4_split_unwritten_extents: inode %lu, logical" 362821ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3629e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len); 363021ca087aSDmitry Monakhov 363121ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 363221ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3633e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len) 3634e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len; 36350031462bSMingming Cao /* 363621ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 363721ca087aSDmitry Monakhov * zeroout only if extent is fully insde i_size or new_size. 363821ca087aSDmitry Monakhov */ 3639667eff35SYongqiang Yang depth = ext_depth(inode); 36400031462bSMingming Cao ex = path[depth].p_ext; 3641667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 3642667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 36430031462bSMingming Cao 3644667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3645667eff35SYongqiang Yang split_flag |= EXT4_EXT_MARK_UNINIT2; 3646dee1f973SDmitry Monakhov if (flags & EXT4_GET_BLOCKS_CONVERT) 3647dee1f973SDmitry Monakhov split_flag |= EXT4_EXT_DATA_VALID2; 3648667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO; 3649667eff35SYongqiang Yang return ext4_split_extent(handle, inode, path, map, split_flag, flags); 36500031462bSMingming Cao } 3651197217a5SYongqiang Yang 3652c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle, 36530031462bSMingming Cao struct inode *inode, 3654dee1f973SDmitry Monakhov struct ext4_map_blocks *map, 36550031462bSMingming Cao struct ext4_ext_path *path) 36560031462bSMingming Cao { 36570031462bSMingming Cao struct ext4_extent *ex; 3658dee1f973SDmitry Monakhov ext4_lblk_t ee_block; 3659dee1f973SDmitry Monakhov unsigned int ee_len; 36600031462bSMingming Cao int depth; 36610031462bSMingming Cao int err = 0; 36620031462bSMingming Cao 36630031462bSMingming Cao depth = ext_depth(inode); 36640031462bSMingming Cao ex = path[depth].p_ext; 3665dee1f973SDmitry Monakhov ee_block = le32_to_cpu(ex->ee_block); 3666dee1f973SDmitry Monakhov ee_len = ext4_ext_get_actual_len(ex); 36670031462bSMingming Cao 3668197217a5SYongqiang Yang ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3669197217a5SYongqiang Yang "block %llu, max_blocks %u\n", inode->i_ino, 3670dee1f973SDmitry Monakhov (unsigned long long)ee_block, ee_len); 3671dee1f973SDmitry Monakhov 3672ff95ec22SDmitry Monakhov /* If extent is larger than requested it is a clear sign that we still 3673ff95ec22SDmitry Monakhov * have some extent state machine issues left. So extent_split is still 3674ff95ec22SDmitry Monakhov * required. 
3675ff95ec22SDmitry Monakhov * TODO: Once all related issues will be fixed this situation should be 3676ff95ec22SDmitry Monakhov * illegal. 3677ff95ec22SDmitry Monakhov */ 3678dee1f973SDmitry Monakhov if (ee_block != map->m_lblk || ee_len > map->m_len) { 3679ff95ec22SDmitry Monakhov #ifdef EXT4_DEBUG 3680ff95ec22SDmitry Monakhov ext4_warning("Inode (%ld) finished: extent logical block %llu," 3681ff95ec22SDmitry Monakhov " len %u; IO logical block %llu, len %u\n", 3682ff95ec22SDmitry Monakhov inode->i_ino, (unsigned long long)ee_block, ee_len, 3683ff95ec22SDmitry Monakhov (unsigned long long)map->m_lblk, map->m_len); 3684ff95ec22SDmitry Monakhov #endif 3685dee1f973SDmitry Monakhov err = ext4_split_unwritten_extents(handle, inode, map, path, 3686dee1f973SDmitry Monakhov EXT4_GET_BLOCKS_CONVERT); 3687dee1f973SDmitry Monakhov if (err < 0) 3688dee1f973SDmitry Monakhov goto out; 3689dee1f973SDmitry Monakhov ext4_ext_drop_refs(path); 3690107a7bd3STheodore Ts'o path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); 3691dee1f973SDmitry Monakhov if (IS_ERR(path)) { 3692dee1f973SDmitry Monakhov err = PTR_ERR(path); 3693dee1f973SDmitry Monakhov goto out; 3694dee1f973SDmitry Monakhov } 3695dee1f973SDmitry Monakhov depth = ext_depth(inode); 3696dee1f973SDmitry Monakhov ex = path[depth].p_ext; 3697dee1f973SDmitry Monakhov } 3698197217a5SYongqiang Yang 36990031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth); 37000031462bSMingming Cao if (err) 37010031462bSMingming Cao goto out; 37020031462bSMingming Cao /* first mark the extent as initialized */ 37030031462bSMingming Cao ext4_ext_mark_initialized(ex); 37040031462bSMingming Cao 3705197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because 3706197217a5SYongqiang Yang * borders are not changed 37070031462bSMingming Cao */ 3708ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3709197217a5SYongqiang Yang 37100031462bSMingming Cao /* Mark modified extent as dirty */ 3711ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 37120031462bSMingming Cao out: 37130031462bSMingming Cao ext4_ext_show_leaf(inode, path); 37140031462bSMingming Cao return err; 37150031462bSMingming Cao } 37160031462bSMingming Cao 3717515f41c3SAneesh Kumar K.V static void unmap_underlying_metadata_blocks(struct block_device *bdev, 3718515f41c3SAneesh Kumar K.V sector_t block, int count) 3719515f41c3SAneesh Kumar K.V { 3720515f41c3SAneesh Kumar K.V int i; 3721515f41c3SAneesh Kumar K.V for (i = 0; i < count; i++) 3722515f41c3SAneesh Kumar K.V unmap_underlying_metadata(bdev, block + i); 3723515f41c3SAneesh Kumar K.V } 3724515f41c3SAneesh Kumar K.V 372558590b06STheodore Ts'o /* 372658590b06STheodore Ts'o * Handle EOFBLOCKS_FL flag, clearing it if necessary 372758590b06STheodore Ts'o */ 372858590b06STheodore Ts'o static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3729d002ebf1SEric Sandeen ext4_lblk_t lblk, 373058590b06STheodore Ts'o struct ext4_ext_path *path, 373158590b06STheodore Ts'o unsigned int len) 373258590b06STheodore Ts'o { 373358590b06STheodore Ts'o int i, depth; 373458590b06STheodore Ts'o struct ext4_extent_header *eh; 373565922cb5SSergey Senozhatsky struct ext4_extent *last_ex; 373658590b06STheodore Ts'o 373758590b06STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 373858590b06STheodore Ts'o return 0; 373958590b06STheodore Ts'o 374058590b06STheodore Ts'o depth = ext_depth(inode); 374158590b06STheodore Ts'o eh = path[depth].p_hdr; 
374258590b06STheodore Ts'o 3743afcff5d8SLukas Czerner /* 3744afcff5d8SLukas Czerner * We're going to remove EOFBLOCKS_FL entirely in future so we 3745afcff5d8SLukas Czerner * do not care for this case anymore. Simply remove the flag 3746afcff5d8SLukas Czerner * if there are no extents. 3747afcff5d8SLukas Czerner */ 3748afcff5d8SLukas Czerner if (unlikely(!eh->eh_entries)) 3749afcff5d8SLukas Czerner goto out; 375058590b06STheodore Ts'o last_ex = EXT_LAST_EXTENT(eh); 375158590b06STheodore Ts'o /* 375258590b06STheodore Ts'o * We should clear the EOFBLOCKS_FL flag if we are writing the 375358590b06STheodore Ts'o * last block in the last extent in the file. We test this by 375458590b06STheodore Ts'o * first checking to see if the caller to 375558590b06STheodore Ts'o * ext4_ext_get_blocks() was interested in the last block (or 375658590b06STheodore Ts'o * a block beyond the last block) in the current extent. If 375758590b06STheodore Ts'o * this turns out to be false, we can bail out from this 375858590b06STheodore Ts'o * function immediately. 375958590b06STheodore Ts'o */ 3760d002ebf1SEric Sandeen if (lblk + len < le32_to_cpu(last_ex->ee_block) + 376158590b06STheodore Ts'o ext4_ext_get_actual_len(last_ex)) 376258590b06STheodore Ts'o return 0; 376358590b06STheodore Ts'o /* 376458590b06STheodore Ts'o * If the caller does appear to be planning to write at or 376558590b06STheodore Ts'o * beyond the end of the current extent, we then test to see 376658590b06STheodore Ts'o * if the current extent is the last extent in the file, by 376758590b06STheodore Ts'o * checking to make sure it was reached via the rightmost node 376858590b06STheodore Ts'o * at each level of the tree. 376958590b06STheodore Ts'o */ 377058590b06STheodore Ts'o for (i = depth-1; i >= 0; i--) 377158590b06STheodore Ts'o if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 377258590b06STheodore Ts'o return 0; 3773afcff5d8SLukas Czerner out: 377458590b06STheodore Ts'o ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 377558590b06STheodore Ts'o return ext4_mark_inode_dirty(handle, inode); 377658590b06STheodore Ts'o } 377758590b06STheodore Ts'o 37787b415bf6SAditya Kali /** 37797b415bf6SAditya Kali * ext4_find_delalloc_range: find delayed allocated block in the given range. 37807b415bf6SAditya Kali * 37817d1b1fbcSZheng Liu * Return 1 if there is a delalloc block in the range, otherwise 0. 
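 *
 * For example, ext4_find_delalloc_cluster() below widens a single logical
 * block to the whole cluster containing it and asks whether any block in
 * that cluster is still delayed allocated.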
37827b415bf6SAditya Kali */ 3783f7fec032SZheng Liu int ext4_find_delalloc_range(struct inode *inode, 37847b415bf6SAditya Kali ext4_lblk_t lblk_start, 37857d1b1fbcSZheng Liu ext4_lblk_t lblk_end) 37867b415bf6SAditya Kali { 37877d1b1fbcSZheng Liu struct extent_status es; 37887b415bf6SAditya Kali 3789e30b5dcaSYan, Zheng ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es); 379006b0c886SZheng Liu if (es.es_len == 0) 37917d1b1fbcSZheng Liu return 0; /* there is no delay extent in this tree */ 379206b0c886SZheng Liu else if (es.es_lblk <= lblk_start && 379306b0c886SZheng Liu lblk_start < es.es_lblk + es.es_len) 37947b415bf6SAditya Kali return 1; 379506b0c886SZheng Liu else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end) 37967d1b1fbcSZheng Liu return 1; 37977b415bf6SAditya Kali else 37987b415bf6SAditya Kali return 0; 37997b415bf6SAditya Kali } 38007b415bf6SAditya Kali 38017d1b1fbcSZheng Liu int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) 38027b415bf6SAditya Kali { 38037b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 38047b415bf6SAditya Kali ext4_lblk_t lblk_start, lblk_end; 3805f5a44db5STheodore Ts'o lblk_start = EXT4_LBLK_CMASK(sbi, lblk); 38067b415bf6SAditya Kali lblk_end = lblk_start + sbi->s_cluster_ratio - 1; 38077b415bf6SAditya Kali 38087d1b1fbcSZheng Liu return ext4_find_delalloc_range(inode, lblk_start, lblk_end); 38097b415bf6SAditya Kali } 38107b415bf6SAditya Kali 38117b415bf6SAditya Kali /** 38127b415bf6SAditya Kali * Determines how many complete clusters (out of those specified by the 'map') 38137b415bf6SAditya Kali * are under delalloc and were reserved quota for. 38147b415bf6SAditya Kali * This function is called when we are writing out the blocks that were 38157b415bf6SAditya Kali * originally written with their allocation delayed, but then the space was 38167b415bf6SAditya Kali * allocated using fallocate() before the delayed allocation could be resolved. 38177b415bf6SAditya Kali * The cases to look for are: 38187b415bf6SAditya Kali * ('=' indicated delayed allocated blocks 38197b415bf6SAditya Kali * '-' indicates non-delayed allocated blocks) 38207b415bf6SAditya Kali * (a) partial clusters towards beginning and/or end outside of allocated range 38217b415bf6SAditya Kali * are not delalloc'ed. 38227b415bf6SAditya Kali * Ex: 38237b415bf6SAditya Kali * |----c---=|====c====|====c====|===-c----| 38247b415bf6SAditya Kali * |++++++ allocated ++++++| 38257b415bf6SAditya Kali * ==> 4 complete clusters in above example 38267b415bf6SAditya Kali * 38277b415bf6SAditya Kali * (b) partial cluster (outside of allocated range) towards either end is 38287b415bf6SAditya Kali * marked for delayed allocation. In this case, we will exclude that 38297b415bf6SAditya Kali * cluster. 38307b415bf6SAditya Kali * Ex: 38317b415bf6SAditya Kali * |----====c========|========c========| 38327b415bf6SAditya Kali * |++++++ allocated ++++++| 38337b415bf6SAditya Kali * ==> 1 complete clusters in above example 38347b415bf6SAditya Kali * 38357b415bf6SAditya Kali * Ex: 38367b415bf6SAditya Kali * |================c================| 38377b415bf6SAditya Kali * |++++++ allocated ++++++| 38387b415bf6SAditya Kali * ==> 0 complete clusters in above example 38397b415bf6SAditya Kali * 38407b415bf6SAditya Kali * The ext4_da_update_reserve_space will be called only if we 38417b415bf6SAditya Kali * determine here that there were some "entire" clusters that span 38427b415bf6SAditya Kali * this 'allocated' range. 
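 *
 * Numeric sketch (hypothetical values, s_cluster_ratio = 4, i.e. clusters
 * [0-3], [4-7], [8-11], ...): for lblk_start = 10 and num_blks = 8 the
 * allocated range spans clusters #2..#4, so at most 3 clusters are counted;
 * blocks 8-9 are checked for delalloc on the left and blocks 18-19 on the
 * right, and each hit excludes one partial cluster from the count.
 *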
38437b415bf6SAditya Kali * In the non-bigalloc case, this function will just end up returning num_blks 38447b415bf6SAditya Kali * without ever calling ext4_find_delalloc_range. 38457b415bf6SAditya Kali */ 38467b415bf6SAditya Kali static unsigned int 38477b415bf6SAditya Kali get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, 38487b415bf6SAditya Kali unsigned int num_blks) 38497b415bf6SAditya Kali { 38507b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 38517b415bf6SAditya Kali ext4_lblk_t alloc_cluster_start, alloc_cluster_end; 38527b415bf6SAditya Kali ext4_lblk_t lblk_from, lblk_to, c_offset; 38537b415bf6SAditya Kali unsigned int allocated_clusters = 0; 38547b415bf6SAditya Kali 38557b415bf6SAditya Kali alloc_cluster_start = EXT4_B2C(sbi, lblk_start); 38567b415bf6SAditya Kali alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); 38577b415bf6SAditya Kali 38587b415bf6SAditya Kali /* max possible clusters for this allocation */ 38597b415bf6SAditya Kali allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; 38607b415bf6SAditya Kali 3861d8990240SAditya Kali trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); 3862d8990240SAditya Kali 38637b415bf6SAditya Kali /* Check towards left side */ 3864f5a44db5STheodore Ts'o c_offset = EXT4_LBLK_COFF(sbi, lblk_start); 38657b415bf6SAditya Kali if (c_offset) { 3866f5a44db5STheodore Ts'o lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); 38677b415bf6SAditya Kali lblk_to = lblk_from + c_offset - 1; 38687b415bf6SAditya Kali 38697d1b1fbcSZheng Liu if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 38707b415bf6SAditya Kali allocated_clusters--; 38717b415bf6SAditya Kali } 38727b415bf6SAditya Kali 38737b415bf6SAditya Kali /* Now check towards right. */ 3874f5a44db5STheodore Ts'o c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); 38757b415bf6SAditya Kali if (allocated_clusters && c_offset) { 38767b415bf6SAditya Kali lblk_from = lblk_start + num_blks; 38777b415bf6SAditya Kali lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; 38787b415bf6SAditya Kali 38797d1b1fbcSZheng Liu if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 38807b415bf6SAditya Kali allocated_clusters--; 38817b415bf6SAditya Kali } 38827b415bf6SAditya Kali 38837b415bf6SAditya Kali return allocated_clusters; 38847b415bf6SAditya Kali } 38857b415bf6SAditya Kali 38860031462bSMingming Cao static int 38870031462bSMingming Cao ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, 3888e35fd660STheodore Ts'o struct ext4_map_blocks *map, 38890031462bSMingming Cao struct ext4_ext_path *path, int flags, 3890e35fd660STheodore Ts'o unsigned int allocated, ext4_fsblk_t newblock) 38910031462bSMingming Cao { 38920031462bSMingming Cao int ret = 0; 38930031462bSMingming Cao int err = 0; 3894f45ee3a1SDmitry Monakhov ext4_io_end_t *io = ext4_inode_aio(inode); 38950031462bSMingming Cao 38960031462bSMingming Cao ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical " 389788635ca2SZheng Liu "block %llu, max_blocks %u, flags %x, allocated %u\n", 3898e35fd660STheodore Ts'o inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 38990031462bSMingming Cao flags, allocated); 39000031462bSMingming Cao ext4_ext_show_leaf(inode, path); 39010031462bSMingming Cao 390227dd4385SLukas Czerner /* 390327dd4385SLukas Czerner * When writing into uninitialized space, we should not fail to 390427dd4385SLukas Czerner * allocate metadata blocks for the new extent block if needed. 
390527dd4385SLukas Czerner */ 390627dd4385SLukas Czerner flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 390727dd4385SLukas Czerner 3908b5645534SZheng Liu trace_ext4_ext_handle_uninitialized_extents(inode, map, flags, 3909b5645534SZheng Liu allocated, newblock); 3910d8990240SAditya Kali 3911c7064ef1SJiaying Zhang /* get_block() before submit the IO, split the extent */ 3912744692dcSJiaying Zhang if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3913e35fd660STheodore Ts'o ret = ext4_split_unwritten_extents(handle, inode, map, 3914e35fd660STheodore Ts'o path, flags); 391582e54229SDmitry Monakhov if (ret <= 0) 391682e54229SDmitry Monakhov goto out; 39175f524950SMingming /* 39185f524950SMingming * Flag the inode(non aio case) or end_io struct (aio case) 391925985edcSLucas De Marchi * that this IO needs to conversion to written when IO is 39205f524950SMingming * completed 39215f524950SMingming */ 39220edeb71dSTao Ma if (io) 39230edeb71dSTao Ma ext4_set_io_unwritten_flag(inode, io); 39240edeb71dSTao Ma else 392519f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3926a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 3927744692dcSJiaying Zhang if (ext4_should_dioread_nolock(inode)) 3928e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNINIT; 39290031462bSMingming Cao goto out; 39300031462bSMingming Cao } 3931c7064ef1SJiaying Zhang /* IO end_io complete, convert the filled extent to written */ 3932744692dcSJiaying Zhang if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 3933dee1f973SDmitry Monakhov ret = ext4_convert_unwritten_extents_endio(handle, inode, map, 39340031462bSMingming Cao path); 393558590b06STheodore Ts'o if (ret >= 0) { 3936b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 3937d002ebf1SEric Sandeen err = check_eofblocks_fl(handle, inode, map->m_lblk, 3938d002ebf1SEric Sandeen path, map->m_len); 393958590b06STheodore Ts'o } else 394058590b06STheodore Ts'o err = ret; 3941cdee7843SZheng Liu map->m_flags |= EXT4_MAP_MAPPED; 394215cc1767SEric Whitney map->m_pblk = newblock; 3943cdee7843SZheng Liu if (allocated > map->m_len) 3944cdee7843SZheng Liu allocated = map->m_len; 3945cdee7843SZheng Liu map->m_len = allocated; 39460031462bSMingming Cao goto out2; 39470031462bSMingming Cao } 39480031462bSMingming Cao /* buffered IO case */ 39490031462bSMingming Cao /* 39500031462bSMingming Cao * repeat fallocate creation request 39510031462bSMingming Cao * we already have an unwritten extent 39520031462bSMingming Cao */ 3953a25a4e1aSZheng Liu if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) { 3954a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 39550031462bSMingming Cao goto map_out; 3956a25a4e1aSZheng Liu } 39570031462bSMingming Cao 39580031462bSMingming Cao /* buffered READ or buffered write_begin() lookup */ 39590031462bSMingming Cao if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 39600031462bSMingming Cao /* 39610031462bSMingming Cao * We have blocks reserved already. We 39620031462bSMingming Cao * return allocated blocks so that delalloc 39630031462bSMingming Cao * won't do block reservation for us. But 39640031462bSMingming Cao * the buffer head will be unmapped so that 39650031462bSMingming Cao * a read from the block returns 0s. 
39660031462bSMingming Cao */ 3967e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNWRITTEN; 39680031462bSMingming Cao goto out1; 39690031462bSMingming Cao } 39700031462bSMingming Cao 39710031462bSMingming Cao /* buffered write, writepage time, convert*/ 397227dd4385SLukas Czerner ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags); 3973a4e5d88bSDmitry Monakhov if (ret >= 0) 3974b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 39750031462bSMingming Cao out: 39760031462bSMingming Cao if (ret <= 0) { 39770031462bSMingming Cao err = ret; 39780031462bSMingming Cao goto out2; 39790031462bSMingming Cao } else 39800031462bSMingming Cao allocated = ret; 3981e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 3982515f41c3SAneesh Kumar K.V /* 3983515f41c3SAneesh Kumar K.V * if we allocated more blocks than requested 3984515f41c3SAneesh Kumar K.V * we need to make sure we unmap the extra block 3985515f41c3SAneesh Kumar K.V * allocated. The actual needed block will get 3986515f41c3SAneesh Kumar K.V * unmapped later when we find the buffer_head marked 3987515f41c3SAneesh Kumar K.V * new. 3988515f41c3SAneesh Kumar K.V */ 3989e35fd660STheodore Ts'o if (allocated > map->m_len) { 3990515f41c3SAneesh Kumar K.V unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 3991e35fd660STheodore Ts'o newblock + map->m_len, 3992e35fd660STheodore Ts'o allocated - map->m_len); 3993e35fd660STheodore Ts'o allocated = map->m_len; 3994515f41c3SAneesh Kumar K.V } 39953a225670SZheng Liu map->m_len = allocated; 39965f634d06SAneesh Kumar K.V 39975f634d06SAneesh Kumar K.V /* 39985f634d06SAneesh Kumar K.V * If we have done fallocate with the offset that is already 39995f634d06SAneesh Kumar K.V * delayed allocated, we would have block reservation 40005f634d06SAneesh Kumar K.V * and quota reservation done in the delayed write path. 40015f634d06SAneesh Kumar K.V * But fallocate would have already updated quota and block 40025f634d06SAneesh Kumar K.V * count for this offset. So cancel these reservation 40035f634d06SAneesh Kumar K.V */ 40047b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 40057b415bf6SAditya Kali unsigned int reserved_clusters; 40067b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode, 40077b415bf6SAditya Kali map->m_lblk, map->m_len); 40087b415bf6SAditya Kali if (reserved_clusters) 40097b415bf6SAditya Kali ext4_da_update_reserve_space(inode, 40107b415bf6SAditya Kali reserved_clusters, 40117b415bf6SAditya Kali 0); 40127b415bf6SAditya Kali } 40135f634d06SAneesh Kumar K.V 40140031462bSMingming Cao map_out: 4015e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 4016a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { 4017a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, path, 4018a4e5d88bSDmitry Monakhov map->m_len); 4019a4e5d88bSDmitry Monakhov if (err < 0) 4020a4e5d88bSDmitry Monakhov goto out2; 4021a4e5d88bSDmitry Monakhov } 40220031462bSMingming Cao out1: 4023e35fd660STheodore Ts'o if (allocated > map->m_len) 4024e35fd660STheodore Ts'o allocated = map->m_len; 40250031462bSMingming Cao ext4_ext_show_leaf(inode, path); 4026e35fd660STheodore Ts'o map->m_pblk = newblock; 4027e35fd660STheodore Ts'o map->m_len = allocated; 40280031462bSMingming Cao out2: 40290031462bSMingming Cao return err ? 
err : allocated; 40300031462bSMingming Cao } 403158590b06STheodore Ts'o 40320031462bSMingming Cao /* 40334d33b1efSTheodore Ts'o * get_implied_cluster_alloc - check to see if the requested 40344d33b1efSTheodore Ts'o * allocation (in the map structure) overlaps with a cluster already 40354d33b1efSTheodore Ts'o * allocated in an extent. 4036d8990240SAditya Kali * @sb The filesystem superblock structure 40374d33b1efSTheodore Ts'o * @map The requested lblk->pblk mapping 40384d33b1efSTheodore Ts'o * @ex The extent structure which might contain an implied 40394d33b1efSTheodore Ts'o * cluster allocation 40404d33b1efSTheodore Ts'o * 40414d33b1efSTheodore Ts'o * This function is called by ext4_ext_map_blocks() after we failed to 40424d33b1efSTheodore Ts'o * find blocks that were already in the inode's extent tree. Hence, 40434d33b1efSTheodore Ts'o * we know that the beginning of the requested region cannot overlap 40444d33b1efSTheodore Ts'o * the extent from the inode's extent tree. There are three cases we 40454d33b1efSTheodore Ts'o * want to catch. The first is this case: 40464d33b1efSTheodore Ts'o * 40474d33b1efSTheodore Ts'o * |--- cluster # N--| 40484d33b1efSTheodore Ts'o * |--- extent ---| |---- requested region ---| 40494d33b1efSTheodore Ts'o * |==========| 40504d33b1efSTheodore Ts'o * 40514d33b1efSTheodore Ts'o * The second case that we need to test for is this one: 40524d33b1efSTheodore Ts'o * 40534d33b1efSTheodore Ts'o * |--------- cluster # N ----------------| 40544d33b1efSTheodore Ts'o * |--- requested region --| |------- extent ----| 40554d33b1efSTheodore Ts'o * |=======================| 40564d33b1efSTheodore Ts'o * 40574d33b1efSTheodore Ts'o * The third case is when the requested region lies between two extents 40584d33b1efSTheodore Ts'o * within the same cluster: 40594d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 40604d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 40614d33b1efSTheodore Ts'o * |------ requested region ------| 40624d33b1efSTheodore Ts'o * |================| 40634d33b1efSTheodore Ts'o * 40644d33b1efSTheodore Ts'o * In each of the above cases, we need to set map->m_pblk and 40654d33b1efSTheodore Ts'o * map->m_len so that they correspond to the extent labelled as 40664d33b1efSTheodore Ts'o * "|====|" from cluster #N, since it is already in use for data in 40674d33b1efSTheodore Ts'o * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 40684d33b1efSTheodore Ts'o * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 40694d33b1efSTheodore Ts'o * as a new "allocated" block region. Otherwise, we will return 0 and 40704d33b1efSTheodore Ts'o * ext4_ext_map_blocks() will then allocate one or more new clusters 40714d33b1efSTheodore Ts'o * by calling ext4_mb_new_blocks().
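 *
 * Worked example (hypothetical numbers, s_cluster_ratio = 4): if 'ex'
 * covers logical blocks 8-9 and the requested region starts at block 10,
 * both fall in cluster #2; map->m_pblk is pointed at the matching offset
 * inside the already-allocated cluster and map->m_len is clipped to the
 * end of that cluster (or, in the third case, to the start of ex_right).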
40724d33b1efSTheodore Ts'o */ 4073d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb, 40744d33b1efSTheodore Ts'o struct ext4_map_blocks *map, 40754d33b1efSTheodore Ts'o struct ext4_extent *ex, 40764d33b1efSTheodore Ts'o struct ext4_ext_path *path) 40774d33b1efSTheodore Ts'o { 4078d8990240SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(sb); 4079f5a44db5STheodore Ts'o ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 40804d33b1efSTheodore Ts'o ext4_lblk_t ex_cluster_start, ex_cluster_end; 408114d7f3efSCurt Wohlgemuth ext4_lblk_t rr_cluster_start; 40824d33b1efSTheodore Ts'o ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 40834d33b1efSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 40844d33b1efSTheodore Ts'o unsigned short ee_len = ext4_ext_get_actual_len(ex); 40854d33b1efSTheodore Ts'o 40864d33b1efSTheodore Ts'o /* The extent passed in that we are trying to match */ 40874d33b1efSTheodore Ts'o ex_cluster_start = EXT4_B2C(sbi, ee_block); 40884d33b1efSTheodore Ts'o ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 40894d33b1efSTheodore Ts'o 40904d33b1efSTheodore Ts'o /* The requested region passed into ext4_map_blocks() */ 40914d33b1efSTheodore Ts'o rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 40924d33b1efSTheodore Ts'o 40934d33b1efSTheodore Ts'o if ((rr_cluster_start == ex_cluster_end) || 40944d33b1efSTheodore Ts'o (rr_cluster_start == ex_cluster_start)) { 40954d33b1efSTheodore Ts'o if (rr_cluster_start == ex_cluster_end) 40964d33b1efSTheodore Ts'o ee_start += ee_len - 1; 4097f5a44db5STheodore Ts'o map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; 40984d33b1efSTheodore Ts'o map->m_len = min(map->m_len, 40994d33b1efSTheodore Ts'o (unsigned) sbi->s_cluster_ratio - c_offset); 41004d33b1efSTheodore Ts'o /* 41014d33b1efSTheodore Ts'o * Check for and handle this case: 41024d33b1efSTheodore Ts'o * 41034d33b1efSTheodore Ts'o * |--------- cluster # N-------------| 41044d33b1efSTheodore Ts'o * |------- extent ----| 41054d33b1efSTheodore Ts'o * |--- requested region ---| 41064d33b1efSTheodore Ts'o * |===========| 41074d33b1efSTheodore Ts'o */ 41084d33b1efSTheodore Ts'o 41094d33b1efSTheodore Ts'o if (map->m_lblk < ee_block) 41104d33b1efSTheodore Ts'o map->m_len = min(map->m_len, ee_block - map->m_lblk); 41114d33b1efSTheodore Ts'o 41124d33b1efSTheodore Ts'o /* 41134d33b1efSTheodore Ts'o * Check for the case where there is already another allocated 41144d33b1efSTheodore Ts'o * block to the right of 'ex' but before the end of the cluster. 
41154d33b1efSTheodore Ts'o * 41164d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 41174d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 41184d33b1efSTheodore Ts'o * |------ requested region ------| 41194d33b1efSTheodore Ts'o * |================| 41204d33b1efSTheodore Ts'o */ 41214d33b1efSTheodore Ts'o if (map->m_lblk > ee_block) { 41224d33b1efSTheodore Ts'o ext4_lblk_t next = ext4_ext_next_allocated_block(path); 41234d33b1efSTheodore Ts'o map->m_len = min(map->m_len, next - map->m_lblk); 41244d33b1efSTheodore Ts'o } 4125d8990240SAditya Kali 4126d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 41274d33b1efSTheodore Ts'o return 1; 41284d33b1efSTheodore Ts'o } 4129d8990240SAditya Kali 4130d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 41314d33b1efSTheodore Ts'o return 0; 41324d33b1efSTheodore Ts'o } 41334d33b1efSTheodore Ts'o 41344d33b1efSTheodore Ts'o 41354d33b1efSTheodore Ts'o /* 4136f5ab0d1fSMingming Cao * Block allocation/map/preallocation routine for extent-based files 4137f5ab0d1fSMingming Cao * 4138f5ab0d1fSMingming Cao * 4139c278bfecSAneesh Kumar K.V * Needs to be called with 41400e855ac8SAneesh Kumar K.V * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks 41410e855ac8SAneesh Kumar K.V * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 4142f5ab0d1fSMingming Cao * 4143f5ab0d1fSMingming Cao * return > 0, number of blocks already mapped/allocated 4144f5ab0d1fSMingming Cao * if create == 0 and these are pre-allocated blocks 4145f5ab0d1fSMingming Cao * buffer head is unmapped 4146f5ab0d1fSMingming Cao * otherwise blocks are mapped 4147f5ab0d1fSMingming Cao * 4148f5ab0d1fSMingming Cao * return = 0, if plain lookup failed (blocks have not been allocated) 4149f5ab0d1fSMingming Cao * buffer head is unmapped 4150f5ab0d1fSMingming Cao * 4151f5ab0d1fSMingming Cao * return < 0, error case.
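 *
 * Typical lookup usage (an illustrative sketch, not the only pattern):
 * the caller fills map->m_lblk and map->m_len, passes flags without
 * EXT4_GET_BLOCKS_CREATE, and on a positive return inspects map->m_flags
 * (EXT4_MAP_MAPPED vs. EXT4_MAP_UNWRITTEN) and reads the starting physical
 * block from map->m_pblk; a return of 0 means the lookup hit a hole.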
4152c278bfecSAneesh Kumar K.V */ 4153e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4154e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 4155a86c6181SAlex Tomas { 4156a86c6181SAlex Tomas struct ext4_ext_path *path = NULL; 41574d33b1efSTheodore Ts'o struct ext4_extent newex, *ex, *ex2; 41584d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 41590562e0baSJiaying Zhang ext4_fsblk_t newblock = 0; 4160ce37c429SEric Whitney int free_on_err = 0, err = 0, depth, ret; 41614d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0; 416281fdbb4aSYongqiang Yang unsigned int allocated_clusters = 0; 4163c9de560dSAlex Tomas struct ext4_allocation_request ar; 4164f45ee3a1SDmitry Monakhov ext4_io_end_t *io = ext4_inode_aio(inode); 41654d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset; 416682e54229SDmitry Monakhov int set_unwritten = 0; 4167a86c6181SAlex Tomas 416884fe3befSMingming ext_debug("blocks %u/%u requested for inode %lu\n", 4169e35fd660STheodore Ts'o map->m_lblk, map->m_len, inode->i_ino); 41700562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4171a86c6181SAlex Tomas 4172a86c6181SAlex Tomas /* find extent for this block */ 4173107a7bd3STheodore Ts'o path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0); 4174a86c6181SAlex Tomas if (IS_ERR(path)) { 4175a86c6181SAlex Tomas err = PTR_ERR(path); 4176a86c6181SAlex Tomas path = NULL; 4177a86c6181SAlex Tomas goto out2; 4178a86c6181SAlex Tomas } 4179a86c6181SAlex Tomas 4180a86c6181SAlex Tomas depth = ext_depth(inode); 4181a86c6181SAlex Tomas 4182a86c6181SAlex Tomas /* 4183d0d856e8SRandy Dunlap * consistent leaf must not be empty; 4184d0d856e8SRandy Dunlap * this situation is possible, though, _during_ tree modification; 4185a86c6181SAlex Tomas * this is why assert can't be put in ext4_ext_find_extent() 4186a86c6181SAlex Tomas */ 4187273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4188273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address " 4189f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld", 4190f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth, 4191f70f362bSTheodore Ts'o path[depth].p_block); 4192034fb4c9SSurbhi Palande err = -EIO; 4193034fb4c9SSurbhi Palande goto out2; 4194034fb4c9SSurbhi Palande } 4195a86c6181SAlex Tomas 41967e028976SAvantika Mathur ex = path[depth].p_ext; 41977e028976SAvantika Mathur if (ex) { 4198725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4199bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4200a2df2a63SAmit Arora unsigned short ee_len; 4201471d4011SSuparna Bhattacharya 4202471d4011SSuparna Bhattacharya /* 4203471d4011SSuparna Bhattacharya * Uninitialized extents are treated as holes, except that 420456055d3aSAmit Arora * we split out initialized portions during a write. 
4205471d4011SSuparna Bhattacharya */ 4206a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex); 4207d8990240SAditya Kali 4208d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4209d8990240SAditya Kali 4210d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */ 4211e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) { 4212e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start; 4213d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 4214e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 4215e35fd660STheodore Ts'o ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 4216a86c6181SAlex Tomas ee_block, ee_len, newblock); 421756055d3aSAmit Arora 421869eb33dcSZheng Liu if (!ext4_ext_is_uninitialized(ex)) 4219a86c6181SAlex Tomas goto out; 422069eb33dcSZheng Liu 4221ce37c429SEric Whitney ret = ext4_ext_handle_uninitialized_extents( 4222e861304bSAllison Henderson handle, inode, map, path, flags, 4223e861304bSAllison Henderson allocated, newblock); 4224ce37c429SEric Whitney if (ret < 0) 4225ce37c429SEric Whitney err = ret; 4226ce37c429SEric Whitney else 4227ce37c429SEric Whitney allocated = ret; 422831cf0f2cSEric Whitney goto out2; 422956055d3aSAmit Arora } 4230a86c6181SAlex Tomas } 4231a86c6181SAlex Tomas 42327b415bf6SAditya Kali if ((sbi->s_cluster_ratio > 1) && 42337d1b1fbcSZheng Liu ext4_find_delalloc_cluster(inode, map->m_lblk)) 42347b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 42357b415bf6SAditya Kali 4236a86c6181SAlex Tomas /* 4237d0d856e8SRandy Dunlap * requested block isn't allocated yet; 4238a86c6181SAlex Tomas * we couldn't try to create block if create flag is zero 4239a86c6181SAlex Tomas */ 4240c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 424156055d3aSAmit Arora /* 424256055d3aSAmit Arora * put just found gap into cache to speed up 424356055d3aSAmit Arora * subsequent requests 424456055d3aSAmit Arora */ 4245d100eef2SZheng Liu if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0) 4246e35fd660STheodore Ts'o ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); 4247a86c6181SAlex Tomas goto out2; 4248a86c6181SAlex Tomas } 42494d33b1efSTheodore Ts'o 4250a86c6181SAlex Tomas /* 4251c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation. 4252a86c6181SAlex Tomas */ 42537b415bf6SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 42544d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk); 4255d0abafacSEric Whitney cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 42564d33b1efSTheodore Ts'o 42574d33b1efSTheodore Ts'o /* 42584d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned 42594d33b1efSTheodore Ts'o * by ext4_ext_find_extent() implies a cluster we can use. 
42604d33b1efSTheodore Ts'o */ 42614d33b1efSTheodore Ts'o if (cluster_offset && ex && 4262d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 42634d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 42644d33b1efSTheodore Ts'o newblock = map->m_pblk; 42657b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 42664d33b1efSTheodore Ts'o goto got_allocated_blocks; 42674d33b1efSTheodore Ts'o } 4268a86c6181SAlex Tomas 4269c9de560dSAlex Tomas /* find neighbour allocated blocks */ 4270e35fd660STheodore Ts'o ar.lleft = map->m_lblk; 4271c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4272c9de560dSAlex Tomas if (err) 4273c9de560dSAlex Tomas goto out2; 4274e35fd660STheodore Ts'o ar.lright = map->m_lblk; 42754d33b1efSTheodore Ts'o ex2 = NULL; 42764d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4277c9de560dSAlex Tomas if (err) 4278c9de560dSAlex Tomas goto out2; 427925d14f98SAmit Arora 42804d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a 42814d33b1efSTheodore Ts'o * cluster we can use. */ 42824d33b1efSTheodore Ts'o if ((sbi->s_cluster_ratio > 1) && ex2 && 4283d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 42844d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 42854d33b1efSTheodore Ts'o newblock = map->m_pblk; 42867b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 42874d33b1efSTheodore Ts'o goto got_allocated_blocks; 42884d33b1efSTheodore Ts'o } 42894d33b1efSTheodore Ts'o 4290749269faSAmit Arora /* 4291749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in 4292749269faSAmit Arora * a single extent. For an initialized extent this limit is 4293749269faSAmit Arora * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 4294749269faSAmit Arora * EXT_UNINIT_MAX_LEN. 4295749269faSAmit Arora */ 4296e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN && 4297c2177057STheodore Ts'o !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4298e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN; 4299e35fd660STheodore Ts'o else if (map->m_len > EXT_UNINIT_MAX_LEN && 4300c2177057STheodore Ts'o (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4301e35fd660STheodore Ts'o map->m_len = EXT_UNINIT_MAX_LEN; 4302749269faSAmit Arora 4303e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4304e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len); 43054d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path); 430625d14f98SAmit Arora if (err) 4307b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 430825d14f98SAmit Arora else 4309e35fd660STheodore Ts'o allocated = map->m_len; 4310c9de560dSAlex Tomas 4311c9de560dSAlex Tomas /* allocate new block */ 4312c9de560dSAlex Tomas ar.inode = inode; 4313e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4314e35fd660STheodore Ts'o ar.logical = map->m_lblk; 43154d33b1efSTheodore Ts'o /* 43164d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster 43174d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a 43184d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the 43194d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is 43204d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc() 43214d33b1efSTheodore Ts'o * work correctly. 
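 *
 * Example (hypothetical numbers, s_cluster_ratio = 4): for
 * map->m_lblk = 10 the in-cluster offset is 2, so ar.goal and ar.logical
 * are pulled back by 2 blocks and, with allocated = 5 blocks,
 * ar.len = EXT4_NUM_B2C(sbi, 2 + 5) = 2 clusters.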
43224d33b1efSTheodore Ts'o */ 4323f5a44db5STheodore Ts'o offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 43244d33b1efSTheodore Ts'o ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 43254d33b1efSTheodore Ts'o ar.goal -= offset; 43264d33b1efSTheodore Ts'o ar.logical -= offset; 4327c9de560dSAlex Tomas if (S_ISREG(inode->i_mode)) 4328c9de560dSAlex Tomas ar.flags = EXT4_MB_HINT_DATA; 4329c9de560dSAlex Tomas else 4330c9de560dSAlex Tomas /* disable in-core preallocation for non-regular files */ 4331c9de560dSAlex Tomas ar.flags = 0; 4332556b27abSVivek Haldar if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4333556b27abSVivek Haldar ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4334c9de560dSAlex Tomas newblock = ext4_mb_new_blocks(handle, &ar, &err); 4335a86c6181SAlex Tomas if (!newblock) 4336a86c6181SAlex Tomas goto out2; 433784fe3befSMingming ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4338498e5f24STheodore Ts'o ar.goal, newblock, allocated); 43394d33b1efSTheodore Ts'o free_on_err = 1; 43407b415bf6SAditya Kali allocated_clusters = ar.len; 43414d33b1efSTheodore Ts'o ar.len = EXT4_C2B(sbi, ar.len) - offset; 43424d33b1efSTheodore Ts'o if (ar.len > allocated) 43434d33b1efSTheodore Ts'o ar.len = allocated; 4344a86c6181SAlex Tomas 43454d33b1efSTheodore Ts'o got_allocated_blocks: 4346a86c6181SAlex Tomas /* try to insert new extent into found leaf and return */ 43474d33b1efSTheodore Ts'o ext4_ext_store_pblock(&newex, newblock + offset); 4348c9de560dSAlex Tomas newex.ee_len = cpu_to_le16(ar.len); 43498d5d02e6SMingming Cao /* Mark uninitialized */ 43508d5d02e6SMingming Cao if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ 4351a2df2a63SAmit Arora ext4_ext_mark_uninitialized(&newex); 4352a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 43538d5d02e6SMingming Cao /* 4354744692dcSJiaying Zhang * io_end structure was created for every IO write to an 435525985edcSLucas De Marchi * uninitialized extent. To avoid unnecessary conversion, 4356744692dcSJiaying Zhang * here we flag the IO that really needs the conversion. 43575f524950SMingming * For non asycn direct IO case, flag the inode state 435825985edcSLucas De Marchi * that we need to perform conversion when IO is done. 43598d5d02e6SMingming Cao */ 436082e54229SDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_PRE_IO)) 436182e54229SDmitry Monakhov set_unwritten = 1; 4362744692dcSJiaying Zhang if (ext4_should_dioread_nolock(inode)) 4363e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNINIT; 43648d5d02e6SMingming Cao } 4365c8d46e41SJiaying Zhang 4366a4e5d88bSDmitry Monakhov err = 0; 4367a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) 4368a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, 4369a4e5d88bSDmitry Monakhov path, ar.len); 4370575a1d4bSJiaying Zhang if (!err) 4371575a1d4bSJiaying Zhang err = ext4_ext_insert_extent(handle, inode, path, 4372575a1d4bSJiaying Zhang &newex, flags); 437382e54229SDmitry Monakhov 437482e54229SDmitry Monakhov if (!err && set_unwritten) { 437582e54229SDmitry Monakhov if (io) 437682e54229SDmitry Monakhov ext4_set_io_unwritten_flag(inode, io); 437782e54229SDmitry Monakhov else 437882e54229SDmitry Monakhov ext4_set_inode_state(inode, 437982e54229SDmitry Monakhov EXT4_STATE_DIO_UNWRITTEN); 438082e54229SDmitry Monakhov } 438182e54229SDmitry Monakhov 43824d33b1efSTheodore Ts'o if (err && free_on_err) { 43837132de74SMaxim Patlasov int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? 
43847132de74SMaxim Patlasov EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; 4385315054f0SAlex Tomas /* free data blocks we just allocated */ 4386c9de560dSAlex Tomas /* not a good idea to call discard here directly, 4387c9de560dSAlex Tomas * but otherwise we'd need to call it every free() */ 4388c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 4389c8e15130STheodore Ts'o ext4_free_blocks(handle, inode, NULL, newblock, 4390c8e15130STheodore Ts'o EXT4_C2B(sbi, allocated_clusters), fb_flags); 4391a86c6181SAlex Tomas goto out2; 4392315054f0SAlex Tomas } 4393a86c6181SAlex Tomas 4394a86c6181SAlex Tomas /* previous routine could use block we allocated */ 4395bf89d16fSTheodore Ts'o newblock = ext4_ext_pblock(&newex); 4396b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 4397e35fd660STheodore Ts'o if (allocated > map->m_len) 4398e35fd660STheodore Ts'o allocated = map->m_len; 4399e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 4400a86c6181SAlex Tomas 4401b436b9beSJan Kara /* 44025f634d06SAneesh Kumar K.V * Update reserved blocks/metadata blocks after successful 44035f634d06SAneesh Kumar K.V * block allocation which had been deferred till now. 44045f634d06SAneesh Kumar K.V */ 44057b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 440681fdbb4aSYongqiang Yang unsigned int reserved_clusters; 44077b415bf6SAditya Kali /* 440881fdbb4aSYongqiang Yang * Check how many clusters we had reserved this allocated range 44097b415bf6SAditya Kali */ 44107b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode, 44117b415bf6SAditya Kali map->m_lblk, allocated); 44127b415bf6SAditya Kali if (map->m_flags & EXT4_MAP_FROM_CLUSTER) { 44137b415bf6SAditya Kali if (reserved_clusters) { 44147b415bf6SAditya Kali /* 44157b415bf6SAditya Kali * We have clusters reserved for this range. 44167b415bf6SAditya Kali * But since we are not doing actual allocation 44177b415bf6SAditya Kali * and are simply using blocks from previously 44187b415bf6SAditya Kali * allocated cluster, we should release the 44197b415bf6SAditya Kali * reservation and not claim quota. 44207b415bf6SAditya Kali */ 44217b415bf6SAditya Kali ext4_da_update_reserve_space(inode, 44227b415bf6SAditya Kali reserved_clusters, 0); 44237b415bf6SAditya Kali } 44247b415bf6SAditya Kali } else { 44257b415bf6SAditya Kali BUG_ON(allocated_clusters < reserved_clusters); 44267b415bf6SAditya Kali if (reserved_clusters < allocated_clusters) { 44275356f261SAditya Kali struct ext4_inode_info *ei = EXT4_I(inode); 44287b415bf6SAditya Kali int reservation = allocated_clusters - 44297b415bf6SAditya Kali reserved_clusters; 44307b415bf6SAditya Kali /* 44317b415bf6SAditya Kali * It seems we claimed few clusters outside of 44327b415bf6SAditya Kali * the range of this allocation. We should give 44337b415bf6SAditya Kali * it back to the reservation pool. This can 44347b415bf6SAditya Kali * happen in the following case: 44357b415bf6SAditya Kali * 44367b415bf6SAditya Kali * * Suppose s_cluster_ratio is 4 (i.e., each 44377b415bf6SAditya Kali * cluster has 4 blocks. Thus, the clusters 44387b415bf6SAditya Kali * are [0-3],[4-7],[8-11]... 44397b415bf6SAditya Kali * * First comes delayed allocation write for 44407b415bf6SAditya Kali * logical blocks 10 & 11. Since there were no 44417b415bf6SAditya Kali * previous delayed allocated blocks in the 44427b415bf6SAditya Kali * range [8-11], we would reserve 1 cluster 44437b415bf6SAditya Kali * for this write. 44447b415bf6SAditya Kali * * Next comes write for logical blocks 3 to 8. 
44457b415bf6SAditya Kali * In this case, we will reserve 2 clusters 44467b415bf6SAditya Kali * (for [0-3] and [4-7]; and not for [8-11] as 44477b415bf6SAditya Kali * that range has a delayed allocated blocks. 44487b415bf6SAditya Kali * Thus total reserved clusters now becomes 3. 44497b415bf6SAditya Kali * * Now, during the delayed allocation writeout 44507b415bf6SAditya Kali * time, we will first write blocks [3-8] and 44517b415bf6SAditya Kali * allocate 3 clusters for writing these 44527b415bf6SAditya Kali * blocks. Also, we would claim all these 44537b415bf6SAditya Kali * three clusters above. 44547b415bf6SAditya Kali * * Now when we come here to writeout the 44557b415bf6SAditya Kali * blocks [10-11], we would expect to claim 44567b415bf6SAditya Kali * the reservation of 1 cluster we had made 44577b415bf6SAditya Kali * (and we would claim it since there are no 44587b415bf6SAditya Kali * more delayed allocated blocks in the range 44597b415bf6SAditya Kali * [8-11]. But our reserved cluster count had 44607b415bf6SAditya Kali * already gone to 0. 44617b415bf6SAditya Kali * 44627b415bf6SAditya Kali * Thus, at the step 4 above when we determine 44637b415bf6SAditya Kali * that there are still some unwritten delayed 44647b415bf6SAditya Kali * allocated blocks outside of our current 44657b415bf6SAditya Kali * block range, we should increment the 44667b415bf6SAditya Kali * reserved clusters count so that when the 44677b415bf6SAditya Kali * remaining blocks finally gets written, we 44687b415bf6SAditya Kali * could claim them. 44697b415bf6SAditya Kali */ 44705356f261SAditya Kali dquot_reserve_block(inode, 44715356f261SAditya Kali EXT4_C2B(sbi, reservation)); 44725356f261SAditya Kali spin_lock(&ei->i_block_reservation_lock); 44735356f261SAditya Kali ei->i_reserved_data_blocks += reservation; 44745356f261SAditya Kali spin_unlock(&ei->i_block_reservation_lock); 44757b415bf6SAditya Kali } 4476232ec872SLukas Czerner /* 4477232ec872SLukas Czerner * We will claim quota for all newly allocated blocks. 4478232ec872SLukas Czerner * We're updating the reserved space *after* the 4479232ec872SLukas Czerner * correction above so we do not accidentally free 4480232ec872SLukas Czerner * all the metadata reservation because we might 4481232ec872SLukas Czerner * actually need it later on. 4482232ec872SLukas Czerner */ 4483232ec872SLukas Czerner ext4_da_update_reserve_space(inode, allocated_clusters, 4484232ec872SLukas Czerner 1); 44857b415bf6SAditya Kali } 44867b415bf6SAditya Kali } 44875f634d06SAneesh Kumar K.V 44885f634d06SAneesh Kumar K.V /* 4489b436b9beSJan Kara * Cache the extent and update transaction to commit on fdatasync only 4490b436b9beSJan Kara * when it is _not_ an uninitialized extent. 
4491b436b9beSJan Kara */ 449269eb33dcSZheng Liu if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) 4493b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 449469eb33dcSZheng Liu else 4495b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0); 4496a86c6181SAlex Tomas out: 4497e35fd660STheodore Ts'o if (allocated > map->m_len) 4498e35fd660STheodore Ts'o allocated = map->m_len; 4499a86c6181SAlex Tomas ext4_ext_show_leaf(inode, path); 4500e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 4501e35fd660STheodore Ts'o map->m_pblk = newblock; 4502e35fd660STheodore Ts'o map->m_len = allocated; 4503a86c6181SAlex Tomas out2: 4504a86c6181SAlex Tomas if (path) { 4505a86c6181SAlex Tomas ext4_ext_drop_refs(path); 4506a86c6181SAlex Tomas kfree(path); 4507a86c6181SAlex Tomas } 4508e861304bSAllison Henderson 450963b99968STheodore Ts'o trace_ext4_ext_map_blocks_exit(inode, flags, map, 451063b99968STheodore Ts'o err ? err : allocated); 451163b99968STheodore Ts'o ext4_es_lru_add(inode); 45127877191cSLukas Czerner return err ? err : allocated; 4513a86c6181SAlex Tomas } 4514a86c6181SAlex Tomas 4515819c4920STheodore Ts'o void ext4_ext_truncate(handle_t *handle, struct inode *inode) 4516a86c6181SAlex Tomas { 4517a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 4518725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 4519a86c6181SAlex Tomas int err = 0; 4520a86c6181SAlex Tomas 4521a86c6181SAlex Tomas /* 4522d0d856e8SRandy Dunlap * TODO: optimization is possible here. 4523d0d856e8SRandy Dunlap * Probably we need not scan at all, 4524d0d856e8SRandy Dunlap * because page truncation is enough. 4525a86c6181SAlex Tomas */ 4526a86c6181SAlex Tomas 4527a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */ 4528a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size; 4529a86c6181SAlex Tomas ext4_mark_inode_dirty(handle, inode); 4530a86c6181SAlex Tomas 4531a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1) 4532a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb); 45338acd5e9bSTheodore Ts'o retry: 453451865fdaSZheng Liu err = ext4_es_remove_extent(inode, last_block, 453551865fdaSZheng Liu EXT_MAX_BLOCKS - last_block); 453694eec0fcSTheodore Ts'o if (err == -ENOMEM) { 45378acd5e9bSTheodore Ts'o cond_resched(); 45388acd5e9bSTheodore Ts'o congestion_wait(BLK_RW_ASYNC, HZ/50); 45398acd5e9bSTheodore Ts'o goto retry; 45408acd5e9bSTheodore Ts'o } 45418acd5e9bSTheodore Ts'o if (err) { 45428acd5e9bSTheodore Ts'o ext4_std_error(inode->i_sb, err); 45438acd5e9bSTheodore Ts'o return; 45448acd5e9bSTheodore Ts'o } 45455f95d21fSLukas Czerner err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 45468acd5e9bSTheodore Ts'o ext4_std_error(inode->i_sb, err); 4547a86c6181SAlex Tomas } 4548a86c6181SAlex Tomas 4549*0e8b6879SLukas Czerner static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, 4550*0e8b6879SLukas Czerner ext4_lblk_t len, int flags, int mode) 4551a2df2a63SAmit Arora { 4552496ad9aaSAl Viro struct inode *inode = file_inode(file); 4553a2df2a63SAmit Arora handle_t *handle; 4554a2df2a63SAmit Arora int ret = 0; 4555a2df2a63SAmit Arora int ret2 = 0; 4556a2df2a63SAmit Arora int retries = 0; 45572ed88685STheodore Ts'o struct ext4_map_blocks map; 4558*0e8b6879SLukas Czerner unsigned int credits; 4559a2df2a63SAmit Arora 4560*0e8b6879SLukas Czerner map.m_lblk = offset; 45613c6fe770SGreg Harm /* 45623c6fe770SGreg Harm * Don't normalize the request if it can fit in one extent so 45633c6fe770SGreg Harm * that it doesn't get unnecessarily split 
into multiple 45643c6fe770SGreg Harm * extents. 45653c6fe770SGreg Harm */ 4566*0e8b6879SLukas Czerner if (len <= EXT_UNINIT_MAX_LEN) 45673c6fe770SGreg Harm flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; 456860d4616fSDmitry Monakhov 4569*0e8b6879SLukas Czerner /* 4570*0e8b6879SLukas Czerner * credits to insert 1 extent into extent tree 4571*0e8b6879SLukas Czerner */ 4572*0e8b6879SLukas Czerner credits = ext4_chunk_trans_blocks(inode, len); 4573*0e8b6879SLukas Czerner 4574a2df2a63SAmit Arora retry: 4575*0e8b6879SLukas Czerner while (ret >= 0 && ret < len) { 45762ed88685STheodore Ts'o map.m_lblk = map.m_lblk + ret; 4577*0e8b6879SLukas Czerner map.m_len = len = len - ret; 45789924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 45799924a92aSTheodore Ts'o credits); 4580a2df2a63SAmit Arora if (IS_ERR(handle)) { 4581a2df2a63SAmit Arora ret = PTR_ERR(handle); 4582a2df2a63SAmit Arora break; 4583a2df2a63SAmit Arora } 4584a4e5d88bSDmitry Monakhov ret = ext4_map_blocks(handle, inode, &map, flags); 4585221879c9SAneesh Kumar K.V if (ret <= 0) { 4586f282ac19SLukas Czerner ext4_debug("inode #%lu: block %u: len %u: " 4587b06acd38SLukas Czerner "ext4_ext_map_blocks returned %d", 4588b06acd38SLukas Czerner inode->i_ino, map.m_lblk, 4589b06acd38SLukas Czerner map.m_len, ret); 4590a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode); 4591a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4592a2df2a63SAmit Arora break; 4593a2df2a63SAmit Arora } 4594a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4595a2df2a63SAmit Arora if (ret2) 4596a2df2a63SAmit Arora break; 4597a2df2a63SAmit Arora } 4598fd28784aSAneesh Kumar K.V if (ret == -ENOSPC && 4599fd28784aSAneesh Kumar K.V ext4_should_retry_alloc(inode->i_sb, &retries)) { 4600fd28784aSAneesh Kumar K.V ret = 0; 4601a2df2a63SAmit Arora goto retry; 4602a2df2a63SAmit Arora } 4603f282ac19SLukas Czerner 4604*0e8b6879SLukas Czerner return ret > 0 ? ret2 : ret; 4605*0e8b6879SLukas Czerner } 4606*0e8b6879SLukas Czerner 4607*0e8b6879SLukas Czerner /* 4608*0e8b6879SLukas Czerner * preallocate space for a file. This implements ext4's fallocate file 4609*0e8b6879SLukas Czerner * operation, which gets called from sys_fallocate system call. 4610*0e8b6879SLukas Czerner * For block-mapped files, posix_fallocate should fall back to the method 4611*0e8b6879SLukas Czerner * of writing zeroes to the required new blocks (the same behavior which is 4612*0e8b6879SLukas Czerner * expected for file systems which do not support fallocate() system call). 
4613*0e8b6879SLukas Czerner */ 4614*0e8b6879SLukas Czerner long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4615*0e8b6879SLukas Czerner { 4616*0e8b6879SLukas Czerner struct inode *inode = file_inode(file); 4617*0e8b6879SLukas Czerner handle_t *handle; 4618*0e8b6879SLukas Czerner loff_t new_size = 0; 4619*0e8b6879SLukas Czerner unsigned int max_blocks; 4620*0e8b6879SLukas Czerner int ret = 0; 4621*0e8b6879SLukas Czerner int flags; 4622*0e8b6879SLukas Czerner ext4_lblk_t lblk; 4623*0e8b6879SLukas Czerner struct timespec tv; 4624*0e8b6879SLukas Czerner unsigned int blkbits = inode->i_blkbits; 4625*0e8b6879SLukas Czerner 4626*0e8b6879SLukas Czerner /* Return error if mode is not supported */ 4627*0e8b6879SLukas Czerner if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | 4628*0e8b6879SLukas Czerner FALLOC_FL_COLLAPSE_RANGE)) 4629*0e8b6879SLukas Czerner return -EOPNOTSUPP; 4630*0e8b6879SLukas Czerner 4631*0e8b6879SLukas Czerner if (mode & FALLOC_FL_PUNCH_HOLE) 4632*0e8b6879SLukas Czerner return ext4_punch_hole(inode, offset, len); 4633*0e8b6879SLukas Czerner 4634*0e8b6879SLukas Czerner if (mode & FALLOC_FL_COLLAPSE_RANGE) 4635*0e8b6879SLukas Czerner return ext4_collapse_range(inode, offset, len); 4636*0e8b6879SLukas Czerner 4637*0e8b6879SLukas Czerner ret = ext4_convert_inline_data(inode); 4638*0e8b6879SLukas Czerner if (ret) 4639*0e8b6879SLukas Czerner return ret; 4640*0e8b6879SLukas Czerner 4641*0e8b6879SLukas Czerner /* 4642*0e8b6879SLukas Czerner * currently supporting (pre)allocate mode for extent-based 4643*0e8b6879SLukas Czerner * files _only_ 4644*0e8b6879SLukas Czerner */ 4645*0e8b6879SLukas Czerner if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 4646*0e8b6879SLukas Czerner return -EOPNOTSUPP; 4647*0e8b6879SLukas Czerner 4648*0e8b6879SLukas Czerner trace_ext4_fallocate_enter(inode, offset, len, mode); 4649*0e8b6879SLukas Czerner lblk = offset >> blkbits; 4650*0e8b6879SLukas Czerner /* 4651*0e8b6879SLukas Czerner * We can't just convert len to max_blocks because 4652*0e8b6879SLukas Czerner * If blocksize = 4096 offset = 3072 and len = 2048 4653*0e8b6879SLukas Czerner */ 4654*0e8b6879SLukas Czerner max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 4655*0e8b6879SLukas Czerner - lblk; 4656*0e8b6879SLukas Czerner 4657*0e8b6879SLukas Czerner flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT; 4658*0e8b6879SLukas Czerner if (mode & FALLOC_FL_KEEP_SIZE) 4659*0e8b6879SLukas Czerner flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 4660*0e8b6879SLukas Czerner 4661*0e8b6879SLukas Czerner mutex_lock(&inode->i_mutex); 4662*0e8b6879SLukas Czerner 4663*0e8b6879SLukas Czerner if (!(mode & FALLOC_FL_KEEP_SIZE) && 4664*0e8b6879SLukas Czerner offset + len > i_size_read(inode)) { 4665*0e8b6879SLukas Czerner new_size = offset + len; 4666*0e8b6879SLukas Czerner ret = inode_newsize_ok(inode, new_size); 4667*0e8b6879SLukas Czerner if (ret) 4668*0e8b6879SLukas Czerner goto out; 4669*0e8b6879SLukas Czerner } 4670*0e8b6879SLukas Czerner 4671*0e8b6879SLukas Czerner ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, mode); 4672*0e8b6879SLukas Czerner if (ret) 4673*0e8b6879SLukas Czerner goto out; 4674*0e8b6879SLukas Czerner 4675f282ac19SLukas Czerner handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 4676f282ac19SLukas Czerner if (IS_ERR(handle)) 4677f282ac19SLukas Czerner goto out; 4678f282ac19SLukas Czerner 4679f282ac19SLukas Czerner tv = inode->i_ctime = ext4_current_time(inode); 4680f282ac19SLukas Czerner 4681*0e8b6879SLukas Czerner if (!ret && new_size) { 
4682f282ac19SLukas Czerner if (new_size > i_size_read(inode)) { 4683f282ac19SLukas Czerner i_size_write(inode, new_size); 4684f282ac19SLukas Czerner inode->i_mtime = tv; 4685f282ac19SLukas Czerner } 4686f282ac19SLukas Czerner if (new_size > EXT4_I(inode)->i_disksize) 4687f282ac19SLukas Czerner ext4_update_i_disksize(inode, new_size); 4688*0e8b6879SLukas Czerner } else if (!ret && !new_size) { 4689f282ac19SLukas Czerner /* 4690f282ac19SLukas Czerner * Mark that we allocate beyond EOF so the subsequent truncate 4691f282ac19SLukas Czerner * can proceed even if the new size is the same as i_size. 4692f282ac19SLukas Czerner */ 4693f282ac19SLukas Czerner if ((offset + len) > i_size_read(inode)) 4694f282ac19SLukas Czerner ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4695f282ac19SLukas Czerner } 4696f282ac19SLukas Czerner ext4_mark_inode_dirty(handle, inode); 4697f282ac19SLukas Czerner if (file->f_flags & O_SYNC) 4698f282ac19SLukas Czerner ext4_handle_sync(handle); 4699f282ac19SLukas Czerner 4700f282ac19SLukas Czerner ext4_journal_stop(handle); 4701f282ac19SLukas Czerner out: 470255bd725aSAneesh Kumar K.V mutex_unlock(&inode->i_mutex); 4703*0e8b6879SLukas Czerner trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 4704*0e8b6879SLukas Czerner return ret; 4705a2df2a63SAmit Arora } 47066873fa0dSEric Sandeen 47076873fa0dSEric Sandeen /* 47080031462bSMingming Cao * This function convert a range of blocks to written extents 47090031462bSMingming Cao * The caller of this function will pass the start offset and the size. 47100031462bSMingming Cao * all unwritten extents within this range will be converted to 47110031462bSMingming Cao * written extents. 47120031462bSMingming Cao * 47130031462bSMingming Cao * This function is called from the direct IO end io call back 47140031462bSMingming Cao * function, to convert the fallocated extents after IO is completed. 4715109f5565SMingming * Returns 0 on success. 47160031462bSMingming Cao */ 47176b523df4SJan Kara int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode, 47186b523df4SJan Kara loff_t offset, ssize_t len) 47190031462bSMingming Cao { 47200031462bSMingming Cao unsigned int max_blocks; 47210031462bSMingming Cao int ret = 0; 47220031462bSMingming Cao int ret2 = 0; 47232ed88685STheodore Ts'o struct ext4_map_blocks map; 47240031462bSMingming Cao unsigned int credits, blkbits = inode->i_blkbits; 47250031462bSMingming Cao 47262ed88685STheodore Ts'o map.m_lblk = offset >> blkbits; 47270031462bSMingming Cao /* 47280031462bSMingming Cao * We can't just convert len to max_blocks because 47290031462bSMingming Cao * If blocksize = 4096 offset = 3072 and len = 2048 47300031462bSMingming Cao */ 47312ed88685STheodore Ts'o max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - 47322ed88685STheodore Ts'o map.m_lblk); 47330031462bSMingming Cao /* 47346b523df4SJan Kara * This is somewhat ugly but the idea is clear: When transaction is 47356b523df4SJan Kara * reserved, everything goes into it. Otherwise we rather start several 47366b523df4SJan Kara * smaller transactions for conversion of each extent separately. 
47376b523df4SJan Kara */
47386b523df4SJan Kara if (handle) {
47396b523df4SJan Kara handle = ext4_journal_start_reserved(handle,
47406b523df4SJan Kara EXT4_HT_EXT_CONVERT);
47416b523df4SJan Kara if (IS_ERR(handle))
47426b523df4SJan Kara return PTR_ERR(handle);
47436b523df4SJan Kara credits = 0;
47446b523df4SJan Kara } else {
47456b523df4SJan Kara /*
47460031462bSMingming Cao * credits to insert 1 extent into extent tree
47470031462bSMingming Cao */
47480031462bSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks);
47496b523df4SJan Kara }
47500031462bSMingming Cao while (ret >= 0 && ret < max_blocks) {
47512ed88685STheodore Ts'o map.m_lblk += ret;
47522ed88685STheodore Ts'o map.m_len = (max_blocks -= ret);
47536b523df4SJan Kara if (credits) {
47546b523df4SJan Kara handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
47556b523df4SJan Kara credits);
47560031462bSMingming Cao if (IS_ERR(handle)) {
47570031462bSMingming Cao ret = PTR_ERR(handle);
47580031462bSMingming Cao break;
47590031462bSMingming Cao }
47606b523df4SJan Kara }
47612ed88685STheodore Ts'o ret = ext4_map_blocks(handle, inode, &map,
4762c7064ef1SJiaying Zhang EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4763b06acd38SLukas Czerner if (ret <= 0)
4764b06acd38SLukas Czerner ext4_warning(inode->i_sb,
4765b06acd38SLukas Czerner "inode #%lu: block %u: len %u: "
476692b97816STheodore Ts'o "ext4_ext_map_blocks returned %d",
4767b06acd38SLukas Czerner inode->i_ino, map.m_lblk,
476892b97816STheodore Ts'o map.m_len, ret);
47690031462bSMingming Cao ext4_mark_inode_dirty(handle, inode);
47706b523df4SJan Kara if (credits)
47710031462bSMingming Cao ret2 = ext4_journal_stop(handle);
47720031462bSMingming Cao if (ret <= 0 || ret2)
47730031462bSMingming Cao break;
47740031462bSMingming Cao }
47756b523df4SJan Kara if (!credits)
47766b523df4SJan Kara ret2 = ext4_journal_stop(handle);
47770031462bSMingming Cao return ret > 0 ? ret2 : ret;
47780031462bSMingming Cao }
47796d9c85ebSYongqiang Yang
47800031462bSMingming Cao /*
478169eb33dcSZheng Liu * If newes is not an existing extent (newes->es_pblk equals zero), find
478269eb33dcSZheng Liu * the delayed extent at the start of newes, update newes accordingly, and
478391dd8c11SLukas Czerner * return the start of the next delayed extent.
478491dd8c11SLukas Czerner *
478569eb33dcSZheng Liu * If newes is an existing extent (newes->es_pblk is not zero), return the
478691dd8c11SLukas Czerner * start of the next delayed extent or EXT_MAX_BLOCKS if no delayed
478769eb33dcSZheng Liu * extent is found. Leave newes unmodified.
47886873fa0dSEric Sandeen */
478991dd8c11SLukas Czerner static int ext4_find_delayed_extent(struct inode *inode,
479069eb33dcSZheng Liu struct extent_status *newes)
47916873fa0dSEric Sandeen {
4792b3aff3e3SZheng Liu struct extent_status es;
4793be401363SZheng Liu ext4_lblk_t block, next_del;
47946873fa0dSEric Sandeen
479569eb33dcSZheng Liu if (newes->es_pblk == 0) {
4796e30b5dcaSYan, Zheng ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
4797e30b5dcaSYan, Zheng newes->es_lblk + newes->es_len - 1, &es);
4798e30b5dcaSYan, Zheng
47996d9c85ebSYongqiang Yang /*
480069eb33dcSZheng Liu * No extent in the extent tree contains block @newes->es_pblk, so
48016d9c85ebSYongqiang Yang * the block may lie in 1) a hole or 2) a delayed extent.
48026d9c85ebSYongqiang Yang */
480306b0c886SZheng Liu if (es.es_len == 0)
4804b3aff3e3SZheng Liu /* A hole found. */
480591dd8c11SLukas Czerner return 0;
48066d9c85ebSYongqiang Yang
480769eb33dcSZheng Liu if (es.es_lblk > newes->es_lblk) {
4808b3aff3e3SZheng Liu /* A hole found.
*/ 480969eb33dcSZheng Liu newes->es_len = min(es.es_lblk - newes->es_lblk, 481069eb33dcSZheng Liu newes->es_len); 481191dd8c11SLukas Czerner return 0; 48126873fa0dSEric Sandeen } 48136d9c85ebSYongqiang Yang 481469eb33dcSZheng Liu newes->es_len = es.es_lblk + es.es_len - newes->es_lblk; 48156d9c85ebSYongqiang Yang } 48166873fa0dSEric Sandeen 481769eb33dcSZheng Liu block = newes->es_lblk + newes->es_len; 4818e30b5dcaSYan, Zheng ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es); 4819be401363SZheng Liu if (es.es_len == 0) 4820be401363SZheng Liu next_del = EXT_MAX_BLOCKS; 4821be401363SZheng Liu else 4822be401363SZheng Liu next_del = es.es_lblk; 4823be401363SZheng Liu 482491dd8c11SLukas Czerner return next_del; 48256873fa0dSEric Sandeen } 48266873fa0dSEric Sandeen /* fiemap flags we can handle specified here */ 48276873fa0dSEric Sandeen #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 48286873fa0dSEric Sandeen 48293a06d778SAneesh Kumar K.V static int ext4_xattr_fiemap(struct inode *inode, 48303a06d778SAneesh Kumar K.V struct fiemap_extent_info *fieinfo) 48316873fa0dSEric Sandeen { 48326873fa0dSEric Sandeen __u64 physical = 0; 48336873fa0dSEric Sandeen __u64 length; 48346873fa0dSEric Sandeen __u32 flags = FIEMAP_EXTENT_LAST; 48356873fa0dSEric Sandeen int blockbits = inode->i_sb->s_blocksize_bits; 48366873fa0dSEric Sandeen int error = 0; 48376873fa0dSEric Sandeen 48386873fa0dSEric Sandeen /* in-inode? */ 483919f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 48406873fa0dSEric Sandeen struct ext4_iloc iloc; 48416873fa0dSEric Sandeen int offset; /* offset of xattr in inode */ 48426873fa0dSEric Sandeen 48436873fa0dSEric Sandeen error = ext4_get_inode_loc(inode, &iloc); 48446873fa0dSEric Sandeen if (error) 48456873fa0dSEric Sandeen return error; 4846a60697f4SJan Kara physical = (__u64)iloc.bh->b_blocknr << blockbits; 48476873fa0dSEric Sandeen offset = EXT4_GOOD_OLD_INODE_SIZE + 48486873fa0dSEric Sandeen EXT4_I(inode)->i_extra_isize; 48496873fa0dSEric Sandeen physical += offset; 48506873fa0dSEric Sandeen length = EXT4_SB(inode->i_sb)->s_inode_size - offset; 48516873fa0dSEric Sandeen flags |= FIEMAP_EXTENT_DATA_INLINE; 4852fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 48536873fa0dSEric Sandeen } else { /* external block */ 4854a60697f4SJan Kara physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; 48556873fa0dSEric Sandeen length = inode->i_sb->s_blocksize; 48566873fa0dSEric Sandeen } 48576873fa0dSEric Sandeen 48586873fa0dSEric Sandeen if (physical) 48596873fa0dSEric Sandeen error = fiemap_fill_next_extent(fieinfo, 0, physical, 48606873fa0dSEric Sandeen length, flags); 48616873fa0dSEric Sandeen return (error < 0 ? 
error : 0); 48626873fa0dSEric Sandeen } 48636873fa0dSEric Sandeen 48646873fa0dSEric Sandeen int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 48656873fa0dSEric Sandeen __u64 start, __u64 len) 48666873fa0dSEric Sandeen { 48676873fa0dSEric Sandeen ext4_lblk_t start_blk; 48686873fa0dSEric Sandeen int error = 0; 48696873fa0dSEric Sandeen 487094191985STao Ma if (ext4_has_inline_data(inode)) { 487194191985STao Ma int has_inline = 1; 487294191985STao Ma 487394191985STao Ma error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline); 487494191985STao Ma 487594191985STao Ma if (has_inline) 487694191985STao Ma return error; 487794191985STao Ma } 487894191985STao Ma 48797869a4a6STheodore Ts'o if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { 48807869a4a6STheodore Ts'o error = ext4_ext_precache(inode); 48817869a4a6STheodore Ts'o if (error) 48827869a4a6STheodore Ts'o return error; 48837869a4a6STheodore Ts'o } 48847869a4a6STheodore Ts'o 48856873fa0dSEric Sandeen /* fallback to generic here if not in extents fmt */ 488612e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 48876873fa0dSEric Sandeen return generic_block_fiemap(inode, fieinfo, start, len, 48886873fa0dSEric Sandeen ext4_get_block); 48896873fa0dSEric Sandeen 48906873fa0dSEric Sandeen if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 48916873fa0dSEric Sandeen return -EBADR; 48926873fa0dSEric Sandeen 48936873fa0dSEric Sandeen if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 48946873fa0dSEric Sandeen error = ext4_xattr_fiemap(inode, fieinfo); 48956873fa0dSEric Sandeen } else { 4896aca92ff6SLeonard Michlmayr ext4_lblk_t len_blks; 4897aca92ff6SLeonard Michlmayr __u64 last_blk; 4898aca92ff6SLeonard Michlmayr 48996873fa0dSEric Sandeen start_blk = start >> inode->i_sb->s_blocksize_bits; 4900aca92ff6SLeonard Michlmayr last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; 4901f17722f9SLukas Czerner if (last_blk >= EXT_MAX_BLOCKS) 4902f17722f9SLukas Czerner last_blk = EXT_MAX_BLOCKS-1; 4903aca92ff6SLeonard Michlmayr len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; 49046873fa0dSEric Sandeen 49056873fa0dSEric Sandeen /* 490691dd8c11SLukas Czerner * Walk the extent tree gathering extent information 490791dd8c11SLukas Czerner * and pushing extents back to the user. 49086873fa0dSEric Sandeen */ 490991dd8c11SLukas Czerner error = ext4_fill_fiemap_extents(inode, start_blk, 491091dd8c11SLukas Czerner len_blks, fieinfo); 49116873fa0dSEric Sandeen } 4912107a7bd3STheodore Ts'o ext4_es_lru_add(inode); 49136873fa0dSEric Sandeen return error; 49146873fa0dSEric Sandeen } 49159eb79482SNamjae Jeon 49169eb79482SNamjae Jeon /* 49179eb79482SNamjae Jeon * ext4_access_path: 49189eb79482SNamjae Jeon * Function to access the path buffer for marking it dirty. 49199eb79482SNamjae Jeon * It also checks if there are sufficient credits left in the journal handle 49209eb79482SNamjae Jeon * to update path. 
49219eb79482SNamjae Jeon */ 49229eb79482SNamjae Jeon static int 49239eb79482SNamjae Jeon ext4_access_path(handle_t *handle, struct inode *inode, 49249eb79482SNamjae Jeon struct ext4_ext_path *path) 49259eb79482SNamjae Jeon { 49269eb79482SNamjae Jeon int credits, err; 49279eb79482SNamjae Jeon 49289eb79482SNamjae Jeon if (!ext4_handle_valid(handle)) 49299eb79482SNamjae Jeon return 0; 49309eb79482SNamjae Jeon 49319eb79482SNamjae Jeon /* 49329eb79482SNamjae Jeon * Check if need to extend journal credits 49339eb79482SNamjae Jeon * 3 for leaf, sb, and inode plus 2 (bmap and group 49349eb79482SNamjae Jeon * descriptor) for each block group; assume two block 49359eb79482SNamjae Jeon * groups 49369eb79482SNamjae Jeon */ 49379eb79482SNamjae Jeon if (handle->h_buffer_credits < 7) { 49389eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 49399eb79482SNamjae Jeon err = ext4_ext_truncate_extend_restart(handle, inode, credits); 49409eb79482SNamjae Jeon /* EAGAIN is success */ 49419eb79482SNamjae Jeon if (err && err != -EAGAIN) 49429eb79482SNamjae Jeon return err; 49439eb79482SNamjae Jeon } 49449eb79482SNamjae Jeon 49459eb79482SNamjae Jeon err = ext4_ext_get_access(handle, inode, path); 49469eb79482SNamjae Jeon return err; 49479eb79482SNamjae Jeon } 49489eb79482SNamjae Jeon 49499eb79482SNamjae Jeon /* 49509eb79482SNamjae Jeon * ext4_ext_shift_path_extents: 49519eb79482SNamjae Jeon * Shift the extents of a path structure lying between path[depth].p_ext 49529eb79482SNamjae Jeon * and EXT_LAST_EXTENT(path[depth].p_hdr) downwards, by subtracting shift 49539eb79482SNamjae Jeon * from starting block for each extent. 49549eb79482SNamjae Jeon */ 49559eb79482SNamjae Jeon static int 49569eb79482SNamjae Jeon ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift, 49579eb79482SNamjae Jeon struct inode *inode, handle_t *handle, 49589eb79482SNamjae Jeon ext4_lblk_t *start) 49599eb79482SNamjae Jeon { 49609eb79482SNamjae Jeon int depth, err = 0; 49619eb79482SNamjae Jeon struct ext4_extent *ex_start, *ex_last; 49629eb79482SNamjae Jeon bool update = 0; 49639eb79482SNamjae Jeon depth = path->p_depth; 49649eb79482SNamjae Jeon 49659eb79482SNamjae Jeon while (depth >= 0) { 49669eb79482SNamjae Jeon if (depth == path->p_depth) { 49679eb79482SNamjae Jeon ex_start = path[depth].p_ext; 49689eb79482SNamjae Jeon if (!ex_start) 49699eb79482SNamjae Jeon return -EIO; 49709eb79482SNamjae Jeon 49719eb79482SNamjae Jeon ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 49729eb79482SNamjae Jeon if (!ex_last) 49739eb79482SNamjae Jeon return -EIO; 49749eb79482SNamjae Jeon 49759eb79482SNamjae Jeon err = ext4_access_path(handle, inode, path + depth); 49769eb79482SNamjae Jeon if (err) 49779eb79482SNamjae Jeon goto out; 49789eb79482SNamjae Jeon 49799eb79482SNamjae Jeon if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) 49809eb79482SNamjae Jeon update = 1; 49819eb79482SNamjae Jeon 49829eb79482SNamjae Jeon *start = ex_last->ee_block + 49839eb79482SNamjae Jeon ext4_ext_get_actual_len(ex_last); 49849eb79482SNamjae Jeon 49859eb79482SNamjae Jeon while (ex_start <= ex_last) { 49869eb79482SNamjae Jeon ex_start->ee_block -= shift; 49879eb79482SNamjae Jeon if (ex_start > 49889eb79482SNamjae Jeon EXT_FIRST_EXTENT(path[depth].p_hdr)) { 49899eb79482SNamjae Jeon if (ext4_ext_try_to_merge_right(inode, 49909eb79482SNamjae Jeon path, ex_start - 1)) 49919eb79482SNamjae Jeon ex_last--; 49929eb79482SNamjae Jeon } 49939eb79482SNamjae Jeon ex_start++; 49949eb79482SNamjae Jeon } 49959eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, 
path + depth);
49969eb79482SNamjae Jeon if (err)
49979eb79482SNamjae Jeon goto out;
49989eb79482SNamjae Jeon
49999eb79482SNamjae Jeon if (--depth < 0 || !update)
50009eb79482SNamjae Jeon break;
50019eb79482SNamjae Jeon }
50029eb79482SNamjae Jeon
50039eb79482SNamjae Jeon /* Update index too */
50049eb79482SNamjae Jeon err = ext4_access_path(handle, inode, path + depth);
50059eb79482SNamjae Jeon if (err)
50069eb79482SNamjae Jeon goto out;
50079eb79482SNamjae Jeon
50089eb79482SNamjae Jeon path[depth].p_idx->ei_block -= shift;
50099eb79482SNamjae Jeon err = ext4_ext_dirty(handle, inode, path + depth);
50109eb79482SNamjae Jeon if (err)
50119eb79482SNamjae Jeon goto out;
50129eb79482SNamjae Jeon
50139eb79482SNamjae Jeon /* we are done if current index is not a starting index */
50149eb79482SNamjae Jeon if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
50159eb79482SNamjae Jeon break;
50169eb79482SNamjae Jeon
50179eb79482SNamjae Jeon depth--;
50189eb79482SNamjae Jeon }
50199eb79482SNamjae Jeon
50209eb79482SNamjae Jeon out:
50219eb79482SNamjae Jeon return err;
50229eb79482SNamjae Jeon }
50239eb79482SNamjae Jeon
50249eb79482SNamjae Jeon /*
50259eb79482SNamjae Jeon * ext4_ext_shift_extents:
50269eb79482SNamjae Jeon * All the extents which lie in the range from start to the last allocated
50279eb79482SNamjae Jeon * block for the file are shifted downwards by shift blocks.
50289eb79482SNamjae Jeon * On success, 0 is returned; an error code otherwise.
50299eb79482SNamjae Jeon */
50309eb79482SNamjae Jeon static int
50319eb79482SNamjae Jeon ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
50329eb79482SNamjae Jeon ext4_lblk_t start, ext4_lblk_t shift)
50339eb79482SNamjae Jeon {
50349eb79482SNamjae Jeon struct ext4_ext_path *path;
50359eb79482SNamjae Jeon int ret = 0, depth;
50369eb79482SNamjae Jeon struct ext4_extent *extent;
50379eb79482SNamjae Jeon ext4_lblk_t stop_block, current_block;
50389eb79482SNamjae Jeon ext4_lblk_t ex_start, ex_end;
50399eb79482SNamjae Jeon
50409eb79482SNamjae Jeon /* Let path point to the last extent */
50419eb79482SNamjae Jeon path = ext4_ext_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
50429eb79482SNamjae Jeon if (IS_ERR(path))
50439eb79482SNamjae Jeon return PTR_ERR(path);
50449eb79482SNamjae Jeon
50459eb79482SNamjae Jeon depth = path->p_depth;
50469eb79482SNamjae Jeon extent = path[depth].p_ext;
50479eb79482SNamjae Jeon if (!extent) {
50489eb79482SNamjae Jeon ext4_ext_drop_refs(path);
50499eb79482SNamjae Jeon kfree(path);
50509eb79482SNamjae Jeon return ret;
50519eb79482SNamjae Jeon }
50529eb79482SNamjae Jeon
50539eb79482SNamjae Jeon stop_block = extent->ee_block + ext4_ext_get_actual_len(extent);
50549eb79482SNamjae Jeon ext4_ext_drop_refs(path);
50559eb79482SNamjae Jeon kfree(path);
50569eb79482SNamjae Jeon
50579eb79482SNamjae Jeon /* Nothing to shift if the hole is at the end of the file */
50589eb79482SNamjae Jeon if (start >= stop_block)
50599eb79482SNamjae Jeon return ret;
50609eb79482SNamjae Jeon
50619eb79482SNamjae Jeon /*
50629eb79482SNamjae Jeon * Don't start shifting extents until we make sure the hole is big
50639eb79482SNamjae Jeon * enough to accommodate the shift.
50649eb79482SNamjae Jeon */
50659eb79482SNamjae Jeon path = ext4_ext_find_extent(inode, start - 1, NULL, 0);
50669eb79482SNamjae Jeon depth = path->p_depth;
50679eb79482SNamjae Jeon extent = path[depth].p_ext;
50689eb79482SNamjae Jeon ex_start = extent->ee_block;
50699eb79482SNamjae Jeon ex_end = extent->ee_block + ext4_ext_get_actual_len(extent);
50709eb79482SNamjae Jeon ext4_ext_drop_refs(path);
50719eb79482SNamjae Jeon kfree(path);
50729eb79482SNamjae Jeon
50739eb79482SNamjae Jeon if ((start == ex_start && shift > ex_start) ||
50749eb79482SNamjae Jeon (shift > start - ex_end))
50759eb79482SNamjae Jeon return -EINVAL;
50769eb79482SNamjae Jeon
50779eb79482SNamjae Jeon /* It's safe to start updating extents */
50789eb79482SNamjae Jeon while (start < stop_block) {
50799eb79482SNamjae Jeon path = ext4_ext_find_extent(inode, start, NULL, 0);
50809eb79482SNamjae Jeon if (IS_ERR(path))
50819eb79482SNamjae Jeon return PTR_ERR(path);
50829eb79482SNamjae Jeon depth = path->p_depth;
50839eb79482SNamjae Jeon extent = path[depth].p_ext;
50849eb79482SNamjae Jeon current_block = extent->ee_block;
50859eb79482SNamjae Jeon if (start > current_block) {
50869eb79482SNamjae Jeon /* Hole, move to the next extent */
50879eb79482SNamjae Jeon ret = mext_next_extent(inode, path, &extent);
50889eb79482SNamjae Jeon if (ret != 0) {
50899eb79482SNamjae Jeon ext4_ext_drop_refs(path);
50909eb79482SNamjae Jeon kfree(path);
50919eb79482SNamjae Jeon if (ret == 1)
50929eb79482SNamjae Jeon ret = 0;
50939eb79482SNamjae Jeon break;
50949eb79482SNamjae Jeon }
50959eb79482SNamjae Jeon }
50969eb79482SNamjae Jeon ret = ext4_ext_shift_path_extents(path, shift, inode,
50979eb79482SNamjae Jeon handle, &start);
50989eb79482SNamjae Jeon ext4_ext_drop_refs(path);
50999eb79482SNamjae Jeon kfree(path);
51009eb79482SNamjae Jeon if (ret)
51019eb79482SNamjae Jeon break;
51029eb79482SNamjae Jeon }
51039eb79482SNamjae Jeon
51049eb79482SNamjae Jeon return ret;
51059eb79482SNamjae Jeon }
51069eb79482SNamjae Jeon
51079eb79482SNamjae Jeon /*
51089eb79482SNamjae Jeon * ext4_collapse_range:
51099eb79482SNamjae Jeon * This implements fallocate's collapse-range functionality for ext4.
51109eb79482SNamjae Jeon * Returns: 0 on success, non-zero on error.
51119eb79482SNamjae Jeon */
51129eb79482SNamjae Jeon int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
51139eb79482SNamjae Jeon {
51149eb79482SNamjae Jeon struct super_block *sb = inode->i_sb;
51159eb79482SNamjae Jeon ext4_lblk_t punch_start, punch_stop;
51169eb79482SNamjae Jeon handle_t *handle;
51179eb79482SNamjae Jeon unsigned int credits;
51189eb79482SNamjae Jeon loff_t new_size;
51199eb79482SNamjae Jeon int ret;
51209eb79482SNamjae Jeon
51219eb79482SNamjae Jeon BUG_ON(offset + len > i_size_read(inode));
51229eb79482SNamjae Jeon
51239eb79482SNamjae Jeon /* Collapse range works only on fs block size aligned offsets.
*/ 51249eb79482SNamjae Jeon if (offset & (EXT4_BLOCK_SIZE(sb) - 1) || 51259eb79482SNamjae Jeon len & (EXT4_BLOCK_SIZE(sb) - 1)) 51269eb79482SNamjae Jeon return -EINVAL; 51279eb79482SNamjae Jeon 51289eb79482SNamjae Jeon if (!S_ISREG(inode->i_mode)) 51299eb79482SNamjae Jeon return -EOPNOTSUPP; 51309eb79482SNamjae Jeon 51319eb79482SNamjae Jeon trace_ext4_collapse_range(inode, offset, len); 51329eb79482SNamjae Jeon 51339eb79482SNamjae Jeon punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb); 51349eb79482SNamjae Jeon punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb); 51359eb79482SNamjae Jeon 51369eb79482SNamjae Jeon /* Write out all dirty pages */ 51379eb79482SNamjae Jeon ret = filemap_write_and_wait_range(inode->i_mapping, offset, -1); 51389eb79482SNamjae Jeon if (ret) 51399eb79482SNamjae Jeon return ret; 51409eb79482SNamjae Jeon 51419eb79482SNamjae Jeon /* Take mutex lock */ 51429eb79482SNamjae Jeon mutex_lock(&inode->i_mutex); 51439eb79482SNamjae Jeon 51449eb79482SNamjae Jeon /* It's not possible punch hole on append only file */ 51459eb79482SNamjae Jeon if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { 51469eb79482SNamjae Jeon ret = -EPERM; 51479eb79482SNamjae Jeon goto out_mutex; 51489eb79482SNamjae Jeon } 51499eb79482SNamjae Jeon 51509eb79482SNamjae Jeon if (IS_SWAPFILE(inode)) { 51519eb79482SNamjae Jeon ret = -ETXTBSY; 51529eb79482SNamjae Jeon goto out_mutex; 51539eb79482SNamjae Jeon } 51549eb79482SNamjae Jeon 51559eb79482SNamjae Jeon /* Currently just for extent based files */ 51569eb79482SNamjae Jeon if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 51579eb79482SNamjae Jeon ret = -EOPNOTSUPP; 51589eb79482SNamjae Jeon goto out_mutex; 51599eb79482SNamjae Jeon } 51609eb79482SNamjae Jeon 51619eb79482SNamjae Jeon truncate_pagecache_range(inode, offset, -1); 51629eb79482SNamjae Jeon 51639eb79482SNamjae Jeon /* Wait for existing dio to complete */ 51649eb79482SNamjae Jeon ext4_inode_block_unlocked_dio(inode); 51659eb79482SNamjae Jeon inode_dio_wait(inode); 51669eb79482SNamjae Jeon 51679eb79482SNamjae Jeon credits = ext4_writepage_trans_blocks(inode); 51689eb79482SNamjae Jeon handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 51699eb79482SNamjae Jeon if (IS_ERR(handle)) { 51709eb79482SNamjae Jeon ret = PTR_ERR(handle); 51719eb79482SNamjae Jeon goto out_dio; 51729eb79482SNamjae Jeon } 51739eb79482SNamjae Jeon 51749eb79482SNamjae Jeon down_write(&EXT4_I(inode)->i_data_sem); 51759eb79482SNamjae Jeon ext4_discard_preallocations(inode); 51769eb79482SNamjae Jeon 51779eb79482SNamjae Jeon ret = ext4_es_remove_extent(inode, punch_start, 51789eb79482SNamjae Jeon EXT_MAX_BLOCKS - punch_start - 1); 51799eb79482SNamjae Jeon if (ret) { 51809eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 51819eb79482SNamjae Jeon goto out_stop; 51829eb79482SNamjae Jeon } 51839eb79482SNamjae Jeon 51849eb79482SNamjae Jeon ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1); 51859eb79482SNamjae Jeon if (ret) { 51869eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 51879eb79482SNamjae Jeon goto out_stop; 51889eb79482SNamjae Jeon } 51899eb79482SNamjae Jeon 51909eb79482SNamjae Jeon ret = ext4_ext_shift_extents(inode, handle, punch_stop, 51919eb79482SNamjae Jeon punch_stop - punch_start); 51929eb79482SNamjae Jeon if (ret) { 51939eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 51949eb79482SNamjae Jeon goto out_stop; 51959eb79482SNamjae Jeon } 51969eb79482SNamjae Jeon 51979eb79482SNamjae Jeon new_size = i_size_read(inode) - len; 51989eb79482SNamjae Jeon truncate_setsize(inode, 
new_size); 51999eb79482SNamjae Jeon EXT4_I(inode)->i_disksize = new_size; 52009eb79482SNamjae Jeon 52019eb79482SNamjae Jeon ext4_discard_preallocations(inode); 52029eb79482SNamjae Jeon up_write(&EXT4_I(inode)->i_data_sem); 52039eb79482SNamjae Jeon if (IS_SYNC(inode)) 52049eb79482SNamjae Jeon ext4_handle_sync(handle); 52059eb79482SNamjae Jeon inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 52069eb79482SNamjae Jeon ext4_mark_inode_dirty(handle, inode); 52079eb79482SNamjae Jeon 52089eb79482SNamjae Jeon out_stop: 52099eb79482SNamjae Jeon ext4_journal_stop(handle); 52109eb79482SNamjae Jeon out_dio: 52119eb79482SNamjae Jeon ext4_inode_resume_unlocked_dio(inode); 52129eb79482SNamjae Jeon out_mutex: 52139eb79482SNamjae Jeon mutex_unlock(&inode->i_mutex); 52149eb79482SNamjae Jeon return ret; 52159eb79482SNamjae Jeon } 5216
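/*
 * Illustrative sketch: how the preallocation and collapse-range paths
 * implemented above are typically driven from userspace via fallocate(2).
 * This snippet is not part of extents.c; the file path, the sizes, and the
 * assumptions of a 4 KiB block size and of a file larger than 64 KiB are
 * hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <linux/falloc.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/mnt/ext4/testfile", O_RDWR);
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *
 *		// Preallocate 1 MiB without changing i_size; this takes the
 *		// ext4_fallocate() -> ext4_alloc_file_blocks() path with
 *		// EXT4_GET_BLOCKS_KEEP_SIZE set.
 *		if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20))
 *			perror("fallocate(FALLOC_FL_KEEP_SIZE)");
 *
 *		// Drop the first 64 KiB and shift the rest of the file down;
 *		// this takes the ext4_collapse_range() path. Both offset and
 *		// len must be multiples of the filesystem block size, or the
 *		// kernel returns -EINVAL (see the alignment check above).
 *		if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 64 << 10))
 *			perror("fallocate(FALLOC_FL_COLLAPSE_RANGE)");
 *
 *		close(fd);
 *		return 0;
 *	}
 */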