1a86c6181SAlex Tomas /* 2a86c6181SAlex Tomas * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com 3a86c6181SAlex Tomas * Written by Alex Tomas <alex@clusterfs.com> 4a86c6181SAlex Tomas * 5a86c6181SAlex Tomas * Architecture independence: 6a86c6181SAlex Tomas * Copyright (c) 2005, Bull S.A. 7a86c6181SAlex Tomas * Written by Pierre Peiffer <pierre.peiffer@bull.net> 8a86c6181SAlex Tomas * 9a86c6181SAlex Tomas * This program is free software; you can redistribute it and/or modify 10a86c6181SAlex Tomas * it under the terms of the GNU General Public License version 2 as 11a86c6181SAlex Tomas * published by the Free Software Foundation. 12a86c6181SAlex Tomas * 13a86c6181SAlex Tomas * This program is distributed in the hope that it will be useful, 14a86c6181SAlex Tomas * but WITHOUT ANY WARRANTY; without even the implied warranty of 15a86c6181SAlex Tomas * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16a86c6181SAlex Tomas * GNU General Public License for more details. 17a86c6181SAlex Tomas * 18a86c6181SAlex Tomas * You should have received a copy of the GNU General Public Licens 19a86c6181SAlex Tomas * along with this program; if not, write to the Free Software 20a86c6181SAlex Tomas * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- 21a86c6181SAlex Tomas */ 22a86c6181SAlex Tomas 23a86c6181SAlex Tomas /* 24a86c6181SAlex Tomas * Extents support for EXT4 25a86c6181SAlex Tomas * 26a86c6181SAlex Tomas * TODO: 27a86c6181SAlex Tomas * - ext4*_error() should be used in some situations 28a86c6181SAlex Tomas * - analyze all BUG()/BUG_ON(), use -EIO where appropriate 29a86c6181SAlex Tomas * - smart tree reduction 30a86c6181SAlex Tomas */ 31a86c6181SAlex Tomas 32a86c6181SAlex Tomas #include <linux/fs.h> 33a86c6181SAlex Tomas #include <linux/time.h> 34cd02ff0bSMingming Cao #include <linux/jbd2.h> 35a86c6181SAlex Tomas #include <linux/highuid.h> 36a86c6181SAlex Tomas #include <linux/pagemap.h> 37a86c6181SAlex Tomas #include <linux/quotaops.h> 38a86c6181SAlex Tomas #include <linux/string.h> 39a86c6181SAlex Tomas #include <linux/slab.h> 40a2df2a63SAmit Arora #include <linux/falloc.h> 41a86c6181SAlex Tomas #include <asm/uaccess.h> 426873fa0dSEric Sandeen #include <linux/fiemap.h> 433dcf5451SChristoph Hellwig #include "ext4_jbd2.h" 444a092d73STheodore Ts'o #include "ext4_extents.h" 45f19d5870STao Ma #include "xattr.h" 46a86c6181SAlex Tomas 470562e0baSJiaying Zhang #include <trace/events/ext4.h> 480562e0baSJiaying Zhang 495f95d21fSLukas Czerner /* 505f95d21fSLukas Czerner * used by extent splitting. 515f95d21fSLukas Czerner */ 525f95d21fSLukas Czerner #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ 535f95d21fSLukas Czerner due to ENOSPC */ 545f95d21fSLukas Czerner #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ 555f95d21fSLukas Czerner #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ 565f95d21fSLukas Czerner 57dee1f973SDmitry Monakhov #define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */ 58dee1f973SDmitry Monakhov #define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */ 59dee1f973SDmitry Monakhov 607ac5990dSDarrick J. Wong static __le32 ext4_extent_block_csum(struct inode *inode, 617ac5990dSDarrick J. Wong struct ext4_extent_header *eh) 627ac5990dSDarrick J. Wong { 637ac5990dSDarrick J. Wong struct ext4_inode_info *ei = EXT4_I(inode); 647ac5990dSDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 657ac5990dSDarrick J. Wong __u32 csum; 667ac5990dSDarrick J. 
Wong 677ac5990dSDarrick J. Wong csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh, 687ac5990dSDarrick J. Wong EXT4_EXTENT_TAIL_OFFSET(eh)); 697ac5990dSDarrick J. Wong return cpu_to_le32(csum); 707ac5990dSDarrick J. Wong } 717ac5990dSDarrick J. Wong 727ac5990dSDarrick J. Wong static int ext4_extent_block_csum_verify(struct inode *inode, 737ac5990dSDarrick J. Wong struct ext4_extent_header *eh) 747ac5990dSDarrick J. Wong { 757ac5990dSDarrick J. Wong struct ext4_extent_tail *et; 767ac5990dSDarrick J. Wong 777ac5990dSDarrick J. Wong if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 787ac5990dSDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) 797ac5990dSDarrick J. Wong return 1; 807ac5990dSDarrick J. Wong 817ac5990dSDarrick J. Wong et = find_ext4_extent_tail(eh); 827ac5990dSDarrick J. Wong if (et->et_checksum != ext4_extent_block_csum(inode, eh)) 837ac5990dSDarrick J. Wong return 0; 847ac5990dSDarrick J. Wong return 1; 857ac5990dSDarrick J. Wong } 867ac5990dSDarrick J. Wong 877ac5990dSDarrick J. Wong static void ext4_extent_block_csum_set(struct inode *inode, 887ac5990dSDarrick J. Wong struct ext4_extent_header *eh) 897ac5990dSDarrick J. Wong { 907ac5990dSDarrick J. Wong struct ext4_extent_tail *et; 917ac5990dSDarrick J. Wong 927ac5990dSDarrick J. Wong if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 937ac5990dSDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) 947ac5990dSDarrick J. Wong return; 957ac5990dSDarrick J. Wong 967ac5990dSDarrick J. Wong et = find_ext4_extent_tail(eh); 977ac5990dSDarrick J. Wong et->et_checksum = ext4_extent_block_csum(inode, eh); 987ac5990dSDarrick J. Wong } 997ac5990dSDarrick J. Wong 100d583fb87SAllison Henderson static int ext4_split_extent(handle_t *handle, 101d583fb87SAllison Henderson struct inode *inode, 102d583fb87SAllison Henderson struct ext4_ext_path *path, 103d583fb87SAllison Henderson struct ext4_map_blocks *map, 104d583fb87SAllison Henderson int split_flag, 105d583fb87SAllison Henderson int flags); 106d583fb87SAllison Henderson 1075f95d21fSLukas Czerner static int ext4_split_extent_at(handle_t *handle, 1085f95d21fSLukas Czerner struct inode *inode, 1095f95d21fSLukas Czerner struct ext4_ext_path *path, 1105f95d21fSLukas Czerner ext4_lblk_t split, 1115f95d21fSLukas Czerner int split_flag, 1125f95d21fSLukas Czerner int flags); 1135f95d21fSLukas Czerner 11491dd8c11SLukas Czerner static int ext4_find_delayed_extent(struct inode *inode, 11569eb33dcSZheng Liu struct extent_status *newes); 11691dd8c11SLukas Czerner 117487caeefSJan Kara static int ext4_ext_truncate_extend_restart(handle_t *handle, 118487caeefSJan Kara struct inode *inode, 119487caeefSJan Kara int needed) 120a86c6181SAlex Tomas { 121a86c6181SAlex Tomas int err; 122a86c6181SAlex Tomas 1230390131bSFrank Mayhar if (!ext4_handle_valid(handle)) 1240390131bSFrank Mayhar return 0; 125a86c6181SAlex Tomas if (handle->h_buffer_credits > needed) 1269102e4faSShen Feng return 0; 1279102e4faSShen Feng err = ext4_journal_extend(handle, needed); 1280123c939STheodore Ts'o if (err <= 0) 1299102e4faSShen Feng return err; 130487caeefSJan Kara err = ext4_truncate_restart_trans(handle, inode, needed); 1310617b83fSDmitry Monakhov if (err == 0) 1320617b83fSDmitry Monakhov err = -EAGAIN; 133487caeefSJan Kara 134487caeefSJan Kara return err; 135a86c6181SAlex Tomas } 136a86c6181SAlex Tomas 137a86c6181SAlex Tomas /* 138a86c6181SAlex Tomas * could return: 139a86c6181SAlex Tomas * - EROFS 140a86c6181SAlex Tomas * - ENOMEM 141a86c6181SAlex Tomas */ 142a86c6181SAlex Tomas static int ext4_ext_get_access(handle_t *handle, 
struct inode *inode, 143a86c6181SAlex Tomas struct ext4_ext_path *path) 144a86c6181SAlex Tomas { 145a86c6181SAlex Tomas if (path->p_bh) { 146a86c6181SAlex Tomas /* path points to block */ 147a86c6181SAlex Tomas return ext4_journal_get_write_access(handle, path->p_bh); 148a86c6181SAlex Tomas } 149a86c6181SAlex Tomas /* path points to leaf/index in inode body */ 150a86c6181SAlex Tomas /* we use in-core data, no need to protect them */ 151a86c6181SAlex Tomas return 0; 152a86c6181SAlex Tomas } 153a86c6181SAlex Tomas 154a86c6181SAlex Tomas /* 155a86c6181SAlex Tomas * could return: 156a86c6181SAlex Tomas * - EROFS 157a86c6181SAlex Tomas * - ENOMEM 158a86c6181SAlex Tomas * - EIO 159a86c6181SAlex Tomas */ 1602656497bSDarrick J. Wong int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle, 1612656497bSDarrick J. Wong struct inode *inode, struct ext4_ext_path *path) 162a86c6181SAlex Tomas { 163a86c6181SAlex Tomas int err; 164a86c6181SAlex Tomas if (path->p_bh) { 1657ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh)); 166a86c6181SAlex Tomas /* path points to block */ 1679ea7a0dfSTheodore Ts'o err = __ext4_handle_dirty_metadata(where, line, handle, 1689ea7a0dfSTheodore Ts'o inode, path->p_bh); 169a86c6181SAlex Tomas } else { 170a86c6181SAlex Tomas /* path points to leaf/index in inode body */ 171a86c6181SAlex Tomas err = ext4_mark_inode_dirty(handle, inode); 172a86c6181SAlex Tomas } 173a86c6181SAlex Tomas return err; 174a86c6181SAlex Tomas } 175a86c6181SAlex Tomas 176f65e6fbaSAlex Tomas static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, 177a86c6181SAlex Tomas struct ext4_ext_path *path, 178725d26d3SAneesh Kumar K.V ext4_lblk_t block) 179a86c6181SAlex Tomas { 180a86c6181SAlex Tomas if (path) { 18181fdbb4aSYongqiang Yang int depth = path->p_depth; 182a86c6181SAlex Tomas struct ext4_extent *ex; 183a86c6181SAlex Tomas 184ad4fb9caSKazuya Mio /* 185ad4fb9caSKazuya Mio * Try to predict block placement assuming that we are 186ad4fb9caSKazuya Mio * filling in a file which will eventually be 187ad4fb9caSKazuya Mio * non-sparse --- i.e., in the case of libbfd writing 188ad4fb9caSKazuya Mio * an ELF object sections out-of-order but in a way 189ad4fb9caSKazuya Mio * the eventually results in a contiguous object or 190ad4fb9caSKazuya Mio * executable file, or some database extending a table 191ad4fb9caSKazuya Mio * space file. However, this is actually somewhat 192ad4fb9caSKazuya Mio * non-ideal if we are writing a sparse file such as 193ad4fb9caSKazuya Mio * qemu or KVM writing a raw image file that is going 194ad4fb9caSKazuya Mio * to stay fairly sparse, since it will end up 195ad4fb9caSKazuya Mio * fragmenting the file system's free space. Maybe we 196ad4fb9caSKazuya Mio * should have some hueristics or some way to allow 197ad4fb9caSKazuya Mio * userspace to pass a hint to file system, 198b8d6568aSTao Ma * especially if the latter case turns out to be 199ad4fb9caSKazuya Mio * common. 
200ad4fb9caSKazuya Mio */ 2017e028976SAvantika Mathur ex = path[depth].p_ext; 202ad4fb9caSKazuya Mio if (ex) { 203ad4fb9caSKazuya Mio ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); 204ad4fb9caSKazuya Mio ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); 205ad4fb9caSKazuya Mio 206ad4fb9caSKazuya Mio if (block > ext_block) 207ad4fb9caSKazuya Mio return ext_pblk + (block - ext_block); 208ad4fb9caSKazuya Mio else 209ad4fb9caSKazuya Mio return ext_pblk - (ext_block - block); 210ad4fb9caSKazuya Mio } 211a86c6181SAlex Tomas 212d0d856e8SRandy Dunlap /* it looks like index is empty; 213d0d856e8SRandy Dunlap * try to find starting block from index itself */ 214a86c6181SAlex Tomas if (path[depth].p_bh) 215a86c6181SAlex Tomas return path[depth].p_bh->b_blocknr; 216a86c6181SAlex Tomas } 217a86c6181SAlex Tomas 218a86c6181SAlex Tomas /* OK. use inode's group */ 219f86186b4SEric Sandeen return ext4_inode_to_goal_block(inode); 220a86c6181SAlex Tomas } 221a86c6181SAlex Tomas 222654b4908SAneesh Kumar K.V /* 223654b4908SAneesh Kumar K.V * Allocation for a meta data block 224654b4908SAneesh Kumar K.V */ 225f65e6fbaSAlex Tomas static ext4_fsblk_t 226654b4908SAneesh Kumar K.V ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, 227a86c6181SAlex Tomas struct ext4_ext_path *path, 22855f020dbSAllison Henderson struct ext4_extent *ex, int *err, unsigned int flags) 229a86c6181SAlex Tomas { 230f65e6fbaSAlex Tomas ext4_fsblk_t goal, newblock; 231a86c6181SAlex Tomas 232a86c6181SAlex Tomas goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); 23355f020dbSAllison Henderson newblock = ext4_new_meta_blocks(handle, inode, goal, flags, 23455f020dbSAllison Henderson NULL, err); 235a86c6181SAlex Tomas return newblock; 236a86c6181SAlex Tomas } 237a86c6181SAlex Tomas 23855ad63bfSTheodore Ts'o static inline int ext4_ext_space_block(struct inode *inode, int check) 239a86c6181SAlex Tomas { 240a86c6181SAlex Tomas int size; 241a86c6181SAlex Tomas 242a86c6181SAlex Tomas size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 243a86c6181SAlex Tomas / sizeof(struct ext4_extent); 244bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 24502dc62fbSYongqiang Yang if (!check && size > 6) 246a86c6181SAlex Tomas size = 6; 247a86c6181SAlex Tomas #endif 248a86c6181SAlex Tomas return size; 249a86c6181SAlex Tomas } 250a86c6181SAlex Tomas 25155ad63bfSTheodore Ts'o static inline int ext4_ext_space_block_idx(struct inode *inode, int check) 252a86c6181SAlex Tomas { 253a86c6181SAlex Tomas int size; 254a86c6181SAlex Tomas 255a86c6181SAlex Tomas size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 256a86c6181SAlex Tomas / sizeof(struct ext4_extent_idx); 257bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 25802dc62fbSYongqiang Yang if (!check && size > 5) 259a86c6181SAlex Tomas size = 5; 260a86c6181SAlex Tomas #endif 261a86c6181SAlex Tomas return size; 262a86c6181SAlex Tomas } 263a86c6181SAlex Tomas 26455ad63bfSTheodore Ts'o static inline int ext4_ext_space_root(struct inode *inode, int check) 265a86c6181SAlex Tomas { 266a86c6181SAlex Tomas int size; 267a86c6181SAlex Tomas 268a86c6181SAlex Tomas size = sizeof(EXT4_I(inode)->i_data); 269a86c6181SAlex Tomas size -= sizeof(struct ext4_extent_header); 270a86c6181SAlex Tomas size /= sizeof(struct ext4_extent); 271bbf2f9fbSRobert P. J. 
Day #ifdef AGGRESSIVE_TEST 27202dc62fbSYongqiang Yang if (!check && size > 3) 273a86c6181SAlex Tomas size = 3; 274a86c6181SAlex Tomas #endif 275a86c6181SAlex Tomas return size; 276a86c6181SAlex Tomas } 277a86c6181SAlex Tomas 27855ad63bfSTheodore Ts'o static inline int ext4_ext_space_root_idx(struct inode *inode, int check) 279a86c6181SAlex Tomas { 280a86c6181SAlex Tomas int size; 281a86c6181SAlex Tomas 282a86c6181SAlex Tomas size = sizeof(EXT4_I(inode)->i_data); 283a86c6181SAlex Tomas size -= sizeof(struct ext4_extent_header); 284a86c6181SAlex Tomas size /= sizeof(struct ext4_extent_idx); 285bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 28602dc62fbSYongqiang Yang if (!check && size > 4) 287a86c6181SAlex Tomas size = 4; 288a86c6181SAlex Tomas #endif 289a86c6181SAlex Tomas return size; 290a86c6181SAlex Tomas } 291a86c6181SAlex Tomas 292d2a17637SMingming Cao /* 293d2a17637SMingming Cao * Calculate the number of metadata blocks needed 294d2a17637SMingming Cao * to allocate @blocks 295d2a17637SMingming Cao * Worse case is one block per extent 296d2a17637SMingming Cao */ 29701f49d0bSTheodore Ts'o int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) 298d2a17637SMingming Cao { 2999d0be502STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 30081fdbb4aSYongqiang Yang int idxs; 301d2a17637SMingming Cao 3029d0be502STheodore Ts'o idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 3039d0be502STheodore Ts'o / sizeof(struct ext4_extent_idx)); 304d2a17637SMingming Cao 305d2a17637SMingming Cao /* 3069d0be502STheodore Ts'o * If the new delayed allocation block is contiguous with the 3079d0be502STheodore Ts'o * previous da block, it can share index blocks with the 3089d0be502STheodore Ts'o * previous block, so we only need to allocate a new index 3099d0be502STheodore Ts'o * block every idxs leaf blocks. At ldxs**2 blocks, we need 3109d0be502STheodore Ts'o * an additional index block, and at ldxs**3 blocks, yet 3119d0be502STheodore Ts'o * another index blocks. 312d2a17637SMingming Cao */ 3139d0be502STheodore Ts'o if (ei->i_da_metadata_calc_len && 3149d0be502STheodore Ts'o ei->i_da_metadata_calc_last_lblock+1 == lblock) { 31581fdbb4aSYongqiang Yang int num = 0; 31681fdbb4aSYongqiang Yang 3179d0be502STheodore Ts'o if ((ei->i_da_metadata_calc_len % idxs) == 0) 3189d0be502STheodore Ts'o num++; 3199d0be502STheodore Ts'o if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0) 3209d0be502STheodore Ts'o num++; 3219d0be502STheodore Ts'o if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) { 3229d0be502STheodore Ts'o num++; 3239d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 3249d0be502STheodore Ts'o } else 3259d0be502STheodore Ts'o ei->i_da_metadata_calc_len++; 3269d0be502STheodore Ts'o ei->i_da_metadata_calc_last_lblock++; 327d2a17637SMingming Cao return num; 328d2a17637SMingming Cao } 329d2a17637SMingming Cao 3309d0be502STheodore Ts'o /* 3319d0be502STheodore Ts'o * In the worst case we need a new set of index blocks at 3329d0be502STheodore Ts'o * every level of the inode's extent tree. 
3339d0be502STheodore Ts'o */ 3349d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 1; 3359d0be502STheodore Ts'o ei->i_da_metadata_calc_last_lblock = lblock; 3369d0be502STheodore Ts'o return ext_depth(inode) + 1; 3379d0be502STheodore Ts'o } 3389d0be502STheodore Ts'o 339c29c0ae7SAlex Tomas static int 340c29c0ae7SAlex Tomas ext4_ext_max_entries(struct inode *inode, int depth) 341c29c0ae7SAlex Tomas { 342c29c0ae7SAlex Tomas int max; 343c29c0ae7SAlex Tomas 344c29c0ae7SAlex Tomas if (depth == ext_depth(inode)) { 345c29c0ae7SAlex Tomas if (depth == 0) 34655ad63bfSTheodore Ts'o max = ext4_ext_space_root(inode, 1); 347c29c0ae7SAlex Tomas else 34855ad63bfSTheodore Ts'o max = ext4_ext_space_root_idx(inode, 1); 349c29c0ae7SAlex Tomas } else { 350c29c0ae7SAlex Tomas if (depth == 0) 35155ad63bfSTheodore Ts'o max = ext4_ext_space_block(inode, 1); 352c29c0ae7SAlex Tomas else 35355ad63bfSTheodore Ts'o max = ext4_ext_space_block_idx(inode, 1); 354c29c0ae7SAlex Tomas } 355c29c0ae7SAlex Tomas 356c29c0ae7SAlex Tomas return max; 357c29c0ae7SAlex Tomas } 358c29c0ae7SAlex Tomas 35956b19868SAneesh Kumar K.V static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) 36056b19868SAneesh Kumar K.V { 361bf89d16fSTheodore Ts'o ext4_fsblk_t block = ext4_ext_pblock(ext); 36256b19868SAneesh Kumar K.V int len = ext4_ext_get_actual_len(ext); 363e84a26ceSTheodore Ts'o 36431d4f3a2STheodore Ts'o if (len == 0) 36531d4f3a2STheodore Ts'o return 0; 3666fd058f7STheodore Ts'o return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); 36756b19868SAneesh Kumar K.V } 36856b19868SAneesh Kumar K.V 36956b19868SAneesh Kumar K.V static int ext4_valid_extent_idx(struct inode *inode, 37056b19868SAneesh Kumar K.V struct ext4_extent_idx *ext_idx) 37156b19868SAneesh Kumar K.V { 372bf89d16fSTheodore Ts'o ext4_fsblk_t block = ext4_idx_pblock(ext_idx); 373e84a26ceSTheodore Ts'o 3746fd058f7STheodore Ts'o return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); 37556b19868SAneesh Kumar K.V } 37656b19868SAneesh Kumar K.V 37756b19868SAneesh Kumar K.V static int ext4_valid_extent_entries(struct inode *inode, 37856b19868SAneesh Kumar K.V struct ext4_extent_header *eh, 37956b19868SAneesh Kumar K.V int depth) 38056b19868SAneesh Kumar K.V { 38156b19868SAneesh Kumar K.V unsigned short entries; 38256b19868SAneesh Kumar K.V if (eh->eh_entries == 0) 38356b19868SAneesh Kumar K.V return 1; 38456b19868SAneesh Kumar K.V 38556b19868SAneesh Kumar K.V entries = le16_to_cpu(eh->eh_entries); 38656b19868SAneesh Kumar K.V 38756b19868SAneesh Kumar K.V if (depth == 0) { 38856b19868SAneesh Kumar K.V /* leaf entries */ 38981fdbb4aSYongqiang Yang struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); 39056b19868SAneesh Kumar K.V while (entries) { 39156b19868SAneesh Kumar K.V if (!ext4_valid_extent(inode, ext)) 39256b19868SAneesh Kumar K.V return 0; 39356b19868SAneesh Kumar K.V ext++; 39456b19868SAneesh Kumar K.V entries--; 39556b19868SAneesh Kumar K.V } 39656b19868SAneesh Kumar K.V } else { 39781fdbb4aSYongqiang Yang struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); 39856b19868SAneesh Kumar K.V while (entries) { 39956b19868SAneesh Kumar K.V if (!ext4_valid_extent_idx(inode, ext_idx)) 40056b19868SAneesh Kumar K.V return 0; 40156b19868SAneesh Kumar K.V ext_idx++; 40256b19868SAneesh Kumar K.V entries--; 40356b19868SAneesh Kumar K.V } 40456b19868SAneesh Kumar K.V } 40556b19868SAneesh Kumar K.V return 1; 40656b19868SAneesh Kumar K.V } 40756b19868SAneesh Kumar K.V 408c398eda0STheodore Ts'o static int __ext4_ext_check(const char *function, unsigned int line, 
409c398eda0STheodore Ts'o struct inode *inode, struct ext4_extent_header *eh, 410c29c0ae7SAlex Tomas int depth) 411c29c0ae7SAlex Tomas { 412c29c0ae7SAlex Tomas const char *error_msg; 413c29c0ae7SAlex Tomas int max = 0; 414c29c0ae7SAlex Tomas 415c29c0ae7SAlex Tomas if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { 416c29c0ae7SAlex Tomas error_msg = "invalid magic"; 417c29c0ae7SAlex Tomas goto corrupted; 418c29c0ae7SAlex Tomas } 419c29c0ae7SAlex Tomas if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { 420c29c0ae7SAlex Tomas error_msg = "unexpected eh_depth"; 421c29c0ae7SAlex Tomas goto corrupted; 422c29c0ae7SAlex Tomas } 423c29c0ae7SAlex Tomas if (unlikely(eh->eh_max == 0)) { 424c29c0ae7SAlex Tomas error_msg = "invalid eh_max"; 425c29c0ae7SAlex Tomas goto corrupted; 426c29c0ae7SAlex Tomas } 427c29c0ae7SAlex Tomas max = ext4_ext_max_entries(inode, depth); 428c29c0ae7SAlex Tomas if (unlikely(le16_to_cpu(eh->eh_max) > max)) { 429c29c0ae7SAlex Tomas error_msg = "too large eh_max"; 430c29c0ae7SAlex Tomas goto corrupted; 431c29c0ae7SAlex Tomas } 432c29c0ae7SAlex Tomas if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { 433c29c0ae7SAlex Tomas error_msg = "invalid eh_entries"; 434c29c0ae7SAlex Tomas goto corrupted; 435c29c0ae7SAlex Tomas } 43656b19868SAneesh Kumar K.V if (!ext4_valid_extent_entries(inode, eh, depth)) { 43756b19868SAneesh Kumar K.V error_msg = "invalid extent entries"; 43856b19868SAneesh Kumar K.V goto corrupted; 43956b19868SAneesh Kumar K.V } 4407ac5990dSDarrick J. Wong /* Verify checksum on non-root extent tree nodes */ 4417ac5990dSDarrick J. Wong if (ext_depth(inode) != depth && 4427ac5990dSDarrick J. Wong !ext4_extent_block_csum_verify(inode, eh)) { 4437ac5990dSDarrick J. Wong error_msg = "extent tree corrupted"; 4447ac5990dSDarrick J. Wong goto corrupted; 4457ac5990dSDarrick J. Wong } 446c29c0ae7SAlex Tomas return 0; 447c29c0ae7SAlex Tomas 448c29c0ae7SAlex Tomas corrupted: 449c398eda0STheodore Ts'o ext4_error_inode(inode, function, line, 0, 45024676da4STheodore Ts'o "bad header/extent: %s - magic %x, " 451c29c0ae7SAlex Tomas "entries %u, max %u(%u), depth %u(%u)", 45224676da4STheodore Ts'o error_msg, le16_to_cpu(eh->eh_magic), 453c29c0ae7SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), 454c29c0ae7SAlex Tomas max, le16_to_cpu(eh->eh_depth), depth); 455c29c0ae7SAlex Tomas 456c29c0ae7SAlex Tomas return -EIO; 457c29c0ae7SAlex Tomas } 458c29c0ae7SAlex Tomas 45956b19868SAneesh Kumar K.V #define ext4_ext_check(inode, eh, depth) \ 460c398eda0STheodore Ts'o __ext4_ext_check(__func__, __LINE__, inode, eh, depth) 461c29c0ae7SAlex Tomas 4627a262f7cSAneesh Kumar K.V int ext4_ext_check_inode(struct inode *inode) 4637a262f7cSAneesh Kumar K.V { 4647a262f7cSAneesh Kumar K.V return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); 4657a262f7cSAneesh Kumar K.V } 4667a262f7cSAneesh Kumar K.V 467f8489128SDarrick J. Wong static int __ext4_ext_check_block(const char *function, unsigned int line, 468f8489128SDarrick J. Wong struct inode *inode, 469f8489128SDarrick J. Wong struct ext4_extent_header *eh, 470f8489128SDarrick J. Wong int depth, 471f8489128SDarrick J. Wong struct buffer_head *bh) 472f8489128SDarrick J. Wong { 473f8489128SDarrick J. Wong int ret; 474f8489128SDarrick J. Wong 475f8489128SDarrick J. Wong if (buffer_verified(bh)) 476f8489128SDarrick J. Wong return 0; 477f8489128SDarrick J. Wong ret = ext4_ext_check(inode, eh, depth); 478f8489128SDarrick J. Wong if (ret) 479f8489128SDarrick J. Wong return ret; 480f8489128SDarrick J. 
Wong set_buffer_verified(bh); 481f8489128SDarrick J. Wong return ret; 482f8489128SDarrick J. Wong } 483f8489128SDarrick J. Wong 484f8489128SDarrick J. Wong #define ext4_ext_check_block(inode, eh, depth, bh) \ 485f8489128SDarrick J. Wong __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh) 486f8489128SDarrick J. Wong 487a86c6181SAlex Tomas #ifdef EXT_DEBUG 488a86c6181SAlex Tomas static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) 489a86c6181SAlex Tomas { 490a86c6181SAlex Tomas int k, l = path->p_depth; 491a86c6181SAlex Tomas 492a86c6181SAlex Tomas ext_debug("path:"); 493a86c6181SAlex Tomas for (k = 0; k <= l; k++, path++) { 494a86c6181SAlex Tomas if (path->p_idx) { 4952ae02107SMingming Cao ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), 496bf89d16fSTheodore Ts'o ext4_idx_pblock(path->p_idx)); 497a86c6181SAlex Tomas } else if (path->p_ext) { 498553f9008SMingming ext_debug(" %d:[%d]%d:%llu ", 499a86c6181SAlex Tomas le32_to_cpu(path->p_ext->ee_block), 500553f9008SMingming ext4_ext_is_uninitialized(path->p_ext), 501a2df2a63SAmit Arora ext4_ext_get_actual_len(path->p_ext), 502bf89d16fSTheodore Ts'o ext4_ext_pblock(path->p_ext)); 503a86c6181SAlex Tomas } else 504a86c6181SAlex Tomas ext_debug(" []"); 505a86c6181SAlex Tomas } 506a86c6181SAlex Tomas ext_debug("\n"); 507a86c6181SAlex Tomas } 508a86c6181SAlex Tomas 509a86c6181SAlex Tomas static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) 510a86c6181SAlex Tomas { 511a86c6181SAlex Tomas int depth = ext_depth(inode); 512a86c6181SAlex Tomas struct ext4_extent_header *eh; 513a86c6181SAlex Tomas struct ext4_extent *ex; 514a86c6181SAlex Tomas int i; 515a86c6181SAlex Tomas 516a86c6181SAlex Tomas if (!path) 517a86c6181SAlex Tomas return; 518a86c6181SAlex Tomas 519a86c6181SAlex Tomas eh = path[depth].p_hdr; 520a86c6181SAlex Tomas ex = EXT_FIRST_EXTENT(eh); 521a86c6181SAlex Tomas 522553f9008SMingming ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); 523553f9008SMingming 524a86c6181SAlex Tomas for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { 525553f9008SMingming ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), 526553f9008SMingming ext4_ext_is_uninitialized(ex), 527bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); 528a86c6181SAlex Tomas } 529a86c6181SAlex Tomas ext_debug("\n"); 530a86c6181SAlex Tomas } 5311b16da77SYongqiang Yang 5321b16da77SYongqiang Yang static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, 5331b16da77SYongqiang Yang ext4_fsblk_t newblock, int level) 5341b16da77SYongqiang Yang { 5351b16da77SYongqiang Yang int depth = ext_depth(inode); 5361b16da77SYongqiang Yang struct ext4_extent *ex; 5371b16da77SYongqiang Yang 5381b16da77SYongqiang Yang if (depth != level) { 5391b16da77SYongqiang Yang struct ext4_extent_idx *idx; 5401b16da77SYongqiang Yang idx = path[level].p_idx; 5411b16da77SYongqiang Yang while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { 5421b16da77SYongqiang Yang ext_debug("%d: move %d:%llu in new index %llu\n", level, 5431b16da77SYongqiang Yang le32_to_cpu(idx->ei_block), 5441b16da77SYongqiang Yang ext4_idx_pblock(idx), 5451b16da77SYongqiang Yang newblock); 5461b16da77SYongqiang Yang idx++; 5471b16da77SYongqiang Yang } 5481b16da77SYongqiang Yang 5491b16da77SYongqiang Yang return; 5501b16da77SYongqiang Yang } 5511b16da77SYongqiang Yang 5521b16da77SYongqiang Yang ex = path[depth].p_ext; 5531b16da77SYongqiang Yang while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { 
5541b16da77SYongqiang Yang ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", 5551b16da77SYongqiang Yang le32_to_cpu(ex->ee_block), 5561b16da77SYongqiang Yang ext4_ext_pblock(ex), 5571b16da77SYongqiang Yang ext4_ext_is_uninitialized(ex), 5581b16da77SYongqiang Yang ext4_ext_get_actual_len(ex), 5591b16da77SYongqiang Yang newblock); 5601b16da77SYongqiang Yang ex++; 5611b16da77SYongqiang Yang } 5621b16da77SYongqiang Yang } 5631b16da77SYongqiang Yang 564a86c6181SAlex Tomas #else 565a86c6181SAlex Tomas #define ext4_ext_show_path(inode, path) 566a86c6181SAlex Tomas #define ext4_ext_show_leaf(inode, path) 5671b16da77SYongqiang Yang #define ext4_ext_show_move(inode, path, newblock, level) 568a86c6181SAlex Tomas #endif 569a86c6181SAlex Tomas 570b35905c1SAneesh Kumar K.V void ext4_ext_drop_refs(struct ext4_ext_path *path) 571a86c6181SAlex Tomas { 572a86c6181SAlex Tomas int depth = path->p_depth; 573a86c6181SAlex Tomas int i; 574a86c6181SAlex Tomas 575a86c6181SAlex Tomas for (i = 0; i <= depth; i++, path++) 576a86c6181SAlex Tomas if (path->p_bh) { 577a86c6181SAlex Tomas brelse(path->p_bh); 578a86c6181SAlex Tomas path->p_bh = NULL; 579a86c6181SAlex Tomas } 580a86c6181SAlex Tomas } 581a86c6181SAlex Tomas 582a86c6181SAlex Tomas /* 583d0d856e8SRandy Dunlap * ext4_ext_binsearch_idx: 584d0d856e8SRandy Dunlap * binary search for the closest index of the given block 585c29c0ae7SAlex Tomas * the header must be checked before calling this 586a86c6181SAlex Tomas */ 587a86c6181SAlex Tomas static void 588725d26d3SAneesh Kumar K.V ext4_ext_binsearch_idx(struct inode *inode, 589725d26d3SAneesh Kumar K.V struct ext4_ext_path *path, ext4_lblk_t block) 590a86c6181SAlex Tomas { 591a86c6181SAlex Tomas struct ext4_extent_header *eh = path->p_hdr; 592a86c6181SAlex Tomas struct ext4_extent_idx *r, *l, *m; 593a86c6181SAlex Tomas 594a86c6181SAlex Tomas 595bba90743SEric Sandeen ext_debug("binsearch for %u(idx): ", block); 596a86c6181SAlex Tomas 597a86c6181SAlex Tomas l = EXT_FIRST_INDEX(eh) + 1; 598e9f410b1SDmitry Monakhov r = EXT_LAST_INDEX(eh); 599a86c6181SAlex Tomas while (l <= r) { 600a86c6181SAlex Tomas m = l + (r - l) / 2; 601a86c6181SAlex Tomas if (block < le32_to_cpu(m->ei_block)) 602a86c6181SAlex Tomas r = m - 1; 603a86c6181SAlex Tomas else 604a86c6181SAlex Tomas l = m + 1; 60526d535edSDmitry Monakhov ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), 60626d535edSDmitry Monakhov m, le32_to_cpu(m->ei_block), 60726d535edSDmitry Monakhov r, le32_to_cpu(r->ei_block)); 608a86c6181SAlex Tomas } 609a86c6181SAlex Tomas 610a86c6181SAlex Tomas path->p_idx = l - 1; 6114a3c3a51SZheng Liu ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block), 612bf89d16fSTheodore Ts'o ext4_idx_pblock(path->p_idx)); 613a86c6181SAlex Tomas 614a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 615a86c6181SAlex Tomas { 616a86c6181SAlex Tomas struct ext4_extent_idx *chix, *ix; 617a86c6181SAlex Tomas int k; 618a86c6181SAlex Tomas 619a86c6181SAlex Tomas chix = ix = EXT_FIRST_INDEX(eh); 620a86c6181SAlex Tomas for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { 621a86c6181SAlex Tomas if (k != 0 && 622a86c6181SAlex Tomas le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { 6234776004fSTheodore Ts'o printk(KERN_DEBUG "k=%d, ix=0x%p, " 6244776004fSTheodore Ts'o "first=0x%p\n", k, 625a86c6181SAlex Tomas ix, EXT_FIRST_INDEX(eh)); 6264776004fSTheodore Ts'o printk(KERN_DEBUG "%u <= %u\n", 627a86c6181SAlex Tomas le32_to_cpu(ix->ei_block), 628a86c6181SAlex Tomas le32_to_cpu(ix[-1].ei_block)); 629a86c6181SAlex Tomas } 630a86c6181SAlex 
Tomas BUG_ON(k && le32_to_cpu(ix->ei_block) 631a86c6181SAlex Tomas <= le32_to_cpu(ix[-1].ei_block)); 632a86c6181SAlex Tomas if (block < le32_to_cpu(ix->ei_block)) 633a86c6181SAlex Tomas break; 634a86c6181SAlex Tomas chix = ix; 635a86c6181SAlex Tomas } 636a86c6181SAlex Tomas BUG_ON(chix != path->p_idx); 637a86c6181SAlex Tomas } 638a86c6181SAlex Tomas #endif 639a86c6181SAlex Tomas 640a86c6181SAlex Tomas } 641a86c6181SAlex Tomas 642a86c6181SAlex Tomas /* 643d0d856e8SRandy Dunlap * ext4_ext_binsearch: 644d0d856e8SRandy Dunlap * binary search for closest extent of the given block 645c29c0ae7SAlex Tomas * the header must be checked before calling this 646a86c6181SAlex Tomas */ 647a86c6181SAlex Tomas static void 648725d26d3SAneesh Kumar K.V ext4_ext_binsearch(struct inode *inode, 649725d26d3SAneesh Kumar K.V struct ext4_ext_path *path, ext4_lblk_t block) 650a86c6181SAlex Tomas { 651a86c6181SAlex Tomas struct ext4_extent_header *eh = path->p_hdr; 652a86c6181SAlex Tomas struct ext4_extent *r, *l, *m; 653a86c6181SAlex Tomas 654a86c6181SAlex Tomas if (eh->eh_entries == 0) { 655a86c6181SAlex Tomas /* 656d0d856e8SRandy Dunlap * this leaf is empty: 657a86c6181SAlex Tomas * we get such a leaf in split/add case 658a86c6181SAlex Tomas */ 659a86c6181SAlex Tomas return; 660a86c6181SAlex Tomas } 661a86c6181SAlex Tomas 662bba90743SEric Sandeen ext_debug("binsearch for %u: ", block); 663a86c6181SAlex Tomas 664a86c6181SAlex Tomas l = EXT_FIRST_EXTENT(eh) + 1; 665e9f410b1SDmitry Monakhov r = EXT_LAST_EXTENT(eh); 666a86c6181SAlex Tomas 667a86c6181SAlex Tomas while (l <= r) { 668a86c6181SAlex Tomas m = l + (r - l) / 2; 669a86c6181SAlex Tomas if (block < le32_to_cpu(m->ee_block)) 670a86c6181SAlex Tomas r = m - 1; 671a86c6181SAlex Tomas else 672a86c6181SAlex Tomas l = m + 1; 67326d535edSDmitry Monakhov ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), 67426d535edSDmitry Monakhov m, le32_to_cpu(m->ee_block), 67526d535edSDmitry Monakhov r, le32_to_cpu(r->ee_block)); 676a86c6181SAlex Tomas } 677a86c6181SAlex Tomas 678a86c6181SAlex Tomas path->p_ext = l - 1; 679553f9008SMingming ext_debug(" -> %d:%llu:[%d]%d ", 680a86c6181SAlex Tomas le32_to_cpu(path->p_ext->ee_block), 681bf89d16fSTheodore Ts'o ext4_ext_pblock(path->p_ext), 682553f9008SMingming ext4_ext_is_uninitialized(path->p_ext), 683a2df2a63SAmit Arora ext4_ext_get_actual_len(path->p_ext)); 684a86c6181SAlex Tomas 685a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 686a86c6181SAlex Tomas { 687a86c6181SAlex Tomas struct ext4_extent *chex, *ex; 688a86c6181SAlex Tomas int k; 689a86c6181SAlex Tomas 690a86c6181SAlex Tomas chex = ex = EXT_FIRST_EXTENT(eh); 691a86c6181SAlex Tomas for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { 692a86c6181SAlex Tomas BUG_ON(k && le32_to_cpu(ex->ee_block) 693a86c6181SAlex Tomas <= le32_to_cpu(ex[-1].ee_block)); 694a86c6181SAlex Tomas if (block < le32_to_cpu(ex->ee_block)) 695a86c6181SAlex Tomas break; 696a86c6181SAlex Tomas chex = ex; 697a86c6181SAlex Tomas } 698a86c6181SAlex Tomas BUG_ON(chex != path->p_ext); 699a86c6181SAlex Tomas } 700a86c6181SAlex Tomas #endif 701a86c6181SAlex Tomas 702a86c6181SAlex Tomas } 703a86c6181SAlex Tomas 704a86c6181SAlex Tomas int ext4_ext_tree_init(handle_t *handle, struct inode *inode) 705a86c6181SAlex Tomas { 706a86c6181SAlex Tomas struct ext4_extent_header *eh; 707a86c6181SAlex Tomas 708a86c6181SAlex Tomas eh = ext_inode_hdr(inode); 709a86c6181SAlex Tomas eh->eh_depth = 0; 710a86c6181SAlex Tomas eh->eh_entries = 0; 711a86c6181SAlex Tomas eh->eh_magic = EXT4_EXT_MAGIC; 71255ad63bfSTheodore 
Ts'o eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); 713a86c6181SAlex Tomas ext4_mark_inode_dirty(handle, inode); 714a86c6181SAlex Tomas return 0; 715a86c6181SAlex Tomas } 716a86c6181SAlex Tomas 717a86c6181SAlex Tomas struct ext4_ext_path * 718725d26d3SAneesh Kumar K.V ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, 719725d26d3SAneesh Kumar K.V struct ext4_ext_path *path) 720a86c6181SAlex Tomas { 721a86c6181SAlex Tomas struct ext4_extent_header *eh; 722a86c6181SAlex Tomas struct buffer_head *bh; 723a86c6181SAlex Tomas short int depth, i, ppos = 0, alloc = 0; 724860d21e2STheodore Ts'o int ret; 725a86c6181SAlex Tomas 726a86c6181SAlex Tomas eh = ext_inode_hdr(inode); 727c29c0ae7SAlex Tomas depth = ext_depth(inode); 728a86c6181SAlex Tomas 729a86c6181SAlex Tomas /* account possible depth increase */ 730a86c6181SAlex Tomas if (!path) { 7315d4958f9SAvantika Mathur path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), 732a86c6181SAlex Tomas GFP_NOFS); 733a86c6181SAlex Tomas if (!path) 734a86c6181SAlex Tomas return ERR_PTR(-ENOMEM); 735a86c6181SAlex Tomas alloc = 1; 736a86c6181SAlex Tomas } 737a86c6181SAlex Tomas path[0].p_hdr = eh; 7381973adcbSShen Feng path[0].p_bh = NULL; 739a86c6181SAlex Tomas 740c29c0ae7SAlex Tomas i = depth; 741a86c6181SAlex Tomas /* walk through the tree */ 742a86c6181SAlex Tomas while (i) { 743a86c6181SAlex Tomas ext_debug("depth %d: num %d, max %d\n", 744a86c6181SAlex Tomas ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 745c29c0ae7SAlex Tomas 746a86c6181SAlex Tomas ext4_ext_binsearch_idx(inode, path + ppos, block); 747bf89d16fSTheodore Ts'o path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); 748a86c6181SAlex Tomas path[ppos].p_depth = i; 749a86c6181SAlex Tomas path[ppos].p_ext = NULL; 750a86c6181SAlex Tomas 7517a262f7cSAneesh Kumar K.V bh = sb_getblk(inode->i_sb, path[ppos].p_block); 752860d21e2STheodore Ts'o if (unlikely(!bh)) { 753860d21e2STheodore Ts'o ret = -ENOMEM; 754a86c6181SAlex Tomas goto err; 755860d21e2STheodore Ts'o } 7567a262f7cSAneesh Kumar K.V if (!bh_uptodate_or_lock(bh)) { 7570562e0baSJiaying Zhang trace_ext4_ext_load_extent(inode, block, 7580562e0baSJiaying Zhang path[ppos].p_block); 759860d21e2STheodore Ts'o ret = bh_submit_read(bh); 760860d21e2STheodore Ts'o if (ret < 0) { 7617a262f7cSAneesh Kumar K.V put_bh(bh); 7627a262f7cSAneesh Kumar K.V goto err; 7637a262f7cSAneesh Kumar K.V } 7647a262f7cSAneesh Kumar K.V } 765a86c6181SAlex Tomas eh = ext_block_hdr(bh); 766a86c6181SAlex Tomas ppos++; 767273df556SFrank Mayhar if (unlikely(ppos > depth)) { 768273df556SFrank Mayhar put_bh(bh); 769273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 770273df556SFrank Mayhar "ppos %d > depth %d", ppos, depth); 771860d21e2STheodore Ts'o ret = -EIO; 772273df556SFrank Mayhar goto err; 773273df556SFrank Mayhar } 774a86c6181SAlex Tomas path[ppos].p_bh = bh; 775a86c6181SAlex Tomas path[ppos].p_hdr = eh; 776a86c6181SAlex Tomas i--; 777a86c6181SAlex Tomas 778860d21e2STheodore Ts'o ret = ext4_ext_check_block(inode, eh, i, bh); 779860d21e2STheodore Ts'o if (ret < 0) 780a86c6181SAlex Tomas goto err; 781a86c6181SAlex Tomas } 782a86c6181SAlex Tomas 783a86c6181SAlex Tomas path[ppos].p_depth = i; 784a86c6181SAlex Tomas path[ppos].p_ext = NULL; 785a86c6181SAlex Tomas path[ppos].p_idx = NULL; 786a86c6181SAlex Tomas 787a86c6181SAlex Tomas /* find extent */ 788a86c6181SAlex Tomas ext4_ext_binsearch(inode, path + ppos, block); 7891973adcbSShen Feng /* if not an empty leaf */ 7901973adcbSShen Feng if (path[ppos].p_ext) 791bf89d16fSTheodore Ts'o 
path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); 792a86c6181SAlex Tomas 793a86c6181SAlex Tomas ext4_ext_show_path(inode, path); 794a86c6181SAlex Tomas 795a86c6181SAlex Tomas return path; 796a86c6181SAlex Tomas 797a86c6181SAlex Tomas err: 798a86c6181SAlex Tomas ext4_ext_drop_refs(path); 799a86c6181SAlex Tomas if (alloc) 800a86c6181SAlex Tomas kfree(path); 801860d21e2STheodore Ts'o return ERR_PTR(ret); 802a86c6181SAlex Tomas } 803a86c6181SAlex Tomas 804a86c6181SAlex Tomas /* 805d0d856e8SRandy Dunlap * ext4_ext_insert_index: 806d0d856e8SRandy Dunlap * insert new index [@logical;@ptr] into the block at @curp; 807d0d856e8SRandy Dunlap * check where to insert: before @curp or after @curp 808a86c6181SAlex Tomas */ 8091f109d5aSTheodore Ts'o static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, 810a86c6181SAlex Tomas struct ext4_ext_path *curp, 811f65e6fbaSAlex Tomas int logical, ext4_fsblk_t ptr) 812a86c6181SAlex Tomas { 813a86c6181SAlex Tomas struct ext4_extent_idx *ix; 814a86c6181SAlex Tomas int len, err; 815a86c6181SAlex Tomas 8167e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, curp); 8177e028976SAvantika Mathur if (err) 818a86c6181SAlex Tomas return err; 819a86c6181SAlex Tomas 820273df556SFrank Mayhar if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { 821273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 822273df556SFrank Mayhar "logical %d == ei_block %d!", 823273df556SFrank Mayhar logical, le32_to_cpu(curp->p_idx->ei_block)); 824273df556SFrank Mayhar return -EIO; 825273df556SFrank Mayhar } 826d4620315SRobin Dong 827d4620315SRobin Dong if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) 828d4620315SRobin Dong >= le16_to_cpu(curp->p_hdr->eh_max))) { 829d4620315SRobin Dong EXT4_ERROR_INODE(inode, 830d4620315SRobin Dong "eh_entries %d >= eh_max %d!", 831d4620315SRobin Dong le16_to_cpu(curp->p_hdr->eh_entries), 832d4620315SRobin Dong le16_to_cpu(curp->p_hdr->eh_max)); 833d4620315SRobin Dong return -EIO; 834d4620315SRobin Dong } 835d4620315SRobin Dong 836a86c6181SAlex Tomas if (logical > le32_to_cpu(curp->p_idx->ei_block)) { 837a86c6181SAlex Tomas /* insert after */ 83880e675f9SEric Gouriou ext_debug("insert new index %d after: %llu\n", logical, ptr); 839a86c6181SAlex Tomas ix = curp->p_idx + 1; 840a86c6181SAlex Tomas } else { 841a86c6181SAlex Tomas /* insert before */ 84280e675f9SEric Gouriou ext_debug("insert new index %d before: %llu\n", logical, ptr); 843a86c6181SAlex Tomas ix = curp->p_idx; 844a86c6181SAlex Tomas } 845a86c6181SAlex Tomas 84680e675f9SEric Gouriou len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; 84780e675f9SEric Gouriou BUG_ON(len < 0); 84880e675f9SEric Gouriou if (len > 0) { 84980e675f9SEric Gouriou ext_debug("insert new index %d: " 85080e675f9SEric Gouriou "move %d indices from 0x%p to 0x%p\n", 85180e675f9SEric Gouriou logical, len, ix, ix + 1); 85280e675f9SEric Gouriou memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); 85380e675f9SEric Gouriou } 85480e675f9SEric Gouriou 855f472e026STao Ma if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { 856f472e026STao Ma EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); 857f472e026STao Ma return -EIO; 858f472e026STao Ma } 859f472e026STao Ma 860a86c6181SAlex Tomas ix->ei_block = cpu_to_le32(logical); 861f65e6fbaSAlex Tomas ext4_idx_store_pblock(ix, ptr); 862e8546d06SMarcin Slusarz le16_add_cpu(&curp->p_hdr->eh_entries, 1); 863a86c6181SAlex Tomas 864273df556SFrank Mayhar if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { 865273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); 
866273df556SFrank Mayhar return -EIO; 867273df556SFrank Mayhar } 868a86c6181SAlex Tomas 869a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, curp); 870a86c6181SAlex Tomas ext4_std_error(inode->i_sb, err); 871a86c6181SAlex Tomas 872a86c6181SAlex Tomas return err; 873a86c6181SAlex Tomas } 874a86c6181SAlex Tomas 875a86c6181SAlex Tomas /* 876d0d856e8SRandy Dunlap * ext4_ext_split: 877d0d856e8SRandy Dunlap * inserts new subtree into the path, using free index entry 878d0d856e8SRandy Dunlap * at depth @at: 879a86c6181SAlex Tomas * - allocates all needed blocks (new leaf and all intermediate index blocks) 880a86c6181SAlex Tomas * - makes decision where to split 881d0d856e8SRandy Dunlap * - moves remaining extents and index entries (right to the split point) 882a86c6181SAlex Tomas * into the newly allocated blocks 883d0d856e8SRandy Dunlap * - initializes subtree 884a86c6181SAlex Tomas */ 885a86c6181SAlex Tomas static int ext4_ext_split(handle_t *handle, struct inode *inode, 88655f020dbSAllison Henderson unsigned int flags, 887a86c6181SAlex Tomas struct ext4_ext_path *path, 888a86c6181SAlex Tomas struct ext4_extent *newext, int at) 889a86c6181SAlex Tomas { 890a86c6181SAlex Tomas struct buffer_head *bh = NULL; 891a86c6181SAlex Tomas int depth = ext_depth(inode); 892a86c6181SAlex Tomas struct ext4_extent_header *neh; 893a86c6181SAlex Tomas struct ext4_extent_idx *fidx; 894a86c6181SAlex Tomas int i = at, k, m, a; 895f65e6fbaSAlex Tomas ext4_fsblk_t newblock, oldblock; 896a86c6181SAlex Tomas __le32 border; 897f65e6fbaSAlex Tomas ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ 898a86c6181SAlex Tomas int err = 0; 899a86c6181SAlex Tomas 900a86c6181SAlex Tomas /* make decision: where to split? */ 901d0d856e8SRandy Dunlap /* FIXME: now decision is simplest: at current extent */ 902a86c6181SAlex Tomas 903d0d856e8SRandy Dunlap /* if current leaf will be split, then we should use 904a86c6181SAlex Tomas * border from split point */ 905273df556SFrank Mayhar if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { 906273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); 907273df556SFrank Mayhar return -EIO; 908273df556SFrank Mayhar } 909a86c6181SAlex Tomas if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { 910a86c6181SAlex Tomas border = path[depth].p_ext[1].ee_block; 911d0d856e8SRandy Dunlap ext_debug("leaf will be split." 912a86c6181SAlex Tomas " next leaf starts at %d\n", 913a86c6181SAlex Tomas le32_to_cpu(border)); 914a86c6181SAlex Tomas } else { 915a86c6181SAlex Tomas border = newext->ee_block; 916a86c6181SAlex Tomas ext_debug("leaf will be added." 917a86c6181SAlex Tomas " next leaf starts at %d\n", 918a86c6181SAlex Tomas le32_to_cpu(border)); 919a86c6181SAlex Tomas } 920a86c6181SAlex Tomas 921a86c6181SAlex Tomas /* 922d0d856e8SRandy Dunlap * If error occurs, then we break processing 923d0d856e8SRandy Dunlap * and mark filesystem read-only. index won't 924a86c6181SAlex Tomas * be inserted and tree will be in consistent 925d0d856e8SRandy Dunlap * state. Next mount will repair buffers too. 926a86c6181SAlex Tomas */ 927a86c6181SAlex Tomas 928a86c6181SAlex Tomas /* 929d0d856e8SRandy Dunlap * Get array to track all allocated blocks. 930d0d856e8SRandy Dunlap * We need this to handle errors and free blocks 931d0d856e8SRandy Dunlap * upon them. 
932a86c6181SAlex Tomas */ 9335d4958f9SAvantika Mathur ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); 934a86c6181SAlex Tomas if (!ablocks) 935a86c6181SAlex Tomas return -ENOMEM; 936a86c6181SAlex Tomas 937a86c6181SAlex Tomas /* allocate all needed blocks */ 938a86c6181SAlex Tomas ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); 939a86c6181SAlex Tomas for (a = 0; a < depth - at; a++) { 940654b4908SAneesh Kumar K.V newblock = ext4_ext_new_meta_block(handle, inode, path, 94155f020dbSAllison Henderson newext, &err, flags); 942a86c6181SAlex Tomas if (newblock == 0) 943a86c6181SAlex Tomas goto cleanup; 944a86c6181SAlex Tomas ablocks[a] = newblock; 945a86c6181SAlex Tomas } 946a86c6181SAlex Tomas 947a86c6181SAlex Tomas /* initialize new leaf */ 948a86c6181SAlex Tomas newblock = ablocks[--a]; 949273df556SFrank Mayhar if (unlikely(newblock == 0)) { 950273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "newblock == 0!"); 951273df556SFrank Mayhar err = -EIO; 952273df556SFrank Mayhar goto cleanup; 953273df556SFrank Mayhar } 954a86c6181SAlex Tomas bh = sb_getblk(inode->i_sb, newblock); 955aebf0243SWang Shilong if (unlikely(!bh)) { 956860d21e2STheodore Ts'o err = -ENOMEM; 957a86c6181SAlex Tomas goto cleanup; 958a86c6181SAlex Tomas } 959a86c6181SAlex Tomas lock_buffer(bh); 960a86c6181SAlex Tomas 9617e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 9627e028976SAvantika Mathur if (err) 963a86c6181SAlex Tomas goto cleanup; 964a86c6181SAlex Tomas 965a86c6181SAlex Tomas neh = ext_block_hdr(bh); 966a86c6181SAlex Tomas neh->eh_entries = 0; 96755ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 968a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 969a86c6181SAlex Tomas neh->eh_depth = 0; 970a86c6181SAlex Tomas 971d0d856e8SRandy Dunlap /* move remainder of path[depth] to the new leaf */ 972273df556SFrank Mayhar if (unlikely(path[depth].p_hdr->eh_entries != 973273df556SFrank Mayhar path[depth].p_hdr->eh_max)) { 974273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", 975273df556SFrank Mayhar path[depth].p_hdr->eh_entries, 976273df556SFrank Mayhar path[depth].p_hdr->eh_max); 977273df556SFrank Mayhar err = -EIO; 978273df556SFrank Mayhar goto cleanup; 979273df556SFrank Mayhar } 980a86c6181SAlex Tomas /* start copy from next extent */ 9811b16da77SYongqiang Yang m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; 9821b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, depth); 983a86c6181SAlex Tomas if (m) { 9841b16da77SYongqiang Yang struct ext4_extent *ex; 9851b16da77SYongqiang Yang ex = EXT_FIRST_EXTENT(neh); 9861b16da77SYongqiang Yang memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); 987e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 988a86c6181SAlex Tomas } 989a86c6181SAlex Tomas 9907ac5990dSDarrick J. 
Wong ext4_extent_block_csum_set(inode, neh); 991a86c6181SAlex Tomas set_buffer_uptodate(bh); 992a86c6181SAlex Tomas unlock_buffer(bh); 993a86c6181SAlex Tomas 9940390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 9957e028976SAvantika Mathur if (err) 996a86c6181SAlex Tomas goto cleanup; 997a86c6181SAlex Tomas brelse(bh); 998a86c6181SAlex Tomas bh = NULL; 999a86c6181SAlex Tomas 1000a86c6181SAlex Tomas /* correct old leaf */ 1001a86c6181SAlex Tomas if (m) { 10027e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 10037e028976SAvantika Mathur if (err) 1004a86c6181SAlex Tomas goto cleanup; 1005e8546d06SMarcin Slusarz le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); 10067e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + depth); 10077e028976SAvantika Mathur if (err) 1008a86c6181SAlex Tomas goto cleanup; 1009a86c6181SAlex Tomas 1010a86c6181SAlex Tomas } 1011a86c6181SAlex Tomas 1012a86c6181SAlex Tomas /* create intermediate indexes */ 1013a86c6181SAlex Tomas k = depth - at - 1; 1014273df556SFrank Mayhar if (unlikely(k < 0)) { 1015273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "k %d < 0!", k); 1016273df556SFrank Mayhar err = -EIO; 1017273df556SFrank Mayhar goto cleanup; 1018273df556SFrank Mayhar } 1019a86c6181SAlex Tomas if (k) 1020a86c6181SAlex Tomas ext_debug("create %d intermediate indices\n", k); 1021a86c6181SAlex Tomas /* insert new index into current index block */ 1022a86c6181SAlex Tomas /* current depth stored in i var */ 1023a86c6181SAlex Tomas i = depth - 1; 1024a86c6181SAlex Tomas while (k--) { 1025a86c6181SAlex Tomas oldblock = newblock; 1026a86c6181SAlex Tomas newblock = ablocks[--a]; 1027bba90743SEric Sandeen bh = sb_getblk(inode->i_sb, newblock); 1028aebf0243SWang Shilong if (unlikely(!bh)) { 1029860d21e2STheodore Ts'o err = -ENOMEM; 1030a86c6181SAlex Tomas goto cleanup; 1031a86c6181SAlex Tomas } 1032a86c6181SAlex Tomas lock_buffer(bh); 1033a86c6181SAlex Tomas 10347e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 10357e028976SAvantika Mathur if (err) 1036a86c6181SAlex Tomas goto cleanup; 1037a86c6181SAlex Tomas 1038a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1039a86c6181SAlex Tomas neh->eh_entries = cpu_to_le16(1); 1040a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 104155ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1042a86c6181SAlex Tomas neh->eh_depth = cpu_to_le16(depth - i); 1043a86c6181SAlex Tomas fidx = EXT_FIRST_INDEX(neh); 1044a86c6181SAlex Tomas fidx->ei_block = border; 1045f65e6fbaSAlex Tomas ext4_idx_store_pblock(fidx, oldblock); 1046a86c6181SAlex Tomas 1047bba90743SEric Sandeen ext_debug("int.index at %d (block %llu): %u -> %llu\n", 1048bba90743SEric Sandeen i, newblock, le32_to_cpu(border), oldblock); 1049a86c6181SAlex Tomas 10501b16da77SYongqiang Yang /* move remainder of path[i] to the new index block */ 1051273df556SFrank Mayhar if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != 1052273df556SFrank Mayhar EXT_LAST_INDEX(path[i].p_hdr))) { 1053273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1054273df556SFrank Mayhar "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", 1055273df556SFrank Mayhar le32_to_cpu(path[i].p_ext->ee_block)); 1056273df556SFrank Mayhar err = -EIO; 1057273df556SFrank Mayhar goto cleanup; 1058273df556SFrank Mayhar } 10591b16da77SYongqiang Yang /* start copy indexes */ 10601b16da77SYongqiang Yang m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; 10611b16da77SYongqiang Yang ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 
10621b16da77SYongqiang Yang EXT_MAX_INDEX(path[i].p_hdr)); 10631b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, i); 1064a86c6181SAlex Tomas if (m) { 10651b16da77SYongqiang Yang memmove(++fidx, path[i].p_idx, 1066a86c6181SAlex Tomas sizeof(struct ext4_extent_idx) * m); 1067e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 1068a86c6181SAlex Tomas } 10697ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, neh); 1070a86c6181SAlex Tomas set_buffer_uptodate(bh); 1071a86c6181SAlex Tomas unlock_buffer(bh); 1072a86c6181SAlex Tomas 10730390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 10747e028976SAvantika Mathur if (err) 1075a86c6181SAlex Tomas goto cleanup; 1076a86c6181SAlex Tomas brelse(bh); 1077a86c6181SAlex Tomas bh = NULL; 1078a86c6181SAlex Tomas 1079a86c6181SAlex Tomas /* correct old index */ 1080a86c6181SAlex Tomas if (m) { 1081a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + i); 1082a86c6181SAlex Tomas if (err) 1083a86c6181SAlex Tomas goto cleanup; 1084e8546d06SMarcin Slusarz le16_add_cpu(&path[i].p_hdr->eh_entries, -m); 1085a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path + i); 1086a86c6181SAlex Tomas if (err) 1087a86c6181SAlex Tomas goto cleanup; 1088a86c6181SAlex Tomas } 1089a86c6181SAlex Tomas 1090a86c6181SAlex Tomas i--; 1091a86c6181SAlex Tomas } 1092a86c6181SAlex Tomas 1093a86c6181SAlex Tomas /* insert new index */ 1094a86c6181SAlex Tomas err = ext4_ext_insert_index(handle, inode, path + at, 1095a86c6181SAlex Tomas le32_to_cpu(border), newblock); 1096a86c6181SAlex Tomas 1097a86c6181SAlex Tomas cleanup: 1098a86c6181SAlex Tomas if (bh) { 1099a86c6181SAlex Tomas if (buffer_locked(bh)) 1100a86c6181SAlex Tomas unlock_buffer(bh); 1101a86c6181SAlex Tomas brelse(bh); 1102a86c6181SAlex Tomas } 1103a86c6181SAlex Tomas 1104a86c6181SAlex Tomas if (err) { 1105a86c6181SAlex Tomas /* free all allocated blocks in error case */ 1106a86c6181SAlex Tomas for (i = 0; i < depth; i++) { 1107a86c6181SAlex Tomas if (!ablocks[i]) 1108a86c6181SAlex Tomas continue; 11097dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, 1110e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA); 1111a86c6181SAlex Tomas } 1112a86c6181SAlex Tomas } 1113a86c6181SAlex Tomas kfree(ablocks); 1114a86c6181SAlex Tomas 1115a86c6181SAlex Tomas return err; 1116a86c6181SAlex Tomas } 1117a86c6181SAlex Tomas 1118a86c6181SAlex Tomas /* 1119d0d856e8SRandy Dunlap * ext4_ext_grow_indepth: 1120d0d856e8SRandy Dunlap * implements tree growing procedure: 1121a86c6181SAlex Tomas * - allocates new block 1122a86c6181SAlex Tomas * - moves top-level data (index block or leaf) into the new block 1123d0d856e8SRandy Dunlap * - initializes new top-level, creating index that points to the 1124a86c6181SAlex Tomas * just created block 1125a86c6181SAlex Tomas */ 1126a86c6181SAlex Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 112755f020dbSAllison Henderson unsigned int flags, 1128a86c6181SAlex Tomas struct ext4_extent *newext) 1129a86c6181SAlex Tomas { 1130a86c6181SAlex Tomas struct ext4_extent_header *neh; 1131a86c6181SAlex Tomas struct buffer_head *bh; 1132f65e6fbaSAlex Tomas ext4_fsblk_t newblock; 1133a86c6181SAlex Tomas int err = 0; 1134a86c6181SAlex Tomas 11351939dd84SDmitry Monakhov newblock = ext4_ext_new_meta_block(handle, inode, NULL, 113655f020dbSAllison Henderson newext, &err, flags); 1137a86c6181SAlex Tomas if (newblock == 0) 1138a86c6181SAlex Tomas return err; 1139a86c6181SAlex Tomas 1140a86c6181SAlex Tomas bh = 
sb_getblk(inode->i_sb, newblock); 1141aebf0243SWang Shilong if (unlikely(!bh)) 1142860d21e2STheodore Ts'o return -ENOMEM; 1143a86c6181SAlex Tomas lock_buffer(bh); 1144a86c6181SAlex Tomas 11457e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 11467e028976SAvantika Mathur if (err) { 1147a86c6181SAlex Tomas unlock_buffer(bh); 1148a86c6181SAlex Tomas goto out; 1149a86c6181SAlex Tomas } 1150a86c6181SAlex Tomas 1151a86c6181SAlex Tomas /* move top-level index/leaf into new block */ 11521939dd84SDmitry Monakhov memmove(bh->b_data, EXT4_I(inode)->i_data, 11531939dd84SDmitry Monakhov sizeof(EXT4_I(inode)->i_data)); 1154a86c6181SAlex Tomas 1155a86c6181SAlex Tomas /* set size of new block */ 1156a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1157a86c6181SAlex Tomas /* old root could have indexes or leaves 1158a86c6181SAlex Tomas * so calculate e_max right way */ 1159a86c6181SAlex Tomas if (ext_depth(inode)) 116055ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1161a86c6181SAlex Tomas else 116255ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1163a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 11647ac5990dSDarrick J. Wong ext4_extent_block_csum_set(inode, neh); 1165a86c6181SAlex Tomas set_buffer_uptodate(bh); 1166a86c6181SAlex Tomas unlock_buffer(bh); 1167a86c6181SAlex Tomas 11680390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 11697e028976SAvantika Mathur if (err) 1170a86c6181SAlex Tomas goto out; 1171a86c6181SAlex Tomas 11721939dd84SDmitry Monakhov /* Update top-level index: num,max,pointer */ 1173a86c6181SAlex Tomas neh = ext_inode_hdr(inode); 11741939dd84SDmitry Monakhov neh->eh_entries = cpu_to_le16(1); 11751939dd84SDmitry Monakhov ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); 11761939dd84SDmitry Monakhov if (neh->eh_depth == 0) { 11771939dd84SDmitry Monakhov /* Root extent block becomes index block */ 11781939dd84SDmitry Monakhov neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); 11791939dd84SDmitry Monakhov EXT_FIRST_INDEX(neh)->ei_block = 11801939dd84SDmitry Monakhov EXT_FIRST_EXTENT(neh)->ee_block; 11811939dd84SDmitry Monakhov } 11822ae02107SMingming Cao ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", 1183a86c6181SAlex Tomas le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 11845a0790c2SAndi Kleen le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1185bf89d16fSTheodore Ts'o ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1186a86c6181SAlex Tomas 1187ba39ebb6SWei Yongjun le16_add_cpu(&neh->eh_depth, 1); 11881939dd84SDmitry Monakhov ext4_mark_inode_dirty(handle, inode); 1189a86c6181SAlex Tomas out: 1190a86c6181SAlex Tomas brelse(bh); 1191a86c6181SAlex Tomas 1192a86c6181SAlex Tomas return err; 1193a86c6181SAlex Tomas } 1194a86c6181SAlex Tomas 1195a86c6181SAlex Tomas /* 1196d0d856e8SRandy Dunlap * ext4_ext_create_new_leaf: 1197d0d856e8SRandy Dunlap * finds empty index and adds new leaf. 1198d0d856e8SRandy Dunlap * if no free index is found, then it requests in-depth growing. 
1199a86c6181SAlex Tomas */ 1200a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 120155f020dbSAllison Henderson unsigned int flags, 1202a86c6181SAlex Tomas struct ext4_ext_path *path, 1203a86c6181SAlex Tomas struct ext4_extent *newext) 1204a86c6181SAlex Tomas { 1205a86c6181SAlex Tomas struct ext4_ext_path *curp; 1206a86c6181SAlex Tomas int depth, i, err = 0; 1207a86c6181SAlex Tomas 1208a86c6181SAlex Tomas repeat: 1209a86c6181SAlex Tomas i = depth = ext_depth(inode); 1210a86c6181SAlex Tomas 1211a86c6181SAlex Tomas /* walk up to the tree and look for free index entry */ 1212a86c6181SAlex Tomas curp = path + depth; 1213a86c6181SAlex Tomas while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1214a86c6181SAlex Tomas i--; 1215a86c6181SAlex Tomas curp--; 1216a86c6181SAlex Tomas } 1217a86c6181SAlex Tomas 1218d0d856e8SRandy Dunlap /* we use already allocated block for index block, 1219d0d856e8SRandy Dunlap * so subsequent data blocks should be contiguous */ 1220a86c6181SAlex Tomas if (EXT_HAS_FREE_INDEX(curp)) { 1221a86c6181SAlex Tomas /* if we found index with free entry, then use that 1222a86c6181SAlex Tomas * entry: create all needed subtree and add new leaf */ 122355f020dbSAllison Henderson err = ext4_ext_split(handle, inode, flags, path, newext, i); 1224787e0981SShen Feng if (err) 1225787e0981SShen Feng goto out; 1226a86c6181SAlex Tomas 1227a86c6181SAlex Tomas /* refill path */ 1228a86c6181SAlex Tomas ext4_ext_drop_refs(path); 1229a86c6181SAlex Tomas path = ext4_ext_find_extent(inode, 1230725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1231a86c6181SAlex Tomas path); 1232a86c6181SAlex Tomas if (IS_ERR(path)) 1233a86c6181SAlex Tomas err = PTR_ERR(path); 1234a86c6181SAlex Tomas } else { 1235a86c6181SAlex Tomas /* tree is full, time to grow in depth */ 12361939dd84SDmitry Monakhov err = ext4_ext_grow_indepth(handle, inode, flags, newext); 1237a86c6181SAlex Tomas if (err) 1238a86c6181SAlex Tomas goto out; 1239a86c6181SAlex Tomas 1240a86c6181SAlex Tomas /* refill path */ 1241a86c6181SAlex Tomas ext4_ext_drop_refs(path); 1242a86c6181SAlex Tomas path = ext4_ext_find_extent(inode, 1243725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1244a86c6181SAlex Tomas path); 1245a86c6181SAlex Tomas if (IS_ERR(path)) { 1246a86c6181SAlex Tomas err = PTR_ERR(path); 1247a86c6181SAlex Tomas goto out; 1248a86c6181SAlex Tomas } 1249a86c6181SAlex Tomas 1250a86c6181SAlex Tomas /* 1251d0d856e8SRandy Dunlap * only first (depth 0 -> 1) produces free space; 1252d0d856e8SRandy Dunlap * in all other cases we have to split the grown tree 1253a86c6181SAlex Tomas */ 1254a86c6181SAlex Tomas depth = ext_depth(inode); 1255a86c6181SAlex Tomas if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1256d0d856e8SRandy Dunlap /* now we need to split */ 1257a86c6181SAlex Tomas goto repeat; 1258a86c6181SAlex Tomas } 1259a86c6181SAlex Tomas } 1260a86c6181SAlex Tomas 1261a86c6181SAlex Tomas out: 1262a86c6181SAlex Tomas return err; 1263a86c6181SAlex Tomas } 1264a86c6181SAlex Tomas 1265a86c6181SAlex Tomas /* 12661988b51eSAlex Tomas * search the closest allocated block to the left for *logical 12671988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 12681988b51eSAlex Tomas * if *logical is the smallest allocated block, the function 12691988b51eSAlex Tomas * returns 0 at @phys 12701988b51eSAlex Tomas * return value contains 0 (success) or error code 12711988b51eSAlex Tomas */ 12721f109d5aSTheodore Ts'o static int 
ext4_ext_search_left(struct inode *inode, 12731f109d5aSTheodore Ts'o struct ext4_ext_path *path, 12741988b51eSAlex Tomas ext4_lblk_t *logical, ext4_fsblk_t *phys) 12751988b51eSAlex Tomas { 12761988b51eSAlex Tomas struct ext4_extent_idx *ix; 12771988b51eSAlex Tomas struct ext4_extent *ex; 1278b939e376SAneesh Kumar K.V int depth, ee_len; 12791988b51eSAlex Tomas 1280273df556SFrank Mayhar if (unlikely(path == NULL)) { 1281273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1282273df556SFrank Mayhar return -EIO; 1283273df556SFrank Mayhar } 12841988b51eSAlex Tomas depth = path->p_depth; 12851988b51eSAlex Tomas *phys = 0; 12861988b51eSAlex Tomas 12871988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 12881988b51eSAlex Tomas return 0; 12891988b51eSAlex Tomas 12901988b51eSAlex Tomas /* usually extent in the path covers blocks smaller 12911988b51eSAlex Tomas * then *logical, but it can be that extent is the 12921988b51eSAlex Tomas * first one in the file */ 12931988b51eSAlex Tomas 12941988b51eSAlex Tomas ex = path[depth].p_ext; 1295b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 12961988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1297273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1298273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1299273df556SFrank Mayhar "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", 1300273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block)); 1301273df556SFrank Mayhar return -EIO; 1302273df556SFrank Mayhar } 13031988b51eSAlex Tomas while (--depth >= 0) { 13041988b51eSAlex Tomas ix = path[depth].p_idx; 1305273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1306273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1307273df556SFrank Mayhar "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", 13086ee3b212STao Ma ix != NULL ? le32_to_cpu(ix->ei_block) : 0, 1309273df556SFrank Mayhar EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ? 
13106ee3b212STao Ma le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, 1311273df556SFrank Mayhar depth); 1312273df556SFrank Mayhar return -EIO; 1313273df556SFrank Mayhar } 13141988b51eSAlex Tomas } 13151988b51eSAlex Tomas return 0; 13161988b51eSAlex Tomas } 13171988b51eSAlex Tomas 1318273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1319273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1320273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1321273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1322273df556SFrank Mayhar return -EIO; 1323273df556SFrank Mayhar } 13241988b51eSAlex Tomas 1325b939e376SAneesh Kumar K.V *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1326bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex) + ee_len - 1; 13271988b51eSAlex Tomas return 0; 13281988b51eSAlex Tomas } 13291988b51eSAlex Tomas 13301988b51eSAlex Tomas /* 13311988b51eSAlex Tomas * search the closest allocated block to the right for *logical 13321988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 1333df3ab170STao Ma * if *logical is the largest allocated block, the function 13341988b51eSAlex Tomas * returns 0 at @phys 13351988b51eSAlex Tomas * return value contains 0 (success) or error code 13361988b51eSAlex Tomas */ 13371f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode, 13381f109d5aSTheodore Ts'o struct ext4_ext_path *path, 13394d33b1efSTheodore Ts'o ext4_lblk_t *logical, ext4_fsblk_t *phys, 13404d33b1efSTheodore Ts'o struct ext4_extent **ret_ex) 13411988b51eSAlex Tomas { 13421988b51eSAlex Tomas struct buffer_head *bh = NULL; 13431988b51eSAlex Tomas struct ext4_extent_header *eh; 13441988b51eSAlex Tomas struct ext4_extent_idx *ix; 13451988b51eSAlex Tomas struct ext4_extent *ex; 13461988b51eSAlex Tomas ext4_fsblk_t block; 1347395a87bfSEric Sandeen int depth; /* Note, NOT eh_depth; depth from top of tree */ 1348395a87bfSEric Sandeen int ee_len; 13491988b51eSAlex Tomas 1350273df556SFrank Mayhar if (unlikely(path == NULL)) { 1351273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1352273df556SFrank Mayhar return -EIO; 1353273df556SFrank Mayhar } 13541988b51eSAlex Tomas depth = path->p_depth; 13551988b51eSAlex Tomas *phys = 0; 13561988b51eSAlex Tomas 13571988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 13581988b51eSAlex Tomas return 0; 13591988b51eSAlex Tomas 13601988b51eSAlex Tomas /* usually extent in the path covers blocks smaller 13611988b51eSAlex Tomas * then *logical, but it can be that extent is the 13621988b51eSAlex Tomas * first one in the file */ 13631988b51eSAlex Tomas 13641988b51eSAlex Tomas ex = path[depth].p_ext; 1365b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 13661988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1367273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1368273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1369273df556SFrank Mayhar "first_extent(path[%d].p_hdr) != ex", 1370273df556SFrank Mayhar depth); 1371273df556SFrank Mayhar return -EIO; 1372273df556SFrank Mayhar } 13731988b51eSAlex Tomas while (--depth >= 0) { 13741988b51eSAlex Tomas ix = path[depth].p_idx; 1375273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1376273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1377273df556SFrank Mayhar "ix != EXT_FIRST_INDEX *logical %d!", 1378273df556SFrank Mayhar *logical); 1379273df556SFrank Mayhar return -EIO; 1380273df556SFrank 
Mayhar } 13811988b51eSAlex Tomas } 13824d33b1efSTheodore Ts'o goto found_extent; 13831988b51eSAlex Tomas } 13841988b51eSAlex Tomas 1385273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1386273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1387273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1388273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1389273df556SFrank Mayhar return -EIO; 1390273df556SFrank Mayhar } 13911988b51eSAlex Tomas 13921988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 13931988b51eSAlex Tomas /* next allocated block in this leaf */ 13941988b51eSAlex Tomas ex++; 13954d33b1efSTheodore Ts'o goto found_extent; 13961988b51eSAlex Tomas } 13971988b51eSAlex Tomas 13981988b51eSAlex Tomas /* go up and search for index to the right */ 13991988b51eSAlex Tomas while (--depth >= 0) { 14001988b51eSAlex Tomas ix = path[depth].p_idx; 14011988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 140225f1ee3aSWu Fengguang goto got_index; 14031988b51eSAlex Tomas } 14041988b51eSAlex Tomas 140525f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */ 14061988b51eSAlex Tomas return 0; 14071988b51eSAlex Tomas 140825f1ee3aSWu Fengguang got_index: 14091988b51eSAlex Tomas /* we've found index to the right, let's 14101988b51eSAlex Tomas * follow it and find the closest allocated 14111988b51eSAlex Tomas * block to the right */ 14121988b51eSAlex Tomas ix++; 1413bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 14141988b51eSAlex Tomas while (++depth < path->p_depth) { 14151988b51eSAlex Tomas bh = sb_bread(inode->i_sb, block); 14161988b51eSAlex Tomas if (bh == NULL) 14171988b51eSAlex Tomas return -EIO; 14181988b51eSAlex Tomas eh = ext_block_hdr(bh); 1419395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */ 1420f8489128SDarrick J. Wong if (ext4_ext_check_block(inode, eh, 1421f8489128SDarrick J. Wong path->p_depth - depth, bh)) { 14221988b51eSAlex Tomas put_bh(bh); 14231988b51eSAlex Tomas return -EIO; 14241988b51eSAlex Tomas } 14251988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh); 1426bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 14271988b51eSAlex Tomas put_bh(bh); 14281988b51eSAlex Tomas } 14291988b51eSAlex Tomas 14301988b51eSAlex Tomas bh = sb_bread(inode->i_sb, block); 14311988b51eSAlex Tomas if (bh == NULL) 14321988b51eSAlex Tomas return -EIO; 14331988b51eSAlex Tomas eh = ext_block_hdr(bh); 1434f8489128SDarrick J. Wong if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) { 14351988b51eSAlex Tomas put_bh(bh); 14361988b51eSAlex Tomas return -EIO; 14371988b51eSAlex Tomas } 14381988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh); 14394d33b1efSTheodore Ts'o found_extent: 14401988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block); 1441bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex); 14424d33b1efSTheodore Ts'o *ret_ex = ex; 14434d33b1efSTheodore Ts'o if (bh) 14441988b51eSAlex Tomas put_bh(bh); 14451988b51eSAlex Tomas return 0; 14461988b51eSAlex Tomas } 14471988b51eSAlex Tomas 14481988b51eSAlex Tomas /* 1449d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block: 1450f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1451d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as 1452d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent 1453d0d856e8SRandy Dunlap * with leaves. 
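 *
 * the walk starts at the leaf and moves towards the root: at the leaf
 * level the next extent's ee_block is returned, at index levels the next
 * index's ei_block.  only when every level is already at its last entry
 * does the function return EXT_MAX_BLOCKS.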
1454a86c6181SAlex Tomas */ 1455725d26d3SAneesh Kumar K.V static ext4_lblk_t 1456a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1457a86c6181SAlex Tomas { 1458a86c6181SAlex Tomas int depth; 1459a86c6181SAlex Tomas 1460a86c6181SAlex Tomas BUG_ON(path == NULL); 1461a86c6181SAlex Tomas depth = path->p_depth; 1462a86c6181SAlex Tomas 1463a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL) 1464f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1465a86c6181SAlex Tomas 1466a86c6181SAlex Tomas while (depth >= 0) { 1467a86c6181SAlex Tomas if (depth == path->p_depth) { 1468a86c6181SAlex Tomas /* leaf */ 14696f8ff537SCurt Wohlgemuth if (path[depth].p_ext && 14706f8ff537SCurt Wohlgemuth path[depth].p_ext != 1471a86c6181SAlex Tomas EXT_LAST_EXTENT(path[depth].p_hdr)) 1472a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_ext[1].ee_block); 1473a86c6181SAlex Tomas } else { 1474a86c6181SAlex Tomas /* index */ 1475a86c6181SAlex Tomas if (path[depth].p_idx != 1476a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1477a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_idx[1].ei_block); 1478a86c6181SAlex Tomas } 1479a86c6181SAlex Tomas depth--; 1480a86c6181SAlex Tomas } 1481a86c6181SAlex Tomas 1482f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1483a86c6181SAlex Tomas } 1484a86c6181SAlex Tomas 1485a86c6181SAlex Tomas /* 1486d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block: 1487f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1488a86c6181SAlex Tomas */ 14895718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1490a86c6181SAlex Tomas { 1491a86c6181SAlex Tomas int depth; 1492a86c6181SAlex Tomas 1493a86c6181SAlex Tomas BUG_ON(path == NULL); 1494a86c6181SAlex Tomas depth = path->p_depth; 1495a86c6181SAlex Tomas 1496a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */ 1497a86c6181SAlex Tomas if (depth == 0) 1498f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1499a86c6181SAlex Tomas 1500a86c6181SAlex Tomas /* go to index block */ 1501a86c6181SAlex Tomas depth--; 1502a86c6181SAlex Tomas 1503a86c6181SAlex Tomas while (depth >= 0) { 1504a86c6181SAlex Tomas if (path[depth].p_idx != 1505a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1506725d26d3SAneesh Kumar K.V return (ext4_lblk_t) 1507725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block); 1508a86c6181SAlex Tomas depth--; 1509a86c6181SAlex Tomas } 1510a86c6181SAlex Tomas 1511f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1512a86c6181SAlex Tomas } 1513a86c6181SAlex Tomas 1514a86c6181SAlex Tomas /* 1515d0d856e8SRandy Dunlap * ext4_ext_correct_indexes: 1516d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf, 1517d0d856e8SRandy Dunlap * then we have to correct all indexes above. 1518a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases? 
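 *
 * the new first logical block is copied into the parent index and then
 * propagated further up, but only while the index at each level is itself
 * the first entry of its block; once that stops being true the ancestors
 * no longer refer to this leaf's start and the loop ends.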
1519a86c6181SAlex Tomas */ 15201d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1521a86c6181SAlex Tomas struct ext4_ext_path *path) 1522a86c6181SAlex Tomas { 1523a86c6181SAlex Tomas struct ext4_extent_header *eh; 1524a86c6181SAlex Tomas int depth = ext_depth(inode); 1525a86c6181SAlex Tomas struct ext4_extent *ex; 1526a86c6181SAlex Tomas __le32 border; 1527a86c6181SAlex Tomas int k, err = 0; 1528a86c6181SAlex Tomas 1529a86c6181SAlex Tomas eh = path[depth].p_hdr; 1530a86c6181SAlex Tomas ex = path[depth].p_ext; 1531273df556SFrank Mayhar 1532273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) { 1533273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1534273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh); 1535273df556SFrank Mayhar return -EIO; 1536273df556SFrank Mayhar } 1537a86c6181SAlex Tomas 1538a86c6181SAlex Tomas if (depth == 0) { 1539a86c6181SAlex Tomas /* there is no tree at all */ 1540a86c6181SAlex Tomas return 0; 1541a86c6181SAlex Tomas } 1542a86c6181SAlex Tomas 1543a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) { 1544a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */ 1545a86c6181SAlex Tomas return 0; 1546a86c6181SAlex Tomas } 1547a86c6181SAlex Tomas 1548a86c6181SAlex Tomas /* 1549d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one 1550a86c6181SAlex Tomas */ 1551a86c6181SAlex Tomas k = depth - 1; 1552a86c6181SAlex Tomas border = path[depth].p_ext->ee_block; 15537e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 15547e028976SAvantika Mathur if (err) 1555a86c6181SAlex Tomas return err; 1556a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 15577e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 15587e028976SAvantika Mathur if (err) 1559a86c6181SAlex Tomas return err; 1560a86c6181SAlex Tomas 1561a86c6181SAlex Tomas while (k--) { 1562a86c6181SAlex Tomas /* change all left-side indexes */ 1563a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1564a86c6181SAlex Tomas break; 15657e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 15667e028976SAvantika Mathur if (err) 1567a86c6181SAlex Tomas break; 1568a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 15697e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 15707e028976SAvantika Mathur if (err) 1571a86c6181SAlex Tomas break; 1572a86c6181SAlex Tomas } 1573a86c6181SAlex Tomas 1574a86c6181SAlex Tomas return err; 1575a86c6181SAlex Tomas } 1576a86c6181SAlex Tomas 1577748de673SAkira Fujita int 1578a86c6181SAlex Tomas ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, 1579a86c6181SAlex Tomas struct ext4_extent *ex2) 1580a86c6181SAlex Tomas { 1581749269faSAmit Arora unsigned short ext1_ee_len, ext2_ee_len, max_len; 1582a2df2a63SAmit Arora 1583a2df2a63SAmit Arora /* 1584ec22ba8eSDmitry Monakhov * Make sure that both extents are initialized. We don't merge 1585ec22ba8eSDmitry Monakhov * uninitialized extents so that we can be sure that end_io code has 1586ec22ba8eSDmitry Monakhov * the extent that was written properly split out and conversion to 1587ec22ba8eSDmitry Monakhov * initialized is trivial. 
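 *
 * Beyond that, two extents are mergeable only when they are logically
 * contiguous (ee_block + length of the first equals ee_block of the
 * second), physically contiguous on disk, and the combined length still
 * fits the on-disk limit (EXT_INIT_MAX_LEN or EXT_UNINIT_MAX_LEN).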
1588a2df2a63SAmit Arora */ 1589ec22ba8eSDmitry Monakhov if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2)) 1590a2df2a63SAmit Arora return 0; 1591a2df2a63SAmit Arora 1592749269faSAmit Arora if (ext4_ext_is_uninitialized(ex1)) 1593749269faSAmit Arora max_len = EXT_UNINIT_MAX_LEN; 1594749269faSAmit Arora else 1595749269faSAmit Arora max_len = EXT_INIT_MAX_LEN; 1596749269faSAmit Arora 1597a2df2a63SAmit Arora ext1_ee_len = ext4_ext_get_actual_len(ex1); 1598a2df2a63SAmit Arora ext2_ee_len = ext4_ext_get_actual_len(ex2); 1599a2df2a63SAmit Arora 1600a2df2a63SAmit Arora if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 160163f57933SAndrew Morton le32_to_cpu(ex2->ee_block)) 1602a86c6181SAlex Tomas return 0; 1603a86c6181SAlex Tomas 1604471d4011SSuparna Bhattacharya /* 1605471d4011SSuparna Bhattacharya * To allow future support for preallocated extents to be added 1606471d4011SSuparna Bhattacharya * as an RO_COMPAT feature, refuse to merge to extents if 1607d0d856e8SRandy Dunlap * this can result in the top bit of ee_len being set. 1608471d4011SSuparna Bhattacharya */ 1609749269faSAmit Arora if (ext1_ee_len + ext2_ee_len > max_len) 1610471d4011SSuparna Bhattacharya return 0; 1611bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 1612b939e376SAneesh Kumar K.V if (ext1_ee_len >= 4) 1613a86c6181SAlex Tomas return 0; 1614a86c6181SAlex Tomas #endif 1615a86c6181SAlex Tomas 1616bf89d16fSTheodore Ts'o if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1617a86c6181SAlex Tomas return 1; 1618a86c6181SAlex Tomas return 0; 1619a86c6181SAlex Tomas } 1620a86c6181SAlex Tomas 1621a86c6181SAlex Tomas /* 162256055d3aSAmit Arora * This function tries to merge the "ex" extent to the next extent in the tree. 162356055d3aSAmit Arora * It always tries to merge towards right. If you want to merge towards 162456055d3aSAmit Arora * left, pass "ex - 1" as argument instead of "ex". 162556055d3aSAmit Arora * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 162656055d3aSAmit Arora * 1 if they got merged. 162756055d3aSAmit Arora */ 1628197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode, 162956055d3aSAmit Arora struct ext4_ext_path *path, 163056055d3aSAmit Arora struct ext4_extent *ex) 163156055d3aSAmit Arora { 163256055d3aSAmit Arora struct ext4_extent_header *eh; 163356055d3aSAmit Arora unsigned int depth, len; 163456055d3aSAmit Arora int merge_done = 0; 163556055d3aSAmit Arora int uninitialized = 0; 163656055d3aSAmit Arora 163756055d3aSAmit Arora depth = ext_depth(inode); 163856055d3aSAmit Arora BUG_ON(path[depth].p_hdr == NULL); 163956055d3aSAmit Arora eh = path[depth].p_hdr; 164056055d3aSAmit Arora 164156055d3aSAmit Arora while (ex < EXT_LAST_EXTENT(eh)) { 164256055d3aSAmit Arora if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 164356055d3aSAmit Arora break; 164456055d3aSAmit Arora /* merge with next extent! 
*/ 164556055d3aSAmit Arora if (ext4_ext_is_uninitialized(ex)) 164656055d3aSAmit Arora uninitialized = 1; 164756055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 164856055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1)); 164956055d3aSAmit Arora if (uninitialized) 165056055d3aSAmit Arora ext4_ext_mark_uninitialized(ex); 165156055d3aSAmit Arora 165256055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) { 165356055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1) 165456055d3aSAmit Arora * sizeof(struct ext4_extent); 165556055d3aSAmit Arora memmove(ex + 1, ex + 2, len); 165656055d3aSAmit Arora } 1657e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1); 165856055d3aSAmit Arora merge_done = 1; 165956055d3aSAmit Arora WARN_ON(eh->eh_entries == 0); 166056055d3aSAmit Arora if (!eh->eh_entries) 166124676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 166256055d3aSAmit Arora } 166356055d3aSAmit Arora 166456055d3aSAmit Arora return merge_done; 166556055d3aSAmit Arora } 166656055d3aSAmit Arora 166756055d3aSAmit Arora /* 1668ecb94f5fSTheodore Ts'o * This function does a very simple check to see if we can collapse 1669ecb94f5fSTheodore Ts'o * an extent tree with a single extent tree leaf block into the inode. 1670ecb94f5fSTheodore Ts'o */ 1671ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge_up(handle_t *handle, 1672ecb94f5fSTheodore Ts'o struct inode *inode, 1673ecb94f5fSTheodore Ts'o struct ext4_ext_path *path) 1674ecb94f5fSTheodore Ts'o { 1675ecb94f5fSTheodore Ts'o size_t s; 1676ecb94f5fSTheodore Ts'o unsigned max_root = ext4_ext_space_root(inode, 0); 1677ecb94f5fSTheodore Ts'o ext4_fsblk_t blk; 1678ecb94f5fSTheodore Ts'o 1679ecb94f5fSTheodore Ts'o if ((path[0].p_depth != 1) || 1680ecb94f5fSTheodore Ts'o (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) || 1681ecb94f5fSTheodore Ts'o (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root)) 1682ecb94f5fSTheodore Ts'o return; 1683ecb94f5fSTheodore Ts'o 1684ecb94f5fSTheodore Ts'o /* 1685ecb94f5fSTheodore Ts'o * We need to modify the block allocation bitmap and the block 1686ecb94f5fSTheodore Ts'o * group descriptor to release the extent tree block. If we 1687ecb94f5fSTheodore Ts'o * can't get the journal credits, give up. 
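 *
 * When it does go ahead, the single remaining leaf is copied over the
 * root header in i_data, path[0] becomes a depth-0 leaf again, and the
 * now unused extent tree block is released with ext4_free_blocks().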
1688ecb94f5fSTheodore Ts'o */ 1689ecb94f5fSTheodore Ts'o if (ext4_journal_extend(handle, 2)) 1690ecb94f5fSTheodore Ts'o return; 1691ecb94f5fSTheodore Ts'o 1692ecb94f5fSTheodore Ts'o /* 1693ecb94f5fSTheodore Ts'o * Copy the extent data up to the inode 1694ecb94f5fSTheodore Ts'o */ 1695ecb94f5fSTheodore Ts'o blk = ext4_idx_pblock(path[0].p_idx); 1696ecb94f5fSTheodore Ts'o s = le16_to_cpu(path[1].p_hdr->eh_entries) * 1697ecb94f5fSTheodore Ts'o sizeof(struct ext4_extent_idx); 1698ecb94f5fSTheodore Ts'o s += sizeof(struct ext4_extent_header); 1699ecb94f5fSTheodore Ts'o 1700ecb94f5fSTheodore Ts'o memcpy(path[0].p_hdr, path[1].p_hdr, s); 1701ecb94f5fSTheodore Ts'o path[0].p_depth = 0; 1702ecb94f5fSTheodore Ts'o path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + 1703ecb94f5fSTheodore Ts'o (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr)); 1704ecb94f5fSTheodore Ts'o path[0].p_hdr->eh_max = cpu_to_le16(max_root); 1705ecb94f5fSTheodore Ts'o 1706ecb94f5fSTheodore Ts'o brelse(path[1].p_bh); 1707ecb94f5fSTheodore Ts'o ext4_free_blocks(handle, inode, NULL, blk, 1, 1708ecb94f5fSTheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 1709ecb94f5fSTheodore Ts'o } 1710ecb94f5fSTheodore Ts'o 1711ecb94f5fSTheodore Ts'o /* 1712197217a5SYongqiang Yang * This function tries to merge the @ex extent to neighbours in the tree. 1713197217a5SYongqiang Yang * return 1 if merge left else 0. 1714197217a5SYongqiang Yang */ 1715ecb94f5fSTheodore Ts'o static void ext4_ext_try_to_merge(handle_t *handle, 1716ecb94f5fSTheodore Ts'o struct inode *inode, 1717197217a5SYongqiang Yang struct ext4_ext_path *path, 1718197217a5SYongqiang Yang struct ext4_extent *ex) { 1719197217a5SYongqiang Yang struct ext4_extent_header *eh; 1720197217a5SYongqiang Yang unsigned int depth; 1721197217a5SYongqiang Yang int merge_done = 0; 1722197217a5SYongqiang Yang 1723197217a5SYongqiang Yang depth = ext_depth(inode); 1724197217a5SYongqiang Yang BUG_ON(path[depth].p_hdr == NULL); 1725197217a5SYongqiang Yang eh = path[depth].p_hdr; 1726197217a5SYongqiang Yang 1727197217a5SYongqiang Yang if (ex > EXT_FIRST_EXTENT(eh)) 1728197217a5SYongqiang Yang merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1729197217a5SYongqiang Yang 1730197217a5SYongqiang Yang if (!merge_done) 1731ecb94f5fSTheodore Ts'o (void) ext4_ext_try_to_merge_right(inode, path, ex); 1732197217a5SYongqiang Yang 1733ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge_up(handle, inode, path); 1734197217a5SYongqiang Yang } 1735197217a5SYongqiang Yang 1736197217a5SYongqiang Yang /* 173725d14f98SAmit Arora * check if a portion of the "newext" extent overlaps with an 173825d14f98SAmit Arora * existing extent. 173925d14f98SAmit Arora * 174025d14f98SAmit Arora * If there is an overlap discovered, it updates the length of the newext 174125d14f98SAmit Arora * such that there will be no overlap, and then returns 1. 174225d14f98SAmit Arora * If there is no overlap found, it returns 0. 
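 *
 * For example (illustrative, ignoring bigalloc cluster rounding): if
 * newext asks for logical blocks 100..119 but an existing extent already
 * starts at block 110, ee_len is trimmed from 20 down to 10 so that the
 * new extent stops where the existing allocation begins.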
174325d14f98SAmit Arora */ 17444d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 17454d33b1efSTheodore Ts'o struct inode *inode, 174625d14f98SAmit Arora struct ext4_extent *newext, 174725d14f98SAmit Arora struct ext4_ext_path *path) 174825d14f98SAmit Arora { 1749725d26d3SAneesh Kumar K.V ext4_lblk_t b1, b2; 175025d14f98SAmit Arora unsigned int depth, len1; 175125d14f98SAmit Arora unsigned int ret = 0; 175225d14f98SAmit Arora 175325d14f98SAmit Arora b1 = le32_to_cpu(newext->ee_block); 1754a2df2a63SAmit Arora len1 = ext4_ext_get_actual_len(newext); 175525d14f98SAmit Arora depth = ext_depth(inode); 175625d14f98SAmit Arora if (!path[depth].p_ext) 175725d14f98SAmit Arora goto out; 175825d14f98SAmit Arora b2 = le32_to_cpu(path[depth].p_ext->ee_block); 17594d33b1efSTheodore Ts'o b2 &= ~(sbi->s_cluster_ratio - 1); 176025d14f98SAmit Arora 176125d14f98SAmit Arora /* 176225d14f98SAmit Arora * get the next allocated block if the extent in the path 176325d14f98SAmit Arora * is before the requested block(s) 176425d14f98SAmit Arora */ 176525d14f98SAmit Arora if (b2 < b1) { 176625d14f98SAmit Arora b2 = ext4_ext_next_allocated_block(path); 1767f17722f9SLukas Czerner if (b2 == EXT_MAX_BLOCKS) 176825d14f98SAmit Arora goto out; 17694d33b1efSTheodore Ts'o b2 &= ~(sbi->s_cluster_ratio - 1); 177025d14f98SAmit Arora } 177125d14f98SAmit Arora 1772725d26d3SAneesh Kumar K.V /* check for wrap through zero on extent logical start block*/ 177325d14f98SAmit Arora if (b1 + len1 < b1) { 1774f17722f9SLukas Czerner len1 = EXT_MAX_BLOCKS - b1; 177525d14f98SAmit Arora newext->ee_len = cpu_to_le16(len1); 177625d14f98SAmit Arora ret = 1; 177725d14f98SAmit Arora } 177825d14f98SAmit Arora 177925d14f98SAmit Arora /* check for overlap */ 178025d14f98SAmit Arora if (b1 + len1 > b2) { 178125d14f98SAmit Arora newext->ee_len = cpu_to_le16(b2 - b1); 178225d14f98SAmit Arora ret = 1; 178325d14f98SAmit Arora } 178425d14f98SAmit Arora out: 178525d14f98SAmit Arora return ret; 178625d14f98SAmit Arora } 178725d14f98SAmit Arora 178825d14f98SAmit Arora /* 1789d0d856e8SRandy Dunlap * ext4_ext_insert_extent: 1790d0d856e8SRandy Dunlap * tries to merge requsted extent into the existing extent or 1791d0d856e8SRandy Dunlap * inserts requested extent as new one into the tree, 1792d0d856e8SRandy Dunlap * creating new leaf in the no-space case. 
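 *
 * rough order of attempts: append or prepend the new extent to the one
 * found in the path, then look for room in the current leaf, then in the
 * next leaf, and only then call ext4_ext_create_new_leaf().  after the
 * insert, neighbouring extents are merged where possible and the parent
 * indexes are corrected.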
1793a86c6181SAlex Tomas */ 1794a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1795a86c6181SAlex Tomas struct ext4_ext_path *path, 17960031462bSMingming Cao struct ext4_extent *newext, int flag) 1797a86c6181SAlex Tomas { 1798a86c6181SAlex Tomas struct ext4_extent_header *eh; 1799a86c6181SAlex Tomas struct ext4_extent *ex, *fex; 1800a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */ 1801a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL; 1802725d26d3SAneesh Kumar K.V int depth, len, err; 1803725d26d3SAneesh Kumar K.V ext4_lblk_t next; 1804a2df2a63SAmit Arora unsigned uninitialized = 0; 180555f020dbSAllison Henderson int flags = 0; 1806a86c6181SAlex Tomas 1807273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1808273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1809273df556SFrank Mayhar return -EIO; 1810273df556SFrank Mayhar } 1811a86c6181SAlex Tomas depth = ext_depth(inode); 1812a86c6181SAlex Tomas ex = path[depth].p_ext; 1813be8981beSLukas Czerner eh = path[depth].p_hdr; 1814273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1815273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1816273df556SFrank Mayhar return -EIO; 1817273df556SFrank Mayhar } 1818a86c6181SAlex Tomas 1819a86c6181SAlex Tomas /* try to insert block into found extent and return */ 1820be8981beSLukas Czerner if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)) { 1821be8981beSLukas Czerner 1822be8981beSLukas Czerner /* 1823be8981beSLukas Czerner * Try to see whether we should rather test the extent on 1824be8981beSLukas Czerner * right from ex, or from the left of ex. This is because 1825be8981beSLukas Czerner * ext4_ext_find_extent() can return either extent on the 1826be8981beSLukas Czerner * left, or on the right from the searched position. This 1827be8981beSLukas Czerner * will make merging more effective. 
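 *
 * In short: if there is a gap between the end of ex and the start of
 * newext, the extent to the right (ex + 1) is tried instead; if there is
 * a gap between the end of newext and the start of ex, the one on the
 * left (ex - 1) is tried.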
1828be8981beSLukas Czerner */ 1829be8981beSLukas Czerner if (ex < EXT_LAST_EXTENT(eh) && 1830be8981beSLukas Czerner (le32_to_cpu(ex->ee_block) + 1831be8981beSLukas Czerner ext4_ext_get_actual_len(ex) < 1832be8981beSLukas Czerner le32_to_cpu(newext->ee_block))) { 1833be8981beSLukas Czerner ex += 1; 1834be8981beSLukas Czerner goto prepend; 1835be8981beSLukas Czerner } else if ((ex > EXT_FIRST_EXTENT(eh)) && 1836be8981beSLukas Czerner (le32_to_cpu(newext->ee_block) + 1837be8981beSLukas Czerner ext4_ext_get_actual_len(newext) < 1838be8981beSLukas Czerner le32_to_cpu(ex->ee_block))) 1839be8981beSLukas Czerner ex -= 1; 1840be8981beSLukas Czerner 1841be8981beSLukas Czerner /* Try to append newex to the ex */ 1842be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, ex, newext)) { 1843be8981beSLukas Czerner ext_debug("append [%d]%d block to %u:[%d]%d" 1844be8981beSLukas Czerner "(from %llu)\n", 1845553f9008SMingming ext4_ext_is_uninitialized(newext), 1846a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 1847a86c6181SAlex Tomas le32_to_cpu(ex->ee_block), 1848553f9008SMingming ext4_ext_is_uninitialized(ex), 1849bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), 1850bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 1851be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 1852be8981beSLukas Czerner path + depth); 18537e028976SAvantika Mathur if (err) 1854a86c6181SAlex Tomas return err; 1855a2df2a63SAmit Arora 1856a2df2a63SAmit Arora /* 1857be8981beSLukas Czerner * ext4_can_extents_be_merged should have checked 1858be8981beSLukas Czerner * that either both extents are uninitialized, or 1859be8981beSLukas Czerner * both aren't. Thus we need to check only one of 1860be8981beSLukas Czerner * them here. 1861a2df2a63SAmit Arora */ 1862a2df2a63SAmit Arora if (ext4_ext_is_uninitialized(ex)) 1863a2df2a63SAmit Arora uninitialized = 1; 1864a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1865a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext)); 1866a2df2a63SAmit Arora if (uninitialized) 1867a2df2a63SAmit Arora ext4_ext_mark_uninitialized(ex); 1868a86c6181SAlex Tomas eh = path[depth].p_hdr; 1869a86c6181SAlex Tomas nearex = ex; 1870a86c6181SAlex Tomas goto merge; 1871a86c6181SAlex Tomas } 1872a86c6181SAlex Tomas 1873be8981beSLukas Czerner prepend: 1874be8981beSLukas Czerner /* Try to prepend newex to the ex */ 1875be8981beSLukas Czerner if (ext4_can_extents_be_merged(inode, newext, ex)) { 1876be8981beSLukas Czerner ext_debug("prepend %u[%d]%d block to %u:[%d]%d" 1877be8981beSLukas Czerner "(from %llu)\n", 1878be8981beSLukas Czerner le32_to_cpu(newext->ee_block), 1879be8981beSLukas Czerner ext4_ext_is_uninitialized(newext), 1880be8981beSLukas Czerner ext4_ext_get_actual_len(newext), 1881be8981beSLukas Czerner le32_to_cpu(ex->ee_block), 1882be8981beSLukas Czerner ext4_ext_is_uninitialized(ex), 1883be8981beSLukas Czerner ext4_ext_get_actual_len(ex), 1884be8981beSLukas Czerner ext4_ext_pblock(ex)); 1885be8981beSLukas Czerner err = ext4_ext_get_access(handle, inode, 1886be8981beSLukas Czerner path + depth); 1887be8981beSLukas Czerner if (err) 1888be8981beSLukas Czerner return err; 1889be8981beSLukas Czerner 1890be8981beSLukas Czerner /* 1891be8981beSLukas Czerner * ext4_can_extents_be_merged should have checked 1892be8981beSLukas Czerner * that either both extents are uninitialized, or 1893be8981beSLukas Czerner * both aren't. Thus we need to check only one of 1894be8981beSLukas Czerner * them here. 
1895be8981beSLukas Czerner */ 1896be8981beSLukas Czerner if (ext4_ext_is_uninitialized(ex)) 1897be8981beSLukas Czerner uninitialized = 1; 1898be8981beSLukas Czerner ex->ee_block = newext->ee_block; 1899be8981beSLukas Czerner ext4_ext_store_pblock(ex, ext4_ext_pblock(newext)); 1900be8981beSLukas Czerner ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1901be8981beSLukas Czerner + ext4_ext_get_actual_len(newext)); 1902be8981beSLukas Czerner if (uninitialized) 1903be8981beSLukas Czerner ext4_ext_mark_uninitialized(ex); 1904be8981beSLukas Czerner eh = path[depth].p_hdr; 1905be8981beSLukas Czerner nearex = ex; 1906be8981beSLukas Czerner goto merge; 1907be8981beSLukas Czerner } 1908be8981beSLukas Czerner } 1909be8981beSLukas Czerner 1910a86c6181SAlex Tomas depth = ext_depth(inode); 1911a86c6181SAlex Tomas eh = path[depth].p_hdr; 1912a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 1913a86c6181SAlex Tomas goto has_space; 1914a86c6181SAlex Tomas 1915a86c6181SAlex Tomas /* probably next leaf has space for us? */ 1916a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh); 1917598dbdf2SRobin Dong next = EXT_MAX_BLOCKS; 1918598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 19195718789dSRobin Dong next = ext4_ext_next_leaf_block(path); 1920598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) { 192132de6756SYongqiang Yang ext_debug("next leaf block - %u\n", next); 1922a86c6181SAlex Tomas BUG_ON(npath != NULL); 1923a86c6181SAlex Tomas npath = ext4_ext_find_extent(inode, next, NULL); 1924a86c6181SAlex Tomas if (IS_ERR(npath)) 1925a86c6181SAlex Tomas return PTR_ERR(npath); 1926a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth); 1927a86c6181SAlex Tomas eh = npath[depth].p_hdr; 1928a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 192925985edcSLucas De Marchi ext_debug("next leaf isn't full(%d)\n", 1930a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries)); 1931a86c6181SAlex Tomas path = npath; 1932ffb505ffSRobin Dong goto has_space; 1933a86c6181SAlex Tomas } 1934a86c6181SAlex Tomas ext_debug("next leaf has no free space(%d,%d)\n", 1935a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 1936a86c6181SAlex Tomas } 1937a86c6181SAlex Tomas 1938a86c6181SAlex Tomas /* 1939d0d856e8SRandy Dunlap * There is no free space in the found leaf. 1940d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree. 
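 *
 * Callers that must not fail here (EXT4_GET_BLOCKS_METADATA_NOFAIL) are
 * allowed to dip into the reserved block pool via EXT4_MB_USE_RESERVED.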
1941a86c6181SAlex Tomas */ 194227dd4385SLukas Czerner if (flag & EXT4_GET_BLOCKS_METADATA_NOFAIL) 194327dd4385SLukas Czerner flags = EXT4_MB_USE_RESERVED; 194455f020dbSAllison Henderson err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext); 1945a86c6181SAlex Tomas if (err) 1946a86c6181SAlex Tomas goto cleanup; 1947a86c6181SAlex Tomas depth = ext_depth(inode); 1948a86c6181SAlex Tomas eh = path[depth].p_hdr; 1949a86c6181SAlex Tomas 1950a86c6181SAlex Tomas has_space: 1951a86c6181SAlex Tomas nearex = path[depth].p_ext; 1952a86c6181SAlex Tomas 19537e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 19547e028976SAvantika Mathur if (err) 1955a86c6181SAlex Tomas goto cleanup; 1956a86c6181SAlex Tomas 1957a86c6181SAlex Tomas if (!nearex) { 1958a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */ 195932de6756SYongqiang Yang ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n", 1960a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 1961bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 1962553f9008SMingming ext4_ext_is_uninitialized(newext), 1963a2df2a63SAmit Arora ext4_ext_get_actual_len(newext)); 196480e675f9SEric Gouriou nearex = EXT_FIRST_EXTENT(eh); 1965a86c6181SAlex Tomas } else { 196680e675f9SEric Gouriou if (le32_to_cpu(newext->ee_block) 196780e675f9SEric Gouriou > le32_to_cpu(nearex->ee_block)) { 196880e675f9SEric Gouriou /* Insert after */ 196932de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d before: " 197032de6756SYongqiang Yang "nearest %p\n", 1971a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 1972bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 1973553f9008SMingming ext4_ext_is_uninitialized(newext), 1974a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 197580e675f9SEric Gouriou nearex); 197680e675f9SEric Gouriou nearex++; 197780e675f9SEric Gouriou } else { 197880e675f9SEric Gouriou /* Insert before */ 197980e675f9SEric Gouriou BUG_ON(newext->ee_block == nearex->ee_block); 198032de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d after: " 198132de6756SYongqiang Yang "nearest %p\n", 198280e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 198380e675f9SEric Gouriou ext4_ext_pblock(newext), 198480e675f9SEric Gouriou ext4_ext_is_uninitialized(newext), 198580e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 198680e675f9SEric Gouriou nearex); 198780e675f9SEric Gouriou } 198880e675f9SEric Gouriou len = EXT_LAST_EXTENT(eh) - nearex + 1; 198980e675f9SEric Gouriou if (len > 0) { 199032de6756SYongqiang Yang ext_debug("insert %u:%llu:[%d]%d: " 199180e675f9SEric Gouriou "move %d extents from 0x%p to 0x%p\n", 199280e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 199380e675f9SEric Gouriou ext4_ext_pblock(newext), 199480e675f9SEric Gouriou ext4_ext_is_uninitialized(newext), 199580e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 199680e675f9SEric Gouriou len, nearex, nearex + 1); 199780e675f9SEric Gouriou memmove(nearex + 1, nearex, 199880e675f9SEric Gouriou len * sizeof(struct ext4_extent)); 199980e675f9SEric Gouriou } 2000a86c6181SAlex Tomas } 2001a86c6181SAlex Tomas 2002e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1); 200380e675f9SEric Gouriou path[depth].p_ext = nearex; 2004a86c6181SAlex Tomas nearex->ee_block = newext->ee_block; 2005bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 2006a86c6181SAlex Tomas nearex->ee_len = newext->ee_len; 2007a86c6181SAlex Tomas 2008a86c6181SAlex Tomas merge: 2009e7bcf823SHaiboLiu /* try to merge extents */ 2010744692dcSJiaying Zhang if 
(!(flag & EXT4_GET_BLOCKS_PRE_IO)) 2011ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, nearex); 2012a86c6181SAlex Tomas 2013a86c6181SAlex Tomas 2014a86c6181SAlex Tomas /* time to correct all indexes above */ 2015a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2016a86c6181SAlex Tomas if (err) 2017a86c6181SAlex Tomas goto cleanup; 2018a86c6181SAlex Tomas 2019ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2020a86c6181SAlex Tomas 2021a86c6181SAlex Tomas cleanup: 2022a86c6181SAlex Tomas if (npath) { 2023a86c6181SAlex Tomas ext4_ext_drop_refs(npath); 2024a86c6181SAlex Tomas kfree(npath); 2025a86c6181SAlex Tomas } 2026a86c6181SAlex Tomas return err; 2027a86c6181SAlex Tomas } 2028a86c6181SAlex Tomas 202991dd8c11SLukas Czerner static int ext4_fill_fiemap_extents(struct inode *inode, 203091dd8c11SLukas Czerner ext4_lblk_t block, ext4_lblk_t num, 203191dd8c11SLukas Czerner struct fiemap_extent_info *fieinfo) 20326873fa0dSEric Sandeen { 20336873fa0dSEric Sandeen struct ext4_ext_path *path = NULL; 20346873fa0dSEric Sandeen struct ext4_extent *ex; 203569eb33dcSZheng Liu struct extent_status es; 203691dd8c11SLukas Czerner ext4_lblk_t next, next_del, start = 0, end = 0; 20376873fa0dSEric Sandeen ext4_lblk_t last = block + num; 203891dd8c11SLukas Czerner int exists, depth = 0, err = 0; 203991dd8c11SLukas Czerner unsigned int flags = 0; 204091dd8c11SLukas Czerner unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; 20416873fa0dSEric Sandeen 2042f17722f9SLukas Czerner while (block < last && block != EXT_MAX_BLOCKS) { 20436873fa0dSEric Sandeen num = last - block; 20446873fa0dSEric Sandeen /* find extent for this block */ 2045fab3a549STheodore Ts'o down_read(&EXT4_I(inode)->i_data_sem); 204691dd8c11SLukas Czerner 204791dd8c11SLukas Czerner if (path && ext_depth(inode) != depth) { 204891dd8c11SLukas Czerner /* depth was changed. 
we have to realloc path */ 204991dd8c11SLukas Czerner kfree(path); 205091dd8c11SLukas Czerner path = NULL; 205191dd8c11SLukas Czerner } 205291dd8c11SLukas Czerner 20536873fa0dSEric Sandeen path = ext4_ext_find_extent(inode, block, path); 20546873fa0dSEric Sandeen if (IS_ERR(path)) { 205591dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 20566873fa0dSEric Sandeen err = PTR_ERR(path); 20576873fa0dSEric Sandeen path = NULL; 20586873fa0dSEric Sandeen break; 20596873fa0dSEric Sandeen } 20606873fa0dSEric Sandeen 20616873fa0dSEric Sandeen depth = ext_depth(inode); 2062273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 206391dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 2064273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2065273df556SFrank Mayhar err = -EIO; 2066273df556SFrank Mayhar break; 2067273df556SFrank Mayhar } 20686873fa0dSEric Sandeen ex = path[depth].p_ext; 20696873fa0dSEric Sandeen next = ext4_ext_next_allocated_block(path); 207091dd8c11SLukas Czerner ext4_ext_drop_refs(path); 20716873fa0dSEric Sandeen 207291dd8c11SLukas Czerner flags = 0; 20736873fa0dSEric Sandeen exists = 0; 20746873fa0dSEric Sandeen if (!ex) { 20756873fa0dSEric Sandeen /* there is no extent yet, so try to allocate 20766873fa0dSEric Sandeen * all requested space */ 20776873fa0dSEric Sandeen start = block; 20786873fa0dSEric Sandeen end = block + num; 20796873fa0dSEric Sandeen } else if (le32_to_cpu(ex->ee_block) > block) { 20806873fa0dSEric Sandeen /* need to allocate space before found extent */ 20816873fa0dSEric Sandeen start = block; 20826873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block); 20836873fa0dSEric Sandeen if (block + num < end) 20846873fa0dSEric Sandeen end = block + num; 20856873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block) 20866873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex)) { 20876873fa0dSEric Sandeen /* need to allocate space after found extent */ 20886873fa0dSEric Sandeen start = block; 20896873fa0dSEric Sandeen end = block + num; 20906873fa0dSEric Sandeen if (end >= next) 20916873fa0dSEric Sandeen end = next; 20926873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block)) { 20936873fa0dSEric Sandeen /* 20946873fa0dSEric Sandeen * some part of requested space is covered 20956873fa0dSEric Sandeen * by found extent 20966873fa0dSEric Sandeen */ 20976873fa0dSEric Sandeen start = block; 20986873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block) 20996873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex); 21006873fa0dSEric Sandeen if (block + num < end) 21016873fa0dSEric Sandeen end = block + num; 21026873fa0dSEric Sandeen exists = 1; 21036873fa0dSEric Sandeen } else { 21046873fa0dSEric Sandeen BUG(); 21056873fa0dSEric Sandeen } 21066873fa0dSEric Sandeen BUG_ON(end <= start); 21076873fa0dSEric Sandeen 21086873fa0dSEric Sandeen if (!exists) { 210969eb33dcSZheng Liu es.es_lblk = start; 211069eb33dcSZheng Liu es.es_len = end - start; 211169eb33dcSZheng Liu es.es_pblk = 0; 21126873fa0dSEric Sandeen } else { 211369eb33dcSZheng Liu es.es_lblk = le32_to_cpu(ex->ee_block); 211469eb33dcSZheng Liu es.es_len = ext4_ext_get_actual_len(ex); 211569eb33dcSZheng Liu es.es_pblk = ext4_ext_pblock(ex); 211691dd8c11SLukas Czerner if (ext4_ext_is_uninitialized(ex)) 211791dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_UNWRITTEN; 21186873fa0dSEric Sandeen } 21196873fa0dSEric Sandeen 212091dd8c11SLukas Czerner /* 212169eb33dcSZheng Liu * Find delayed extent and update es accordingly. 
We call 212269eb33dcSZheng Liu * it even in !exists case to find out whether es is the 212391dd8c11SLukas Czerner * last existing extent or not. 212491dd8c11SLukas Czerner */ 212569eb33dcSZheng Liu next_del = ext4_find_delayed_extent(inode, &es); 212691dd8c11SLukas Czerner if (!exists && next_del) { 212791dd8c11SLukas Czerner exists = 1; 212891dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_DELALLOC; 212991dd8c11SLukas Czerner } 213091dd8c11SLukas Czerner up_read(&EXT4_I(inode)->i_data_sem); 213191dd8c11SLukas Czerner 213269eb33dcSZheng Liu if (unlikely(es.es_len == 0)) { 213369eb33dcSZheng Liu EXT4_ERROR_INODE(inode, "es.es_len == 0"); 2134273df556SFrank Mayhar err = -EIO; 2135273df556SFrank Mayhar break; 2136273df556SFrank Mayhar } 21376873fa0dSEric Sandeen 2138f7fec032SZheng Liu /* 2139f7fec032SZheng Liu * This is possible iff next == next_del == EXT_MAX_BLOCKS. 2140f7fec032SZheng Liu * we need to check next == EXT_MAX_BLOCKS because it is 2141f7fec032SZheng Liu * possible that an extent is with unwritten and delayed 2142f7fec032SZheng Liu * status due to when an extent is delayed allocated and 2143f7fec032SZheng Liu * is allocated by fallocate status tree will track both of 2144f7fec032SZheng Liu * them in a extent. 2145f7fec032SZheng Liu * 2146f7fec032SZheng Liu * So we could return a unwritten and delayed extent, and 2147f7fec032SZheng Liu * its block is equal to 'next'. 2148f7fec032SZheng Liu */ 2149f7fec032SZheng Liu if (next == next_del && next == EXT_MAX_BLOCKS) { 215091dd8c11SLukas Czerner flags |= FIEMAP_EXTENT_LAST; 215191dd8c11SLukas Czerner if (unlikely(next_del != EXT_MAX_BLOCKS || 215291dd8c11SLukas Czerner next != EXT_MAX_BLOCKS)) { 215391dd8c11SLukas Czerner EXT4_ERROR_INODE(inode, 215491dd8c11SLukas Czerner "next extent == %u, next " 215591dd8c11SLukas Czerner "delalloc extent = %u", 215691dd8c11SLukas Czerner next, next_del); 215791dd8c11SLukas Czerner err = -EIO; 215891dd8c11SLukas Czerner break; 215991dd8c11SLukas Czerner } 216091dd8c11SLukas Czerner } 216191dd8c11SLukas Czerner 216291dd8c11SLukas Czerner if (exists) { 216391dd8c11SLukas Czerner err = fiemap_fill_next_extent(fieinfo, 216469eb33dcSZheng Liu (__u64)es.es_lblk << blksize_bits, 216569eb33dcSZheng Liu (__u64)es.es_pblk << blksize_bits, 216669eb33dcSZheng Liu (__u64)es.es_len << blksize_bits, 216791dd8c11SLukas Czerner flags); 21686873fa0dSEric Sandeen if (err < 0) 21696873fa0dSEric Sandeen break; 217091dd8c11SLukas Czerner if (err == 1) { 21716873fa0dSEric Sandeen err = 0; 21726873fa0dSEric Sandeen break; 21736873fa0dSEric Sandeen } 21746873fa0dSEric Sandeen } 21756873fa0dSEric Sandeen 217669eb33dcSZheng Liu block = es.es_lblk + es.es_len; 21776873fa0dSEric Sandeen } 21786873fa0dSEric Sandeen 21796873fa0dSEric Sandeen if (path) { 21806873fa0dSEric Sandeen ext4_ext_drop_refs(path); 21816873fa0dSEric Sandeen kfree(path); 21826873fa0dSEric Sandeen } 21836873fa0dSEric Sandeen 21846873fa0dSEric Sandeen return err; 21856873fa0dSEric Sandeen } 21866873fa0dSEric Sandeen 2187a86c6181SAlex Tomas /* 2188d0d856e8SRandy Dunlap * ext4_ext_put_gap_in_cache: 2189d0d856e8SRandy Dunlap * calculate boundaries of the gap that the requested block fits into 2190a86c6181SAlex Tomas * and cache this gap 2191a86c6181SAlex Tomas */ 219209b88252SAvantika Mathur static void 2193a86c6181SAlex Tomas ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, 2194725d26d3SAneesh Kumar K.V ext4_lblk_t block) 2195a86c6181SAlex Tomas { 2196a86c6181SAlex Tomas int depth = ext_depth(inode); 2197725d26d3SAneesh Kumar K.V unsigned 
long len; 2198725d26d3SAneesh Kumar K.V ext4_lblk_t lblock; 2199a86c6181SAlex Tomas struct ext4_extent *ex; 2200a86c6181SAlex Tomas 2201a86c6181SAlex Tomas ex = path[depth].p_ext; 2202a86c6181SAlex Tomas if (ex == NULL) { 220369eb33dcSZheng Liu /* 220469eb33dcSZheng Liu * there is no extent yet, so gap is [0;-] and we 220569eb33dcSZheng Liu * don't cache it 220669eb33dcSZheng Liu */ 2207a86c6181SAlex Tomas ext_debug("cache gap(whole file):"); 2208a86c6181SAlex Tomas } else if (block < le32_to_cpu(ex->ee_block)) { 2209a86c6181SAlex Tomas lblock = block; 2210a86c6181SAlex Tomas len = le32_to_cpu(ex->ee_block) - block; 2211bba90743SEric Sandeen ext_debug("cache gap(before): %u [%u:%u]", 2212bba90743SEric Sandeen block, 2213bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2214bba90743SEric Sandeen ext4_ext_get_actual_len(ex)); 2215d100eef2SZheng Liu if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2216d100eef2SZheng Liu ext4_es_insert_extent(inode, lblock, len, ~0, 2217d100eef2SZheng Liu EXTENT_STATUS_HOLE); 2218a86c6181SAlex Tomas } else if (block >= le32_to_cpu(ex->ee_block) 2219a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex)) { 2220725d26d3SAneesh Kumar K.V ext4_lblk_t next; 2221a86c6181SAlex Tomas lblock = le32_to_cpu(ex->ee_block) 2222a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex); 2223725d26d3SAneesh Kumar K.V 2224725d26d3SAneesh Kumar K.V next = ext4_ext_next_allocated_block(path); 2225bba90743SEric Sandeen ext_debug("cache gap(after): [%u:%u] %u", 2226bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2227bba90743SEric Sandeen ext4_ext_get_actual_len(ex), 2228bba90743SEric Sandeen block); 2229725d26d3SAneesh Kumar K.V BUG_ON(next == lblock); 2230725d26d3SAneesh Kumar K.V len = next - lblock; 2231d100eef2SZheng Liu if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1)) 2232d100eef2SZheng Liu ext4_es_insert_extent(inode, lblock, len, ~0, 2233d100eef2SZheng Liu EXTENT_STATUS_HOLE); 2234a86c6181SAlex Tomas } else { 2235a86c6181SAlex Tomas lblock = len = 0; 2236a86c6181SAlex Tomas BUG(); 2237a86c6181SAlex Tomas } 2238a86c6181SAlex Tomas 2239bba90743SEric Sandeen ext_debug(" -> %u:%lu\n", lblock, len); 2240a86c6181SAlex Tomas } 2241a86c6181SAlex Tomas 2242a86c6181SAlex Tomas /* 2243d0d856e8SRandy Dunlap * ext4_ext_rm_idx: 2244d0d856e8SRandy Dunlap * removes index from the index block. 
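 *
 * the index entry selected by the path is dropped by shifting the entries
 * after it down, the tree block it pointed to is freed, and if it was the
 * first entry of its block the new first logical block is propagated to
 * the parent levels, as in ext4_ext_correct_indexes().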
2245a86c6181SAlex Tomas */ 22461d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2247c36575e6SForrest Liu struct ext4_ext_path *path, int depth) 2248a86c6181SAlex Tomas { 2249a86c6181SAlex Tomas int err; 2250f65e6fbaSAlex Tomas ext4_fsblk_t leaf; 2251a86c6181SAlex Tomas 2252a86c6181SAlex Tomas /* free index block */ 2253c36575e6SForrest Liu depth--; 2254c36575e6SForrest Liu path = path + depth; 2255bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx); 2256273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) { 2257273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2258273df556SFrank Mayhar return -EIO; 2259273df556SFrank Mayhar } 22607e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path); 22617e028976SAvantika Mathur if (err) 2262a86c6181SAlex Tomas return err; 22630e1147b0SRobin Dong 22640e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 22650e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 22660e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx); 22670e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len); 22680e1147b0SRobin Dong } 22690e1147b0SRobin Dong 2270e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1); 22717e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path); 22727e028976SAvantika Mathur if (err) 2273a86c6181SAlex Tomas return err; 22742ae02107SMingming Cao ext_debug("index is empty, remove it, free block %llu\n", leaf); 2275d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf); 2276d8990240SAditya Kali 22777dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1, 2278e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2279c36575e6SForrest Liu 2280c36575e6SForrest Liu while (--depth >= 0) { 2281c36575e6SForrest Liu if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr)) 2282c36575e6SForrest Liu break; 2283c36575e6SForrest Liu path--; 2284c36575e6SForrest Liu err = ext4_ext_get_access(handle, inode, path); 2285c36575e6SForrest Liu if (err) 2286c36575e6SForrest Liu break; 2287c36575e6SForrest Liu path->p_idx->ei_block = (path+1)->p_idx->ei_block; 2288c36575e6SForrest Liu err = ext4_ext_dirty(handle, inode, path); 2289c36575e6SForrest Liu if (err) 2290c36575e6SForrest Liu break; 2291c36575e6SForrest Liu } 2292a86c6181SAlex Tomas return err; 2293a86c6181SAlex Tomas } 2294a86c6181SAlex Tomas 2295a86c6181SAlex Tomas /* 2296ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent: 2297ee12b630SMingming Cao * This routine returns max. credits that needed to insert an extent 2298ee12b630SMingming Cao * to the extent tree. 2299ee12b630SMingming Cao * When pass the actual path, the caller should calculate credits 2300ee12b630SMingming Cao * under i_data_sem. 2301a86c6181SAlex Tomas */ 2302525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2303a86c6181SAlex Tomas struct ext4_ext_path *path) 2304a86c6181SAlex Tomas { 2305a86c6181SAlex Tomas if (path) { 2306ee12b630SMingming Cao int depth = ext_depth(inode); 2307f3bd1f3fSMingming Cao int ret = 0; 2308ee12b630SMingming Cao 2309a86c6181SAlex Tomas /* probably there is space in leaf? 
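 *
 * if it does, only the leaf block itself changes, so the estimate is
 * 1 bitmap + 1 group descriptor plus EXT4_META_TRANS_BLOCKS() for the
 * other metadata; otherwise the generic ext4_chunk_trans_blocks()
 * estimate below is used.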
*/ 2310a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2311ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2312ee12b630SMingming Cao 2313ee12b630SMingming Cao /* 2314ee12b630SMingming Cao * There are some space in the leaf tree, no 2315ee12b630SMingming Cao * need to account for leaf block credit 2316ee12b630SMingming Cao * 2317ee12b630SMingming Cao * bitmaps and block group descriptor blocks 2318df3ab170STao Ma * and other metadata blocks still need to be 2319ee12b630SMingming Cao * accounted. 2320ee12b630SMingming Cao */ 2321525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */ 2322ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 23235887e98bSAneesh Kumar K.V return ret; 2324ee12b630SMingming Cao } 2325ee12b630SMingming Cao } 2326ee12b630SMingming Cao 2327525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks); 2328a86c6181SAlex Tomas } 2329a86c6181SAlex Tomas 2330a86c6181SAlex Tomas /* 2331fffb2739SJan Kara * How many index/leaf blocks need to change/allocate to add @extents extents? 2332ee12b630SMingming Cao * 2333fffb2739SJan Kara * If we add a single extent, then in the worse case, each tree level 2334fffb2739SJan Kara * index/leaf need to be changed in case of the tree split. 2335ee12b630SMingming Cao * 2336fffb2739SJan Kara * If more extents are inserted, they could cause the whole tree split more 2337fffb2739SJan Kara * than once, but this is really rare. 2338a86c6181SAlex Tomas */ 2339fffb2739SJan Kara int ext4_ext_index_trans_blocks(struct inode *inode, int extents) 2340ee12b630SMingming Cao { 2341ee12b630SMingming Cao int index; 2342f19d5870STao Ma int depth; 2343f19d5870STao Ma 2344f19d5870STao Ma /* If we are converting the inline data, only one is needed here. */ 2345f19d5870STao Ma if (ext4_has_inline_data(inode)) 2346f19d5870STao Ma return 1; 2347f19d5870STao Ma 2348f19d5870STao Ma depth = ext_depth(inode); 2349a86c6181SAlex Tomas 2350fffb2739SJan Kara if (extents <= 1) 2351ee12b630SMingming Cao index = depth * 2; 2352ee12b630SMingming Cao else 2353ee12b630SMingming Cao index = depth * 3; 2354a86c6181SAlex Tomas 2355ee12b630SMingming Cao return index; 2356a86c6181SAlex Tomas } 2357a86c6181SAlex Tomas 2358a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2359a86c6181SAlex Tomas struct ext4_extent *ex, 2360d23142c6SLukas Czerner long long *partial_cluster, 2361725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to) 2362a86c6181SAlex Tomas { 23630aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2364a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex); 23650aa06000STheodore Ts'o ext4_fsblk_t pblk; 236618888cf0SAndrey Sidorov int flags = 0; 2367a86c6181SAlex Tomas 2368c9de560dSAlex Tomas if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 236918888cf0SAndrey Sidorov flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; 237018888cf0SAndrey Sidorov else if (ext4_should_journal_data(inode)) 237118888cf0SAndrey Sidorov flags |= EXT4_FREE_BLOCKS_FORGET; 237218888cf0SAndrey Sidorov 23730aa06000STheodore Ts'o /* 23740aa06000STheodore Ts'o * For bigalloc file systems, we never free a partial cluster 23750aa06000STheodore Ts'o * at the beginning of the extent. 
Instead, we make a note 23760aa06000STheodore Ts'o * that we tried freeing the cluster, and check to see if we 23770aa06000STheodore Ts'o * need to free it on a subsequent call to ext4_remove_blocks, 23780aa06000STheodore Ts'o * or at the end of the ext4_truncate() operation. 23790aa06000STheodore Ts'o */ 23800aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 23810aa06000STheodore Ts'o 2382d8990240SAditya Kali trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); 23830aa06000STheodore Ts'o /* 23840aa06000STheodore Ts'o * If we have a partial cluster, and it's different from the 23850aa06000STheodore Ts'o * cluster of the last block, we need to explicitly free the 23860aa06000STheodore Ts'o * partial cluster here. 23870aa06000STheodore Ts'o */ 23880aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - 1; 2389d23142c6SLukas Czerner if ((*partial_cluster > 0) && 2390d23142c6SLukas Czerner (EXT4_B2C(sbi, pblk) != *partial_cluster)) { 23910aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 23920aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 23930aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 23940aa06000STheodore Ts'o *partial_cluster = 0; 23950aa06000STheodore Ts'o } 23960aa06000STheodore Ts'o 2397a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2398a86c6181SAlex Tomas { 2399a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2400a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock); 2401a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len; 2402a86c6181SAlex Tomas sbi->s_ext_extents++; 2403a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min) 2404a86c6181SAlex Tomas sbi->s_ext_min = ee_len; 2405a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max) 2406a86c6181SAlex Tomas sbi->s_ext_max = ee_len; 2407a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max) 2408a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode); 2409a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock); 2410a86c6181SAlex Tomas } 2411a86c6181SAlex Tomas #endif 2412a86c6181SAlex Tomas if (from >= le32_to_cpu(ex->ee_block) 2413a2df2a63SAmit Arora && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2414a86c6181SAlex Tomas /* tail removal */ 2415725d26d3SAneesh Kumar K.V ext4_lblk_t num; 2416d23142c6SLukas Czerner unsigned int unaligned; 2417725d26d3SAneesh Kumar K.V 2418a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from; 24190aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num; 2420d23142c6SLukas Czerner /* 2421d23142c6SLukas Czerner * Usually we want to free partial cluster at the end of the 2422d23142c6SLukas Czerner * extent, except for the situation when the cluster is still 2423d23142c6SLukas Czerner * used by any other extent (partial_cluster is negative). 
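 *
 * Illustrative example (an assumption for clarity, not from the
 * original comment): with s_cluster_ratio = 16, freeing blocks that
 * start at physical block 120 (the tail of an extent) touches
 * cluster 7 (blocks 112..127) only partially.  If that range covers
 * the whole extent we remember +7 in *partial_cluster (unless the
 * cluster is already marked in use) so a later call, or the end of
 * truncate, can free the cluster; if the head of the extent survives
 * we store -7 to note that the cluster is still in use.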
2424d23142c6SLukas Czerner */ 2425d23142c6SLukas Czerner if (*partial_cluster < 0 && 2426d23142c6SLukas Czerner -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1)) 2427d23142c6SLukas Czerner flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2428d23142c6SLukas Czerner 2429d23142c6SLukas Czerner ext_debug("free last %u blocks starting %llu partial %lld\n", 2430d23142c6SLukas Czerner num, pblk, *partial_cluster); 24310aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 24320aa06000STheodore Ts'o /* 24330aa06000STheodore Ts'o * If the block range to be freed didn't start at the 24340aa06000STheodore Ts'o * beginning of a cluster, and we removed the entire 2435d23142c6SLukas Czerner * extent and the cluster is not used by any other extent, 2436d23142c6SLukas Czerner * save the partial cluster here, since we might need to 2437d23142c6SLukas Czerner * delete if we determine that the truncate operation has 2438d23142c6SLukas Czerner * removed all of the blocks in the cluster. 2439d23142c6SLukas Czerner * 2440d23142c6SLukas Czerner * On the other hand, if we did not manage to free the whole 2441d23142c6SLukas Czerner * extent, we have to mark the cluster as used (store negative 2442d23142c6SLukas Czerner * cluster number in partial_cluster). 24430aa06000STheodore Ts'o */ 2444d23142c6SLukas Czerner unaligned = pblk & (sbi->s_cluster_ratio - 1); 2445d23142c6SLukas Czerner if (unaligned && (ee_len == num) && 2446d23142c6SLukas Czerner (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) 24470aa06000STheodore Ts'o *partial_cluster = EXT4_B2C(sbi, pblk); 2448d23142c6SLukas Czerner else if (unaligned) 2449d23142c6SLukas Czerner *partial_cluster = -((long long)EXT4_B2C(sbi, pblk)); 2450d23142c6SLukas Czerner else if (*partial_cluster > 0) 24510aa06000STheodore Ts'o *partial_cluster = 0; 245278fb9cdfSLukas Czerner } else 245378fb9cdfSLukas Czerner ext4_error(sbi->s_sb, "strange request: removal(2) " 2454725d26d3SAneesh Kumar K.V "%u-%u from %u:%u\n", 2455a2df2a63SAmit Arora from, to, le32_to_cpu(ex->ee_block), ee_len); 2456a86c6181SAlex Tomas return 0; 2457a86c6181SAlex Tomas } 2458a86c6181SAlex Tomas 2459d583fb87SAllison Henderson 2460d583fb87SAllison Henderson /* 2461d583fb87SAllison Henderson * ext4_ext_rm_leaf() Removes the extents associated with the 2462d583fb87SAllison Henderson * blocks appearing between "start" and "end", and splits the extents 2463d583fb87SAllison Henderson * if "start" and "end" appear in the same extent 2464d583fb87SAllison Henderson * 2465d583fb87SAllison Henderson * @handle: The journal handle 2466d583fb87SAllison Henderson * @inode: The files inode 2467d583fb87SAllison Henderson * @path: The path to the leaf 2468d23142c6SLukas Czerner * @partial_cluster: The cluster which we'll have to free if all extents 2469d23142c6SLukas Czerner * has been released from it. It gets negative in case 2470d23142c6SLukas Czerner * that the cluster is still used. 
2471d583fb87SAllison Henderson * @start: The first block to remove 2472d583fb87SAllison Henderson * @end: The last block to remove 2473d583fb87SAllison Henderson */ 2474a86c6181SAlex Tomas static int 2475a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 2476d23142c6SLukas Czerner struct ext4_ext_path *path, 2477d23142c6SLukas Czerner long long *partial_cluster, 24780aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end) 2479a86c6181SAlex Tomas { 24800aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2481a86c6181SAlex Tomas int err = 0, correct_index = 0; 2482a86c6181SAlex Tomas int depth = ext_depth(inode), credits; 2483a86c6181SAlex Tomas struct ext4_extent_header *eh; 2484750c9c47SDmitry Monakhov ext4_lblk_t a, b; 2485725d26d3SAneesh Kumar K.V unsigned num; 2486725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block; 2487a86c6181SAlex Tomas unsigned short ex_ee_len; 2488a2df2a63SAmit Arora unsigned uninitialized = 0; 2489a86c6181SAlex Tomas struct ext4_extent *ex; 2490d23142c6SLukas Czerner ext4_fsblk_t pblk; 2491a86c6181SAlex Tomas 2492c29c0ae7SAlex Tomas /* the header must be checked already in ext4_ext_remove_space() */ 24935f95d21fSLukas Czerner ext_debug("truncate since %u in leaf to %u\n", start, end); 2494a86c6181SAlex Tomas if (!path[depth].p_hdr) 2495a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2496a86c6181SAlex Tomas eh = path[depth].p_hdr; 2497273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 2498273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2499273df556SFrank Mayhar return -EIO; 2500273df556SFrank Mayhar } 2501a86c6181SAlex Tomas /* find where to start removing */ 2502a86c6181SAlex Tomas ex = EXT_LAST_EXTENT(eh); 2503a86c6181SAlex Tomas 2504a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2505a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2506a86c6181SAlex Tomas 2507d8990240SAditya Kali trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); 2508d8990240SAditya Kali 2509a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) && 2510a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) { 2511a41f2071SAneesh Kumar K.V 2512a41f2071SAneesh Kumar K.V if (ext4_ext_is_uninitialized(ex)) 2513a41f2071SAneesh Kumar K.V uninitialized = 1; 2514a41f2071SAneesh Kumar K.V else 2515a41f2071SAneesh Kumar K.V uninitialized = 0; 2516a41f2071SAneesh Kumar K.V 2517553f9008SMingming ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2518553f9008SMingming uninitialized, ex_ee_len); 2519a86c6181SAlex Tomas path[depth].p_ext = ex; 2520a86c6181SAlex Tomas 2521a86c6181SAlex Tomas a = ex_ee_block > start ? ex_ee_block : start; 2522d583fb87SAllison Henderson b = ex_ee_block+ex_ee_len - 1 < end ? 
2523d583fb87SAllison Henderson ex_ee_block+ex_ee_len - 1 : end; 2524a86c6181SAlex Tomas 2525a86c6181SAlex Tomas ext_debug(" border %u:%u\n", a, b); 2526a86c6181SAlex Tomas 2527d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */ 25285f95d21fSLukas Czerner if (end < ex_ee_block) { 2529d23142c6SLukas Czerner /* 2530d23142c6SLukas Czerner * We're going to skip this extent and move to another, 2531d23142c6SLukas Czerner * so if this extent is not cluster aligned we have 2532d23142c6SLukas Czerner * to mark the current cluster as used to avoid 2533d23142c6SLukas Czerner * accidentally freeing it later on 2534d23142c6SLukas Czerner */ 2535d23142c6SLukas Czerner pblk = ext4_ext_pblock(ex); 2536d23142c6SLukas Czerner if (pblk & (sbi->s_cluster_ratio - 1)) 2537d23142c6SLukas Czerner *partial_cluster = 2538d23142c6SLukas Czerner -((long long)EXT4_B2C(sbi, pblk)); 2539d583fb87SAllison Henderson ex--; 2540d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block); 2541d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex); 2542d583fb87SAllison Henderson continue; 2543750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) { 2544dc1841d6SLukas Czerner EXT4_ERROR_INODE(inode, 2545dc1841d6SLukas Czerner "can not handle truncate %u:%u " 2546dc1841d6SLukas Czerner "on extent %u:%u", 2547dc1841d6SLukas Czerner start, end, ex_ee_block, 2548dc1841d6SLukas Czerner ex_ee_block + ex_ee_len - 1); 2549d583fb87SAllison Henderson err = -EIO; 2550d583fb87SAllison Henderson goto out; 2551a86c6181SAlex Tomas } else if (a != ex_ee_block) { 2552a86c6181SAlex Tomas /* remove tail of the extent */ 2553750c9c47SDmitry Monakhov num = a - ex_ee_block; 2554a86c6181SAlex Tomas } else { 2555a86c6181SAlex Tomas /* remove whole extent: excellent! 
*/ 2556a86c6181SAlex Tomas num = 0; 2557d583fb87SAllison Henderson } 255834071da7STheodore Ts'o /* 255934071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group 256034071da7STheodore Ts'o * descriptor) for each block group; assume two block 256134071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for 256234071da7STheodore Ts'o * the worst case 256334071da7STheodore Ts'o */ 256434071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2565a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) { 2566a86c6181SAlex Tomas correct_index = 1; 2567a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1; 2568a86c6181SAlex Tomas } 25695aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2570a86c6181SAlex Tomas 2571487caeefSJan Kara err = ext4_ext_truncate_extend_restart(handle, inode, credits); 25729102e4faSShen Feng if (err) 2573a86c6181SAlex Tomas goto out; 2574a86c6181SAlex Tomas 2575a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth); 2576a86c6181SAlex Tomas if (err) 2577a86c6181SAlex Tomas goto out; 2578a86c6181SAlex Tomas 25790aa06000STheodore Ts'o err = ext4_remove_blocks(handle, inode, ex, partial_cluster, 25800aa06000STheodore Ts'o a, b); 2581a86c6181SAlex Tomas if (err) 2582a86c6181SAlex Tomas goto out; 2583a86c6181SAlex Tomas 2584750c9c47SDmitry Monakhov if (num == 0) 2585d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */ 2586f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0); 2587a86c6181SAlex Tomas 2588a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num); 2589749269faSAmit Arora /* 2590749269faSAmit Arora * Do not mark uninitialized if all the blocks in the 2591749269faSAmit Arora * extent have been removed. 2592749269faSAmit Arora */ 2593749269faSAmit Arora if (uninitialized && num) 2594a2df2a63SAmit Arora ext4_ext_mark_uninitialized(ex); 2595d583fb87SAllison Henderson /* 2596d583fb87SAllison Henderson * If the extent was completely released, 2597d583fb87SAllison Henderson * we need to remove it from the leaf 2598d583fb87SAllison Henderson */ 2599d583fb87SAllison Henderson if (num == 0) { 2600f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) { 2601d583fb87SAllison Henderson /* 2602d583fb87SAllison Henderson * For hole punching, we need to scoot all the 2603d583fb87SAllison Henderson * extents up when an extent is removed so that 2604d583fb87SAllison Henderson * we dont have blank extents in the middle 2605d583fb87SAllison Henderson */ 2606d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2607d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2608d583fb87SAllison Henderson 2609d583fb87SAllison Henderson /* Now get rid of the one at the end */ 2610d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0, 2611d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2612d583fb87SAllison Henderson } 2613d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1); 2614d23142c6SLukas Czerner } else if (*partial_cluster > 0) 26150aa06000STheodore Ts'o *partial_cluster = 0; 2616d583fb87SAllison Henderson 2617750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth); 2618750c9c47SDmitry Monakhov if (err) 2619750c9c47SDmitry Monakhov goto out; 2620750c9c47SDmitry Monakhov 2621bf52c6f7SYongqiang Yang ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2622bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 2623a86c6181SAlex Tomas ex--; 2624a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2625a2df2a63SAmit Arora 
ex_ee_len = ext4_ext_get_actual_len(ex); 2626a86c6181SAlex Tomas } 2627a86c6181SAlex Tomas 2628a86c6181SAlex Tomas if (correct_index && eh->eh_entries) 2629a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2630a86c6181SAlex Tomas 26310aa06000STheodore Ts'o /* 2632d23142c6SLukas Czerner * Free the partial cluster only if the current extent does not 2633d23142c6SLukas Czerner * reference it. Otherwise we might free used cluster. 26340aa06000STheodore Ts'o */ 2635d23142c6SLukas Czerner if (*partial_cluster > 0 && 26360aa06000STheodore Ts'o (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != 26370aa06000STheodore Ts'o *partial_cluster)) { 26380aa06000STheodore Ts'o int flags = EXT4_FREE_BLOCKS_FORGET; 26390aa06000STheodore Ts'o 26400aa06000STheodore Ts'o if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 26410aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_METADATA; 26420aa06000STheodore Ts'o 26430aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 26440aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 26450aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 26460aa06000STheodore Ts'o *partial_cluster = 0; 26470aa06000STheodore Ts'o } 26480aa06000STheodore Ts'o 2649a86c6181SAlex Tomas /* if this leaf is free, then we should 2650a86c6181SAlex Tomas * remove it from index block above */ 2651a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2652c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, depth); 2653a86c6181SAlex Tomas 2654a86c6181SAlex Tomas out: 2655a86c6181SAlex Tomas return err; 2656a86c6181SAlex Tomas } 2657a86c6181SAlex Tomas 2658a86c6181SAlex Tomas /* 2659d0d856e8SRandy Dunlap * ext4_ext_more_to_rm: 2660d0d856e8SRandy Dunlap * returns 1 if current index has to be freed (even partial) 2661a86c6181SAlex Tomas */ 266209b88252SAvantika Mathur static int 2663a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path) 2664a86c6181SAlex Tomas { 2665a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL); 2666a86c6181SAlex Tomas 2667a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2668a86c6181SAlex Tomas return 0; 2669a86c6181SAlex Tomas 2670a86c6181SAlex Tomas /* 2671d0d856e8SRandy Dunlap * if truncate on deeper level happened, it wasn't partial, 2672a86c6181SAlex Tomas * so we have to consider current index for truncation 2673a86c6181SAlex Tomas */ 2674a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2675a86c6181SAlex Tomas return 0; 2676a86c6181SAlex Tomas return 1; 2677a86c6181SAlex Tomas } 2678a86c6181SAlex Tomas 267926a4c0c6STheodore Ts'o int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 26805f95d21fSLukas Czerner ext4_lblk_t end) 2681a86c6181SAlex Tomas { 2682a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 2683a86c6181SAlex Tomas int depth = ext_depth(inode); 2684968dee77SAshish Sangwan struct ext4_ext_path *path = NULL; 2685d23142c6SLukas Czerner long long partial_cluster = 0; 2686a86c6181SAlex Tomas handle_t *handle; 26876f2080e6SDmitry Monakhov int i = 0, err = 0; 2688a86c6181SAlex Tomas 26895f95d21fSLukas Czerner ext_debug("truncate since %u to %u\n", start, end); 2690a86c6181SAlex Tomas 2691a86c6181SAlex Tomas /* probably first extent we're gonna free will be last in block */ 26929924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1); 2693a86c6181SAlex Tomas if (IS_ERR(handle)) 2694a86c6181SAlex Tomas return PTR_ERR(handle); 2695a86c6181SAlex Tomas 26960617b83fSDmitry Monakhov again: 
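	/*
	 * Restart point: ext4_ext_rm_leaf() may have had to restart the
	 * running transaction; when that bubbles up as -EAGAIN we jump
	 * back here and rebuild the extent path from scratch before
	 * continuing the removal.
	 */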
269761801325SLukas Czerner trace_ext4_ext_remove_space(inode, start, end, depth); 2698d8990240SAditya Kali 2699a86c6181SAlex Tomas /* 27005f95d21fSLukas Czerner * Check if we are removing extents inside the extent tree. If that 27015f95d21fSLukas Czerner * is the case, we are going to punch a hole inside the extent tree 27025f95d21fSLukas Czerner * so we have to check whether we need to split the extent covering 27035f95d21fSLukas Czerner * the last block to remove so we can easily remove the part of it 27045f95d21fSLukas Czerner * in ext4_ext_rm_leaf(). 27055f95d21fSLukas Czerner */ 27065f95d21fSLukas Czerner if (end < EXT_MAX_BLOCKS - 1) { 27075f95d21fSLukas Czerner struct ext4_extent *ex; 27085f95d21fSLukas Czerner ext4_lblk_t ee_block; 27095f95d21fSLukas Czerner 27105f95d21fSLukas Czerner /* find extent for this block */ 27115f95d21fSLukas Czerner path = ext4_ext_find_extent(inode, end, NULL); 27125f95d21fSLukas Czerner if (IS_ERR(path)) { 27135f95d21fSLukas Czerner ext4_journal_stop(handle); 27145f95d21fSLukas Czerner return PTR_ERR(path); 27155f95d21fSLukas Czerner } 27165f95d21fSLukas Czerner depth = ext_depth(inode); 27176f2080e6SDmitry Monakhov /* Leaf not may not exist only if inode has no blocks at all */ 27185f95d21fSLukas Czerner ex = path[depth].p_ext; 2719968dee77SAshish Sangwan if (!ex) { 27206f2080e6SDmitry Monakhov if (depth) { 27216f2080e6SDmitry Monakhov EXT4_ERROR_INODE(inode, 27226f2080e6SDmitry Monakhov "path[%d].p_hdr == NULL", 27236f2080e6SDmitry Monakhov depth); 27246f2080e6SDmitry Monakhov err = -EIO; 27256f2080e6SDmitry Monakhov } 27266f2080e6SDmitry Monakhov goto out; 2727968dee77SAshish Sangwan } 27285f95d21fSLukas Czerner 27295f95d21fSLukas Czerner ee_block = le32_to_cpu(ex->ee_block); 27305f95d21fSLukas Czerner 27315f95d21fSLukas Czerner /* 27325f95d21fSLukas Czerner * See if the last block is inside the extent, if so split 27335f95d21fSLukas Czerner * the extent at 'end' block so we can easily remove the 27345f95d21fSLukas Czerner * tail of the first part of the split extent in 27355f95d21fSLukas Czerner * ext4_ext_rm_leaf(). 27365f95d21fSLukas Czerner */ 27375f95d21fSLukas Czerner if (end >= ee_block && 27385f95d21fSLukas Czerner end < ee_block + ext4_ext_get_actual_len(ex) - 1) { 27395f95d21fSLukas Czerner int split_flag = 0; 27405f95d21fSLukas Czerner 27415f95d21fSLukas Czerner if (ext4_ext_is_uninitialized(ex)) 27425f95d21fSLukas Czerner split_flag = EXT4_EXT_MARK_UNINIT1 | 27435f95d21fSLukas Czerner EXT4_EXT_MARK_UNINIT2; 27445f95d21fSLukas Czerner 27455f95d21fSLukas Czerner /* 27465f95d21fSLukas Czerner * Split the extent in two so that 'end' is the last 274727dd4385SLukas Czerner * block in the first new extent. Also we should not 274827dd4385SLukas Czerner * fail removing space due to ENOSPC so try to use 274927dd4385SLukas Czerner * reserved block if that happens. 27505f95d21fSLukas Czerner */ 27515f95d21fSLukas Czerner err = ext4_split_extent_at(handle, inode, path, 27525f95d21fSLukas Czerner end + 1, split_flag, 27535f95d21fSLukas Czerner EXT4_GET_BLOCKS_PRE_IO | 275427dd4385SLukas Czerner EXT4_GET_BLOCKS_METADATA_NOFAIL); 27555f95d21fSLukas Czerner 27565f95d21fSLukas Czerner if (err < 0) 27575f95d21fSLukas Czerner goto out; 27585f95d21fSLukas Czerner } 27595f95d21fSLukas Czerner } 27605f95d21fSLukas Czerner /* 2761d0d856e8SRandy Dunlap * We start scanning from right side, freeing all the blocks 2762d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise. 
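 *
 * For example (illustrative), with a tree of depth 2 we descend from
 * the root index to the rightmost leaf, free extents there, then move
 * to the previous index entry one level up and descend again, until
 * every leaf past the truncation point has been visited.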
2763a86c6181SAlex Tomas */ 27640617b83fSDmitry Monakhov depth = ext_depth(inode); 2765968dee77SAshish Sangwan if (path) { 2766968dee77SAshish Sangwan int k = i = depth; 2767968dee77SAshish Sangwan while (--k > 0) 2768968dee77SAshish Sangwan path[k].p_block = 2769968dee77SAshish Sangwan le16_to_cpu(path[k].p_hdr->eh_entries)+1; 2770968dee77SAshish Sangwan } else { 2771968dee77SAshish Sangwan path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), 2772968dee77SAshish Sangwan GFP_NOFS); 2773a86c6181SAlex Tomas if (path == NULL) { 2774a86c6181SAlex Tomas ext4_journal_stop(handle); 2775a86c6181SAlex Tomas return -ENOMEM; 2776a86c6181SAlex Tomas } 27770617b83fSDmitry Monakhov path[0].p_depth = depth; 2778a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode); 277989a4e48fSTheodore Ts'o i = 0; 27805f95d21fSLukas Czerner 278156b19868SAneesh Kumar K.V if (ext4_ext_check(inode, path[0].p_hdr, depth)) { 2782a86c6181SAlex Tomas err = -EIO; 2783a86c6181SAlex Tomas goto out; 2784a86c6181SAlex Tomas } 2785968dee77SAshish Sangwan } 2786968dee77SAshish Sangwan err = 0; 2787a86c6181SAlex Tomas 2788a86c6181SAlex Tomas while (i >= 0 && err == 0) { 2789a86c6181SAlex Tomas if (i == depth) { 2790a86c6181SAlex Tomas /* this is leaf block */ 2791d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 27920aa06000STheodore Ts'o &partial_cluster, start, 27935f95d21fSLukas Czerner end); 2794d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2795a86c6181SAlex Tomas brelse(path[i].p_bh); 2796a86c6181SAlex Tomas path[i].p_bh = NULL; 2797a86c6181SAlex Tomas i--; 2798a86c6181SAlex Tomas continue; 2799a86c6181SAlex Tomas } 2800a86c6181SAlex Tomas 2801a86c6181SAlex Tomas /* this is index block */ 2802a86c6181SAlex Tomas if (!path[i].p_hdr) { 2803a86c6181SAlex Tomas ext_debug("initialize header\n"); 2804a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2805a86c6181SAlex Tomas } 2806a86c6181SAlex Tomas 2807a86c6181SAlex Tomas if (!path[i].p_idx) { 2808d0d856e8SRandy Dunlap /* this level hasn't been touched yet */ 2809a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2810a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2811a86c6181SAlex Tomas ext_debug("init index ptr: hdr 0x%p, num %d\n", 2812a86c6181SAlex Tomas path[i].p_hdr, 2813a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries)); 2814a86c6181SAlex Tomas } else { 2815d0d856e8SRandy Dunlap /* we were already here, see at next index */ 2816a86c6181SAlex Tomas path[i].p_idx--; 2817a86c6181SAlex Tomas } 2818a86c6181SAlex Tomas 2819a86c6181SAlex Tomas ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2820a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr), 2821a86c6181SAlex Tomas path[i].p_idx); 2822a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) { 2823c29c0ae7SAlex Tomas struct buffer_head *bh; 2824a86c6181SAlex Tomas /* go to the next level */ 28252ae02107SMingming Cao ext_debug("move to level %d (block %llu)\n", 2826bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx)); 2827a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path)); 2828bf89d16fSTheodore Ts'o bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); 2829c29c0ae7SAlex Tomas if (!bh) { 2830a86c6181SAlex Tomas /* should we reset i_size? 
*/ 2831a86c6181SAlex Tomas err = -EIO; 2832a86c6181SAlex Tomas break; 2833a86c6181SAlex Tomas } 2834c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) { 2835c29c0ae7SAlex Tomas err = -EIO; 2836c29c0ae7SAlex Tomas break; 2837c29c0ae7SAlex Tomas } 2838f8489128SDarrick J. Wong if (ext4_ext_check_block(inode, ext_block_hdr(bh), 2839f8489128SDarrick J. Wong depth - i - 1, bh)) { 2840c29c0ae7SAlex Tomas err = -EIO; 2841c29c0ae7SAlex Tomas break; 2842c29c0ae7SAlex Tomas } 2843c29c0ae7SAlex Tomas path[i + 1].p_bh = bh; 2844a86c6181SAlex Tomas 2845d0d856e8SRandy Dunlap /* save actual number of indexes since this 2846d0d856e8SRandy Dunlap * number is changed at the next iteration */ 2847a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2848a86c6181SAlex Tomas i++; 2849a86c6181SAlex Tomas } else { 2850d0d856e8SRandy Dunlap /* we finished processing this index, go up */ 2851a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2852d0d856e8SRandy Dunlap /* index is empty, remove it; 2853a86c6181SAlex Tomas * handle must be already prepared by the 2854a86c6181SAlex Tomas * truncatei_leaf() */ 2855c36575e6SForrest Liu err = ext4_ext_rm_idx(handle, inode, path, i); 2856a86c6181SAlex Tomas } 2857d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2858a86c6181SAlex Tomas brelse(path[i].p_bh); 2859a86c6181SAlex Tomas path[i].p_bh = NULL; 2860a86c6181SAlex Tomas i--; 2861a86c6181SAlex Tomas ext_debug("return to level %d\n", i); 2862a86c6181SAlex Tomas } 2863a86c6181SAlex Tomas } 2864a86c6181SAlex Tomas 286561801325SLukas Czerner trace_ext4_ext_remove_space_done(inode, start, end, depth, 286661801325SLukas Czerner partial_cluster, path->p_hdr->eh_entries); 2867d8990240SAditya Kali 28687b415bf6SAditya Kali /* If we still have something in the partial cluster and we have removed 28697b415bf6SAditya Kali * even the first extent, then we should free the blocks in the partial 28707b415bf6SAditya Kali * cluster as well. 
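 * For instance, if the truncate emptied the entire tree but the last
 * range freed did not begin on a cluster boundary, partial_cluster
 * still names that cluster; nothing references it any more, so its
 * blocks are released here.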
*/ 2871d23142c6SLukas Czerner if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) { 28727b415bf6SAditya Kali int flags = EXT4_FREE_BLOCKS_FORGET; 28737b415bf6SAditya Kali 28747b415bf6SAditya Kali if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 28757b415bf6SAditya Kali flags |= EXT4_FREE_BLOCKS_METADATA; 28767b415bf6SAditya Kali 28777b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL, 28787b415bf6SAditya Kali EXT4_C2B(EXT4_SB(sb), partial_cluster), 28797b415bf6SAditya Kali EXT4_SB(sb)->s_cluster_ratio, flags); 28807b415bf6SAditya Kali partial_cluster = 0; 28817b415bf6SAditya Kali } 28827b415bf6SAditya Kali 2883a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */ 2884a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) { 2885a86c6181SAlex Tomas /* 2886d0d856e8SRandy Dunlap * truncate to zero freed all the tree, 2887d0d856e8SRandy Dunlap * so we need to correct eh_depth 2888a86c6181SAlex Tomas */ 2889a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path); 2890a86c6181SAlex Tomas if (err == 0) { 2891a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0; 2892a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max = 289355ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0)); 2894a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path); 2895a86c6181SAlex Tomas } 2896a86c6181SAlex Tomas } 2897a86c6181SAlex Tomas out: 2898a86c6181SAlex Tomas ext4_ext_drop_refs(path); 2899a86c6181SAlex Tomas kfree(path); 2900968dee77SAshish Sangwan if (err == -EAGAIN) { 2901968dee77SAshish Sangwan path = NULL; 29020617b83fSDmitry Monakhov goto again; 2903968dee77SAshish Sangwan } 2904a86c6181SAlex Tomas ext4_journal_stop(handle); 2905a86c6181SAlex Tomas 2906a86c6181SAlex Tomas return err; 2907a86c6181SAlex Tomas } 2908a86c6181SAlex Tomas 2909a86c6181SAlex Tomas /* 2910a86c6181SAlex Tomas * called at mount time 2911a86c6181SAlex Tomas */ 2912a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb) 2913a86c6181SAlex Tomas { 2914a86c6181SAlex Tomas /* 2915a86c6181SAlex Tomas * possible initialization would be here 2916a86c6181SAlex Tomas */ 2917a86c6181SAlex Tomas 291883982b6fSTheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 291990576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 292092b97816STheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled" 2921bbf2f9fbSRobert P. J. 
Day #ifdef AGGRESSIVE_TEST 292292b97816STheodore Ts'o ", aggressive tests" 2923a86c6181SAlex Tomas #endif 2924a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 292592b97816STheodore Ts'o ", check binsearch" 2926a86c6181SAlex Tomas #endif 2927a86c6181SAlex Tomas #ifdef EXTENTS_STATS 292892b97816STheodore Ts'o ", stats" 2929a86c6181SAlex Tomas #endif 293092b97816STheodore Ts'o "\n"); 293190576c0bSTheodore Ts'o #endif 2932a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2933a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 2934a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30; 2935a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0; 2936a86c6181SAlex Tomas #endif 2937a86c6181SAlex Tomas } 2938a86c6181SAlex Tomas } 2939a86c6181SAlex Tomas 2940a86c6181SAlex Tomas /* 2941a86c6181SAlex Tomas * called at umount time 2942a86c6181SAlex Tomas */ 2943a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb) 2944a86c6181SAlex Tomas { 294583982b6fSTheodore Ts'o if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 2946a86c6181SAlex Tomas return; 2947a86c6181SAlex Tomas 2948a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2949a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 2950a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 2951a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 2952a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents, 2953a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents); 2954a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 2955a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 2956a86c6181SAlex Tomas } 2957a86c6181SAlex Tomas #endif 2958a86c6181SAlex Tomas } 2959a86c6181SAlex Tomas 2960093a088bSAneesh Kumar K.V /* FIXME!! we need to try to merge to left or right after zero-out */ 2961093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 2962093a088bSAneesh Kumar K.V { 29632407518dSLukas Czerner ext4_fsblk_t ee_pblock; 29642407518dSLukas Czerner unsigned int ee_len; 2965b720303dSJing Zhang int ret; 2966093a088bSAneesh Kumar K.V 2967093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 2968bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex); 2969093a088bSAneesh Kumar K.V 2970a107e5a3STheodore Ts'o ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 29712407518dSLukas Czerner if (ret > 0) 29722407518dSLukas Czerner ret = 0; 2973093a088bSAneesh Kumar K.V 29742407518dSLukas Czerner return ret; 2975093a088bSAneesh Kumar K.V } 2976093a088bSAneesh Kumar K.V 297747ea3bb5SYongqiang Yang /* 297847ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at given block. 297947ea3bb5SYongqiang Yang * 298047ea3bb5SYongqiang Yang * @handle: the journal handle 298147ea3bb5SYongqiang Yang * @inode: the file inode 298247ea3bb5SYongqiang Yang * @path: the path to the extent 298347ea3bb5SYongqiang Yang * @split: the logical block where the extent is splitted. 298447ea3bb5SYongqiang Yang * @split_flags: indicates if the extent could be zeroout if split fails, and 298547ea3bb5SYongqiang Yang * the states(init or uninit) of new extents. 298647ea3bb5SYongqiang Yang * @flags: flags used to insert new extent to extent tree. 298747ea3bb5SYongqiang Yang * 298847ea3bb5SYongqiang Yang * 298947ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], states 299047ea3bb5SYongqiang Yang * of which are deterimined by split_flag. 
299147ea3bb5SYongqiang Yang * 299247ea3bb5SYongqiang Yang * There are two cases: 299347ea3bb5SYongqiang Yang * a> the extent are splitted into two extent. 299447ea3bb5SYongqiang Yang * b> split is not needed, and just mark the extent. 299547ea3bb5SYongqiang Yang * 299647ea3bb5SYongqiang Yang * return 0 on success. 299747ea3bb5SYongqiang Yang */ 299847ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle, 299947ea3bb5SYongqiang Yang struct inode *inode, 300047ea3bb5SYongqiang Yang struct ext4_ext_path *path, 300147ea3bb5SYongqiang Yang ext4_lblk_t split, 300247ea3bb5SYongqiang Yang int split_flag, 300347ea3bb5SYongqiang Yang int flags) 300447ea3bb5SYongqiang Yang { 300547ea3bb5SYongqiang Yang ext4_fsblk_t newblock; 300647ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 3007adb23551SZheng Liu struct ext4_extent *ex, newex, orig_ex, zero_ex; 300847ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL; 300947ea3bb5SYongqiang Yang unsigned int ee_len, depth; 301047ea3bb5SYongqiang Yang int err = 0; 301147ea3bb5SYongqiang Yang 3012dee1f973SDmitry Monakhov BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) == 3013dee1f973SDmitry Monakhov (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)); 3014dee1f973SDmitry Monakhov 301547ea3bb5SYongqiang Yang ext_debug("ext4_split_extents_at: inode %lu, logical" 301647ea3bb5SYongqiang Yang "block %llu\n", inode->i_ino, (unsigned long long)split); 301747ea3bb5SYongqiang Yang 301847ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 301947ea3bb5SYongqiang Yang 302047ea3bb5SYongqiang Yang depth = ext_depth(inode); 302147ea3bb5SYongqiang Yang ex = path[depth].p_ext; 302247ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 302347ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 302447ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex); 302547ea3bb5SYongqiang Yang 302647ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 3027357b66fdSDmitry Monakhov BUG_ON(!ext4_ext_is_uninitialized(ex) && 3028357b66fdSDmitry Monakhov split_flag & (EXT4_EXT_MAY_ZEROOUT | 3029357b66fdSDmitry Monakhov EXT4_EXT_MARK_UNINIT1 | 3030357b66fdSDmitry Monakhov EXT4_EXT_MARK_UNINIT2)); 303147ea3bb5SYongqiang Yang 303247ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth); 303347ea3bb5SYongqiang Yang if (err) 303447ea3bb5SYongqiang Yang goto out; 303547ea3bb5SYongqiang Yang 303647ea3bb5SYongqiang Yang if (split == ee_block) { 303747ea3bb5SYongqiang Yang /* 303847ea3bb5SYongqiang Yang * case b: block @split is the block that the extent begins with 303947ea3bb5SYongqiang Yang * then we just change the state of the extent, and splitting 304047ea3bb5SYongqiang Yang * is not needed. 
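 *
 * For example (illustrative): splitting the uninitialized extent
 * [100, 200) at block 100 leaves a single extent; depending on
 * EXT4_EXT_MARK_UNINIT2 it is either marked initialized or left
 * uninitialized, and no new extent is inserted.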
304147ea3bb5SYongqiang Yang */ 304247ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 304347ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex); 304447ea3bb5SYongqiang Yang else 304547ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex); 304647ea3bb5SYongqiang Yang 304747ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 3048ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 304947ea3bb5SYongqiang Yang 3050ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 305147ea3bb5SYongqiang Yang goto out; 305247ea3bb5SYongqiang Yang } 305347ea3bb5SYongqiang Yang 305447ea3bb5SYongqiang Yang /* case a */ 305547ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex)); 305647ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block); 305747ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT1) 305847ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex); 305947ea3bb5SYongqiang Yang 306047ea3bb5SYongqiang Yang /* 306147ea3bb5SYongqiang Yang * path may lead to new leaf, not to original leaf any more 306247ea3bb5SYongqiang Yang * after ext4_ext_insert_extent() returns, 306347ea3bb5SYongqiang Yang */ 306447ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 306547ea3bb5SYongqiang Yang if (err) 306647ea3bb5SYongqiang Yang goto fix_extent_len; 306747ea3bb5SYongqiang Yang 306847ea3bb5SYongqiang Yang ex2 = &newex; 306947ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split); 307047ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 307147ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock); 307247ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 307347ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex2); 307447ea3bb5SYongqiang Yang 307547ea3bb5SYongqiang Yang err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 307647ea3bb5SYongqiang Yang if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3077dee1f973SDmitry Monakhov if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3078adb23551SZheng Liu if (split_flag & EXT4_EXT_DATA_VALID1) { 3079dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex2); 3080adb23551SZheng Liu zero_ex.ee_block = ex2->ee_block; 30818cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 30828cde7ad1SZheng Liu ext4_ext_get_actual_len(ex2)); 3083adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3084adb23551SZheng Liu ext4_ext_pblock(ex2)); 3085adb23551SZheng Liu } else { 3086dee1f973SDmitry Monakhov err = ext4_ext_zeroout(inode, ex); 3087adb23551SZheng Liu zero_ex.ee_block = ex->ee_block; 30888cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 30898cde7ad1SZheng Liu ext4_ext_get_actual_len(ex)); 3090adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3091adb23551SZheng Liu ext4_ext_pblock(ex)); 3092adb23551SZheng Liu } 3093adb23551SZheng Liu } else { 309447ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex); 3095adb23551SZheng Liu zero_ex.ee_block = orig_ex.ee_block; 30968cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16( 30978cde7ad1SZheng Liu ext4_ext_get_actual_len(&orig_ex)); 3098adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, 3099adb23551SZheng Liu ext4_ext_pblock(&orig_ex)); 3100adb23551SZheng Liu } 3101dee1f973SDmitry Monakhov 310247ea3bb5SYongqiang Yang if (err) 310347ea3bb5SYongqiang Yang goto fix_extent_len; 310447ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */ 3105af1584f5SAl Viro ex->ee_len = cpu_to_le16(ee_len); 3106ecb94f5fSTheodore Ts'o 
ext4_ext_try_to_merge(handle, inode, path, ex); 3107ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3108adb23551SZheng Liu if (err) 3109adb23551SZheng Liu goto fix_extent_len; 3110adb23551SZheng Liu 3111adb23551SZheng Liu /* update extent status tree */ 3112adb23551SZheng Liu err = ext4_es_zeroout(inode, &zero_ex); 3113adb23551SZheng Liu 311447ea3bb5SYongqiang Yang goto out; 311547ea3bb5SYongqiang Yang } else if (err) 311647ea3bb5SYongqiang Yang goto fix_extent_len; 311747ea3bb5SYongqiang Yang 311847ea3bb5SYongqiang Yang out: 311947ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 312047ea3bb5SYongqiang Yang return err; 312147ea3bb5SYongqiang Yang 312247ea3bb5SYongqiang Yang fix_extent_len: 312347ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len; 312447ea3bb5SYongqiang Yang ext4_ext_dirty(handle, inode, path + depth); 312547ea3bb5SYongqiang Yang return err; 312647ea3bb5SYongqiang Yang } 312747ea3bb5SYongqiang Yang 312847ea3bb5SYongqiang Yang /* 312947ea3bb5SYongqiang Yang * ext4_split_extents() splits an extent and mark extent which is covered 313047ea3bb5SYongqiang Yang * by @map as split_flags indicates 313147ea3bb5SYongqiang Yang * 313247ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (upto three) 313347ea3bb5SYongqiang Yang * There are three possibilities: 313447ea3bb5SYongqiang Yang * a> There is no split required 313547ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the extent 313647ea3bb5SYongqiang Yang * c> Splits in three extents: Somone is splitting in middle of the extent 313747ea3bb5SYongqiang Yang * 313847ea3bb5SYongqiang Yang */ 313947ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle, 314047ea3bb5SYongqiang Yang struct inode *inode, 314147ea3bb5SYongqiang Yang struct ext4_ext_path *path, 314247ea3bb5SYongqiang Yang struct ext4_map_blocks *map, 314347ea3bb5SYongqiang Yang int split_flag, 314447ea3bb5SYongqiang Yang int flags) 314547ea3bb5SYongqiang Yang { 314647ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 314747ea3bb5SYongqiang Yang struct ext4_extent *ex; 314847ea3bb5SYongqiang Yang unsigned int ee_len, depth; 314947ea3bb5SYongqiang Yang int err = 0; 315047ea3bb5SYongqiang Yang int uninitialized; 315147ea3bb5SYongqiang Yang int split_flag1, flags1; 31523a225670SZheng Liu int allocated = map->m_len; 315347ea3bb5SYongqiang Yang 315447ea3bb5SYongqiang Yang depth = ext_depth(inode); 315547ea3bb5SYongqiang Yang ex = path[depth].p_ext; 315647ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 315747ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 315847ea3bb5SYongqiang Yang uninitialized = ext4_ext_is_uninitialized(ex); 315947ea3bb5SYongqiang Yang 316047ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) { 3161dee1f973SDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT; 316247ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 316347ea3bb5SYongqiang Yang if (uninitialized) 316447ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1 | 316547ea3bb5SYongqiang Yang EXT4_EXT_MARK_UNINIT2; 3166dee1f973SDmitry Monakhov if (split_flag & EXT4_EXT_DATA_VALID2) 3167dee1f973SDmitry Monakhov split_flag1 |= EXT4_EXT_DATA_VALID1; 316847ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 316947ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1); 317093917411SYongqiang Yang if (err) 317193917411SYongqiang Yang goto out; 31723a225670SZheng Liu } else { 31733a225670SZheng 
Liu allocated = ee_len - (map->m_lblk - ee_block); 317447ea3bb5SYongqiang Yang } 3175357b66fdSDmitry Monakhov /* 3176357b66fdSDmitry Monakhov * Update path is required because previous ext4_split_extent_at() may 3177357b66fdSDmitry Monakhov * result in split of original leaf or extent zeroout. 3178357b66fdSDmitry Monakhov */ 317947ea3bb5SYongqiang Yang ext4_ext_drop_refs(path); 318047ea3bb5SYongqiang Yang path = ext4_ext_find_extent(inode, map->m_lblk, path); 318147ea3bb5SYongqiang Yang if (IS_ERR(path)) 318247ea3bb5SYongqiang Yang return PTR_ERR(path); 3183357b66fdSDmitry Monakhov depth = ext_depth(inode); 3184357b66fdSDmitry Monakhov ex = path[depth].p_ext; 3185357b66fdSDmitry Monakhov uninitialized = ext4_ext_is_uninitialized(ex); 3186357b66fdSDmitry Monakhov split_flag1 = 0; 318747ea3bb5SYongqiang Yang 318847ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) { 3189357b66fdSDmitry Monakhov split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; 3190357b66fdSDmitry Monakhov if (uninitialized) { 319147ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1; 3192357b66fdSDmitry Monakhov split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3193357b66fdSDmitry Monakhov EXT4_EXT_MARK_UNINIT2); 3194357b66fdSDmitry Monakhov } 319547ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 319647ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags); 319747ea3bb5SYongqiang Yang if (err) 319847ea3bb5SYongqiang Yang goto out; 319947ea3bb5SYongqiang Yang } 320047ea3bb5SYongqiang Yang 320147ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 320247ea3bb5SYongqiang Yang out: 32033a225670SZheng Liu return err ? err : allocated; 320447ea3bb5SYongqiang Yang } 320547ea3bb5SYongqiang Yang 320656055d3aSAmit Arora /* 3207e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write 320856055d3aSAmit Arora * to an uninitialized extent. It may result in splitting the uninitialized 320956055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two 321056055d3aSAmit Arora * uninitialized). 321156055d3aSAmit Arora * There are three possibilities: 321256055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized 321356055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent 321456055d3aSAmit Arora * c> Splits in three extents: Somone is writing in middle of the extent 32156f91bc5fSEric Gouriou * 32166f91bc5fSEric Gouriou * Pre-conditions: 32176f91bc5fSEric Gouriou * - The extent pointed to by 'path' is uninitialized. 32186f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset 32196f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 32206f91bc5fSEric Gouriou * 32216f91bc5fSEric Gouriou * Post-conditions on success: 32226f91bc5fSEric Gouriou * - the returned value is the number of blocks beyond map->l_lblk 32236f91bc5fSEric Gouriou * that are allocated and initialized. 32246f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len. 
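 *
 * Illustrative example: if the uninitialized extent covers blocks
 * [100, 120) and the write maps [104, 108), the extent may end up as
 * [100, 104) uninitialized, [104, 108) initialized and [108, 120)
 * uninitialized, and the returned count is at least 4.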
322556055d3aSAmit Arora */ 3226725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle, 3227725d26d3SAneesh Kumar K.V struct inode *inode, 3228e35fd660STheodore Ts'o struct ext4_map_blocks *map, 322927dd4385SLukas Czerner struct ext4_ext_path *path, 323027dd4385SLukas Czerner int flags) 323156055d3aSAmit Arora { 323267a5da56SZheng Liu struct ext4_sb_info *sbi; 32336f91bc5fSEric Gouriou struct ext4_extent_header *eh; 3234667eff35SYongqiang Yang struct ext4_map_blocks split_map; 3235667eff35SYongqiang Yang struct ext4_extent zero_ex; 3236bc2d9db4SLukas Czerner struct ext4_extent *ex, *abut_ex; 323721ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block; 3238bc2d9db4SLukas Czerner unsigned int ee_len, depth, map_len = map->m_len; 3239bc2d9db4SLukas Czerner int allocated = 0, max_zeroout = 0; 324056055d3aSAmit Arora int err = 0; 3241667eff35SYongqiang Yang int split_flag = 0; 324221ca087aSDmitry Monakhov 324321ca087aSDmitry Monakhov ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 324421ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3245bc2d9db4SLukas Czerner (unsigned long long)map->m_lblk, map_len); 324621ca087aSDmitry Monakhov 324767a5da56SZheng Liu sbi = EXT4_SB(inode->i_sb); 324821ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 324921ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3250bc2d9db4SLukas Czerner if (eof_block < map->m_lblk + map_len) 3251bc2d9db4SLukas Czerner eof_block = map->m_lblk + map_len; 325256055d3aSAmit Arora 325356055d3aSAmit Arora depth = ext_depth(inode); 32546f91bc5fSEric Gouriou eh = path[depth].p_hdr; 325556055d3aSAmit Arora ex = path[depth].p_ext; 325656055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block); 325756055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex); 3258adb23551SZheng Liu zero_ex.ee_len = 0; 325921ca087aSDmitry Monakhov 32606f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 32616f91bc5fSEric Gouriou 32626f91bc5fSEric Gouriou /* Pre-conditions */ 32636f91bc5fSEric Gouriou BUG_ON(!ext4_ext_is_uninitialized(ex)); 32646f91bc5fSEric Gouriou BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 32656f91bc5fSEric Gouriou 32666f91bc5fSEric Gouriou /* 32676f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently 3268bc2d9db4SLukas Czerner * uninitialized extent to its neighbor. This is much cheaper 32696f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly 3270bc2d9db4SLukas Czerner * memmove() calls. Transferring to the left is the common case in 3271bc2d9db4SLukas Czerner * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE) 3272bc2d9db4SLukas Czerner * followed by append writes. 32736f91bc5fSEric Gouriou * 32746f91bc5fSEric Gouriou * Limitations of the current logic: 3275bc2d9db4SLukas Czerner * - L1: we do not deal with writes covering the whole extent. 32766f91bc5fSEric Gouriou * This would require removing the extent if the transfer 32776f91bc5fSEric Gouriou * is possible. 3278bc2d9db4SLukas Czerner * - L2: we only attempt to merge with an extent stored in the 32796f91bc5fSEric Gouriou * same extent tree node. 
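 *
 * Illustrative example of the left transfer: if [0, 8) is already
 * initialized and the following uninitialized extent starts at block 8
 * (and the two are also physically contiguous), an append write of
 * blocks [8, 12) simply advances the start of the uninitialized extent
 * to 12 and extends the initialized extent to [0, 12), with no insert
 * or merge of extents required.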
32806f91bc5fSEric Gouriou */ 3281bc2d9db4SLukas Czerner if ((map->m_lblk == ee_block) && 3282bc2d9db4SLukas Czerner /* See if we can merge left */ 3283bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3284bc2d9db4SLukas Czerner (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/ 32856f91bc5fSEric Gouriou ext4_lblk_t prev_lblk; 32866f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk; 3287bc2d9db4SLukas Czerner unsigned int prev_len; 32886f91bc5fSEric Gouriou 3289bc2d9db4SLukas Czerner abut_ex = ex - 1; 3290bc2d9db4SLukas Czerner prev_lblk = le32_to_cpu(abut_ex->ee_block); 3291bc2d9db4SLukas Czerner prev_len = ext4_ext_get_actual_len(abut_ex); 3292bc2d9db4SLukas Czerner prev_pblk = ext4_ext_pblock(abut_ex); 32936f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex); 32946f91bc5fSEric Gouriou 32956f91bc5fSEric Gouriou /* 3296bc2d9db4SLukas Czerner * A transfer of blocks from 'ex' to 'abut_ex' is allowed 32976f91bc5fSEric Gouriou * upon those conditions: 3298bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3299bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3300bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3301bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 33026f91bc5fSEric Gouriou * overflowing the (initialized) length limit. 33036f91bc5fSEric Gouriou */ 3304bc2d9db4SLukas Czerner if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/ 33056f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/ 33066f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3307bc2d9db4SLukas Czerner (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 33086f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth); 33096f91bc5fSEric Gouriou if (err) 33106f91bc5fSEric Gouriou goto out; 33116f91bc5fSEric Gouriou 33126f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode, 3313bc2d9db4SLukas Czerner map, ex, abut_ex); 33146f91bc5fSEric Gouriou 3315bc2d9db4SLukas Czerner /* Shift the start of ex by 'map_len' blocks */ 3316bc2d9db4SLukas Czerner ex->ee_block = cpu_to_le32(ee_block + map_len); 3317bc2d9db4SLukas Czerner ext4_ext_store_pblock(ex, ee_pblk + map_len); 3318bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 33196f91bc5fSEric Gouriou ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 33206f91bc5fSEric Gouriou 3321bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3322bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(prev_len + map_len); 33236f91bc5fSEric Gouriou 3324bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3325bc2d9db4SLukas Czerner allocated = map_len; 3326bc2d9db4SLukas Czerner } 3327bc2d9db4SLukas Czerner } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && 3328bc2d9db4SLukas Czerner (map_len < ee_len) && /*L1*/ 3329bc2d9db4SLukas Czerner ex < EXT_LAST_EXTENT(eh)) { /*L2*/ 3330bc2d9db4SLukas Czerner /* See if we can merge right */ 3331bc2d9db4SLukas Czerner ext4_lblk_t next_lblk; 3332bc2d9db4SLukas Czerner ext4_fsblk_t next_pblk, ee_pblk; 3333bc2d9db4SLukas Czerner unsigned int next_len; 3334bc2d9db4SLukas Czerner 3335bc2d9db4SLukas Czerner abut_ex = ex + 1; 3336bc2d9db4SLukas Czerner next_lblk = le32_to_cpu(abut_ex->ee_block); 3337bc2d9db4SLukas Czerner next_len = ext4_ext_get_actual_len(abut_ex); 3338bc2d9db4SLukas Czerner next_pblk = ext4_ext_pblock(abut_ex); 3339bc2d9db4SLukas Czerner ee_pblk = ext4_ext_pblock(ex); 3340bc2d9db4SLukas Czerner 3341bc2d9db4SLukas Czerner /* 3342bc2d9db4SLukas Czerner * 
A transfer of blocks from 'ex' to 'abut_ex' is allowed 3343bc2d9db4SLukas Czerner * upon those conditions: 3344bc2d9db4SLukas Czerner * - C1: abut_ex is initialized, 3345bc2d9db4SLukas Czerner * - C2: abut_ex is logically abutting ex, 3346bc2d9db4SLukas Czerner * - C3: abut_ex is physically abutting ex, 3347bc2d9db4SLukas Czerner * - C4: abut_ex can receive the additional blocks without 3348bc2d9db4SLukas Czerner * overflowing the (initialized) length limit. 3349bc2d9db4SLukas Czerner */ 3350bc2d9db4SLukas Czerner if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/ 3351bc2d9db4SLukas Czerner ((map->m_lblk + map_len) == next_lblk) && /*C2*/ 3352bc2d9db4SLukas Czerner ((ee_pblk + ee_len) == next_pblk) && /*C3*/ 3353bc2d9db4SLukas Czerner (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/ 3354bc2d9db4SLukas Czerner err = ext4_ext_get_access(handle, inode, path + depth); 3355bc2d9db4SLukas Czerner if (err) 3356bc2d9db4SLukas Czerner goto out; 3357bc2d9db4SLukas Czerner 3358bc2d9db4SLukas Czerner trace_ext4_ext_convert_to_initialized_fastpath(inode, 3359bc2d9db4SLukas Czerner map, ex, abut_ex); 3360bc2d9db4SLukas Czerner 3361bc2d9db4SLukas Czerner /* Shift the start of abut_ex by 'map_len' blocks */ 3362bc2d9db4SLukas Czerner abut_ex->ee_block = cpu_to_le32(next_lblk - map_len); 3363bc2d9db4SLukas Czerner ext4_ext_store_pblock(abut_ex, next_pblk - map_len); 3364bc2d9db4SLukas Czerner ex->ee_len = cpu_to_le16(ee_len - map_len); 3365bc2d9db4SLukas Czerner ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 3366bc2d9db4SLukas Czerner 3367bc2d9db4SLukas Czerner /* Extend abut_ex by 'map_len' blocks */ 3368bc2d9db4SLukas Czerner abut_ex->ee_len = cpu_to_le16(next_len + map_len); 3369bc2d9db4SLukas Czerner 3370bc2d9db4SLukas Czerner /* Result: number of initialized blocks past m_lblk */ 3371bc2d9db4SLukas Czerner allocated = map_len; 3372bc2d9db4SLukas Czerner } 3373bc2d9db4SLukas Czerner } 3374bc2d9db4SLukas Czerner if (allocated) { 33756f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */ 33766f91bc5fSEric Gouriou ext4_ext_dirty(handle, inode, path + depth); 33776f91bc5fSEric Gouriou 33786f91bc5fSEric Gouriou /* Update path to point to the right extent */ 3379bc2d9db4SLukas Czerner path[depth].p_ext = abut_ex; 33806f91bc5fSEric Gouriou goto out; 3381bc2d9db4SLukas Czerner } else 3382bc2d9db4SLukas Czerner allocated = ee_len - (map->m_lblk - ee_block); 33836f91bc5fSEric Gouriou 3384667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block); 338521ca087aSDmitry Monakhov /* 338621ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 338721ca087aSDmitry Monakhov * zeroout only if extent is fully insde i_size or new_size. 338821ca087aSDmitry Monakhov */ 3389667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? 
EXT4_EXT_MAY_ZEROOUT : 0; 339021ca087aSDmitry Monakhov 339167a5da56SZheng Liu if (EXT4_EXT_MAY_ZEROOUT & split_flag) 339267a5da56SZheng Liu max_zeroout = sbi->s_extent_max_zeroout_kb >> 33934f42f80aSLukas Czerner (inode->i_sb->s_blocksize_bits - 10); 339467a5da56SZheng Liu 339567a5da56SZheng Liu /* If extent is less than s_max_zeroout_kb, zeroout directly */ 339667a5da56SZheng Liu if (max_zeroout && (ee_len <= max_zeroout)) { 3397667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, ex); 33983977c965SAneesh Kumar K.V if (err) 339956055d3aSAmit Arora goto out; 3400adb23551SZheng Liu zero_ex.ee_block = ex->ee_block; 34018cde7ad1SZheng Liu zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); 3402adb23551SZheng Liu ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); 34039df5643aSAneesh Kumar K.V 34049df5643aSAneesh Kumar K.V err = ext4_ext_get_access(handle, inode, path + depth); 34059df5643aSAneesh Kumar K.V if (err) 34069df5643aSAneesh Kumar K.V goto out; 3407667eff35SYongqiang Yang ext4_ext_mark_initialized(ex); 3408ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3409ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 341056055d3aSAmit Arora goto out; 3411667eff35SYongqiang Yang } 3412093a088bSAneesh Kumar K.V 3413667eff35SYongqiang Yang /* 3414667eff35SYongqiang Yang * four cases: 3415667eff35SYongqiang Yang * 1. split the extent into three extents. 3416667eff35SYongqiang Yang * 2. split the extent into two extents, zeroout the first half. 3417667eff35SYongqiang Yang * 3. split the extent into two extents, zeroout the second half. 3418667eff35SYongqiang Yang * 4. split the extent into two extents with out zeroout. 3419667eff35SYongqiang Yang */ 3420667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3421667eff35SYongqiang Yang split_map.m_len = map->m_len; 3422667eff35SYongqiang Yang 342367a5da56SZheng Liu if (max_zeroout && (allocated > map->m_len)) { 342467a5da56SZheng Liu if (allocated <= max_zeroout) { 3425667eff35SYongqiang Yang /* case 3 */ 3426667eff35SYongqiang Yang zero_ex.ee_block = 34279b940f8eSAllison Henderson cpu_to_le32(map->m_lblk); 34289b940f8eSAllison Henderson zero_ex.ee_len = cpu_to_le16(allocated); 3429667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3430667eff35SYongqiang Yang ext4_ext_pblock(ex) + map->m_lblk - ee_block); 3431667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3432667eff35SYongqiang Yang if (err) 3433667eff35SYongqiang Yang goto out; 3434667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3435667eff35SYongqiang Yang split_map.m_len = allocated; 343667a5da56SZheng Liu } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { 3437667eff35SYongqiang Yang /* case 2 */ 3438667eff35SYongqiang Yang if (map->m_lblk != ee_block) { 3439667eff35SYongqiang Yang zero_ex.ee_block = ex->ee_block; 3440667eff35SYongqiang Yang zero_ex.ee_len = cpu_to_le16(map->m_lblk - 3441667eff35SYongqiang Yang ee_block); 3442667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3443667eff35SYongqiang Yang ext4_ext_pblock(ex)); 3444667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3445667eff35SYongqiang Yang if (err) 3446667eff35SYongqiang Yang goto out; 3447667eff35SYongqiang Yang } 3448667eff35SYongqiang Yang 3449667eff35SYongqiang Yang split_map.m_lblk = ee_block; 34509b940f8eSAllison Henderson split_map.m_len = map->m_lblk - ee_block + map->m_len; 34519b940f8eSAllison Henderson allocated = map->m_len; 3452667eff35SYongqiang Yang } 3453667eff35SYongqiang Yang } 
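	/*
	 * Illustrative example of case 3 above (an example, not part of
	 * the original comments): for an uninitialized extent [100, 164)
	 * and a write covering [150, 160), allocated is 14; if that fits
	 * within max_zeroout, blocks 150..163 are zeroed out and
	 * split_map is grown to [150, 164), so only one split (at block
	 * 150) is needed instead of two.
	 */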
3454667eff35SYongqiang Yang 
3455667eff35SYongqiang Yang 	allocated = ext4_split_extent(handle, inode, path,
345627dd4385SLukas Czerner 				      &split_map, split_flag, flags);
3457667eff35SYongqiang Yang 	if (allocated < 0)
3458667eff35SYongqiang Yang 		err = allocated;
3459667eff35SYongqiang Yang 
3460667eff35SYongqiang Yang out:
3461adb23551SZheng Liu 	/* If we have gotten a failure, don't zero out status tree */
3462adb23551SZheng Liu 	if (!err)
3463adb23551SZheng Liu 		err = ext4_es_zeroout(inode, &zero_ex);
3464667eff35SYongqiang Yang 	return err ? err : allocated;
346556055d3aSAmit Arora }
346656055d3aSAmit Arora 
3467c278bfecSAneesh Kumar K.V /*
3468e35fd660STheodore Ts'o  * This function is called by ext4_ext_map_blocks() from
34690031462bSMingming Cao  * ext4_get_blocks_dio_write() when DIO is used to write
34700031462bSMingming Cao  * to an uninitialized extent.
34710031462bSMingming Cao  *
3472fd018fe8SPaul Bolle  * Writing to an uninitialized extent may result in splitting the uninitialized
347330cb27d6SWang Sheng-Hui  * extent into multiple initialized/uninitialized extents (up to three).
34740031462bSMingming Cao  * There are three possibilities:
34750031462bSMingming Cao  *   a> There is no split required: the entire extent stays uninitialized
34760031462bSMingming Cao  *   b> Splits into two extents: the write happens at either end of the extent
34770031462bSMingming Cao  *   c> Splits into three extents: someone is writing in the middle of the extent
34780031462bSMingming Cao  *
34790031462bSMingming Cao  * One or more index blocks may be needed if the extent tree grows after
3480b595076aSUwe Kleine-König  * the uninitialized extent is split. To prevent ENOSPC from occurring when the
34810031462bSMingming Cao  * IO completes, we need to split the uninitialized extent before DIO submits
3482421f91d2SUwe Kleine-König  * the IO. The uninitialized extent handled here will be split
34830031462bSMingming Cao  * into at most three uninitialized extents. After the IO completes, the part
34840031462bSMingming Cao  * that was written will be converted to initialized by the end_io callback
34850031462bSMingming Cao  * function via ext4_convert_unwritten_extents().
3486ba230c3fSMingming  *
3487ba230c3fSMingming  * Returns the size of the uninitialized extent to be written on success.
34880031462bSMingming Cao */ 34890031462bSMingming Cao static int ext4_split_unwritten_extents(handle_t *handle, 34900031462bSMingming Cao struct inode *inode, 3491e35fd660STheodore Ts'o struct ext4_map_blocks *map, 34920031462bSMingming Cao struct ext4_ext_path *path, 34930031462bSMingming Cao int flags) 34940031462bSMingming Cao { 3495667eff35SYongqiang Yang ext4_lblk_t eof_block; 3496667eff35SYongqiang Yang ext4_lblk_t ee_block; 3497667eff35SYongqiang Yang struct ext4_extent *ex; 3498667eff35SYongqiang Yang unsigned int ee_len; 3499667eff35SYongqiang Yang int split_flag = 0, depth; 35000031462bSMingming Cao 350121ca087aSDmitry Monakhov ext_debug("ext4_split_unwritten_extents: inode %lu, logical" 350221ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3503e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len); 350421ca087aSDmitry Monakhov 350521ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 350621ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3507e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len) 3508e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len; 35090031462bSMingming Cao /* 351021ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 351121ca087aSDmitry Monakhov * zeroout only if extent is fully insde i_size or new_size. 351221ca087aSDmitry Monakhov */ 3513667eff35SYongqiang Yang depth = ext_depth(inode); 35140031462bSMingming Cao ex = path[depth].p_ext; 3515667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 3516667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 35170031462bSMingming Cao 3518667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 3519667eff35SYongqiang Yang split_flag |= EXT4_EXT_MARK_UNINIT2; 3520dee1f973SDmitry Monakhov if (flags & EXT4_GET_BLOCKS_CONVERT) 3521dee1f973SDmitry Monakhov split_flag |= EXT4_EXT_DATA_VALID2; 3522667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO; 3523667eff35SYongqiang Yang return ext4_split_extent(handle, inode, path, map, split_flag, flags); 35240031462bSMingming Cao } 3525197217a5SYongqiang Yang 3526c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle, 35270031462bSMingming Cao struct inode *inode, 3528dee1f973SDmitry Monakhov struct ext4_map_blocks *map, 35290031462bSMingming Cao struct ext4_ext_path *path) 35300031462bSMingming Cao { 35310031462bSMingming Cao struct ext4_extent *ex; 3532dee1f973SDmitry Monakhov ext4_lblk_t ee_block; 3533dee1f973SDmitry Monakhov unsigned int ee_len; 35340031462bSMingming Cao int depth; 35350031462bSMingming Cao int err = 0; 35360031462bSMingming Cao 35370031462bSMingming Cao depth = ext_depth(inode); 35380031462bSMingming Cao ex = path[depth].p_ext; 3539dee1f973SDmitry Monakhov ee_block = le32_to_cpu(ex->ee_block); 3540dee1f973SDmitry Monakhov ee_len = ext4_ext_get_actual_len(ex); 35410031462bSMingming Cao 3542197217a5SYongqiang Yang ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3543197217a5SYongqiang Yang "block %llu, max_blocks %u\n", inode->i_ino, 3544dee1f973SDmitry Monakhov (unsigned long long)ee_block, ee_len); 3545dee1f973SDmitry Monakhov 3546ff95ec22SDmitry Monakhov /* If extent is larger than requested it is a clear sign that we still 3547ff95ec22SDmitry Monakhov * have some extent state machine issues left. So extent_split is still 3548ff95ec22SDmitry Monakhov * required. 
3549ff95ec22SDmitry Monakhov * TODO: Once all related issues will be fixed this situation should be 3550ff95ec22SDmitry Monakhov * illegal. 3551ff95ec22SDmitry Monakhov */ 3552dee1f973SDmitry Monakhov if (ee_block != map->m_lblk || ee_len > map->m_len) { 3553ff95ec22SDmitry Monakhov #ifdef EXT4_DEBUG 3554ff95ec22SDmitry Monakhov ext4_warning("Inode (%ld) finished: extent logical block %llu," 3555ff95ec22SDmitry Monakhov " len %u; IO logical block %llu, len %u\n", 3556ff95ec22SDmitry Monakhov inode->i_ino, (unsigned long long)ee_block, ee_len, 3557ff95ec22SDmitry Monakhov (unsigned long long)map->m_lblk, map->m_len); 3558ff95ec22SDmitry Monakhov #endif 3559dee1f973SDmitry Monakhov err = ext4_split_unwritten_extents(handle, inode, map, path, 3560dee1f973SDmitry Monakhov EXT4_GET_BLOCKS_CONVERT); 3561dee1f973SDmitry Monakhov if (err < 0) 3562dee1f973SDmitry Monakhov goto out; 3563dee1f973SDmitry Monakhov ext4_ext_drop_refs(path); 3564dee1f973SDmitry Monakhov path = ext4_ext_find_extent(inode, map->m_lblk, path); 3565dee1f973SDmitry Monakhov if (IS_ERR(path)) { 3566dee1f973SDmitry Monakhov err = PTR_ERR(path); 3567dee1f973SDmitry Monakhov goto out; 3568dee1f973SDmitry Monakhov } 3569dee1f973SDmitry Monakhov depth = ext_depth(inode); 3570dee1f973SDmitry Monakhov ex = path[depth].p_ext; 3571dee1f973SDmitry Monakhov } 3572197217a5SYongqiang Yang 35730031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth); 35740031462bSMingming Cao if (err) 35750031462bSMingming Cao goto out; 35760031462bSMingming Cao /* first mark the extent as initialized */ 35770031462bSMingming Cao ext4_ext_mark_initialized(ex); 35780031462bSMingming Cao 3579197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because 3580197217a5SYongqiang Yang * borders are not changed 35810031462bSMingming Cao */ 3582ecb94f5fSTheodore Ts'o ext4_ext_try_to_merge(handle, inode, path, ex); 3583197217a5SYongqiang Yang 35840031462bSMingming Cao /* Mark modified extent as dirty */ 3585ecb94f5fSTheodore Ts'o err = ext4_ext_dirty(handle, inode, path + path->p_depth); 35860031462bSMingming Cao out: 35870031462bSMingming Cao ext4_ext_show_leaf(inode, path); 35880031462bSMingming Cao return err; 35890031462bSMingming Cao } 35900031462bSMingming Cao 3591515f41c3SAneesh Kumar K.V static void unmap_underlying_metadata_blocks(struct block_device *bdev, 3592515f41c3SAneesh Kumar K.V sector_t block, int count) 3593515f41c3SAneesh Kumar K.V { 3594515f41c3SAneesh Kumar K.V int i; 3595515f41c3SAneesh Kumar K.V for (i = 0; i < count; i++) 3596515f41c3SAneesh Kumar K.V unmap_underlying_metadata(bdev, block + i); 3597515f41c3SAneesh Kumar K.V } 3598515f41c3SAneesh Kumar K.V 359958590b06STheodore Ts'o /* 360058590b06STheodore Ts'o * Handle EOFBLOCKS_FL flag, clearing it if necessary 360158590b06STheodore Ts'o */ 360258590b06STheodore Ts'o static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3603d002ebf1SEric Sandeen ext4_lblk_t lblk, 360458590b06STheodore Ts'o struct ext4_ext_path *path, 360558590b06STheodore Ts'o unsigned int len) 360658590b06STheodore Ts'o { 360758590b06STheodore Ts'o int i, depth; 360858590b06STheodore Ts'o struct ext4_extent_header *eh; 360965922cb5SSergey Senozhatsky struct ext4_extent *last_ex; 361058590b06STheodore Ts'o 361158590b06STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 361258590b06STheodore Ts'o return 0; 361358590b06STheodore Ts'o 361458590b06STheodore Ts'o depth = ext_depth(inode); 361558590b06STheodore Ts'o eh = path[depth].p_hdr; 
361658590b06STheodore Ts'o 3617afcff5d8SLukas Czerner /* 3618afcff5d8SLukas Czerner * We're going to remove EOFBLOCKS_FL entirely in future so we 3619afcff5d8SLukas Czerner * do not care for this case anymore. Simply remove the flag 3620afcff5d8SLukas Czerner * if there are no extents. 3621afcff5d8SLukas Czerner */ 3622afcff5d8SLukas Czerner if (unlikely(!eh->eh_entries)) 3623afcff5d8SLukas Czerner goto out; 362458590b06STheodore Ts'o last_ex = EXT_LAST_EXTENT(eh); 362558590b06STheodore Ts'o /* 362658590b06STheodore Ts'o * We should clear the EOFBLOCKS_FL flag if we are writing the 362758590b06STheodore Ts'o * last block in the last extent in the file. We test this by 362858590b06STheodore Ts'o * first checking to see if the caller to 362958590b06STheodore Ts'o * ext4_ext_get_blocks() was interested in the last block (or 363058590b06STheodore Ts'o * a block beyond the last block) in the current extent. If 363158590b06STheodore Ts'o * this turns out to be false, we can bail out from this 363258590b06STheodore Ts'o * function immediately. 363358590b06STheodore Ts'o */ 3634d002ebf1SEric Sandeen if (lblk + len < le32_to_cpu(last_ex->ee_block) + 363558590b06STheodore Ts'o ext4_ext_get_actual_len(last_ex)) 363658590b06STheodore Ts'o return 0; 363758590b06STheodore Ts'o /* 363858590b06STheodore Ts'o * If the caller does appear to be planning to write at or 363958590b06STheodore Ts'o * beyond the end of the current extent, we then test to see 364058590b06STheodore Ts'o * if the current extent is the last extent in the file, by 364158590b06STheodore Ts'o * checking to make sure it was reached via the rightmost node 364258590b06STheodore Ts'o * at each level of the tree. 364358590b06STheodore Ts'o */ 364458590b06STheodore Ts'o for (i = depth-1; i >= 0; i--) 364558590b06STheodore Ts'o if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 364658590b06STheodore Ts'o return 0; 3647afcff5d8SLukas Czerner out: 364858590b06STheodore Ts'o ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 364958590b06STheodore Ts'o return ext4_mark_inode_dirty(handle, inode); 365058590b06STheodore Ts'o } 365158590b06STheodore Ts'o 36527b415bf6SAditya Kali /** 36537b415bf6SAditya Kali * ext4_find_delalloc_range: find delayed allocated block in the given range. 36547b415bf6SAditya Kali * 36557d1b1fbcSZheng Liu * Return 1 if there is a delalloc block in the range, otherwise 0. 
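 *
 * For example, if blocks 100-107 of the inode are currently tracked as
 * delayed allocated in the extent status tree, a query for the range
 * [104, 200] returns 1, while a query for [0, 99] returns 0.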
36567b415bf6SAditya Kali */ 3657f7fec032SZheng Liu int ext4_find_delalloc_range(struct inode *inode, 36587b415bf6SAditya Kali ext4_lblk_t lblk_start, 36597d1b1fbcSZheng Liu ext4_lblk_t lblk_end) 36607b415bf6SAditya Kali { 36617d1b1fbcSZheng Liu struct extent_status es; 36627b415bf6SAditya Kali 3663e30b5dcaSYan, Zheng ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es); 366406b0c886SZheng Liu if (es.es_len == 0) 36657d1b1fbcSZheng Liu return 0; /* there is no delay extent in this tree */ 366606b0c886SZheng Liu else if (es.es_lblk <= lblk_start && 366706b0c886SZheng Liu lblk_start < es.es_lblk + es.es_len) 36687b415bf6SAditya Kali return 1; 366906b0c886SZheng Liu else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end) 36707d1b1fbcSZheng Liu return 1; 36717b415bf6SAditya Kali else 36727b415bf6SAditya Kali return 0; 36737b415bf6SAditya Kali } 36747b415bf6SAditya Kali 36757d1b1fbcSZheng Liu int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) 36767b415bf6SAditya Kali { 36777b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 36787b415bf6SAditya Kali ext4_lblk_t lblk_start, lblk_end; 36797b415bf6SAditya Kali lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); 36807b415bf6SAditya Kali lblk_end = lblk_start + sbi->s_cluster_ratio - 1; 36817b415bf6SAditya Kali 36827d1b1fbcSZheng Liu return ext4_find_delalloc_range(inode, lblk_start, lblk_end); 36837b415bf6SAditya Kali } 36847b415bf6SAditya Kali 36857b415bf6SAditya Kali /** 36867b415bf6SAditya Kali * Determines how many complete clusters (out of those specified by the 'map') 36877b415bf6SAditya Kali * are under delalloc and were reserved quota for. 36887b415bf6SAditya Kali * This function is called when we are writing out the blocks that were 36897b415bf6SAditya Kali * originally written with their allocation delayed, but then the space was 36907b415bf6SAditya Kali * allocated using fallocate() before the delayed allocation could be resolved. 36917b415bf6SAditya Kali * The cases to look for are: 36927b415bf6SAditya Kali * ('=' indicated delayed allocated blocks 36937b415bf6SAditya Kali * '-' indicates non-delayed allocated blocks) 36947b415bf6SAditya Kali * (a) partial clusters towards beginning and/or end outside of allocated range 36957b415bf6SAditya Kali * are not delalloc'ed. 36967b415bf6SAditya Kali * Ex: 36977b415bf6SAditya Kali * |----c---=|====c====|====c====|===-c----| 36987b415bf6SAditya Kali * |++++++ allocated ++++++| 36997b415bf6SAditya Kali * ==> 4 complete clusters in above example 37007b415bf6SAditya Kali * 37017b415bf6SAditya Kali * (b) partial cluster (outside of allocated range) towards either end is 37027b415bf6SAditya Kali * marked for delayed allocation. In this case, we will exclude that 37037b415bf6SAditya Kali * cluster. 37047b415bf6SAditya Kali * Ex: 37057b415bf6SAditya Kali * |----====c========|========c========| 37067b415bf6SAditya Kali * |++++++ allocated ++++++| 37077b415bf6SAditya Kali * ==> 1 complete clusters in above example 37087b415bf6SAditya Kali * 37097b415bf6SAditya Kali * Ex: 37107b415bf6SAditya Kali * |================c================| 37117b415bf6SAditya Kali * |++++++ allocated ++++++| 37127b415bf6SAditya Kali * ==> 0 complete clusters in above example 37137b415bf6SAditya Kali * 37147b415bf6SAditya Kali * The ext4_da_update_reserve_space will be called only if we 37157b415bf6SAditya Kali * determine here that there were some "entire" clusters that span 37167b415bf6SAditya Kali * this 'allocated' range. 
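 *
 * A concrete example (assuming s_cluster_ratio == 4, i.e. clusters
 * [0-3], [4-7], [8-11], ...): lblk_start = 5 and num_blks = 6 cover
 * blocks 5-10, touching clusters [4-7] and [8-11].  The partial edges,
 * block 4 on the left and block 11 on the right, are each checked with
 * ext4_find_delalloc_range(); every edge that is itself delayed allocated
 * causes its cluster to be excluded from the returned count, as in
 * case (b) above.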
37177b415bf6SAditya Kali * In the non-bigalloc case, this function will just end up returning num_blks 37187b415bf6SAditya Kali * without ever calling ext4_find_delalloc_range. 37197b415bf6SAditya Kali */ 37207b415bf6SAditya Kali static unsigned int 37217b415bf6SAditya Kali get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, 37227b415bf6SAditya Kali unsigned int num_blks) 37237b415bf6SAditya Kali { 37247b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 37257b415bf6SAditya Kali ext4_lblk_t alloc_cluster_start, alloc_cluster_end; 37267b415bf6SAditya Kali ext4_lblk_t lblk_from, lblk_to, c_offset; 37277b415bf6SAditya Kali unsigned int allocated_clusters = 0; 37287b415bf6SAditya Kali 37297b415bf6SAditya Kali alloc_cluster_start = EXT4_B2C(sbi, lblk_start); 37307b415bf6SAditya Kali alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); 37317b415bf6SAditya Kali 37327b415bf6SAditya Kali /* max possible clusters for this allocation */ 37337b415bf6SAditya Kali allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; 37347b415bf6SAditya Kali 3735d8990240SAditya Kali trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); 3736d8990240SAditya Kali 37377b415bf6SAditya Kali /* Check towards left side */ 37387b415bf6SAditya Kali c_offset = lblk_start & (sbi->s_cluster_ratio - 1); 37397b415bf6SAditya Kali if (c_offset) { 37407b415bf6SAditya Kali lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); 37417b415bf6SAditya Kali lblk_to = lblk_from + c_offset - 1; 37427b415bf6SAditya Kali 37437d1b1fbcSZheng Liu if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 37447b415bf6SAditya Kali allocated_clusters--; 37457b415bf6SAditya Kali } 37467b415bf6SAditya Kali 37477b415bf6SAditya Kali /* Now check towards right. */ 37487b415bf6SAditya Kali c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); 37497b415bf6SAditya Kali if (allocated_clusters && c_offset) { 37507b415bf6SAditya Kali lblk_from = lblk_start + num_blks; 37517b415bf6SAditya Kali lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; 37527b415bf6SAditya Kali 37537d1b1fbcSZheng Liu if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) 37547b415bf6SAditya Kali allocated_clusters--; 37557b415bf6SAditya Kali } 37567b415bf6SAditya Kali 37577b415bf6SAditya Kali return allocated_clusters; 37587b415bf6SAditya Kali } 37597b415bf6SAditya Kali 37600031462bSMingming Cao static int 37610031462bSMingming Cao ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, 3762e35fd660STheodore Ts'o struct ext4_map_blocks *map, 37630031462bSMingming Cao struct ext4_ext_path *path, int flags, 3764e35fd660STheodore Ts'o unsigned int allocated, ext4_fsblk_t newblock) 37650031462bSMingming Cao { 37660031462bSMingming Cao int ret = 0; 37670031462bSMingming Cao int err = 0; 3768f45ee3a1SDmitry Monakhov ext4_io_end_t *io = ext4_inode_aio(inode); 37690031462bSMingming Cao 37700031462bSMingming Cao ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical " 377188635ca2SZheng Liu "block %llu, max_blocks %u, flags %x, allocated %u\n", 3772e35fd660STheodore Ts'o inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 37730031462bSMingming Cao flags, allocated); 37740031462bSMingming Cao ext4_ext_show_leaf(inode, path); 37750031462bSMingming Cao 377627dd4385SLukas Czerner /* 377727dd4385SLukas Czerner * When writing into uninitialized space, we should not fail to 377827dd4385SLukas Czerner * allocate metadata blocks for the new extent block if needed. 
377927dd4385SLukas Czerner */ 378027dd4385SLukas Czerner flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL; 378127dd4385SLukas Czerner 3782b5645534SZheng Liu trace_ext4_ext_handle_uninitialized_extents(inode, map, flags, 3783b5645534SZheng Liu allocated, newblock); 3784d8990240SAditya Kali 3785c7064ef1SJiaying Zhang /* get_block() before submit the IO, split the extent */ 3786744692dcSJiaying Zhang if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3787e35fd660STheodore Ts'o ret = ext4_split_unwritten_extents(handle, inode, map, 3788e35fd660STheodore Ts'o path, flags); 378982e54229SDmitry Monakhov if (ret <= 0) 379082e54229SDmitry Monakhov goto out; 37915f524950SMingming /* 37925f524950SMingming * Flag the inode(non aio case) or end_io struct (aio case) 379325985edcSLucas De Marchi * that this IO needs to conversion to written when IO is 37945f524950SMingming * completed 37955f524950SMingming */ 37960edeb71dSTao Ma if (io) 37970edeb71dSTao Ma ext4_set_io_unwritten_flag(inode, io); 37980edeb71dSTao Ma else 379919f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3800a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 3801744692dcSJiaying Zhang if (ext4_should_dioread_nolock(inode)) 3802e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNINIT; 38030031462bSMingming Cao goto out; 38040031462bSMingming Cao } 3805c7064ef1SJiaying Zhang /* IO end_io complete, convert the filled extent to written */ 3806744692dcSJiaying Zhang if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 3807dee1f973SDmitry Monakhov ret = ext4_convert_unwritten_extents_endio(handle, inode, map, 38080031462bSMingming Cao path); 380958590b06STheodore Ts'o if (ret >= 0) { 3810b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 3811d002ebf1SEric Sandeen err = check_eofblocks_fl(handle, inode, map->m_lblk, 3812d002ebf1SEric Sandeen path, map->m_len); 381358590b06STheodore Ts'o } else 381458590b06STheodore Ts'o err = ret; 3815cdee7843SZheng Liu map->m_flags |= EXT4_MAP_MAPPED; 3816cdee7843SZheng Liu if (allocated > map->m_len) 3817cdee7843SZheng Liu allocated = map->m_len; 3818cdee7843SZheng Liu map->m_len = allocated; 38190031462bSMingming Cao goto out2; 38200031462bSMingming Cao } 38210031462bSMingming Cao /* buffered IO case */ 38220031462bSMingming Cao /* 38230031462bSMingming Cao * repeat fallocate creation request 38240031462bSMingming Cao * we already have an unwritten extent 38250031462bSMingming Cao */ 3826a25a4e1aSZheng Liu if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) { 3827a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 38280031462bSMingming Cao goto map_out; 3829a25a4e1aSZheng Liu } 38300031462bSMingming Cao 38310031462bSMingming Cao /* buffered READ or buffered write_begin() lookup */ 38320031462bSMingming Cao if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 38330031462bSMingming Cao /* 38340031462bSMingming Cao * We have blocks reserved already. We 38350031462bSMingming Cao * return allocated blocks so that delalloc 38360031462bSMingming Cao * won't do block reservation for us. But 38370031462bSMingming Cao * the buffer head will be unmapped so that 38380031462bSMingming Cao * a read from the block returns 0s. 
38390031462bSMingming Cao */ 3840e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNWRITTEN; 38410031462bSMingming Cao goto out1; 38420031462bSMingming Cao } 38430031462bSMingming Cao 38440031462bSMingming Cao /* buffered write, writepage time, convert*/ 384527dd4385SLukas Czerner ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags); 3846a4e5d88bSDmitry Monakhov if (ret >= 0) 3847b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 38480031462bSMingming Cao out: 38490031462bSMingming Cao if (ret <= 0) { 38500031462bSMingming Cao err = ret; 38510031462bSMingming Cao goto out2; 38520031462bSMingming Cao } else 38530031462bSMingming Cao allocated = ret; 3854e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 3855515f41c3SAneesh Kumar K.V /* 3856515f41c3SAneesh Kumar K.V * if we allocated more blocks than requested 3857515f41c3SAneesh Kumar K.V * we need to make sure we unmap the extra block 3858515f41c3SAneesh Kumar K.V * allocated. The actual needed block will get 3859515f41c3SAneesh Kumar K.V * unmapped later when we find the buffer_head marked 3860515f41c3SAneesh Kumar K.V * new. 3861515f41c3SAneesh Kumar K.V */ 3862e35fd660STheodore Ts'o if (allocated > map->m_len) { 3863515f41c3SAneesh Kumar K.V unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 3864e35fd660STheodore Ts'o newblock + map->m_len, 3865e35fd660STheodore Ts'o allocated - map->m_len); 3866e35fd660STheodore Ts'o allocated = map->m_len; 3867515f41c3SAneesh Kumar K.V } 38683a225670SZheng Liu map->m_len = allocated; 38695f634d06SAneesh Kumar K.V 38705f634d06SAneesh Kumar K.V /* 38715f634d06SAneesh Kumar K.V * If we have done fallocate with the offset that is already 38725f634d06SAneesh Kumar K.V * delayed allocated, we would have block reservation 38735f634d06SAneesh Kumar K.V * and quota reservation done in the delayed write path. 38745f634d06SAneesh Kumar K.V * But fallocate would have already updated quota and block 38755f634d06SAneesh Kumar K.V * count for this offset. So cancel these reservation 38765f634d06SAneesh Kumar K.V */ 38777b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 38787b415bf6SAditya Kali unsigned int reserved_clusters; 38797b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode, 38807b415bf6SAditya Kali map->m_lblk, map->m_len); 38817b415bf6SAditya Kali if (reserved_clusters) 38827b415bf6SAditya Kali ext4_da_update_reserve_space(inode, 38837b415bf6SAditya Kali reserved_clusters, 38847b415bf6SAditya Kali 0); 38857b415bf6SAditya Kali } 38865f634d06SAneesh Kumar K.V 38870031462bSMingming Cao map_out: 3888e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 3889a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { 3890a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, path, 3891a4e5d88bSDmitry Monakhov map->m_len); 3892a4e5d88bSDmitry Monakhov if (err < 0) 3893a4e5d88bSDmitry Monakhov goto out2; 3894a4e5d88bSDmitry Monakhov } 38950031462bSMingming Cao out1: 3896e35fd660STheodore Ts'o if (allocated > map->m_len) 3897e35fd660STheodore Ts'o allocated = map->m_len; 38980031462bSMingming Cao ext4_ext_show_leaf(inode, path); 3899e35fd660STheodore Ts'o map->m_pblk = newblock; 3900e35fd660STheodore Ts'o map->m_len = allocated; 39010031462bSMingming Cao out2: 39020031462bSMingming Cao if (path) { 39030031462bSMingming Cao ext4_ext_drop_refs(path); 39040031462bSMingming Cao kfree(path); 39050031462bSMingming Cao } 39060031462bSMingming Cao return err ? 
err : allocated; 39070031462bSMingming Cao } 390858590b06STheodore Ts'o 39090031462bSMingming Cao /* 39104d33b1efSTheodore Ts'o * get_implied_cluster_alloc - check to see if the requested 39114d33b1efSTheodore Ts'o * allocation (in the map structure) overlaps with a cluster already 39124d33b1efSTheodore Ts'o * allocated in an extent. 3913d8990240SAditya Kali * @sb The filesystem superblock structure 39144d33b1efSTheodore Ts'o * @map The requested lblk->pblk mapping 39154d33b1efSTheodore Ts'o * @ex The extent structure which might contain an implied 39164d33b1efSTheodore Ts'o * cluster allocation 39174d33b1efSTheodore Ts'o * 39184d33b1efSTheodore Ts'o * This function is called by ext4_ext_map_blocks() after we failed to 39194d33b1efSTheodore Ts'o * find blocks that were already in the inode's extent tree. Hence, 39204d33b1efSTheodore Ts'o * we know that the beginning of the requested region cannot overlap 39214d33b1efSTheodore Ts'o * the extent from the inode's extent tree. There are three cases we 39224d33b1efSTheodore Ts'o * want to catch. The first is this case: 39234d33b1efSTheodore Ts'o * 39244d33b1efSTheodore Ts'o * |--- cluster # N--| 39254d33b1efSTheodore Ts'o * |--- extent ---| |---- requested region ---| 39264d33b1efSTheodore Ts'o * |==========| 39274d33b1efSTheodore Ts'o * 39284d33b1efSTheodore Ts'o * The second case that we need to test for is this one: 39294d33b1efSTheodore Ts'o * 39304d33b1efSTheodore Ts'o * |--------- cluster # N ----------------| 39314d33b1efSTheodore Ts'o * |--- requested region --| |------- extent ----| 39324d33b1efSTheodore Ts'o * |=======================| 39334d33b1efSTheodore Ts'o * 39344d33b1efSTheodore Ts'o * The third case is when the requested region lies between two extents 39354d33b1efSTheodore Ts'o * within the same cluster: 39364d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 39374d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 39384d33b1efSTheodore Ts'o * |------ requested region ------| 39394d33b1efSTheodore Ts'o * |================| 39404d33b1efSTheodore Ts'o * 39414d33b1efSTheodore Ts'o * In each of the above cases, we need to set the map->m_pblk and 39424d33b1efSTheodore Ts'o * map->m_len so it corresponds to the return the extent labelled as 39434d33b1efSTheodore Ts'o * "|====|" from cluster #N, since it is already in use for data in 39444d33b1efSTheodore Ts'o * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 39454d33b1efSTheodore Ts'o * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 39464d33b1efSTheodore Ts'o * as a new "allocated" block region. Otherwise, we will return 0 and 39474d33b1efSTheodore Ts'o * ext4_ext_map_blocks() will then allocate one or more new clusters 39484d33b1efSTheodore Ts'o * by calling ext4_mb_new_blocks(). 
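 *
 * As a concrete illustration (assuming a cluster size of 4 blocks): if an
 * extent ends at logical block 9 inside cluster #2 ([8-11]) and the request
 * starts at logical block 10, the first case applies; map->m_pblk is taken
 * from the physical cluster already backing block 9, and map->m_len is
 * trimmed so that the mapping never crosses the cluster boundary after
 * block 11.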
39494d33b1efSTheodore Ts'o */ 3950d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb, 39514d33b1efSTheodore Ts'o struct ext4_map_blocks *map, 39524d33b1efSTheodore Ts'o struct ext4_extent *ex, 39534d33b1efSTheodore Ts'o struct ext4_ext_path *path) 39544d33b1efSTheodore Ts'o { 3955d8990240SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(sb); 39564d33b1efSTheodore Ts'o ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 39574d33b1efSTheodore Ts'o ext4_lblk_t ex_cluster_start, ex_cluster_end; 395814d7f3efSCurt Wohlgemuth ext4_lblk_t rr_cluster_start; 39594d33b1efSTheodore Ts'o ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 39604d33b1efSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 39614d33b1efSTheodore Ts'o unsigned short ee_len = ext4_ext_get_actual_len(ex); 39624d33b1efSTheodore Ts'o 39634d33b1efSTheodore Ts'o /* The extent passed in that we are trying to match */ 39644d33b1efSTheodore Ts'o ex_cluster_start = EXT4_B2C(sbi, ee_block); 39654d33b1efSTheodore Ts'o ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 39664d33b1efSTheodore Ts'o 39674d33b1efSTheodore Ts'o /* The requested region passed into ext4_map_blocks() */ 39684d33b1efSTheodore Ts'o rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 39694d33b1efSTheodore Ts'o 39704d33b1efSTheodore Ts'o if ((rr_cluster_start == ex_cluster_end) || 39714d33b1efSTheodore Ts'o (rr_cluster_start == ex_cluster_start)) { 39724d33b1efSTheodore Ts'o if (rr_cluster_start == ex_cluster_end) 39734d33b1efSTheodore Ts'o ee_start += ee_len - 1; 39744d33b1efSTheodore Ts'o map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + 39754d33b1efSTheodore Ts'o c_offset; 39764d33b1efSTheodore Ts'o map->m_len = min(map->m_len, 39774d33b1efSTheodore Ts'o (unsigned) sbi->s_cluster_ratio - c_offset); 39784d33b1efSTheodore Ts'o /* 39794d33b1efSTheodore Ts'o * Check for and handle this case: 39804d33b1efSTheodore Ts'o * 39814d33b1efSTheodore Ts'o * |--------- cluster # N-------------| 39824d33b1efSTheodore Ts'o * |------- extent ----| 39834d33b1efSTheodore Ts'o * |--- requested region ---| 39844d33b1efSTheodore Ts'o * |===========| 39854d33b1efSTheodore Ts'o */ 39864d33b1efSTheodore Ts'o 39874d33b1efSTheodore Ts'o if (map->m_lblk < ee_block) 39884d33b1efSTheodore Ts'o map->m_len = min(map->m_len, ee_block - map->m_lblk); 39894d33b1efSTheodore Ts'o 39904d33b1efSTheodore Ts'o /* 39914d33b1efSTheodore Ts'o * Check for the case where there is already another allocated 39924d33b1efSTheodore Ts'o * block to the right of 'ex' but before the end of the cluster. 
39934d33b1efSTheodore Ts'o * 39944d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 39954d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 39964d33b1efSTheodore Ts'o * |------ requested region ------| 39974d33b1efSTheodore Ts'o * |================| 39984d33b1efSTheodore Ts'o */ 39994d33b1efSTheodore Ts'o if (map->m_lblk > ee_block) { 40004d33b1efSTheodore Ts'o ext4_lblk_t next = ext4_ext_next_allocated_block(path); 40014d33b1efSTheodore Ts'o map->m_len = min(map->m_len, next - map->m_lblk); 40024d33b1efSTheodore Ts'o } 4003d8990240SAditya Kali 4004d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 40054d33b1efSTheodore Ts'o return 1; 40064d33b1efSTheodore Ts'o } 4007d8990240SAditya Kali 4008d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 40094d33b1efSTheodore Ts'o return 0; 40104d33b1efSTheodore Ts'o } 40114d33b1efSTheodore Ts'o 40124d33b1efSTheodore Ts'o 40134d33b1efSTheodore Ts'o /* 4014f5ab0d1fSMingming Cao * Block allocation/map/preallocation routine for extents based files 4015f5ab0d1fSMingming Cao * 4016f5ab0d1fSMingming Cao * 4017c278bfecSAneesh Kumar K.V * Need to be called with 40180e855ac8SAneesh Kumar K.V * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 40190e855ac8SAneesh Kumar K.V * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 4020f5ab0d1fSMingming Cao * 4021f5ab0d1fSMingming Cao * return > 0, number of of blocks already mapped/allocated 4022f5ab0d1fSMingming Cao * if create == 0 and these are pre-allocated blocks 4023f5ab0d1fSMingming Cao * buffer head is unmapped 4024f5ab0d1fSMingming Cao * otherwise blocks are mapped 4025f5ab0d1fSMingming Cao * 4026f5ab0d1fSMingming Cao * return = 0, if plain look up failed (blocks have not been allocated) 4027f5ab0d1fSMingming Cao * buffer head is unmapped 4028f5ab0d1fSMingming Cao * 4029f5ab0d1fSMingming Cao * return < 0, error case. 
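 *
 * For instance, a pure lookup (create == 0) that lands in the middle of an
 * existing initialized extent returns how many of the requested blocks that
 * extent covers and sets map->m_pblk to the matching physical block, while
 * a lookup in a hole normally just records the gap in the cache and
 * returns 0.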
4030c278bfecSAneesh Kumar K.V */ 4031e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 4032e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 4033a86c6181SAlex Tomas { 4034a86c6181SAlex Tomas struct ext4_ext_path *path = NULL; 40354d33b1efSTheodore Ts'o struct ext4_extent newex, *ex, *ex2; 40364d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 40370562e0baSJiaying Zhang ext4_fsblk_t newblock = 0; 403837794732SZheng Liu int free_on_err = 0, err = 0, depth; 40394d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0; 404081fdbb4aSYongqiang Yang unsigned int allocated_clusters = 0; 4041c9de560dSAlex Tomas struct ext4_allocation_request ar; 4042f45ee3a1SDmitry Monakhov ext4_io_end_t *io = ext4_inode_aio(inode); 40434d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset; 404482e54229SDmitry Monakhov int set_unwritten = 0; 4045a86c6181SAlex Tomas 404684fe3befSMingming ext_debug("blocks %u/%u requested for inode %lu\n", 4047e35fd660STheodore Ts'o map->m_lblk, map->m_len, inode->i_ino); 40480562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4049a86c6181SAlex Tomas 4050a86c6181SAlex Tomas /* find extent for this block */ 4051e35fd660STheodore Ts'o path = ext4_ext_find_extent(inode, map->m_lblk, NULL); 4052a86c6181SAlex Tomas if (IS_ERR(path)) { 4053a86c6181SAlex Tomas err = PTR_ERR(path); 4054a86c6181SAlex Tomas path = NULL; 4055a86c6181SAlex Tomas goto out2; 4056a86c6181SAlex Tomas } 4057a86c6181SAlex Tomas 4058a86c6181SAlex Tomas depth = ext_depth(inode); 4059a86c6181SAlex Tomas 4060a86c6181SAlex Tomas /* 4061d0d856e8SRandy Dunlap * consistent leaf must not be empty; 4062d0d856e8SRandy Dunlap * this situation is possible, though, _during_ tree modification; 4063a86c6181SAlex Tomas * this is why assert can't be put in ext4_ext_find_extent() 4064a86c6181SAlex Tomas */ 4065273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4066273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address " 4067f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld", 4068f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth, 4069f70f362bSTheodore Ts'o path[depth].p_block); 4070034fb4c9SSurbhi Palande err = -EIO; 4071034fb4c9SSurbhi Palande goto out2; 4072034fb4c9SSurbhi Palande } 4073a86c6181SAlex Tomas 40747e028976SAvantika Mathur ex = path[depth].p_ext; 40757e028976SAvantika Mathur if (ex) { 4076725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 4077bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 4078a2df2a63SAmit Arora unsigned short ee_len; 4079471d4011SSuparna Bhattacharya 4080471d4011SSuparna Bhattacharya /* 4081471d4011SSuparna Bhattacharya * Uninitialized extents are treated as holes, except that 408256055d3aSAmit Arora * we split out initialized portions during a write. 
4083471d4011SSuparna Bhattacharya */ 4084a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex); 4085d8990240SAditya Kali 4086d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 4087d8990240SAditya Kali 4088d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */ 4089e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) { 4090e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start; 4091d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 4092e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 4093e35fd660STheodore Ts'o ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 4094a86c6181SAlex Tomas ee_block, ee_len, newblock); 409556055d3aSAmit Arora 409669eb33dcSZheng Liu if (!ext4_ext_is_uninitialized(ex)) 4097a86c6181SAlex Tomas goto out; 409869eb33dcSZheng Liu 409937794732SZheng Liu allocated = ext4_ext_handle_uninitialized_extents( 4100e861304bSAllison Henderson handle, inode, map, path, flags, 4101e861304bSAllison Henderson allocated, newblock); 410237794732SZheng Liu goto out3; 410356055d3aSAmit Arora } 4104a86c6181SAlex Tomas } 4105a86c6181SAlex Tomas 41067b415bf6SAditya Kali if ((sbi->s_cluster_ratio > 1) && 41077d1b1fbcSZheng Liu ext4_find_delalloc_cluster(inode, map->m_lblk)) 41087b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 41097b415bf6SAditya Kali 4110a86c6181SAlex Tomas /* 4111d0d856e8SRandy Dunlap * requested block isn't allocated yet; 4112a86c6181SAlex Tomas * we couldn't try to create block if create flag is zero 4113a86c6181SAlex Tomas */ 4114c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 411556055d3aSAmit Arora /* 411656055d3aSAmit Arora * put just found gap into cache to speed up 411756055d3aSAmit Arora * subsequent requests 411856055d3aSAmit Arora */ 4119d100eef2SZheng Liu if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0) 4120e35fd660STheodore Ts'o ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); 4121a86c6181SAlex Tomas goto out2; 4122a86c6181SAlex Tomas } 41234d33b1efSTheodore Ts'o 4124a86c6181SAlex Tomas /* 4125c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation. 4126a86c6181SAlex Tomas */ 41277b415bf6SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 41284d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk); 41294d33b1efSTheodore Ts'o cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 41304d33b1efSTheodore Ts'o 41314d33b1efSTheodore Ts'o /* 41324d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned 41334d33b1efSTheodore Ts'o * by ext4_ext_find_extent() implies a cluster we can use. 
41344d33b1efSTheodore Ts'o */ 41354d33b1efSTheodore Ts'o if (cluster_offset && ex && 4136d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 41374d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 41384d33b1efSTheodore Ts'o newblock = map->m_pblk; 41397b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 41404d33b1efSTheodore Ts'o goto got_allocated_blocks; 41414d33b1efSTheodore Ts'o } 4142a86c6181SAlex Tomas 4143c9de560dSAlex Tomas /* find neighbour allocated blocks */ 4144e35fd660STheodore Ts'o ar.lleft = map->m_lblk; 4145c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 4146c9de560dSAlex Tomas if (err) 4147c9de560dSAlex Tomas goto out2; 4148e35fd660STheodore Ts'o ar.lright = map->m_lblk; 41494d33b1efSTheodore Ts'o ex2 = NULL; 41504d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 4151c9de560dSAlex Tomas if (err) 4152c9de560dSAlex Tomas goto out2; 415325d14f98SAmit Arora 41544d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a 41554d33b1efSTheodore Ts'o * cluster we can use. */ 41564d33b1efSTheodore Ts'o if ((sbi->s_cluster_ratio > 1) && ex2 && 4157d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 41584d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 41594d33b1efSTheodore Ts'o newblock = map->m_pblk; 41607b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 41614d33b1efSTheodore Ts'o goto got_allocated_blocks; 41624d33b1efSTheodore Ts'o } 41634d33b1efSTheodore Ts'o 4164749269faSAmit Arora /* 4165749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in 4166749269faSAmit Arora * a single extent. For an initialized extent this limit is 4167749269faSAmit Arora * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 4168749269faSAmit Arora * EXT_UNINIT_MAX_LEN. 4169749269faSAmit Arora */ 4170e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN && 4171c2177057STheodore Ts'o !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4172e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN; 4173e35fd660STheodore Ts'o else if (map->m_len > EXT_UNINIT_MAX_LEN && 4174c2177057STheodore Ts'o (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 4175e35fd660STheodore Ts'o map->m_len = EXT_UNINIT_MAX_LEN; 4176749269faSAmit Arora 4177e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 4178e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len); 41794d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path); 418025d14f98SAmit Arora if (err) 4181b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 418225d14f98SAmit Arora else 4183e35fd660STheodore Ts'o allocated = map->m_len; 4184c9de560dSAlex Tomas 4185c9de560dSAlex Tomas /* allocate new block */ 4186c9de560dSAlex Tomas ar.inode = inode; 4187e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 4188e35fd660STheodore Ts'o ar.logical = map->m_lblk; 41894d33b1efSTheodore Ts'o /* 41904d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster 41914d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a 41924d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the 41934d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is 41944d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc() 41954d33b1efSTheodore Ts'o * work correctly. 
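 *
 * For example, with s_cluster_ratio == 4 and map->m_lblk == 10, offset
 * becomes 2, so ar.goal and ar.logical are pulled back by two blocks to the
 * cluster boundary and the request length is expressed in whole clusters
 * covering offset + allocated blocks; the extent inserted later still points
 * at "newblock + offset", the block that was actually asked for.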
41964d33b1efSTheodore Ts'o */ 41974d33b1efSTheodore Ts'o offset = map->m_lblk & (sbi->s_cluster_ratio - 1); 41984d33b1efSTheodore Ts'o ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 41994d33b1efSTheodore Ts'o ar.goal -= offset; 42004d33b1efSTheodore Ts'o ar.logical -= offset; 4201c9de560dSAlex Tomas if (S_ISREG(inode->i_mode)) 4202c9de560dSAlex Tomas ar.flags = EXT4_MB_HINT_DATA; 4203c9de560dSAlex Tomas else 4204c9de560dSAlex Tomas /* disable in-core preallocation for non-regular files */ 4205c9de560dSAlex Tomas ar.flags = 0; 4206556b27abSVivek Haldar if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4207556b27abSVivek Haldar ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4208c9de560dSAlex Tomas newblock = ext4_mb_new_blocks(handle, &ar, &err); 4209a86c6181SAlex Tomas if (!newblock) 4210a86c6181SAlex Tomas goto out2; 421184fe3befSMingming ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4212498e5f24STheodore Ts'o ar.goal, newblock, allocated); 42134d33b1efSTheodore Ts'o free_on_err = 1; 42147b415bf6SAditya Kali allocated_clusters = ar.len; 42154d33b1efSTheodore Ts'o ar.len = EXT4_C2B(sbi, ar.len) - offset; 42164d33b1efSTheodore Ts'o if (ar.len > allocated) 42174d33b1efSTheodore Ts'o ar.len = allocated; 4218a86c6181SAlex Tomas 42194d33b1efSTheodore Ts'o got_allocated_blocks: 4220a86c6181SAlex Tomas /* try to insert new extent into found leaf and return */ 42214d33b1efSTheodore Ts'o ext4_ext_store_pblock(&newex, newblock + offset); 4222c9de560dSAlex Tomas newex.ee_len = cpu_to_le16(ar.len); 42238d5d02e6SMingming Cao /* Mark uninitialized */ 42248d5d02e6SMingming Cao if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ 4225a2df2a63SAmit Arora ext4_ext_mark_uninitialized(&newex); 4226a25a4e1aSZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 42278d5d02e6SMingming Cao /* 4228744692dcSJiaying Zhang * io_end structure was created for every IO write to an 422925985edcSLucas De Marchi * uninitialized extent. To avoid unnecessary conversion, 4230744692dcSJiaying Zhang * here we flag the IO that really needs the conversion. 42315f524950SMingming * For non asycn direct IO case, flag the inode state 423225985edcSLucas De Marchi * that we need to perform conversion when IO is done. 42338d5d02e6SMingming Cao */ 423482e54229SDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_PRE_IO)) 423582e54229SDmitry Monakhov set_unwritten = 1; 4236744692dcSJiaying Zhang if (ext4_should_dioread_nolock(inode)) 4237e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNINIT; 42388d5d02e6SMingming Cao } 4239c8d46e41SJiaying Zhang 4240a4e5d88bSDmitry Monakhov err = 0; 4241a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) 4242a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, 4243a4e5d88bSDmitry Monakhov path, ar.len); 4244575a1d4bSJiaying Zhang if (!err) 4245575a1d4bSJiaying Zhang err = ext4_ext_insert_extent(handle, inode, path, 4246575a1d4bSJiaying Zhang &newex, flags); 424782e54229SDmitry Monakhov 424882e54229SDmitry Monakhov if (!err && set_unwritten) { 424982e54229SDmitry Monakhov if (io) 425082e54229SDmitry Monakhov ext4_set_io_unwritten_flag(inode, io); 425182e54229SDmitry Monakhov else 425282e54229SDmitry Monakhov ext4_set_inode_state(inode, 425382e54229SDmitry Monakhov EXT4_STATE_DIO_UNWRITTEN); 425482e54229SDmitry Monakhov } 425582e54229SDmitry Monakhov 42564d33b1efSTheodore Ts'o if (err && free_on_err) { 42577132de74SMaxim Patlasov int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? 
42587132de74SMaxim Patlasov EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; 4259315054f0SAlex Tomas /* free data blocks we just allocated */ 4260c9de560dSAlex Tomas /* not a good idea to call discard here directly, 4261c9de560dSAlex Tomas * but otherwise we'd need to call it every free() */ 4262c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 42637dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex), 42647132de74SMaxim Patlasov ext4_ext_get_actual_len(&newex), fb_flags); 4265a86c6181SAlex Tomas goto out2; 4266315054f0SAlex Tomas } 4267a86c6181SAlex Tomas 4268a86c6181SAlex Tomas /* previous routine could use block we allocated */ 4269bf89d16fSTheodore Ts'o newblock = ext4_ext_pblock(&newex); 4270b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 4271e35fd660STheodore Ts'o if (allocated > map->m_len) 4272e35fd660STheodore Ts'o allocated = map->m_len; 4273e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 4274a86c6181SAlex Tomas 4275b436b9beSJan Kara /* 42765f634d06SAneesh Kumar K.V * Update reserved blocks/metadata blocks after successful 42775f634d06SAneesh Kumar K.V * block allocation which had been deferred till now. 42785f634d06SAneesh Kumar K.V */ 42797b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 428081fdbb4aSYongqiang Yang unsigned int reserved_clusters; 42817b415bf6SAditya Kali /* 428281fdbb4aSYongqiang Yang * Check how many clusters we had reserved this allocated range 42837b415bf6SAditya Kali */ 42847b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode, 42857b415bf6SAditya Kali map->m_lblk, allocated); 42867b415bf6SAditya Kali if (map->m_flags & EXT4_MAP_FROM_CLUSTER) { 42877b415bf6SAditya Kali if (reserved_clusters) { 42887b415bf6SAditya Kali /* 42897b415bf6SAditya Kali * We have clusters reserved for this range. 42907b415bf6SAditya Kali * But since we are not doing actual allocation 42917b415bf6SAditya Kali * and are simply using blocks from previously 42927b415bf6SAditya Kali * allocated cluster, we should release the 42937b415bf6SAditya Kali * reservation and not claim quota. 42947b415bf6SAditya Kali */ 42957b415bf6SAditya Kali ext4_da_update_reserve_space(inode, 42967b415bf6SAditya Kali reserved_clusters, 0); 42977b415bf6SAditya Kali } 42987b415bf6SAditya Kali } else { 42997b415bf6SAditya Kali BUG_ON(allocated_clusters < reserved_clusters); 43007b415bf6SAditya Kali if (reserved_clusters < allocated_clusters) { 43015356f261SAditya Kali struct ext4_inode_info *ei = EXT4_I(inode); 43027b415bf6SAditya Kali int reservation = allocated_clusters - 43037b415bf6SAditya Kali reserved_clusters; 43047b415bf6SAditya Kali /* 43057b415bf6SAditya Kali * It seems we claimed few clusters outside of 43067b415bf6SAditya Kali * the range of this allocation. We should give 43077b415bf6SAditya Kali * it back to the reservation pool. This can 43087b415bf6SAditya Kali * happen in the following case: 43097b415bf6SAditya Kali * 43107b415bf6SAditya Kali * * Suppose s_cluster_ratio is 4 (i.e., each 43117b415bf6SAditya Kali * cluster has 4 blocks. Thus, the clusters 43127b415bf6SAditya Kali * are [0-3],[4-7],[8-11]... 43137b415bf6SAditya Kali * * First comes delayed allocation write for 43147b415bf6SAditya Kali * logical blocks 10 & 11. Since there were no 43157b415bf6SAditya Kali * previous delayed allocated blocks in the 43167b415bf6SAditya Kali * range [8-11], we would reserve 1 cluster 43177b415bf6SAditya Kali * for this write. 43187b415bf6SAditya Kali * * Next comes write for logical blocks 3 to 8. 
43197b415bf6SAditya Kali * In this case, we will reserve 2 clusters 43207b415bf6SAditya Kali * (for [0-3] and [4-7]; and not for [8-11] as 43217b415bf6SAditya Kali * that range has a delayed allocated blocks. 43227b415bf6SAditya Kali * Thus total reserved clusters now becomes 3. 43237b415bf6SAditya Kali * * Now, during the delayed allocation writeout 43247b415bf6SAditya Kali * time, we will first write blocks [3-8] and 43257b415bf6SAditya Kali * allocate 3 clusters for writing these 43267b415bf6SAditya Kali * blocks. Also, we would claim all these 43277b415bf6SAditya Kali * three clusters above. 43287b415bf6SAditya Kali * * Now when we come here to writeout the 43297b415bf6SAditya Kali * blocks [10-11], we would expect to claim 43307b415bf6SAditya Kali * the reservation of 1 cluster we had made 43317b415bf6SAditya Kali * (and we would claim it since there are no 43327b415bf6SAditya Kali * more delayed allocated blocks in the range 43337b415bf6SAditya Kali * [8-11]. But our reserved cluster count had 43347b415bf6SAditya Kali * already gone to 0. 43357b415bf6SAditya Kali * 43367b415bf6SAditya Kali * Thus, at the step 4 above when we determine 43377b415bf6SAditya Kali * that there are still some unwritten delayed 43387b415bf6SAditya Kali * allocated blocks outside of our current 43397b415bf6SAditya Kali * block range, we should increment the 43407b415bf6SAditya Kali * reserved clusters count so that when the 43417b415bf6SAditya Kali * remaining blocks finally gets written, we 43427b415bf6SAditya Kali * could claim them. 43437b415bf6SAditya Kali */ 43445356f261SAditya Kali dquot_reserve_block(inode, 43455356f261SAditya Kali EXT4_C2B(sbi, reservation)); 43465356f261SAditya Kali spin_lock(&ei->i_block_reservation_lock); 43475356f261SAditya Kali ei->i_reserved_data_blocks += reservation; 43485356f261SAditya Kali spin_unlock(&ei->i_block_reservation_lock); 43497b415bf6SAditya Kali } 4350232ec872SLukas Czerner /* 4351232ec872SLukas Czerner * We will claim quota for all newly allocated blocks. 4352232ec872SLukas Czerner * We're updating the reserved space *after* the 4353232ec872SLukas Czerner * correction above so we do not accidentally free 4354232ec872SLukas Czerner * all the metadata reservation because we might 4355232ec872SLukas Czerner * actually need it later on. 4356232ec872SLukas Czerner */ 4357232ec872SLukas Czerner ext4_da_update_reserve_space(inode, allocated_clusters, 4358232ec872SLukas Czerner 1); 43597b415bf6SAditya Kali } 43607b415bf6SAditya Kali } 43615f634d06SAneesh Kumar K.V 43625f634d06SAneesh Kumar K.V /* 4363b436b9beSJan Kara * Cache the extent and update transaction to commit on fdatasync only 4364b436b9beSJan Kara * when it is _not_ an uninitialized extent. 
4365b436b9beSJan Kara */ 436669eb33dcSZheng Liu if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) 4367b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 436869eb33dcSZheng Liu else 4369b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0); 4370a86c6181SAlex Tomas out: 4371e35fd660STheodore Ts'o if (allocated > map->m_len) 4372e35fd660STheodore Ts'o allocated = map->m_len; 4373a86c6181SAlex Tomas ext4_ext_show_leaf(inode, path); 4374e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 4375e35fd660STheodore Ts'o map->m_pblk = newblock; 4376e35fd660STheodore Ts'o map->m_len = allocated; 4377a86c6181SAlex Tomas out2: 4378a86c6181SAlex Tomas if (path) { 4379a86c6181SAlex Tomas ext4_ext_drop_refs(path); 4380a86c6181SAlex Tomas kfree(path); 4381a86c6181SAlex Tomas } 4382e861304bSAllison Henderson 438337794732SZheng Liu out3: 438419b303d8SZheng Liu trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated); 4385e7b319e3SYongqiang Yang 43867877191cSLukas Czerner return err ? err : allocated; 4387a86c6181SAlex Tomas } 4388a86c6181SAlex Tomas 4389819c4920STheodore Ts'o void ext4_ext_truncate(handle_t *handle, struct inode *inode) 4390a86c6181SAlex Tomas { 4391a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 4392725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 4393a86c6181SAlex Tomas int err = 0; 4394a86c6181SAlex Tomas 4395a86c6181SAlex Tomas /* 4396d0d856e8SRandy Dunlap * TODO: optimization is possible here. 4397d0d856e8SRandy Dunlap * Probably we need not scan at all, 4398d0d856e8SRandy Dunlap * because page truncation is enough. 4399a86c6181SAlex Tomas */ 4400a86c6181SAlex Tomas 4401a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */ 4402a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size; 4403a86c6181SAlex Tomas ext4_mark_inode_dirty(handle, inode); 4404a86c6181SAlex Tomas 4405a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1) 4406a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb); 440751865fdaSZheng Liu err = ext4_es_remove_extent(inode, last_block, 440851865fdaSZheng Liu EXT_MAX_BLOCKS - last_block); 44095f95d21fSLukas Czerner err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); 4410a86c6181SAlex Tomas } 4411a86c6181SAlex Tomas 4412fd28784aSAneesh Kumar K.V static void ext4_falloc_update_inode(struct inode *inode, 4413fd28784aSAneesh Kumar K.V int mode, loff_t new_size, int update_ctime) 4414fd28784aSAneesh Kumar K.V { 4415fd28784aSAneesh Kumar K.V struct timespec now; 4416fd28784aSAneesh Kumar K.V 4417fd28784aSAneesh Kumar K.V if (update_ctime) { 4418fd28784aSAneesh Kumar K.V now = current_fs_time(inode->i_sb); 4419fd28784aSAneesh Kumar K.V if (!timespec_equal(&inode->i_ctime, &now)) 4420fd28784aSAneesh Kumar K.V inode->i_ctime = now; 4421fd28784aSAneesh Kumar K.V } 4422fd28784aSAneesh Kumar K.V /* 4423fd28784aSAneesh Kumar K.V * Update only when preallocation was requested beyond 4424fd28784aSAneesh Kumar K.V * the file size. 
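 *
 * In other words, when FALLOC_FL_KEEP_SIZE is not set and the preallocated
 * range reaches past the current i_size, both i_size and i_disksize grow;
 * with FALLOC_FL_KEEP_SIZE the sizes are left alone and only EOFBLOCKS_FL
 * is set to record that blocks now exist beyond EOF.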
4425fd28784aSAneesh Kumar K.V */ 4426cf17fea6SAneesh Kumar K.V if (!(mode & FALLOC_FL_KEEP_SIZE)) { 4427cf17fea6SAneesh Kumar K.V if (new_size > i_size_read(inode)) 4428fd28784aSAneesh Kumar K.V i_size_write(inode, new_size); 4429cf17fea6SAneesh Kumar K.V if (new_size > EXT4_I(inode)->i_disksize) 4430cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_size); 4431c8d46e41SJiaying Zhang } else { 4432c8d46e41SJiaying Zhang /* 4433c8d46e41SJiaying Zhang * Mark that we allocate beyond EOF so the subsequent truncate 4434c8d46e41SJiaying Zhang * can proceed even if the new size is the same as i_size. 4435c8d46e41SJiaying Zhang */ 4436c8d46e41SJiaying Zhang if (new_size > i_size_read(inode)) 443712e9b892SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4438fd28784aSAneesh Kumar K.V } 4439fd28784aSAneesh Kumar K.V 4440fd28784aSAneesh Kumar K.V } 4441fd28784aSAneesh Kumar K.V 4442a2df2a63SAmit Arora /* 44432fe17c10SChristoph Hellwig * preallocate space for a file. This implements ext4's fallocate file 4444a2df2a63SAmit Arora * operation, which gets called from sys_fallocate system call. 4445a2df2a63SAmit Arora * For block-mapped files, posix_fallocate should fall back to the method 4446a2df2a63SAmit Arora * of writing zeroes to the required new blocks (the same behavior which is 4447a2df2a63SAmit Arora * expected for file systems which do not support fallocate() system call). 4448a2df2a63SAmit Arora */ 44492fe17c10SChristoph Hellwig long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4450a2df2a63SAmit Arora { 4451496ad9aaSAl Viro struct inode *inode = file_inode(file); 4452a2df2a63SAmit Arora handle_t *handle; 4453fd28784aSAneesh Kumar K.V loff_t new_size; 4454498e5f24STheodore Ts'o unsigned int max_blocks; 4455a2df2a63SAmit Arora int ret = 0; 4456a2df2a63SAmit Arora int ret2 = 0; 4457a2df2a63SAmit Arora int retries = 0; 4458a4e5d88bSDmitry Monakhov int flags; 44592ed88685STheodore Ts'o struct ext4_map_blocks map; 4460a2df2a63SAmit Arora unsigned int credits, blkbits = inode->i_blkbits; 4461a2df2a63SAmit Arora 4462a4bb6b64SAllison Henderson /* Return error if mode is not supported */ 4463a4bb6b64SAllison Henderson if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 4464a4bb6b64SAllison Henderson return -EOPNOTSUPP; 4465a4bb6b64SAllison Henderson 4466a4bb6b64SAllison Henderson if (mode & FALLOC_FL_PUNCH_HOLE) 4467a4bb6b64SAllison Henderson return ext4_punch_hole(file, offset, len); 4468a4bb6b64SAllison Henderson 44690c8d414fSTao Ma ret = ext4_convert_inline_data(inode); 44700c8d414fSTao Ma if (ret) 44710c8d414fSTao Ma return ret; 44720c8d414fSTao Ma 44738bad6fc8SZheng Liu /* 44748bad6fc8SZheng Liu * currently supporting (pre)allocate mode for extent-based 44758bad6fc8SZheng Liu * files _only_ 44768bad6fc8SZheng Liu */ 44778bad6fc8SZheng Liu if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 44788bad6fc8SZheng Liu return -EOPNOTSUPP; 44798bad6fc8SZheng Liu 44800562e0baSJiaying Zhang trace_ext4_fallocate_enter(inode, offset, len, mode); 44812ed88685STheodore Ts'o map.m_lblk = offset >> blkbits; 4482fd28784aSAneesh Kumar K.V /* 4483fd28784aSAneesh Kumar K.V * We can't just convert len to max_blocks because 4484fd28784aSAneesh Kumar K.V * If blocksize = 4096 offset = 3072 and len = 2048 4485fd28784aSAneesh Kumar K.V */ 4486a2df2a63SAmit Arora max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 44872ed88685STheodore Ts'o - map.m_lblk; 4488a2df2a63SAmit Arora /* 4489f3bd1f3fSMingming Cao * credits to insert 1 extent into extent tree 
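 * (ext4_chunk_trans_blocks() is used below to size those credits; it
 * estimates the journal blocks needed to map up to max_blocks contiguous
 * blocks, including the extent tree blocks a split might dirty.)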
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, (len + offset));
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
		return ret;
	}
	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
	if (mode & FALLOC_FL_KEEP_SIZE)
		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
	/*
	 * Don't normalize the request if it can fit in one extent so
	 * that it doesn't get unnecessarily split into multiple
	 * extents.
	 */
	if (len <= EXT_UNINIT_MAX_LEN << blkbits)
		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;

retry:
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk = map.m_lblk + ret;
		map.m_len = max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
					    credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map, flags);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			ext4_warning(inode->i_sb,
				     "inode #%lu: block %u: len %u: "
				     "ext4_ext_map_blocks returned %d",
				     inode->i_ino, map.m_lblk,
				     map.m_len, ret);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			new_size = ((loff_t) map.m_lblk + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
					 (map.m_flags & EXT4_MAP_NEW));
		ext4_mark_inode_dirty(handle, inode);
		if ((file->f_flags & O_SYNC) && ret >= max_blocks)
			ext4_handle_sync(handle);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	trace_ext4_fallocate_exit(inode, offset, max_blocks,
				ret > 0 ? ret2 : ret);
	return ret > 0 ? ret2 : ret;
}
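
/*
 * Illustrative use from user space (a sketch, not part of this file): a
 * caller that wants to preallocate 1 MiB past EOF without changing the
 * visible file size issues
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, 1024 * 1024);
 *
 * and the VFS dispatches that request to ext4_fallocate() via the
 * ->fallocate method in ext4's file_operations.
 */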

/*
 * This function converts a range of blocks to written extents.  The caller
 * passes the start offset and the size; all unwritten extents within this
 * range will be converted to written extents.
 *
 * This function is called from the direct IO end io callback function,
 * to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
				   loff_t offset, ssize_t len)
{
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks: with, say,
	 * blocksize = 4096, offset = 3072 and len = 2048 the request
	 * still spans two blocks (see the worked example in
	 * ext4_fallocate() above).
	 */
	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
		      map.m_lblk);
	/*
	 * This is somewhat ugly but the idea is clear: when a transaction is
	 * reserved, everything goes into it.  Otherwise we start several
	 * smaller transactions, one for the conversion of each extent.
	 */
	if (handle) {
		handle = ext4_journal_start_reserved(handle,
						     EXT4_HT_EXT_CONVERT);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		credits = 0;
	} else {
		/*
		 * credits to insert 1 extent into extent tree
		 */
		credits = ext4_chunk_trans_blocks(inode, max_blocks);
	}
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		if (credits) {
			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
						    credits);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				break;
			}
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0)
			ext4_warning(inode->i_sb,
				     "inode #%lu: block %u: len %u: "
				     "ext4_ext_map_blocks returned %d",
				     inode->i_ino, map.m_lblk,
				     map.m_len, ret);
		ext4_mark_inode_dirty(handle, inode);
		if (credits)
			ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	if (!credits)
		ret2 = ext4_journal_stop(handle);
	return ret > 0 ? ret2 : ret;
}
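
/*
 * Illustrative call (a sketch; the real call sites include the direct IO
 * end-io callback mentioned above):
 *
 *	err = ext4_convert_unwritten_extents(NULL, inode, offset, size);
 *
 * With a NULL handle the loop above opens a small transaction around each
 * ext4_map_blocks() call; with a reserved handle the whole conversion is
 * folded into that single transaction.
 */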

/*
 * If newes is not an existing extent (newes->es_pblk equals zero), find a
 * delayed extent at the start of newes, update newes accordingly and
 * return the start of the next delayed extent.
 *
 * If newes is an existing extent (newes->es_pblk is not zero), return the
 * start of the next delayed extent, or EXT_MAX_BLOCKS if no delayed
 * extent is found.  In that case newes is left unmodified.
 */
static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes)
{
	struct extent_status es;
	ext4_lblk_t block, next_del;

	if (newes->es_pblk == 0) {
		ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
				newes->es_lblk + newes->es_len - 1, &es);

		/*
		 * No extent in the extent tree contains block @newes->es_pblk,
		 * so the block may lie in 1) a hole or 2) a delayed extent.
		 */
		if (es.es_len == 0)
			/* A hole found. */
			return 0;

		if (es.es_lblk > newes->es_lblk) {
			/* A hole found. */
			newes->es_len = min(es.es_lblk - newes->es_lblk,
					    newes->es_len);
			return 0;
		}

		newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
	}

	block = newes->es_lblk + newes->es_len;
	ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
	if (es.es_len == 0)
		next_del = EXT_MAX_BLOCKS;
	else
		next_del = es.es_lblk;

	return next_del;
}

/* FIEMAP flags we can handle are specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = (__u64)iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}
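
/*
 * ext4_fiemap() below is reached through the FIEMAP ioctl via the
 * ->fiemap inode operation.  It reports inline data through
 * ext4_inline_data_fiemap(), falls back to generic_block_fiemap() for
 * block-mapped (non-extent) files, answers FIEMAP_FLAG_XATTR requests
 * through ext4_xattr_fiemap() above, and otherwise walks the extent
 * tree with ext4_fill_fiemap_extents().
 */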

int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);

		if (has_inline)
			return error;
	}

	/* fall back to the generic implementation if not in extents format */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCKS)
			last_blk = EXT_MAX_BLOCKS-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information
		 * and pushing extents back to the user.
		 */
		error = ext4_fill_fiemap_extents(inode, start_blk,
						 len_blks, fieinfo);
	}

	return error;
}
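
/*
 * Illustrative user-space query (a sketch, not part of this file): the
 * function above is ultimately reached through the FIEMAP ioctl, roughly
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   32 * sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;	   (map the whole file)
 *	fm->fm_extent_count = 32;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 *
 * after which fm->fm_mapped_extents tells how many entries of
 * fm->fm_extents[] were filled in while walking the extent tree.
 */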