1a86c6181SAlex Tomas /* 2a86c6181SAlex Tomas * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com 3a86c6181SAlex Tomas * Written by Alex Tomas <alex@clusterfs.com> 4a86c6181SAlex Tomas * 5a86c6181SAlex Tomas * Architecture independence: 6a86c6181SAlex Tomas * Copyright (c) 2005, Bull S.A. 7a86c6181SAlex Tomas * Written by Pierre Peiffer <pierre.peiffer@bull.net> 8a86c6181SAlex Tomas * 9a86c6181SAlex Tomas * This program is free software; you can redistribute it and/or modify 10a86c6181SAlex Tomas * it under the terms of the GNU General Public License version 2 as 11a86c6181SAlex Tomas * published by the Free Software Foundation. 12a86c6181SAlex Tomas * 13a86c6181SAlex Tomas * This program is distributed in the hope that it will be useful, 14a86c6181SAlex Tomas * but WITHOUT ANY WARRANTY; without even the implied warranty of 15a86c6181SAlex Tomas * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16a86c6181SAlex Tomas * GNU General Public License for more details. 17a86c6181SAlex Tomas * 18a86c6181SAlex Tomas * You should have received a copy of the GNU General Public Licens 19a86c6181SAlex Tomas * along with this program; if not, write to the Free Software 20a86c6181SAlex Tomas * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- 21a86c6181SAlex Tomas */ 22a86c6181SAlex Tomas 23a86c6181SAlex Tomas /* 24a86c6181SAlex Tomas * Extents support for EXT4 25a86c6181SAlex Tomas * 26a86c6181SAlex Tomas * TODO: 27a86c6181SAlex Tomas * - ext4*_error() should be used in some situations 28a86c6181SAlex Tomas * - analyze all BUG()/BUG_ON(), use -EIO where appropriate 29a86c6181SAlex Tomas * - smart tree reduction 30a86c6181SAlex Tomas */ 31a86c6181SAlex Tomas 32a86c6181SAlex Tomas #include <linux/module.h> 33a86c6181SAlex Tomas #include <linux/fs.h> 34a86c6181SAlex Tomas #include <linux/time.h> 35cd02ff0bSMingming Cao #include <linux/jbd2.h> 36a86c6181SAlex Tomas #include <linux/highuid.h> 37a86c6181SAlex Tomas #include <linux/pagemap.h> 38a86c6181SAlex Tomas #include <linux/quotaops.h> 39a86c6181SAlex Tomas #include <linux/string.h> 40a86c6181SAlex Tomas #include <linux/slab.h> 41a2df2a63SAmit Arora #include <linux/falloc.h> 42a86c6181SAlex Tomas #include <asm/uaccess.h> 436873fa0dSEric Sandeen #include <linux/fiemap.h> 443dcf5451SChristoph Hellwig #include "ext4_jbd2.h" 45a86c6181SAlex Tomas 460562e0baSJiaying Zhang #include <trace/events/ext4.h> 470562e0baSJiaying Zhang 48d583fb87SAllison Henderson static int ext4_split_extent(handle_t *handle, 49d583fb87SAllison Henderson struct inode *inode, 50d583fb87SAllison Henderson struct ext4_ext_path *path, 51d583fb87SAllison Henderson struct ext4_map_blocks *map, 52d583fb87SAllison Henderson int split_flag, 53d583fb87SAllison Henderson int flags); 54d583fb87SAllison Henderson 55487caeefSJan Kara static int ext4_ext_truncate_extend_restart(handle_t *handle, 56487caeefSJan Kara struct inode *inode, 57487caeefSJan Kara int needed) 58a86c6181SAlex Tomas { 59a86c6181SAlex Tomas int err; 60a86c6181SAlex Tomas 610390131bSFrank Mayhar if (!ext4_handle_valid(handle)) 620390131bSFrank Mayhar return 0; 63a86c6181SAlex Tomas if (handle->h_buffer_credits > needed) 649102e4faSShen Feng return 0; 659102e4faSShen Feng err = ext4_journal_extend(handle, needed); 660123c939STheodore Ts'o if (err <= 0) 679102e4faSShen Feng return err; 68487caeefSJan Kara err = ext4_truncate_restart_trans(handle, inode, needed); 690617b83fSDmitry Monakhov if (err == 0) 700617b83fSDmitry Monakhov err = -EAGAIN; 71487caeefSJan Kara 72487caeefSJan Kara 
return err;
73a86c6181SAlex Tomas }
74a86c6181SAlex Tomas
75a86c6181SAlex Tomas /*
76a86c6181SAlex Tomas * could return:
77a86c6181SAlex Tomas * - EROFS
78a86c6181SAlex Tomas * - ENOMEM
79a86c6181SAlex Tomas */
80a86c6181SAlex Tomas static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
81a86c6181SAlex Tomas struct ext4_ext_path *path)
82a86c6181SAlex Tomas {
83a86c6181SAlex Tomas if (path->p_bh) {
84a86c6181SAlex Tomas /* path points to block */
85a86c6181SAlex Tomas return ext4_journal_get_write_access(handle, path->p_bh);
86a86c6181SAlex Tomas }
87a86c6181SAlex Tomas /* path points to leaf/index in inode body */
88a86c6181SAlex Tomas /* we use in-core data, no need to protect them */
89a86c6181SAlex Tomas return 0;
90a86c6181SAlex Tomas }
91a86c6181SAlex Tomas
92a86c6181SAlex Tomas /*
93a86c6181SAlex Tomas * could return:
94a86c6181SAlex Tomas * - EROFS
95a86c6181SAlex Tomas * - ENOMEM
96a86c6181SAlex Tomas * - EIO
97a86c6181SAlex Tomas */
989ea7a0dfSTheodore Ts'o #define ext4_ext_dirty(handle, inode, path) \
999ea7a0dfSTheodore Ts'o __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
1009ea7a0dfSTheodore Ts'o static int __ext4_ext_dirty(const char *where, unsigned int line,
1019ea7a0dfSTheodore Ts'o handle_t *handle, struct inode *inode,
102a86c6181SAlex Tomas struct ext4_ext_path *path)
103a86c6181SAlex Tomas {
104a86c6181SAlex Tomas int err;
105a86c6181SAlex Tomas if (path->p_bh) {
106a86c6181SAlex Tomas /* path points to block */
1079ea7a0dfSTheodore Ts'o err = __ext4_handle_dirty_metadata(where, line, handle,
1089ea7a0dfSTheodore Ts'o inode, path->p_bh);
109a86c6181SAlex Tomas } else {
110a86c6181SAlex Tomas /* path points to leaf/index in inode body */
111a86c6181SAlex Tomas err = ext4_mark_inode_dirty(handle, inode);
112a86c6181SAlex Tomas }
113a86c6181SAlex Tomas return err;
114a86c6181SAlex Tomas }
115a86c6181SAlex Tomas
116f65e6fbaSAlex Tomas static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
117a86c6181SAlex Tomas struct ext4_ext_path *path,
118725d26d3SAneesh Kumar K.V ext4_lblk_t block)
119a86c6181SAlex Tomas {
120a86c6181SAlex Tomas int depth;
121a86c6181SAlex Tomas
122a86c6181SAlex Tomas if (path) {
123a86c6181SAlex Tomas struct ext4_extent *ex;
124a86c6181SAlex Tomas depth = path->p_depth;
125a86c6181SAlex Tomas
126ad4fb9caSKazuya Mio /*
127ad4fb9caSKazuya Mio * Try to predict block placement assuming that we are
128ad4fb9caSKazuya Mio * filling in a file which will eventually be
129ad4fb9caSKazuya Mio * non-sparse --- i.e., in the case of libbfd writing
130ad4fb9caSKazuya Mio * an ELF object's sections out-of-order but in a way
131ad4fb9caSKazuya Mio * that eventually results in a contiguous object or
132ad4fb9caSKazuya Mio * executable file, or some database extending a table
133ad4fb9caSKazuya Mio * space file. However, this is actually somewhat
134ad4fb9caSKazuya Mio * non-ideal if we are writing a sparse file such as
135ad4fb9caSKazuya Mio * qemu or KVM writing a raw image file that is going
136ad4fb9caSKazuya Mio * to stay fairly sparse, since it will end up
137ad4fb9caSKazuya Mio * fragmenting the file system's free space. Maybe we
138ad4fb9caSKazuya Mio * should have some heuristics or some way to allow
139ad4fb9caSKazuya Mio * userspace to pass a hint to the file system,
140b8d6568aSTao Ma * especially if the latter case turns out to be
141ad4fb9caSKazuya Mio * common.
142ad4fb9caSKazuya Mio */ 1437e028976SAvantika Mathur ex = path[depth].p_ext; 144ad4fb9caSKazuya Mio if (ex) { 145ad4fb9caSKazuya Mio ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); 146ad4fb9caSKazuya Mio ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); 147ad4fb9caSKazuya Mio 148ad4fb9caSKazuya Mio if (block > ext_block) 149ad4fb9caSKazuya Mio return ext_pblk + (block - ext_block); 150ad4fb9caSKazuya Mio else 151ad4fb9caSKazuya Mio return ext_pblk - (ext_block - block); 152ad4fb9caSKazuya Mio } 153a86c6181SAlex Tomas 154d0d856e8SRandy Dunlap /* it looks like index is empty; 155d0d856e8SRandy Dunlap * try to find starting block from index itself */ 156a86c6181SAlex Tomas if (path[depth].p_bh) 157a86c6181SAlex Tomas return path[depth].p_bh->b_blocknr; 158a86c6181SAlex Tomas } 159a86c6181SAlex Tomas 160a86c6181SAlex Tomas /* OK. use inode's group */ 161f86186b4SEric Sandeen return ext4_inode_to_goal_block(inode); 162a86c6181SAlex Tomas } 163a86c6181SAlex Tomas 164654b4908SAneesh Kumar K.V /* 165654b4908SAneesh Kumar K.V * Allocation for a meta data block 166654b4908SAneesh Kumar K.V */ 167f65e6fbaSAlex Tomas static ext4_fsblk_t 168654b4908SAneesh Kumar K.V ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, 169a86c6181SAlex Tomas struct ext4_ext_path *path, 17055f020dbSAllison Henderson struct ext4_extent *ex, int *err, unsigned int flags) 171a86c6181SAlex Tomas { 172f65e6fbaSAlex Tomas ext4_fsblk_t goal, newblock; 173a86c6181SAlex Tomas 174a86c6181SAlex Tomas goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); 17555f020dbSAllison Henderson newblock = ext4_new_meta_blocks(handle, inode, goal, flags, 17655f020dbSAllison Henderson NULL, err); 177a86c6181SAlex Tomas return newblock; 178a86c6181SAlex Tomas } 179a86c6181SAlex Tomas 18055ad63bfSTheodore Ts'o static inline int ext4_ext_space_block(struct inode *inode, int check) 181a86c6181SAlex Tomas { 182a86c6181SAlex Tomas int size; 183a86c6181SAlex Tomas 184a86c6181SAlex Tomas size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 185a86c6181SAlex Tomas / sizeof(struct ext4_extent); 18655ad63bfSTheodore Ts'o if (!check) { 187bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 188a86c6181SAlex Tomas if (size > 6) 189a86c6181SAlex Tomas size = 6; 190a86c6181SAlex Tomas #endif 19155ad63bfSTheodore Ts'o } 192a86c6181SAlex Tomas return size; 193a86c6181SAlex Tomas } 194a86c6181SAlex Tomas 19555ad63bfSTheodore Ts'o static inline int ext4_ext_space_block_idx(struct inode *inode, int check) 196a86c6181SAlex Tomas { 197a86c6181SAlex Tomas int size; 198a86c6181SAlex Tomas 199a86c6181SAlex Tomas size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) 200a86c6181SAlex Tomas / sizeof(struct ext4_extent_idx); 20155ad63bfSTheodore Ts'o if (!check) { 202bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 203a86c6181SAlex Tomas if (size > 5) 204a86c6181SAlex Tomas size = 5; 205a86c6181SAlex Tomas #endif 20655ad63bfSTheodore Ts'o } 207a86c6181SAlex Tomas return size; 208a86c6181SAlex Tomas } 209a86c6181SAlex Tomas 21055ad63bfSTheodore Ts'o static inline int ext4_ext_space_root(struct inode *inode, int check) 211a86c6181SAlex Tomas { 212a86c6181SAlex Tomas int size; 213a86c6181SAlex Tomas 214a86c6181SAlex Tomas size = sizeof(EXT4_I(inode)->i_data); 215a86c6181SAlex Tomas size -= sizeof(struct ext4_extent_header); 216a86c6181SAlex Tomas size /= sizeof(struct ext4_extent); 21755ad63bfSTheodore Ts'o if (!check) { 218bbf2f9fbSRobert P. J. 
Day #ifdef AGGRESSIVE_TEST
219a86c6181SAlex Tomas if (size > 3)
220a86c6181SAlex Tomas size = 3;
221a86c6181SAlex Tomas #endif
22255ad63bfSTheodore Ts'o }
223a86c6181SAlex Tomas return size;
224a86c6181SAlex Tomas }
225a86c6181SAlex Tomas
22655ad63bfSTheodore Ts'o static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
227a86c6181SAlex Tomas {
228a86c6181SAlex Tomas int size;
229a86c6181SAlex Tomas
230a86c6181SAlex Tomas size = sizeof(EXT4_I(inode)->i_data);
231a86c6181SAlex Tomas size -= sizeof(struct ext4_extent_header);
232a86c6181SAlex Tomas size /= sizeof(struct ext4_extent_idx);
23355ad63bfSTheodore Ts'o if (!check) {
234bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST
235a86c6181SAlex Tomas if (size > 4)
236a86c6181SAlex Tomas size = 4;
237a86c6181SAlex Tomas #endif
23855ad63bfSTheodore Ts'o }
239a86c6181SAlex Tomas return size;
240a86c6181SAlex Tomas }
241a86c6181SAlex Tomas
242d2a17637SMingming Cao /*
243d2a17637SMingming Cao * Calculate the number of metadata blocks needed
244d2a17637SMingming Cao * to allocate @blocks
245d2a17637SMingming Cao * Worst case is one block per extent
246d2a17637SMingming Cao */
24701f49d0bSTheodore Ts'o int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
248d2a17637SMingming Cao {
2499d0be502STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode);
2509d0be502STheodore Ts'o int idxs, num = 0;
251d2a17637SMingming Cao
2529d0be502STheodore Ts'o idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
2539d0be502STheodore Ts'o / sizeof(struct ext4_extent_idx));
254d2a17637SMingming Cao
255d2a17637SMingming Cao /*
2569d0be502STheodore Ts'o * If the new delayed allocation block is contiguous with the
2579d0be502STheodore Ts'o * previous da block, it can share index blocks with the
2589d0be502STheodore Ts'o * previous block, so we only need to allocate a new index
2599d0be502STheodore Ts'o * block every idxs leaf blocks. At idxs**2 blocks, we need
2609d0be502STheodore Ts'o * an additional index block, and at idxs**3 blocks, yet
2619d0be502STheodore Ts'o * another index block.
262d2a17637SMingming Cao */
2639d0be502STheodore Ts'o if (ei->i_da_metadata_calc_len &&
2649d0be502STheodore Ts'o ei->i_da_metadata_calc_last_lblock+1 == lblock) {
2659d0be502STheodore Ts'o if ((ei->i_da_metadata_calc_len % idxs) == 0)
2669d0be502STheodore Ts'o num++;
2679d0be502STheodore Ts'o if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
2689d0be502STheodore Ts'o num++;
2699d0be502STheodore Ts'o if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
2709d0be502STheodore Ts'o num++;
2719d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0;
2729d0be502STheodore Ts'o } else
2739d0be502STheodore Ts'o ei->i_da_metadata_calc_len++;
2749d0be502STheodore Ts'o ei->i_da_metadata_calc_last_lblock++;
275d2a17637SMingming Cao return num;
276d2a17637SMingming Cao }
277d2a17637SMingming Cao
2789d0be502STheodore Ts'o /*
2799d0be502STheodore Ts'o * In the worst case we need a new set of index blocks at
2809d0be502STheodore Ts'o * every level of the inode's extent tree.
2819d0be502STheodore Ts'o */ 2829d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 1; 2839d0be502STheodore Ts'o ei->i_da_metadata_calc_last_lblock = lblock; 2849d0be502STheodore Ts'o return ext_depth(inode) + 1; 2859d0be502STheodore Ts'o } 2869d0be502STheodore Ts'o 287c29c0ae7SAlex Tomas static int 288c29c0ae7SAlex Tomas ext4_ext_max_entries(struct inode *inode, int depth) 289c29c0ae7SAlex Tomas { 290c29c0ae7SAlex Tomas int max; 291c29c0ae7SAlex Tomas 292c29c0ae7SAlex Tomas if (depth == ext_depth(inode)) { 293c29c0ae7SAlex Tomas if (depth == 0) 29455ad63bfSTheodore Ts'o max = ext4_ext_space_root(inode, 1); 295c29c0ae7SAlex Tomas else 29655ad63bfSTheodore Ts'o max = ext4_ext_space_root_idx(inode, 1); 297c29c0ae7SAlex Tomas } else { 298c29c0ae7SAlex Tomas if (depth == 0) 29955ad63bfSTheodore Ts'o max = ext4_ext_space_block(inode, 1); 300c29c0ae7SAlex Tomas else 30155ad63bfSTheodore Ts'o max = ext4_ext_space_block_idx(inode, 1); 302c29c0ae7SAlex Tomas } 303c29c0ae7SAlex Tomas 304c29c0ae7SAlex Tomas return max; 305c29c0ae7SAlex Tomas } 306c29c0ae7SAlex Tomas 30756b19868SAneesh Kumar K.V static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) 30856b19868SAneesh Kumar K.V { 309bf89d16fSTheodore Ts'o ext4_fsblk_t block = ext4_ext_pblock(ext); 31056b19868SAneesh Kumar K.V int len = ext4_ext_get_actual_len(ext); 311e84a26ceSTheodore Ts'o 3126fd058f7STheodore Ts'o return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); 31356b19868SAneesh Kumar K.V } 31456b19868SAneesh Kumar K.V 31556b19868SAneesh Kumar K.V static int ext4_valid_extent_idx(struct inode *inode, 31656b19868SAneesh Kumar K.V struct ext4_extent_idx *ext_idx) 31756b19868SAneesh Kumar K.V { 318bf89d16fSTheodore Ts'o ext4_fsblk_t block = ext4_idx_pblock(ext_idx); 319e84a26ceSTheodore Ts'o 3206fd058f7STheodore Ts'o return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); 32156b19868SAneesh Kumar K.V } 32256b19868SAneesh Kumar K.V 32356b19868SAneesh Kumar K.V static int ext4_valid_extent_entries(struct inode *inode, 32456b19868SAneesh Kumar K.V struct ext4_extent_header *eh, 32556b19868SAneesh Kumar K.V int depth) 32656b19868SAneesh Kumar K.V { 32756b19868SAneesh Kumar K.V struct ext4_extent *ext; 32856b19868SAneesh Kumar K.V struct ext4_extent_idx *ext_idx; 32956b19868SAneesh Kumar K.V unsigned short entries; 33056b19868SAneesh Kumar K.V if (eh->eh_entries == 0) 33156b19868SAneesh Kumar K.V return 1; 33256b19868SAneesh Kumar K.V 33356b19868SAneesh Kumar K.V entries = le16_to_cpu(eh->eh_entries); 33456b19868SAneesh Kumar K.V 33556b19868SAneesh Kumar K.V if (depth == 0) { 33656b19868SAneesh Kumar K.V /* leaf entries */ 33756b19868SAneesh Kumar K.V ext = EXT_FIRST_EXTENT(eh); 33856b19868SAneesh Kumar K.V while (entries) { 33956b19868SAneesh Kumar K.V if (!ext4_valid_extent(inode, ext)) 34056b19868SAneesh Kumar K.V return 0; 34156b19868SAneesh Kumar K.V ext++; 34256b19868SAneesh Kumar K.V entries--; 34356b19868SAneesh Kumar K.V } 34456b19868SAneesh Kumar K.V } else { 34556b19868SAneesh Kumar K.V ext_idx = EXT_FIRST_INDEX(eh); 34656b19868SAneesh Kumar K.V while (entries) { 34756b19868SAneesh Kumar K.V if (!ext4_valid_extent_idx(inode, ext_idx)) 34856b19868SAneesh Kumar K.V return 0; 34956b19868SAneesh Kumar K.V ext_idx++; 35056b19868SAneesh Kumar K.V entries--; 35156b19868SAneesh Kumar K.V } 35256b19868SAneesh Kumar K.V } 35356b19868SAneesh Kumar K.V return 1; 35456b19868SAneesh Kumar K.V } 35556b19868SAneesh Kumar K.V 356c398eda0STheodore Ts'o static int __ext4_ext_check(const char *function, unsigned int line, 
357c398eda0STheodore Ts'o struct inode *inode, struct ext4_extent_header *eh, 358c29c0ae7SAlex Tomas int depth) 359c29c0ae7SAlex Tomas { 360c29c0ae7SAlex Tomas const char *error_msg; 361c29c0ae7SAlex Tomas int max = 0; 362c29c0ae7SAlex Tomas 363c29c0ae7SAlex Tomas if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) { 364c29c0ae7SAlex Tomas error_msg = "invalid magic"; 365c29c0ae7SAlex Tomas goto corrupted; 366c29c0ae7SAlex Tomas } 367c29c0ae7SAlex Tomas if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) { 368c29c0ae7SAlex Tomas error_msg = "unexpected eh_depth"; 369c29c0ae7SAlex Tomas goto corrupted; 370c29c0ae7SAlex Tomas } 371c29c0ae7SAlex Tomas if (unlikely(eh->eh_max == 0)) { 372c29c0ae7SAlex Tomas error_msg = "invalid eh_max"; 373c29c0ae7SAlex Tomas goto corrupted; 374c29c0ae7SAlex Tomas } 375c29c0ae7SAlex Tomas max = ext4_ext_max_entries(inode, depth); 376c29c0ae7SAlex Tomas if (unlikely(le16_to_cpu(eh->eh_max) > max)) { 377c29c0ae7SAlex Tomas error_msg = "too large eh_max"; 378c29c0ae7SAlex Tomas goto corrupted; 379c29c0ae7SAlex Tomas } 380c29c0ae7SAlex Tomas if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) { 381c29c0ae7SAlex Tomas error_msg = "invalid eh_entries"; 382c29c0ae7SAlex Tomas goto corrupted; 383c29c0ae7SAlex Tomas } 38456b19868SAneesh Kumar K.V if (!ext4_valid_extent_entries(inode, eh, depth)) { 38556b19868SAneesh Kumar K.V error_msg = "invalid extent entries"; 38656b19868SAneesh Kumar K.V goto corrupted; 38756b19868SAneesh Kumar K.V } 388c29c0ae7SAlex Tomas return 0; 389c29c0ae7SAlex Tomas 390c29c0ae7SAlex Tomas corrupted: 391c398eda0STheodore Ts'o ext4_error_inode(inode, function, line, 0, 39224676da4STheodore Ts'o "bad header/extent: %s - magic %x, " 393c29c0ae7SAlex Tomas "entries %u, max %u(%u), depth %u(%u)", 39424676da4STheodore Ts'o error_msg, le16_to_cpu(eh->eh_magic), 395c29c0ae7SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), 396c29c0ae7SAlex Tomas max, le16_to_cpu(eh->eh_depth), depth); 397c29c0ae7SAlex Tomas 398c29c0ae7SAlex Tomas return -EIO; 399c29c0ae7SAlex Tomas } 400c29c0ae7SAlex Tomas 40156b19868SAneesh Kumar K.V #define ext4_ext_check(inode, eh, depth) \ 402c398eda0STheodore Ts'o __ext4_ext_check(__func__, __LINE__, inode, eh, depth) 403c29c0ae7SAlex Tomas 4047a262f7cSAneesh Kumar K.V int ext4_ext_check_inode(struct inode *inode) 4057a262f7cSAneesh Kumar K.V { 4067a262f7cSAneesh Kumar K.V return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); 4077a262f7cSAneesh Kumar K.V } 4087a262f7cSAneesh Kumar K.V 409a86c6181SAlex Tomas #ifdef EXT_DEBUG 410a86c6181SAlex Tomas static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) 411a86c6181SAlex Tomas { 412a86c6181SAlex Tomas int k, l = path->p_depth; 413a86c6181SAlex Tomas 414a86c6181SAlex Tomas ext_debug("path:"); 415a86c6181SAlex Tomas for (k = 0; k <= l; k++, path++) { 416a86c6181SAlex Tomas if (path->p_idx) { 4172ae02107SMingming Cao ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), 418bf89d16fSTheodore Ts'o ext4_idx_pblock(path->p_idx)); 419a86c6181SAlex Tomas } else if (path->p_ext) { 420553f9008SMingming ext_debug(" %d:[%d]%d:%llu ", 421a86c6181SAlex Tomas le32_to_cpu(path->p_ext->ee_block), 422553f9008SMingming ext4_ext_is_uninitialized(path->p_ext), 423a2df2a63SAmit Arora ext4_ext_get_actual_len(path->p_ext), 424bf89d16fSTheodore Ts'o ext4_ext_pblock(path->p_ext)); 425a86c6181SAlex Tomas } else 426a86c6181SAlex Tomas ext_debug(" []"); 427a86c6181SAlex Tomas } 428a86c6181SAlex Tomas ext_debug("\n"); 429a86c6181SAlex Tomas } 
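/*
 * ext4_ext_show_leaf:
 * debugging helper (compiled only under EXT_DEBUG); dumps every extent
 * in the leaf that @path points to as "lblock:[uninit]len:pblock" tuples.
 */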
430a86c6181SAlex Tomas 431a86c6181SAlex Tomas static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) 432a86c6181SAlex Tomas { 433a86c6181SAlex Tomas int depth = ext_depth(inode); 434a86c6181SAlex Tomas struct ext4_extent_header *eh; 435a86c6181SAlex Tomas struct ext4_extent *ex; 436a86c6181SAlex Tomas int i; 437a86c6181SAlex Tomas 438a86c6181SAlex Tomas if (!path) 439a86c6181SAlex Tomas return; 440a86c6181SAlex Tomas 441a86c6181SAlex Tomas eh = path[depth].p_hdr; 442a86c6181SAlex Tomas ex = EXT_FIRST_EXTENT(eh); 443a86c6181SAlex Tomas 444553f9008SMingming ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); 445553f9008SMingming 446a86c6181SAlex Tomas for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { 447553f9008SMingming ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), 448553f9008SMingming ext4_ext_is_uninitialized(ex), 449bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex)); 450a86c6181SAlex Tomas } 451a86c6181SAlex Tomas ext_debug("\n"); 452a86c6181SAlex Tomas } 4531b16da77SYongqiang Yang 4541b16da77SYongqiang Yang static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, 4551b16da77SYongqiang Yang ext4_fsblk_t newblock, int level) 4561b16da77SYongqiang Yang { 4571b16da77SYongqiang Yang int depth = ext_depth(inode); 4581b16da77SYongqiang Yang struct ext4_extent *ex; 4591b16da77SYongqiang Yang 4601b16da77SYongqiang Yang if (depth != level) { 4611b16da77SYongqiang Yang struct ext4_extent_idx *idx; 4621b16da77SYongqiang Yang idx = path[level].p_idx; 4631b16da77SYongqiang Yang while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { 4641b16da77SYongqiang Yang ext_debug("%d: move %d:%llu in new index %llu\n", level, 4651b16da77SYongqiang Yang le32_to_cpu(idx->ei_block), 4661b16da77SYongqiang Yang ext4_idx_pblock(idx), 4671b16da77SYongqiang Yang newblock); 4681b16da77SYongqiang Yang idx++; 4691b16da77SYongqiang Yang } 4701b16da77SYongqiang Yang 4711b16da77SYongqiang Yang return; 4721b16da77SYongqiang Yang } 4731b16da77SYongqiang Yang 4741b16da77SYongqiang Yang ex = path[depth].p_ext; 4751b16da77SYongqiang Yang while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { 4761b16da77SYongqiang Yang ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", 4771b16da77SYongqiang Yang le32_to_cpu(ex->ee_block), 4781b16da77SYongqiang Yang ext4_ext_pblock(ex), 4791b16da77SYongqiang Yang ext4_ext_is_uninitialized(ex), 4801b16da77SYongqiang Yang ext4_ext_get_actual_len(ex), 4811b16da77SYongqiang Yang newblock); 4821b16da77SYongqiang Yang ex++; 4831b16da77SYongqiang Yang } 4841b16da77SYongqiang Yang } 4851b16da77SYongqiang Yang 486a86c6181SAlex Tomas #else 487a86c6181SAlex Tomas #define ext4_ext_show_path(inode, path) 488a86c6181SAlex Tomas #define ext4_ext_show_leaf(inode, path) 4891b16da77SYongqiang Yang #define ext4_ext_show_move(inode, path, newblock, level) 490a86c6181SAlex Tomas #endif 491a86c6181SAlex Tomas 492b35905c1SAneesh Kumar K.V void ext4_ext_drop_refs(struct ext4_ext_path *path) 493a86c6181SAlex Tomas { 494a86c6181SAlex Tomas int depth = path->p_depth; 495a86c6181SAlex Tomas int i; 496a86c6181SAlex Tomas 497a86c6181SAlex Tomas for (i = 0; i <= depth; i++, path++) 498a86c6181SAlex Tomas if (path->p_bh) { 499a86c6181SAlex Tomas brelse(path->p_bh); 500a86c6181SAlex Tomas path->p_bh = NULL; 501a86c6181SAlex Tomas } 502a86c6181SAlex Tomas } 503a86c6181SAlex Tomas 504a86c6181SAlex Tomas /* 505d0d856e8SRandy Dunlap * ext4_ext_binsearch_idx: 506d0d856e8SRandy Dunlap * binary search for the closest index of the given block 
507c29c0ae7SAlex Tomas * the header must be checked before calling this 508a86c6181SAlex Tomas */ 509a86c6181SAlex Tomas static void 510725d26d3SAneesh Kumar K.V ext4_ext_binsearch_idx(struct inode *inode, 511725d26d3SAneesh Kumar K.V struct ext4_ext_path *path, ext4_lblk_t block) 512a86c6181SAlex Tomas { 513a86c6181SAlex Tomas struct ext4_extent_header *eh = path->p_hdr; 514a86c6181SAlex Tomas struct ext4_extent_idx *r, *l, *m; 515a86c6181SAlex Tomas 516a86c6181SAlex Tomas 517bba90743SEric Sandeen ext_debug("binsearch for %u(idx): ", block); 518a86c6181SAlex Tomas 519a86c6181SAlex Tomas l = EXT_FIRST_INDEX(eh) + 1; 520e9f410b1SDmitry Monakhov r = EXT_LAST_INDEX(eh); 521a86c6181SAlex Tomas while (l <= r) { 522a86c6181SAlex Tomas m = l + (r - l) / 2; 523a86c6181SAlex Tomas if (block < le32_to_cpu(m->ei_block)) 524a86c6181SAlex Tomas r = m - 1; 525a86c6181SAlex Tomas else 526a86c6181SAlex Tomas l = m + 1; 52726d535edSDmitry Monakhov ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block), 52826d535edSDmitry Monakhov m, le32_to_cpu(m->ei_block), 52926d535edSDmitry Monakhov r, le32_to_cpu(r->ei_block)); 530a86c6181SAlex Tomas } 531a86c6181SAlex Tomas 532a86c6181SAlex Tomas path->p_idx = l - 1; 533f65e6fbaSAlex Tomas ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block), 534bf89d16fSTheodore Ts'o ext4_idx_pblock(path->p_idx)); 535a86c6181SAlex Tomas 536a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 537a86c6181SAlex Tomas { 538a86c6181SAlex Tomas struct ext4_extent_idx *chix, *ix; 539a86c6181SAlex Tomas int k; 540a86c6181SAlex Tomas 541a86c6181SAlex Tomas chix = ix = EXT_FIRST_INDEX(eh); 542a86c6181SAlex Tomas for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) { 543a86c6181SAlex Tomas if (k != 0 && 544a86c6181SAlex Tomas le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) { 5454776004fSTheodore Ts'o printk(KERN_DEBUG "k=%d, ix=0x%p, " 5464776004fSTheodore Ts'o "first=0x%p\n", k, 547a86c6181SAlex Tomas ix, EXT_FIRST_INDEX(eh)); 5484776004fSTheodore Ts'o printk(KERN_DEBUG "%u <= %u\n", 549a86c6181SAlex Tomas le32_to_cpu(ix->ei_block), 550a86c6181SAlex Tomas le32_to_cpu(ix[-1].ei_block)); 551a86c6181SAlex Tomas } 552a86c6181SAlex Tomas BUG_ON(k && le32_to_cpu(ix->ei_block) 553a86c6181SAlex Tomas <= le32_to_cpu(ix[-1].ei_block)); 554a86c6181SAlex Tomas if (block < le32_to_cpu(ix->ei_block)) 555a86c6181SAlex Tomas break; 556a86c6181SAlex Tomas chix = ix; 557a86c6181SAlex Tomas } 558a86c6181SAlex Tomas BUG_ON(chix != path->p_idx); 559a86c6181SAlex Tomas } 560a86c6181SAlex Tomas #endif 561a86c6181SAlex Tomas 562a86c6181SAlex Tomas } 563a86c6181SAlex Tomas 564a86c6181SAlex Tomas /* 565d0d856e8SRandy Dunlap * ext4_ext_binsearch: 566d0d856e8SRandy Dunlap * binary search for closest extent of the given block 567c29c0ae7SAlex Tomas * the header must be checked before calling this 568a86c6181SAlex Tomas */ 569a86c6181SAlex Tomas static void 570725d26d3SAneesh Kumar K.V ext4_ext_binsearch(struct inode *inode, 571725d26d3SAneesh Kumar K.V struct ext4_ext_path *path, ext4_lblk_t block) 572a86c6181SAlex Tomas { 573a86c6181SAlex Tomas struct ext4_extent_header *eh = path->p_hdr; 574a86c6181SAlex Tomas struct ext4_extent *r, *l, *m; 575a86c6181SAlex Tomas 576a86c6181SAlex Tomas if (eh->eh_entries == 0) { 577a86c6181SAlex Tomas /* 578d0d856e8SRandy Dunlap * this leaf is empty: 579a86c6181SAlex Tomas * we get such a leaf in split/add case 580a86c6181SAlex Tomas */ 581a86c6181SAlex Tomas return; 582a86c6181SAlex Tomas } 583a86c6181SAlex Tomas 584bba90743SEric Sandeen ext_debug("binsearch for 
%u: ", block); 585a86c6181SAlex Tomas 586a86c6181SAlex Tomas l = EXT_FIRST_EXTENT(eh) + 1; 587e9f410b1SDmitry Monakhov r = EXT_LAST_EXTENT(eh); 588a86c6181SAlex Tomas 589a86c6181SAlex Tomas while (l <= r) { 590a86c6181SAlex Tomas m = l + (r - l) / 2; 591a86c6181SAlex Tomas if (block < le32_to_cpu(m->ee_block)) 592a86c6181SAlex Tomas r = m - 1; 593a86c6181SAlex Tomas else 594a86c6181SAlex Tomas l = m + 1; 59526d535edSDmitry Monakhov ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block), 59626d535edSDmitry Monakhov m, le32_to_cpu(m->ee_block), 59726d535edSDmitry Monakhov r, le32_to_cpu(r->ee_block)); 598a86c6181SAlex Tomas } 599a86c6181SAlex Tomas 600a86c6181SAlex Tomas path->p_ext = l - 1; 601553f9008SMingming ext_debug(" -> %d:%llu:[%d]%d ", 602a86c6181SAlex Tomas le32_to_cpu(path->p_ext->ee_block), 603bf89d16fSTheodore Ts'o ext4_ext_pblock(path->p_ext), 604553f9008SMingming ext4_ext_is_uninitialized(path->p_ext), 605a2df2a63SAmit Arora ext4_ext_get_actual_len(path->p_ext)); 606a86c6181SAlex Tomas 607a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 608a86c6181SAlex Tomas { 609a86c6181SAlex Tomas struct ext4_extent *chex, *ex; 610a86c6181SAlex Tomas int k; 611a86c6181SAlex Tomas 612a86c6181SAlex Tomas chex = ex = EXT_FIRST_EXTENT(eh); 613a86c6181SAlex Tomas for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) { 614a86c6181SAlex Tomas BUG_ON(k && le32_to_cpu(ex->ee_block) 615a86c6181SAlex Tomas <= le32_to_cpu(ex[-1].ee_block)); 616a86c6181SAlex Tomas if (block < le32_to_cpu(ex->ee_block)) 617a86c6181SAlex Tomas break; 618a86c6181SAlex Tomas chex = ex; 619a86c6181SAlex Tomas } 620a86c6181SAlex Tomas BUG_ON(chex != path->p_ext); 621a86c6181SAlex Tomas } 622a86c6181SAlex Tomas #endif 623a86c6181SAlex Tomas 624a86c6181SAlex Tomas } 625a86c6181SAlex Tomas 626a86c6181SAlex Tomas int ext4_ext_tree_init(handle_t *handle, struct inode *inode) 627a86c6181SAlex Tomas { 628a86c6181SAlex Tomas struct ext4_extent_header *eh; 629a86c6181SAlex Tomas 630a86c6181SAlex Tomas eh = ext_inode_hdr(inode); 631a86c6181SAlex Tomas eh->eh_depth = 0; 632a86c6181SAlex Tomas eh->eh_entries = 0; 633a86c6181SAlex Tomas eh->eh_magic = EXT4_EXT_MAGIC; 63455ad63bfSTheodore Ts'o eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); 635a86c6181SAlex Tomas ext4_mark_inode_dirty(handle, inode); 636a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 637a86c6181SAlex Tomas return 0; 638a86c6181SAlex Tomas } 639a86c6181SAlex Tomas 640a86c6181SAlex Tomas struct ext4_ext_path * 641725d26d3SAneesh Kumar K.V ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, 642725d26d3SAneesh Kumar K.V struct ext4_ext_path *path) 643a86c6181SAlex Tomas { 644a86c6181SAlex Tomas struct ext4_extent_header *eh; 645a86c6181SAlex Tomas struct buffer_head *bh; 646a86c6181SAlex Tomas short int depth, i, ppos = 0, alloc = 0; 647a86c6181SAlex Tomas 648a86c6181SAlex Tomas eh = ext_inode_hdr(inode); 649c29c0ae7SAlex Tomas depth = ext_depth(inode); 650a86c6181SAlex Tomas 651a86c6181SAlex Tomas /* account possible depth increase */ 652a86c6181SAlex Tomas if (!path) { 6535d4958f9SAvantika Mathur path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), 654a86c6181SAlex Tomas GFP_NOFS); 655a86c6181SAlex Tomas if (!path) 656a86c6181SAlex Tomas return ERR_PTR(-ENOMEM); 657a86c6181SAlex Tomas alloc = 1; 658a86c6181SAlex Tomas } 659a86c6181SAlex Tomas path[0].p_hdr = eh; 6601973adcbSShen Feng path[0].p_bh = NULL; 661a86c6181SAlex Tomas 662c29c0ae7SAlex Tomas i = depth; 663a86c6181SAlex Tomas /* walk through the tree */ 664a86c6181SAlex Tomas while 
(i) { 6657a262f7cSAneesh Kumar K.V int need_to_validate = 0; 6667a262f7cSAneesh Kumar K.V 667a86c6181SAlex Tomas ext_debug("depth %d: num %d, max %d\n", 668a86c6181SAlex Tomas ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 669c29c0ae7SAlex Tomas 670a86c6181SAlex Tomas ext4_ext_binsearch_idx(inode, path + ppos, block); 671bf89d16fSTheodore Ts'o path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); 672a86c6181SAlex Tomas path[ppos].p_depth = i; 673a86c6181SAlex Tomas path[ppos].p_ext = NULL; 674a86c6181SAlex Tomas 6757a262f7cSAneesh Kumar K.V bh = sb_getblk(inode->i_sb, path[ppos].p_block); 6767a262f7cSAneesh Kumar K.V if (unlikely(!bh)) 677a86c6181SAlex Tomas goto err; 6787a262f7cSAneesh Kumar K.V if (!bh_uptodate_or_lock(bh)) { 6790562e0baSJiaying Zhang trace_ext4_ext_load_extent(inode, block, 6800562e0baSJiaying Zhang path[ppos].p_block); 6817a262f7cSAneesh Kumar K.V if (bh_submit_read(bh) < 0) { 6827a262f7cSAneesh Kumar K.V put_bh(bh); 6837a262f7cSAneesh Kumar K.V goto err; 6847a262f7cSAneesh Kumar K.V } 6857a262f7cSAneesh Kumar K.V /* validate the extent entries */ 6867a262f7cSAneesh Kumar K.V need_to_validate = 1; 6877a262f7cSAneesh Kumar K.V } 688a86c6181SAlex Tomas eh = ext_block_hdr(bh); 689a86c6181SAlex Tomas ppos++; 690273df556SFrank Mayhar if (unlikely(ppos > depth)) { 691273df556SFrank Mayhar put_bh(bh); 692273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 693273df556SFrank Mayhar "ppos %d > depth %d", ppos, depth); 694273df556SFrank Mayhar goto err; 695273df556SFrank Mayhar } 696a86c6181SAlex Tomas path[ppos].p_bh = bh; 697a86c6181SAlex Tomas path[ppos].p_hdr = eh; 698a86c6181SAlex Tomas i--; 699a86c6181SAlex Tomas 7007a262f7cSAneesh Kumar K.V if (need_to_validate && ext4_ext_check(inode, eh, i)) 701a86c6181SAlex Tomas goto err; 702a86c6181SAlex Tomas } 703a86c6181SAlex Tomas 704a86c6181SAlex Tomas path[ppos].p_depth = i; 705a86c6181SAlex Tomas path[ppos].p_ext = NULL; 706a86c6181SAlex Tomas path[ppos].p_idx = NULL; 707a86c6181SAlex Tomas 708a86c6181SAlex Tomas /* find extent */ 709a86c6181SAlex Tomas ext4_ext_binsearch(inode, path + ppos, block); 7101973adcbSShen Feng /* if not an empty leaf */ 7111973adcbSShen Feng if (path[ppos].p_ext) 712bf89d16fSTheodore Ts'o path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); 713a86c6181SAlex Tomas 714a86c6181SAlex Tomas ext4_ext_show_path(inode, path); 715a86c6181SAlex Tomas 716a86c6181SAlex Tomas return path; 717a86c6181SAlex Tomas 718a86c6181SAlex Tomas err: 719a86c6181SAlex Tomas ext4_ext_drop_refs(path); 720a86c6181SAlex Tomas if (alloc) 721a86c6181SAlex Tomas kfree(path); 722a86c6181SAlex Tomas return ERR_PTR(-EIO); 723a86c6181SAlex Tomas } 724a86c6181SAlex Tomas 725a86c6181SAlex Tomas /* 726d0d856e8SRandy Dunlap * ext4_ext_insert_index: 727d0d856e8SRandy Dunlap * insert new index [@logical;@ptr] into the block at @curp; 728d0d856e8SRandy Dunlap * check where to insert: before @curp or after @curp 729a86c6181SAlex Tomas */ 7301f109d5aSTheodore Ts'o static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, 731a86c6181SAlex Tomas struct ext4_ext_path *curp, 732f65e6fbaSAlex Tomas int logical, ext4_fsblk_t ptr) 733a86c6181SAlex Tomas { 734a86c6181SAlex Tomas struct ext4_extent_idx *ix; 735a86c6181SAlex Tomas int len, err; 736a86c6181SAlex Tomas 7377e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, curp); 7387e028976SAvantika Mathur if (err) 739a86c6181SAlex Tomas return err; 740a86c6181SAlex Tomas 741273df556SFrank Mayhar if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) { 
742273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 743273df556SFrank Mayhar "logical %d == ei_block %d!", 744273df556SFrank Mayhar logical, le32_to_cpu(curp->p_idx->ei_block)); 745273df556SFrank Mayhar return -EIO; 746273df556SFrank Mayhar } 747d4620315SRobin Dong 748d4620315SRobin Dong if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries) 749d4620315SRobin Dong >= le16_to_cpu(curp->p_hdr->eh_max))) { 750d4620315SRobin Dong EXT4_ERROR_INODE(inode, 751d4620315SRobin Dong "eh_entries %d >= eh_max %d!", 752d4620315SRobin Dong le16_to_cpu(curp->p_hdr->eh_entries), 753d4620315SRobin Dong le16_to_cpu(curp->p_hdr->eh_max)); 754d4620315SRobin Dong return -EIO; 755d4620315SRobin Dong } 756d4620315SRobin Dong 757a86c6181SAlex Tomas if (logical > le32_to_cpu(curp->p_idx->ei_block)) { 758a86c6181SAlex Tomas /* insert after */ 759*80e675f9SEric Gouriou ext_debug("insert new index %d after: %llu\n", logical, ptr); 760a86c6181SAlex Tomas ix = curp->p_idx + 1; 761a86c6181SAlex Tomas } else { 762a86c6181SAlex Tomas /* insert before */ 763*80e675f9SEric Gouriou ext_debug("insert new index %d before: %llu\n", logical, ptr); 764a86c6181SAlex Tomas ix = curp->p_idx; 765a86c6181SAlex Tomas } 766a86c6181SAlex Tomas 767*80e675f9SEric Gouriou len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1; 768*80e675f9SEric Gouriou BUG_ON(len < 0); 769*80e675f9SEric Gouriou if (len > 0) { 770*80e675f9SEric Gouriou ext_debug("insert new index %d: " 771*80e675f9SEric Gouriou "move %d indices from 0x%p to 0x%p\n", 772*80e675f9SEric Gouriou logical, len, ix, ix + 1); 773*80e675f9SEric Gouriou memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx)); 774*80e675f9SEric Gouriou } 775*80e675f9SEric Gouriou 776f472e026STao Ma if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) { 777f472e026STao Ma EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!"); 778f472e026STao Ma return -EIO; 779f472e026STao Ma } 780f472e026STao Ma 781a86c6181SAlex Tomas ix->ei_block = cpu_to_le32(logical); 782f65e6fbaSAlex Tomas ext4_idx_store_pblock(ix, ptr); 783e8546d06SMarcin Slusarz le16_add_cpu(&curp->p_hdr->eh_entries, 1); 784a86c6181SAlex Tomas 785273df556SFrank Mayhar if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) { 786273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!"); 787273df556SFrank Mayhar return -EIO; 788273df556SFrank Mayhar } 789a86c6181SAlex Tomas 790a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, curp); 791a86c6181SAlex Tomas ext4_std_error(inode->i_sb, err); 792a86c6181SAlex Tomas 793a86c6181SAlex Tomas return err; 794a86c6181SAlex Tomas } 795a86c6181SAlex Tomas 796a86c6181SAlex Tomas /* 797d0d856e8SRandy Dunlap * ext4_ext_split: 798d0d856e8SRandy Dunlap * inserts new subtree into the path, using free index entry 799d0d856e8SRandy Dunlap * at depth @at: 800a86c6181SAlex Tomas * - allocates all needed blocks (new leaf and all intermediate index blocks) 801a86c6181SAlex Tomas * - makes decision where to split 802d0d856e8SRandy Dunlap * - moves remaining extents and index entries (right to the split point) 803a86c6181SAlex Tomas * into the newly allocated blocks 804d0d856e8SRandy Dunlap * - initializes subtree 805a86c6181SAlex Tomas */ 806a86c6181SAlex Tomas static int ext4_ext_split(handle_t *handle, struct inode *inode, 80755f020dbSAllison Henderson unsigned int flags, 808a86c6181SAlex Tomas struct ext4_ext_path *path, 809a86c6181SAlex Tomas struct ext4_extent *newext, int at) 810a86c6181SAlex Tomas { 811a86c6181SAlex Tomas struct buffer_head *bh = NULL; 812a86c6181SAlex Tomas int depth = ext_depth(inode); 813a86c6181SAlex Tomas struct 
ext4_extent_header *neh; 814a86c6181SAlex Tomas struct ext4_extent_idx *fidx; 815a86c6181SAlex Tomas int i = at, k, m, a; 816f65e6fbaSAlex Tomas ext4_fsblk_t newblock, oldblock; 817a86c6181SAlex Tomas __le32 border; 818f65e6fbaSAlex Tomas ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ 819a86c6181SAlex Tomas int err = 0; 820a86c6181SAlex Tomas 821a86c6181SAlex Tomas /* make decision: where to split? */ 822d0d856e8SRandy Dunlap /* FIXME: now decision is simplest: at current extent */ 823a86c6181SAlex Tomas 824d0d856e8SRandy Dunlap /* if current leaf will be split, then we should use 825a86c6181SAlex Tomas * border from split point */ 826273df556SFrank Mayhar if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) { 827273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!"); 828273df556SFrank Mayhar return -EIO; 829273df556SFrank Mayhar } 830a86c6181SAlex Tomas if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) { 831a86c6181SAlex Tomas border = path[depth].p_ext[1].ee_block; 832d0d856e8SRandy Dunlap ext_debug("leaf will be split." 833a86c6181SAlex Tomas " next leaf starts at %d\n", 834a86c6181SAlex Tomas le32_to_cpu(border)); 835a86c6181SAlex Tomas } else { 836a86c6181SAlex Tomas border = newext->ee_block; 837a86c6181SAlex Tomas ext_debug("leaf will be added." 838a86c6181SAlex Tomas " next leaf starts at %d\n", 839a86c6181SAlex Tomas le32_to_cpu(border)); 840a86c6181SAlex Tomas } 841a86c6181SAlex Tomas 842a86c6181SAlex Tomas /* 843d0d856e8SRandy Dunlap * If error occurs, then we break processing 844d0d856e8SRandy Dunlap * and mark filesystem read-only. index won't 845a86c6181SAlex Tomas * be inserted and tree will be in consistent 846d0d856e8SRandy Dunlap * state. Next mount will repair buffers too. 847a86c6181SAlex Tomas */ 848a86c6181SAlex Tomas 849a86c6181SAlex Tomas /* 850d0d856e8SRandy Dunlap * Get array to track all allocated blocks. 851d0d856e8SRandy Dunlap * We need this to handle errors and free blocks 852d0d856e8SRandy Dunlap * upon them. 
853a86c6181SAlex Tomas */ 8545d4958f9SAvantika Mathur ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS); 855a86c6181SAlex Tomas if (!ablocks) 856a86c6181SAlex Tomas return -ENOMEM; 857a86c6181SAlex Tomas 858a86c6181SAlex Tomas /* allocate all needed blocks */ 859a86c6181SAlex Tomas ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); 860a86c6181SAlex Tomas for (a = 0; a < depth - at; a++) { 861654b4908SAneesh Kumar K.V newblock = ext4_ext_new_meta_block(handle, inode, path, 86255f020dbSAllison Henderson newext, &err, flags); 863a86c6181SAlex Tomas if (newblock == 0) 864a86c6181SAlex Tomas goto cleanup; 865a86c6181SAlex Tomas ablocks[a] = newblock; 866a86c6181SAlex Tomas } 867a86c6181SAlex Tomas 868a86c6181SAlex Tomas /* initialize new leaf */ 869a86c6181SAlex Tomas newblock = ablocks[--a]; 870273df556SFrank Mayhar if (unlikely(newblock == 0)) { 871273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "newblock == 0!"); 872273df556SFrank Mayhar err = -EIO; 873273df556SFrank Mayhar goto cleanup; 874273df556SFrank Mayhar } 875a86c6181SAlex Tomas bh = sb_getblk(inode->i_sb, newblock); 876a86c6181SAlex Tomas if (!bh) { 877a86c6181SAlex Tomas err = -EIO; 878a86c6181SAlex Tomas goto cleanup; 879a86c6181SAlex Tomas } 880a86c6181SAlex Tomas lock_buffer(bh); 881a86c6181SAlex Tomas 8827e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 8837e028976SAvantika Mathur if (err) 884a86c6181SAlex Tomas goto cleanup; 885a86c6181SAlex Tomas 886a86c6181SAlex Tomas neh = ext_block_hdr(bh); 887a86c6181SAlex Tomas neh->eh_entries = 0; 88855ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 889a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 890a86c6181SAlex Tomas neh->eh_depth = 0; 891a86c6181SAlex Tomas 892d0d856e8SRandy Dunlap /* move remainder of path[depth] to the new leaf */ 893273df556SFrank Mayhar if (unlikely(path[depth].p_hdr->eh_entries != 894273df556SFrank Mayhar path[depth].p_hdr->eh_max)) { 895273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!", 896273df556SFrank Mayhar path[depth].p_hdr->eh_entries, 897273df556SFrank Mayhar path[depth].p_hdr->eh_max); 898273df556SFrank Mayhar err = -EIO; 899273df556SFrank Mayhar goto cleanup; 900273df556SFrank Mayhar } 901a86c6181SAlex Tomas /* start copy from next extent */ 9021b16da77SYongqiang Yang m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; 9031b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, depth); 904a86c6181SAlex Tomas if (m) { 9051b16da77SYongqiang Yang struct ext4_extent *ex; 9061b16da77SYongqiang Yang ex = EXT_FIRST_EXTENT(neh); 9071b16da77SYongqiang Yang memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); 908e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 909a86c6181SAlex Tomas } 910a86c6181SAlex Tomas 911a86c6181SAlex Tomas set_buffer_uptodate(bh); 912a86c6181SAlex Tomas unlock_buffer(bh); 913a86c6181SAlex Tomas 9140390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 9157e028976SAvantika Mathur if (err) 916a86c6181SAlex Tomas goto cleanup; 917a86c6181SAlex Tomas brelse(bh); 918a86c6181SAlex Tomas bh = NULL; 919a86c6181SAlex Tomas 920a86c6181SAlex Tomas /* correct old leaf */ 921a86c6181SAlex Tomas if (m) { 9227e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 9237e028976SAvantika Mathur if (err) 924a86c6181SAlex Tomas goto cleanup; 925e8546d06SMarcin Slusarz le16_add_cpu(&path[depth].p_hdr->eh_entries, -m); 9267e028976SAvantika Mathur err = 
ext4_ext_dirty(handle, inode, path + depth); 9277e028976SAvantika Mathur if (err) 928a86c6181SAlex Tomas goto cleanup; 929a86c6181SAlex Tomas 930a86c6181SAlex Tomas } 931a86c6181SAlex Tomas 932a86c6181SAlex Tomas /* create intermediate indexes */ 933a86c6181SAlex Tomas k = depth - at - 1; 934273df556SFrank Mayhar if (unlikely(k < 0)) { 935273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "k %d < 0!", k); 936273df556SFrank Mayhar err = -EIO; 937273df556SFrank Mayhar goto cleanup; 938273df556SFrank Mayhar } 939a86c6181SAlex Tomas if (k) 940a86c6181SAlex Tomas ext_debug("create %d intermediate indices\n", k); 941a86c6181SAlex Tomas /* insert new index into current index block */ 942a86c6181SAlex Tomas /* current depth stored in i var */ 943a86c6181SAlex Tomas i = depth - 1; 944a86c6181SAlex Tomas while (k--) { 945a86c6181SAlex Tomas oldblock = newblock; 946a86c6181SAlex Tomas newblock = ablocks[--a]; 947bba90743SEric Sandeen bh = sb_getblk(inode->i_sb, newblock); 948a86c6181SAlex Tomas if (!bh) { 949a86c6181SAlex Tomas err = -EIO; 950a86c6181SAlex Tomas goto cleanup; 951a86c6181SAlex Tomas } 952a86c6181SAlex Tomas lock_buffer(bh); 953a86c6181SAlex Tomas 9547e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 9557e028976SAvantika Mathur if (err) 956a86c6181SAlex Tomas goto cleanup; 957a86c6181SAlex Tomas 958a86c6181SAlex Tomas neh = ext_block_hdr(bh); 959a86c6181SAlex Tomas neh->eh_entries = cpu_to_le16(1); 960a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 96155ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 962a86c6181SAlex Tomas neh->eh_depth = cpu_to_le16(depth - i); 963a86c6181SAlex Tomas fidx = EXT_FIRST_INDEX(neh); 964a86c6181SAlex Tomas fidx->ei_block = border; 965f65e6fbaSAlex Tomas ext4_idx_store_pblock(fidx, oldblock); 966a86c6181SAlex Tomas 967bba90743SEric Sandeen ext_debug("int.index at %d (block %llu): %u -> %llu\n", 968bba90743SEric Sandeen i, newblock, le32_to_cpu(border), oldblock); 969a86c6181SAlex Tomas 9701b16da77SYongqiang Yang /* move remainder of path[i] to the new index block */ 971273df556SFrank Mayhar if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != 972273df556SFrank Mayhar EXT_LAST_INDEX(path[i].p_hdr))) { 973273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 974273df556SFrank Mayhar "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!", 975273df556SFrank Mayhar le32_to_cpu(path[i].p_ext->ee_block)); 976273df556SFrank Mayhar err = -EIO; 977273df556SFrank Mayhar goto cleanup; 978273df556SFrank Mayhar } 9791b16da77SYongqiang Yang /* start copy indexes */ 9801b16da77SYongqiang Yang m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; 9811b16da77SYongqiang Yang ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 9821b16da77SYongqiang Yang EXT_MAX_INDEX(path[i].p_hdr)); 9831b16da77SYongqiang Yang ext4_ext_show_move(inode, path, newblock, i); 984a86c6181SAlex Tomas if (m) { 9851b16da77SYongqiang Yang memmove(++fidx, path[i].p_idx, 986a86c6181SAlex Tomas sizeof(struct ext4_extent_idx) * m); 987e8546d06SMarcin Slusarz le16_add_cpu(&neh->eh_entries, m); 988a86c6181SAlex Tomas } 989a86c6181SAlex Tomas set_buffer_uptodate(bh); 990a86c6181SAlex Tomas unlock_buffer(bh); 991a86c6181SAlex Tomas 9920390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 9937e028976SAvantika Mathur if (err) 994a86c6181SAlex Tomas goto cleanup; 995a86c6181SAlex Tomas brelse(bh); 996a86c6181SAlex Tomas bh = NULL; 997a86c6181SAlex Tomas 998a86c6181SAlex Tomas /* correct old index */ 999a86c6181SAlex Tomas if (m) { 1000a86c6181SAlex Tomas err = 
ext4_ext_get_access(handle, inode, path + i); 1001a86c6181SAlex Tomas if (err) 1002a86c6181SAlex Tomas goto cleanup; 1003e8546d06SMarcin Slusarz le16_add_cpu(&path[i].p_hdr->eh_entries, -m); 1004a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path + i); 1005a86c6181SAlex Tomas if (err) 1006a86c6181SAlex Tomas goto cleanup; 1007a86c6181SAlex Tomas } 1008a86c6181SAlex Tomas 1009a86c6181SAlex Tomas i--; 1010a86c6181SAlex Tomas } 1011a86c6181SAlex Tomas 1012a86c6181SAlex Tomas /* insert new index */ 1013a86c6181SAlex Tomas err = ext4_ext_insert_index(handle, inode, path + at, 1014a86c6181SAlex Tomas le32_to_cpu(border), newblock); 1015a86c6181SAlex Tomas 1016a86c6181SAlex Tomas cleanup: 1017a86c6181SAlex Tomas if (bh) { 1018a86c6181SAlex Tomas if (buffer_locked(bh)) 1019a86c6181SAlex Tomas unlock_buffer(bh); 1020a86c6181SAlex Tomas brelse(bh); 1021a86c6181SAlex Tomas } 1022a86c6181SAlex Tomas 1023a86c6181SAlex Tomas if (err) { 1024a86c6181SAlex Tomas /* free all allocated blocks in error case */ 1025a86c6181SAlex Tomas for (i = 0; i < depth; i++) { 1026a86c6181SAlex Tomas if (!ablocks[i]) 1027a86c6181SAlex Tomas continue; 10287dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, 1029e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA); 1030a86c6181SAlex Tomas } 1031a86c6181SAlex Tomas } 1032a86c6181SAlex Tomas kfree(ablocks); 1033a86c6181SAlex Tomas 1034a86c6181SAlex Tomas return err; 1035a86c6181SAlex Tomas } 1036a86c6181SAlex Tomas 1037a86c6181SAlex Tomas /* 1038d0d856e8SRandy Dunlap * ext4_ext_grow_indepth: 1039d0d856e8SRandy Dunlap * implements tree growing procedure: 1040a86c6181SAlex Tomas * - allocates new block 1041a86c6181SAlex Tomas * - moves top-level data (index block or leaf) into the new block 1042d0d856e8SRandy Dunlap * - initializes new top-level, creating index that points to the 1043a86c6181SAlex Tomas * just created block 1044a86c6181SAlex Tomas */ 1045a86c6181SAlex Tomas static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 104655f020dbSAllison Henderson unsigned int flags, 1047a86c6181SAlex Tomas struct ext4_extent *newext) 1048a86c6181SAlex Tomas { 1049a86c6181SAlex Tomas struct ext4_extent_header *neh; 1050a86c6181SAlex Tomas struct buffer_head *bh; 1051f65e6fbaSAlex Tomas ext4_fsblk_t newblock; 1052a86c6181SAlex Tomas int err = 0; 1053a86c6181SAlex Tomas 10541939dd84SDmitry Monakhov newblock = ext4_ext_new_meta_block(handle, inode, NULL, 105555f020dbSAllison Henderson newext, &err, flags); 1056a86c6181SAlex Tomas if (newblock == 0) 1057a86c6181SAlex Tomas return err; 1058a86c6181SAlex Tomas 1059a86c6181SAlex Tomas bh = sb_getblk(inode->i_sb, newblock); 1060a86c6181SAlex Tomas if (!bh) { 1061a86c6181SAlex Tomas err = -EIO; 1062a86c6181SAlex Tomas ext4_std_error(inode->i_sb, err); 1063a86c6181SAlex Tomas return err; 1064a86c6181SAlex Tomas } 1065a86c6181SAlex Tomas lock_buffer(bh); 1066a86c6181SAlex Tomas 10677e028976SAvantika Mathur err = ext4_journal_get_create_access(handle, bh); 10687e028976SAvantika Mathur if (err) { 1069a86c6181SAlex Tomas unlock_buffer(bh); 1070a86c6181SAlex Tomas goto out; 1071a86c6181SAlex Tomas } 1072a86c6181SAlex Tomas 1073a86c6181SAlex Tomas /* move top-level index/leaf into new block */ 10741939dd84SDmitry Monakhov memmove(bh->b_data, EXT4_I(inode)->i_data, 10751939dd84SDmitry Monakhov sizeof(EXT4_I(inode)->i_data)); 1076a86c6181SAlex Tomas 1077a86c6181SAlex Tomas /* set size of new block */ 1078a86c6181SAlex Tomas neh = ext_block_hdr(bh); 1079a86c6181SAlex Tomas /* old root could have indexes or 
leaves 1080a86c6181SAlex Tomas * so calculate e_max right way */ 1081a86c6181SAlex Tomas if (ext_depth(inode)) 108255ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); 1083a86c6181SAlex Tomas else 108455ad63bfSTheodore Ts'o neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1085a86c6181SAlex Tomas neh->eh_magic = EXT4_EXT_MAGIC; 1086a86c6181SAlex Tomas set_buffer_uptodate(bh); 1087a86c6181SAlex Tomas unlock_buffer(bh); 1088a86c6181SAlex Tomas 10890390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 10907e028976SAvantika Mathur if (err) 1091a86c6181SAlex Tomas goto out; 1092a86c6181SAlex Tomas 10931939dd84SDmitry Monakhov /* Update top-level index: num,max,pointer */ 1094a86c6181SAlex Tomas neh = ext_inode_hdr(inode); 10951939dd84SDmitry Monakhov neh->eh_entries = cpu_to_le16(1); 10961939dd84SDmitry Monakhov ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock); 10971939dd84SDmitry Monakhov if (neh->eh_depth == 0) { 10981939dd84SDmitry Monakhov /* Root extent block becomes index block */ 10991939dd84SDmitry Monakhov neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); 11001939dd84SDmitry Monakhov EXT_FIRST_INDEX(neh)->ei_block = 11011939dd84SDmitry Monakhov EXT_FIRST_EXTENT(neh)->ee_block; 11021939dd84SDmitry Monakhov } 11032ae02107SMingming Cao ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n", 1104a86c6181SAlex Tomas le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max), 11055a0790c2SAndi Kleen le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block), 1106bf89d16fSTheodore Ts'o ext4_idx_pblock(EXT_FIRST_INDEX(neh))); 1107a86c6181SAlex Tomas 11081939dd84SDmitry Monakhov neh->eh_depth = cpu_to_le16(neh->eh_depth + 1); 11091939dd84SDmitry Monakhov ext4_mark_inode_dirty(handle, inode); 1110a86c6181SAlex Tomas out: 1111a86c6181SAlex Tomas brelse(bh); 1112a86c6181SAlex Tomas 1113a86c6181SAlex Tomas return err; 1114a86c6181SAlex Tomas } 1115a86c6181SAlex Tomas 1116a86c6181SAlex Tomas /* 1117d0d856e8SRandy Dunlap * ext4_ext_create_new_leaf: 1118d0d856e8SRandy Dunlap * finds empty index and adds new leaf. 1119d0d856e8SRandy Dunlap * if no free index is found, then it requests in-depth growing. 
1120a86c6181SAlex Tomas */ 1121a86c6181SAlex Tomas static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 112255f020dbSAllison Henderson unsigned int flags, 1123a86c6181SAlex Tomas struct ext4_ext_path *path, 1124a86c6181SAlex Tomas struct ext4_extent *newext) 1125a86c6181SAlex Tomas { 1126a86c6181SAlex Tomas struct ext4_ext_path *curp; 1127a86c6181SAlex Tomas int depth, i, err = 0; 1128a86c6181SAlex Tomas 1129a86c6181SAlex Tomas repeat: 1130a86c6181SAlex Tomas i = depth = ext_depth(inode); 1131a86c6181SAlex Tomas 1132a86c6181SAlex Tomas /* walk up to the tree and look for free index entry */ 1133a86c6181SAlex Tomas curp = path + depth; 1134a86c6181SAlex Tomas while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) { 1135a86c6181SAlex Tomas i--; 1136a86c6181SAlex Tomas curp--; 1137a86c6181SAlex Tomas } 1138a86c6181SAlex Tomas 1139d0d856e8SRandy Dunlap /* we use already allocated block for index block, 1140d0d856e8SRandy Dunlap * so subsequent data blocks should be contiguous */ 1141a86c6181SAlex Tomas if (EXT_HAS_FREE_INDEX(curp)) { 1142a86c6181SAlex Tomas /* if we found index with free entry, then use that 1143a86c6181SAlex Tomas * entry: create all needed subtree and add new leaf */ 114455f020dbSAllison Henderson err = ext4_ext_split(handle, inode, flags, path, newext, i); 1145787e0981SShen Feng if (err) 1146787e0981SShen Feng goto out; 1147a86c6181SAlex Tomas 1148a86c6181SAlex Tomas /* refill path */ 1149a86c6181SAlex Tomas ext4_ext_drop_refs(path); 1150a86c6181SAlex Tomas path = ext4_ext_find_extent(inode, 1151725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1152a86c6181SAlex Tomas path); 1153a86c6181SAlex Tomas if (IS_ERR(path)) 1154a86c6181SAlex Tomas err = PTR_ERR(path); 1155a86c6181SAlex Tomas } else { 1156a86c6181SAlex Tomas /* tree is full, time to grow in depth */ 11571939dd84SDmitry Monakhov err = ext4_ext_grow_indepth(handle, inode, flags, newext); 1158a86c6181SAlex Tomas if (err) 1159a86c6181SAlex Tomas goto out; 1160a86c6181SAlex Tomas 1161a86c6181SAlex Tomas /* refill path */ 1162a86c6181SAlex Tomas ext4_ext_drop_refs(path); 1163a86c6181SAlex Tomas path = ext4_ext_find_extent(inode, 1164725d26d3SAneesh Kumar K.V (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1165a86c6181SAlex Tomas path); 1166a86c6181SAlex Tomas if (IS_ERR(path)) { 1167a86c6181SAlex Tomas err = PTR_ERR(path); 1168a86c6181SAlex Tomas goto out; 1169a86c6181SAlex Tomas } 1170a86c6181SAlex Tomas 1171a86c6181SAlex Tomas /* 1172d0d856e8SRandy Dunlap * only first (depth 0 -> 1) produces free space; 1173d0d856e8SRandy Dunlap * in all other cases we have to split the grown tree 1174a86c6181SAlex Tomas */ 1175a86c6181SAlex Tomas depth = ext_depth(inode); 1176a86c6181SAlex Tomas if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) { 1177d0d856e8SRandy Dunlap /* now we need to split */ 1178a86c6181SAlex Tomas goto repeat; 1179a86c6181SAlex Tomas } 1180a86c6181SAlex Tomas } 1181a86c6181SAlex Tomas 1182a86c6181SAlex Tomas out: 1183a86c6181SAlex Tomas return err; 1184a86c6181SAlex Tomas } 1185a86c6181SAlex Tomas 1186a86c6181SAlex Tomas /* 11871988b51eSAlex Tomas * search the closest allocated block to the left for *logical 11881988b51eSAlex Tomas * and returns it at @logical + it's physical address at @phys 11891988b51eSAlex Tomas * if *logical is the smallest allocated block, the function 11901988b51eSAlex Tomas * returns 0 at @phys 11911988b51eSAlex Tomas * return value contains 0 (success) or error code 11921988b51eSAlex Tomas */ 11931f109d5aSTheodore Ts'o static int 
ext4_ext_search_left(struct inode *inode,
11941f109d5aSTheodore Ts'o struct ext4_ext_path *path,
11951988b51eSAlex Tomas ext4_lblk_t *logical, ext4_fsblk_t *phys)
11961988b51eSAlex Tomas {
11971988b51eSAlex Tomas struct ext4_extent_idx *ix;
11981988b51eSAlex Tomas struct ext4_extent *ex;
1199b939e376SAneesh Kumar K.V int depth, ee_len;
12001988b51eSAlex Tomas
1201273df556SFrank Mayhar if (unlikely(path == NULL)) {
1202273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1203273df556SFrank Mayhar return -EIO;
1204273df556SFrank Mayhar }
12051988b51eSAlex Tomas depth = path->p_depth;
12061988b51eSAlex Tomas *phys = 0;
12071988b51eSAlex Tomas
12081988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL)
12091988b51eSAlex Tomas return 0;
12101988b51eSAlex Tomas
12111988b51eSAlex Tomas /* usually extent in the path covers blocks smaller
12121988b51eSAlex Tomas * than *logical, but it can be that extent is the
12131988b51eSAlex Tomas * first one in the file */
12141988b51eSAlex Tomas
12151988b51eSAlex Tomas ex = path[depth].p_ext;
1216b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex);
12171988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) {
1218273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1219273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1220273df556SFrank Mayhar "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1221273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block));
1222273df556SFrank Mayhar return -EIO;
1223273df556SFrank Mayhar }
12241988b51eSAlex Tomas while (--depth >= 0) {
12251988b51eSAlex Tomas ix = path[depth].p_idx;
1226273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1227273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1228273df556SFrank Mayhar "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
12296ee3b212STao Ma ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1230273df556SFrank Mayhar EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
12316ee3b212STao Ma le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1232273df556SFrank Mayhar depth);
1233273df556SFrank Mayhar return -EIO;
1234273df556SFrank Mayhar }
12351988b51eSAlex Tomas }
12361988b51eSAlex Tomas return 0;
12371988b51eSAlex Tomas }
12381988b51eSAlex Tomas
1239273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1240273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1241273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!",
1242273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len);
1243273df556SFrank Mayhar return -EIO;
1244273df556SFrank Mayhar }
12451988b51eSAlex Tomas
1246b939e376SAneesh Kumar K.V *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1247bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex) + ee_len - 1;
12481988b51eSAlex Tomas return 0;
12491988b51eSAlex Tomas }
12501988b51eSAlex Tomas
12511988b51eSAlex Tomas /*
12521988b51eSAlex Tomas * search the closest allocated block to the right for *logical
12531988b51eSAlex Tomas * and returns it at @logical + its physical address at @phys
1254df3ab170STao Ma * if *logical is the largest allocated block, the function
12551988b51eSAlex Tomas * returns 0 at @phys
12561988b51eSAlex Tomas * return value contains 0 (success) or error code
12571988b51eSAlex Tomas */
12581f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode,
12591f109d5aSTheodore Ts'o struct ext4_ext_path *path,
12604d33b1efSTheodore Ts'o ext4_lblk_t *logical, ext4_fsblk_t *phys,
12614d33b1efSTheodore Ts'o struct ext4_extent **ret_ex)
12621988b51eSAlex Tomas {
12631988b51eSAlex Tomas struct buffer_head *bh = NULL;
12641988b51eSAlex Tomas struct ext4_extent_header *eh;
12651988b51eSAlex Tomas struct ext4_extent_idx *ix;
12661988b51eSAlex Tomas struct ext4_extent *ex;
12671988b51eSAlex Tomas ext4_fsblk_t block;
1268395a87bfSEric Sandeen int depth; /* Note, NOT eh_depth; depth from top of tree */
1269395a87bfSEric Sandeen int ee_len;
12701988b51eSAlex Tomas
1271273df556SFrank Mayhar if (unlikely(path == NULL)) {
1272273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1273273df556SFrank Mayhar return -EIO;
1274273df556SFrank Mayhar }
12751988b51eSAlex Tomas depth = path->p_depth;
12761988b51eSAlex Tomas *phys = 0;
12771988b51eSAlex Tomas
12781988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL)
12791988b51eSAlex Tomas return 0;
12801988b51eSAlex Tomas
12811988b51eSAlex Tomas /* usually extent in the path covers blocks smaller
12821988b51eSAlex Tomas * than *logical, but it can be that extent is the
12831988b51eSAlex Tomas * first one in the file */
12841988b51eSAlex Tomas
12851988b51eSAlex Tomas ex = path[depth].p_ext;
1286b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex);
12871988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) {
1288273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1289273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1290273df556SFrank Mayhar "first_extent(path[%d].p_hdr) != ex",
1291273df556SFrank Mayhar depth);
1292273df556SFrank Mayhar return -EIO;
1293273df556SFrank Mayhar }
12941988b51eSAlex Tomas while (--depth >= 0) {
12951988b51eSAlex Tomas ix = path[depth].p_idx;
1296273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1297273df556SFrank Mayhar EXT4_ERROR_INODE(inode,
1298273df556SFrank Mayhar "ix != EXT_FIRST_INDEX *logical %d!",
1299273df556SFrank Mayhar *logical);
1300273df556SFrank Mayhar return -EIO;
1301273df556SFrank
Mayhar } 13021988b51eSAlex Tomas } 13034d33b1efSTheodore Ts'o goto found_extent; 13041988b51eSAlex Tomas } 13051988b51eSAlex Tomas 1306273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1307273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1308273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1309273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1310273df556SFrank Mayhar return -EIO; 1311273df556SFrank Mayhar } 13121988b51eSAlex Tomas 13131988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 13141988b51eSAlex Tomas /* next allocated block in this leaf */ 13151988b51eSAlex Tomas ex++; 13164d33b1efSTheodore Ts'o goto found_extent; 13171988b51eSAlex Tomas } 13181988b51eSAlex Tomas 13191988b51eSAlex Tomas /* go up and search for index to the right */ 13201988b51eSAlex Tomas while (--depth >= 0) { 13211988b51eSAlex Tomas ix = path[depth].p_idx; 13221988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 132325f1ee3aSWu Fengguang goto got_index; 13241988b51eSAlex Tomas } 13251988b51eSAlex Tomas 132625f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */ 13271988b51eSAlex Tomas return 0; 13281988b51eSAlex Tomas 132925f1ee3aSWu Fengguang got_index: 13301988b51eSAlex Tomas /* we've found index to the right, let's 13311988b51eSAlex Tomas * follow it and find the closest allocated 13321988b51eSAlex Tomas * block to the right */ 13331988b51eSAlex Tomas ix++; 1334bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 13351988b51eSAlex Tomas while (++depth < path->p_depth) { 13361988b51eSAlex Tomas bh = sb_bread(inode->i_sb, block); 13371988b51eSAlex Tomas if (bh == NULL) 13381988b51eSAlex Tomas return -EIO; 13391988b51eSAlex Tomas eh = ext_block_hdr(bh); 1340395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */ 134156b19868SAneesh Kumar K.V if (ext4_ext_check(inode, eh, path->p_depth - depth)) { 13421988b51eSAlex Tomas put_bh(bh); 13431988b51eSAlex Tomas return -EIO; 13441988b51eSAlex Tomas } 13451988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh); 1346bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 13471988b51eSAlex Tomas put_bh(bh); 13481988b51eSAlex Tomas } 13491988b51eSAlex Tomas 13501988b51eSAlex Tomas bh = sb_bread(inode->i_sb, block); 13511988b51eSAlex Tomas if (bh == NULL) 13521988b51eSAlex Tomas return -EIO; 13531988b51eSAlex Tomas eh = ext_block_hdr(bh); 135456b19868SAneesh Kumar K.V if (ext4_ext_check(inode, eh, path->p_depth - depth)) { 13551988b51eSAlex Tomas put_bh(bh); 13561988b51eSAlex Tomas return -EIO; 13571988b51eSAlex Tomas } 13581988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh); 13594d33b1efSTheodore Ts'o found_extent: 13601988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block); 1361bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex); 13624d33b1efSTheodore Ts'o *ret_ex = ex; 13634d33b1efSTheodore Ts'o if (bh) 13641988b51eSAlex Tomas put_bh(bh); 13651988b51eSAlex Tomas return 0; 13661988b51eSAlex Tomas } 13671988b51eSAlex Tomas 13681988b51eSAlex Tomas /* 1369d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block: 1370f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1371d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as 1372d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent 1373d0d856e8SRandy Dunlap * with leaves. 
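 *
 * Illustrative example (not part of the original comment): in a
 * two-level tree whose current leaf ends with an extent at logical
 * block 99 while the parent index holds a following entry starting at
 * block 200, this helper returns 200; if no later extent or index
 * entry exists anywhere on the path, it returns EXT_MAX_BLOCKS.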
1374a86c6181SAlex Tomas */ 1375725d26d3SAneesh Kumar K.V static ext4_lblk_t 1376a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1377a86c6181SAlex Tomas { 1378a86c6181SAlex Tomas int depth; 1379a86c6181SAlex Tomas 1380a86c6181SAlex Tomas BUG_ON(path == NULL); 1381a86c6181SAlex Tomas depth = path->p_depth; 1382a86c6181SAlex Tomas 1383a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL) 1384f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1385a86c6181SAlex Tomas 1386a86c6181SAlex Tomas while (depth >= 0) { 1387a86c6181SAlex Tomas if (depth == path->p_depth) { 1388a86c6181SAlex Tomas /* leaf */ 13896f8ff537SCurt Wohlgemuth if (path[depth].p_ext && 13906f8ff537SCurt Wohlgemuth path[depth].p_ext != 1391a86c6181SAlex Tomas EXT_LAST_EXTENT(path[depth].p_hdr)) 1392a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_ext[1].ee_block); 1393a86c6181SAlex Tomas } else { 1394a86c6181SAlex Tomas /* index */ 1395a86c6181SAlex Tomas if (path[depth].p_idx != 1396a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1397a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_idx[1].ei_block); 1398a86c6181SAlex Tomas } 1399a86c6181SAlex Tomas depth--; 1400a86c6181SAlex Tomas } 1401a86c6181SAlex Tomas 1402f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1403a86c6181SAlex Tomas } 1404a86c6181SAlex Tomas 1405a86c6181SAlex Tomas /* 1406d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block: 1407f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1408a86c6181SAlex Tomas */ 14095718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1410a86c6181SAlex Tomas { 1411a86c6181SAlex Tomas int depth; 1412a86c6181SAlex Tomas 1413a86c6181SAlex Tomas BUG_ON(path == NULL); 1414a86c6181SAlex Tomas depth = path->p_depth; 1415a86c6181SAlex Tomas 1416a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */ 1417a86c6181SAlex Tomas if (depth == 0) 1418f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1419a86c6181SAlex Tomas 1420a86c6181SAlex Tomas /* go to index block */ 1421a86c6181SAlex Tomas depth--; 1422a86c6181SAlex Tomas 1423a86c6181SAlex Tomas while (depth >= 0) { 1424a86c6181SAlex Tomas if (path[depth].p_idx != 1425a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1426725d26d3SAneesh Kumar K.V return (ext4_lblk_t) 1427725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block); 1428a86c6181SAlex Tomas depth--; 1429a86c6181SAlex Tomas } 1430a86c6181SAlex Tomas 1431f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1432a86c6181SAlex Tomas } 1433a86c6181SAlex Tomas 1434a86c6181SAlex Tomas /* 1435d0d856e8SRandy Dunlap * ext4_ext_correct_indexes: 1436d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf, 1437d0d856e8SRandy Dunlap * then we have to correct all indexes above. 1438a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases? 
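 *
 * Illustrative example (added for clarity): if the first extent of a
 * leaf now begins at logical block 48 instead of 32, the parent index
 * entry keyed by 32 is rewritten to 48, and the walk continues upward
 * as long as the corrected index is the first entry of its block.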
1439a86c6181SAlex Tomas */ 14401d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1441a86c6181SAlex Tomas struct ext4_ext_path *path) 1442a86c6181SAlex Tomas { 1443a86c6181SAlex Tomas struct ext4_extent_header *eh; 1444a86c6181SAlex Tomas int depth = ext_depth(inode); 1445a86c6181SAlex Tomas struct ext4_extent *ex; 1446a86c6181SAlex Tomas __le32 border; 1447a86c6181SAlex Tomas int k, err = 0; 1448a86c6181SAlex Tomas 1449a86c6181SAlex Tomas eh = path[depth].p_hdr; 1450a86c6181SAlex Tomas ex = path[depth].p_ext; 1451273df556SFrank Mayhar 1452273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) { 1453273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1454273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh); 1455273df556SFrank Mayhar return -EIO; 1456273df556SFrank Mayhar } 1457a86c6181SAlex Tomas 1458a86c6181SAlex Tomas if (depth == 0) { 1459a86c6181SAlex Tomas /* there is no tree at all */ 1460a86c6181SAlex Tomas return 0; 1461a86c6181SAlex Tomas } 1462a86c6181SAlex Tomas 1463a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) { 1464a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */ 1465a86c6181SAlex Tomas return 0; 1466a86c6181SAlex Tomas } 1467a86c6181SAlex Tomas 1468a86c6181SAlex Tomas /* 1469d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one 1470a86c6181SAlex Tomas */ 1471a86c6181SAlex Tomas k = depth - 1; 1472a86c6181SAlex Tomas border = path[depth].p_ext->ee_block; 14737e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 14747e028976SAvantika Mathur if (err) 1475a86c6181SAlex Tomas return err; 1476a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 14777e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 14787e028976SAvantika Mathur if (err) 1479a86c6181SAlex Tomas return err; 1480a86c6181SAlex Tomas 1481a86c6181SAlex Tomas while (k--) { 1482a86c6181SAlex Tomas /* change all left-side indexes */ 1483a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1484a86c6181SAlex Tomas break; 14857e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 14867e028976SAvantika Mathur if (err) 1487a86c6181SAlex Tomas break; 1488a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 14897e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 14907e028976SAvantika Mathur if (err) 1491a86c6181SAlex Tomas break; 1492a86c6181SAlex Tomas } 1493a86c6181SAlex Tomas 1494a86c6181SAlex Tomas return err; 1495a86c6181SAlex Tomas } 1496a86c6181SAlex Tomas 1497748de673SAkira Fujita int 1498a86c6181SAlex Tomas ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, 1499a86c6181SAlex Tomas struct ext4_extent *ex2) 1500a86c6181SAlex Tomas { 1501749269faSAmit Arora unsigned short ext1_ee_len, ext2_ee_len, max_len; 1502a2df2a63SAmit Arora 1503a2df2a63SAmit Arora /* 1504a2df2a63SAmit Arora * Make sure that either both extents are uninitialized, or 1505a2df2a63SAmit Arora * both are _not_. 
1506a2df2a63SAmit Arora */ 1507a2df2a63SAmit Arora if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) 1508a2df2a63SAmit Arora return 0; 1509a2df2a63SAmit Arora 1510749269faSAmit Arora if (ext4_ext_is_uninitialized(ex1)) 1511749269faSAmit Arora max_len = EXT_UNINIT_MAX_LEN; 1512749269faSAmit Arora else 1513749269faSAmit Arora max_len = EXT_INIT_MAX_LEN; 1514749269faSAmit Arora 1515a2df2a63SAmit Arora ext1_ee_len = ext4_ext_get_actual_len(ex1); 1516a2df2a63SAmit Arora ext2_ee_len = ext4_ext_get_actual_len(ex2); 1517a2df2a63SAmit Arora 1518a2df2a63SAmit Arora if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 151963f57933SAndrew Morton le32_to_cpu(ex2->ee_block)) 1520a86c6181SAlex Tomas return 0; 1521a86c6181SAlex Tomas 1522471d4011SSuparna Bhattacharya /* 1523471d4011SSuparna Bhattacharya * To allow future support for preallocated extents to be added 1524471d4011SSuparna Bhattacharya * as an RO_COMPAT feature, refuse to merge to extents if 1525d0d856e8SRandy Dunlap * this can result in the top bit of ee_len being set. 1526471d4011SSuparna Bhattacharya */ 1527749269faSAmit Arora if (ext1_ee_len + ext2_ee_len > max_len) 1528471d4011SSuparna Bhattacharya return 0; 1529bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 1530b939e376SAneesh Kumar K.V if (ext1_ee_len >= 4) 1531a86c6181SAlex Tomas return 0; 1532a86c6181SAlex Tomas #endif 1533a86c6181SAlex Tomas 1534bf89d16fSTheodore Ts'o if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1535a86c6181SAlex Tomas return 1; 1536a86c6181SAlex Tomas return 0; 1537a86c6181SAlex Tomas } 1538a86c6181SAlex Tomas 1539a86c6181SAlex Tomas /* 154056055d3aSAmit Arora * This function tries to merge the "ex" extent to the next extent in the tree. 154156055d3aSAmit Arora * It always tries to merge towards right. If you want to merge towards 154256055d3aSAmit Arora * left, pass "ex - 1" as argument instead of "ex". 154356055d3aSAmit Arora * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 154456055d3aSAmit Arora * 1 if they got merged. 154556055d3aSAmit Arora */ 1546197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode, 154756055d3aSAmit Arora struct ext4_ext_path *path, 154856055d3aSAmit Arora struct ext4_extent *ex) 154956055d3aSAmit Arora { 155056055d3aSAmit Arora struct ext4_extent_header *eh; 155156055d3aSAmit Arora unsigned int depth, len; 155256055d3aSAmit Arora int merge_done = 0; 155356055d3aSAmit Arora int uninitialized = 0; 155456055d3aSAmit Arora 155556055d3aSAmit Arora depth = ext_depth(inode); 155656055d3aSAmit Arora BUG_ON(path[depth].p_hdr == NULL); 155756055d3aSAmit Arora eh = path[depth].p_hdr; 155856055d3aSAmit Arora 155956055d3aSAmit Arora while (ex < EXT_LAST_EXTENT(eh)) { 156056055d3aSAmit Arora if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 156156055d3aSAmit Arora break; 156256055d3aSAmit Arora /* merge with next extent! 
*/ 156356055d3aSAmit Arora if (ext4_ext_is_uninitialized(ex)) 156456055d3aSAmit Arora uninitialized = 1; 156556055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 156656055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1)); 156756055d3aSAmit Arora if (uninitialized) 156856055d3aSAmit Arora ext4_ext_mark_uninitialized(ex); 156956055d3aSAmit Arora 157056055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) { 157156055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1) 157256055d3aSAmit Arora * sizeof(struct ext4_extent); 157356055d3aSAmit Arora memmove(ex + 1, ex + 2, len); 157456055d3aSAmit Arora } 1575e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1); 157656055d3aSAmit Arora merge_done = 1; 157756055d3aSAmit Arora WARN_ON(eh->eh_entries == 0); 157856055d3aSAmit Arora if (!eh->eh_entries) 157924676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 158056055d3aSAmit Arora } 158156055d3aSAmit Arora 158256055d3aSAmit Arora return merge_done; 158356055d3aSAmit Arora } 158456055d3aSAmit Arora 158556055d3aSAmit Arora /* 1586197217a5SYongqiang Yang * This function tries to merge the @ex extent to neighbours in the tree. 1587197217a5SYongqiang Yang * return 1 if merge left else 0. 1588197217a5SYongqiang Yang */ 1589197217a5SYongqiang Yang static int ext4_ext_try_to_merge(struct inode *inode, 1590197217a5SYongqiang Yang struct ext4_ext_path *path, 1591197217a5SYongqiang Yang struct ext4_extent *ex) { 1592197217a5SYongqiang Yang struct ext4_extent_header *eh; 1593197217a5SYongqiang Yang unsigned int depth; 1594197217a5SYongqiang Yang int merge_done = 0; 1595197217a5SYongqiang Yang int ret = 0; 1596197217a5SYongqiang Yang 1597197217a5SYongqiang Yang depth = ext_depth(inode); 1598197217a5SYongqiang Yang BUG_ON(path[depth].p_hdr == NULL); 1599197217a5SYongqiang Yang eh = path[depth].p_hdr; 1600197217a5SYongqiang Yang 1601197217a5SYongqiang Yang if (ex > EXT_FIRST_EXTENT(eh)) 1602197217a5SYongqiang Yang merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1603197217a5SYongqiang Yang 1604197217a5SYongqiang Yang if (!merge_done) 1605197217a5SYongqiang Yang ret = ext4_ext_try_to_merge_right(inode, path, ex); 1606197217a5SYongqiang Yang 1607197217a5SYongqiang Yang return ret; 1608197217a5SYongqiang Yang } 1609197217a5SYongqiang Yang 1610197217a5SYongqiang Yang /* 161125d14f98SAmit Arora * check if a portion of the "newext" extent overlaps with an 161225d14f98SAmit Arora * existing extent. 161325d14f98SAmit Arora * 161425d14f98SAmit Arora * If there is an overlap discovered, it updates the length of the newext 161525d14f98SAmit Arora * such that there will be no overlap, and then returns 1. 161625d14f98SAmit Arora * If there is no overlap found, it returns 0. 
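 *
 * Worked example (illustrative, assuming a cluster ratio of 1): if
 * newext covers logical blocks 100-149 and the closest allocated
 * extent starts at block 120, newext->ee_len is trimmed to 20 blocks
 * (100-119) and 1 is returned.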
161725d14f98SAmit Arora */ 16184d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 16194d33b1efSTheodore Ts'o struct inode *inode, 162025d14f98SAmit Arora struct ext4_extent *newext, 162125d14f98SAmit Arora struct ext4_ext_path *path) 162225d14f98SAmit Arora { 1623725d26d3SAneesh Kumar K.V ext4_lblk_t b1, b2; 162425d14f98SAmit Arora unsigned int depth, len1; 162525d14f98SAmit Arora unsigned int ret = 0; 162625d14f98SAmit Arora 162725d14f98SAmit Arora b1 = le32_to_cpu(newext->ee_block); 1628a2df2a63SAmit Arora len1 = ext4_ext_get_actual_len(newext); 162925d14f98SAmit Arora depth = ext_depth(inode); 163025d14f98SAmit Arora if (!path[depth].p_ext) 163125d14f98SAmit Arora goto out; 163225d14f98SAmit Arora b2 = le32_to_cpu(path[depth].p_ext->ee_block); 16334d33b1efSTheodore Ts'o b2 &= ~(sbi->s_cluster_ratio - 1); 163425d14f98SAmit Arora 163525d14f98SAmit Arora /* 163625d14f98SAmit Arora * get the next allocated block if the extent in the path 163725d14f98SAmit Arora * is before the requested block(s) 163825d14f98SAmit Arora */ 163925d14f98SAmit Arora if (b2 < b1) { 164025d14f98SAmit Arora b2 = ext4_ext_next_allocated_block(path); 1641f17722f9SLukas Czerner if (b2 == EXT_MAX_BLOCKS) 164225d14f98SAmit Arora goto out; 16434d33b1efSTheodore Ts'o b2 &= ~(sbi->s_cluster_ratio - 1); 164425d14f98SAmit Arora } 164525d14f98SAmit Arora 1646725d26d3SAneesh Kumar K.V /* check for wrap through zero on extent logical start block*/ 164725d14f98SAmit Arora if (b1 + len1 < b1) { 1648f17722f9SLukas Czerner len1 = EXT_MAX_BLOCKS - b1; 164925d14f98SAmit Arora newext->ee_len = cpu_to_le16(len1); 165025d14f98SAmit Arora ret = 1; 165125d14f98SAmit Arora } 165225d14f98SAmit Arora 165325d14f98SAmit Arora /* check for overlap */ 165425d14f98SAmit Arora if (b1 + len1 > b2) { 165525d14f98SAmit Arora newext->ee_len = cpu_to_le16(b2 - b1); 165625d14f98SAmit Arora ret = 1; 165725d14f98SAmit Arora } 165825d14f98SAmit Arora out: 165925d14f98SAmit Arora return ret; 166025d14f98SAmit Arora } 166125d14f98SAmit Arora 166225d14f98SAmit Arora /* 1663d0d856e8SRandy Dunlap * ext4_ext_insert_extent: 1664d0d856e8SRandy Dunlap * tries to merge requsted extent into the existing extent or 1665d0d856e8SRandy Dunlap * inserts requested extent as new one into the tree, 1666d0d856e8SRandy Dunlap * creating new leaf in the no-space case. 
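 *
 * Rough order of preference (summary added for clarity): first try to
 * merge newext into the extent found in @path, then look for room in
 * the current leaf, then in the next leaf, and only then call
 * ext4_ext_create_new_leaf().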
1667a86c6181SAlex Tomas */ 1668a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1669a86c6181SAlex Tomas struct ext4_ext_path *path, 16700031462bSMingming Cao struct ext4_extent *newext, int flag) 1671a86c6181SAlex Tomas { 1672a86c6181SAlex Tomas struct ext4_extent_header *eh; 1673a86c6181SAlex Tomas struct ext4_extent *ex, *fex; 1674a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */ 1675a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL; 1676725d26d3SAneesh Kumar K.V int depth, len, err; 1677725d26d3SAneesh Kumar K.V ext4_lblk_t next; 1678a2df2a63SAmit Arora unsigned uninitialized = 0; 167955f020dbSAllison Henderson int flags = 0; 1680a86c6181SAlex Tomas 1681273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1682273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1683273df556SFrank Mayhar return -EIO; 1684273df556SFrank Mayhar } 1685a86c6181SAlex Tomas depth = ext_depth(inode); 1686a86c6181SAlex Tomas ex = path[depth].p_ext; 1687273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1688273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1689273df556SFrank Mayhar return -EIO; 1690273df556SFrank Mayhar } 1691a86c6181SAlex Tomas 1692a86c6181SAlex Tomas /* try to insert block into found extent and return */ 1693744692dcSJiaying Zhang if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) 16940031462bSMingming Cao && ext4_can_extents_be_merged(inode, ex, newext)) { 1695553f9008SMingming ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", 1696553f9008SMingming ext4_ext_is_uninitialized(newext), 1697a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 1698a86c6181SAlex Tomas le32_to_cpu(ex->ee_block), 1699553f9008SMingming ext4_ext_is_uninitialized(ex), 1700bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), 1701bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 17027e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 17037e028976SAvantika Mathur if (err) 1704a86c6181SAlex Tomas return err; 1705a2df2a63SAmit Arora 1706a2df2a63SAmit Arora /* 1707a2df2a63SAmit Arora * ext4_can_extents_be_merged should have checked that either 1708a2df2a63SAmit Arora * both extents are uninitialized, or both aren't. Thus we 1709a2df2a63SAmit Arora * need to check only one of them here. 1710a2df2a63SAmit Arora */ 1711a2df2a63SAmit Arora if (ext4_ext_is_uninitialized(ex)) 1712a2df2a63SAmit Arora uninitialized = 1; 1713a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1714a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext)); 1715a2df2a63SAmit Arora if (uninitialized) 1716a2df2a63SAmit Arora ext4_ext_mark_uninitialized(ex); 1717a86c6181SAlex Tomas eh = path[depth].p_hdr; 1718a86c6181SAlex Tomas nearex = ex; 1719a86c6181SAlex Tomas goto merge; 1720a86c6181SAlex Tomas } 1721a86c6181SAlex Tomas 1722a86c6181SAlex Tomas depth = ext_depth(inode); 1723a86c6181SAlex Tomas eh = path[depth].p_hdr; 1724a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 1725a86c6181SAlex Tomas goto has_space; 1726a86c6181SAlex Tomas 1727a86c6181SAlex Tomas /* probably next leaf has space for us? 
*/
1728a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh);
1729598dbdf2SRobin Dong next = EXT_MAX_BLOCKS;
1730598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
17315718789dSRobin Dong next = ext4_ext_next_leaf_block(path);
1732598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) {
1733a86c6181SAlex Tomas ext_debug("next leaf block - %d\n", next);
1734a86c6181SAlex Tomas BUG_ON(npath != NULL);
1735a86c6181SAlex Tomas npath = ext4_ext_find_extent(inode, next, NULL);
1736a86c6181SAlex Tomas if (IS_ERR(npath))
1737a86c6181SAlex Tomas return PTR_ERR(npath);
1738a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth);
1739a86c6181SAlex Tomas eh = npath[depth].p_hdr;
1740a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
174125985edcSLucas De Marchi ext_debug("next leaf isn't full(%d)\n",
1742a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries));
1743a86c6181SAlex Tomas path = npath;
1744ffb505ffSRobin Dong goto has_space;
1745a86c6181SAlex Tomas }
1746a86c6181SAlex Tomas ext_debug("next leaf has no free space(%d,%d)\n",
1747a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1748a86c6181SAlex Tomas }
1749a86c6181SAlex Tomas
1750a86c6181SAlex Tomas /*
1751d0d856e8SRandy Dunlap * There is no free space in the found leaf.
1752d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree.
1753a86c6181SAlex Tomas */
175455f020dbSAllison Henderson if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
175555f020dbSAllison Henderson flags = EXT4_MB_USE_ROOT_BLOCKS;
175655f020dbSAllison Henderson err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
1757a86c6181SAlex Tomas if (err)
1758a86c6181SAlex Tomas goto cleanup;
1759a86c6181SAlex Tomas depth = ext_depth(inode);
1760a86c6181SAlex Tomas eh = path[depth].p_hdr;
1761a86c6181SAlex Tomas
1762a86c6181SAlex Tomas has_space:
1763a86c6181SAlex Tomas nearex = path[depth].p_ext;
1764a86c6181SAlex Tomas
17657e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth);
17667e028976SAvantika Mathur if (err)
1767a86c6181SAlex Tomas goto cleanup;
1768a86c6181SAlex Tomas
1769a86c6181SAlex Tomas if (!nearex) {
1770a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */
1771553f9008SMingming ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
1772a86c6181SAlex Tomas le32_to_cpu(newext->ee_block),
1773bf89d16fSTheodore Ts'o ext4_ext_pblock(newext),
1774553f9008SMingming ext4_ext_is_uninitialized(newext),
1775a2df2a63SAmit Arora ext4_ext_get_actual_len(newext));
1776*80e675f9SEric Gouriou nearex = EXT_FIRST_EXTENT(eh);
1777a86c6181SAlex Tomas } else {
1778*80e675f9SEric Gouriou if (le32_to_cpu(newext->ee_block)
1779*80e675f9SEric Gouriou > le32_to_cpu(nearex->ee_block)) {
1780*80e675f9SEric Gouriou /* Insert after */
1781*80e675f9SEric Gouriou ext_debug("insert %d:%llu:[%d]%d before: "
1782*80e675f9SEric Gouriou "nearest 0x%p\n",
1783a86c6181SAlex Tomas le32_to_cpu(newext->ee_block),
1784bf89d16fSTheodore Ts'o ext4_ext_pblock(newext),
1785553f9008SMingming ext4_ext_is_uninitialized(newext),
1786a2df2a63SAmit Arora ext4_ext_get_actual_len(newext),
1787*80e675f9SEric Gouriou nearex);
1788*80e675f9SEric Gouriou nearex++;
1789*80e675f9SEric Gouriou } else {
1790*80e675f9SEric Gouriou /* Insert before */
1791*80e675f9SEric Gouriou BUG_ON(newext->ee_block == nearex->ee_block);
1792*80e675f9SEric Gouriou ext_debug("insert %d:%llu:[%d]%d after: "
1793*80e675f9SEric Gouriou "nearest 0x%p\n",
1794*80e675f9SEric Gouriou le32_to_cpu(newext->ee_block),
1795*80e675f9SEric Gouriou ext4_ext_pblock(newext), 1796*80e675f9SEric Gouriou ext4_ext_is_uninitialized(newext), 1797*80e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 1798*80e675f9SEric Gouriou nearex); 1799*80e675f9SEric Gouriou } 1800*80e675f9SEric Gouriou len = EXT_LAST_EXTENT(eh) - nearex + 1; 1801*80e675f9SEric Gouriou if (len > 0) { 1802*80e675f9SEric Gouriou ext_debug("insert %d:%llu:[%d]%d: " 1803*80e675f9SEric Gouriou "move %d extents from 0x%p to 0x%p\n", 1804*80e675f9SEric Gouriou le32_to_cpu(newext->ee_block), 1805*80e675f9SEric Gouriou ext4_ext_pblock(newext), 1806*80e675f9SEric Gouriou ext4_ext_is_uninitialized(newext), 1807*80e675f9SEric Gouriou ext4_ext_get_actual_len(newext), 1808*80e675f9SEric Gouriou len, nearex, nearex + 1); 1809*80e675f9SEric Gouriou memmove(nearex + 1, nearex, 1810*80e675f9SEric Gouriou len * sizeof(struct ext4_extent)); 1811*80e675f9SEric Gouriou } 1812a86c6181SAlex Tomas } 1813a86c6181SAlex Tomas 1814e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1); 1815*80e675f9SEric Gouriou path[depth].p_ext = nearex; 1816a86c6181SAlex Tomas nearex->ee_block = newext->ee_block; 1817bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 1818a86c6181SAlex Tomas nearex->ee_len = newext->ee_len; 1819a86c6181SAlex Tomas 1820a86c6181SAlex Tomas merge: 1821a86c6181SAlex Tomas /* try to merge extents to the right */ 1822744692dcSJiaying Zhang if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) 182356055d3aSAmit Arora ext4_ext_try_to_merge(inode, path, nearex); 1824a86c6181SAlex Tomas 1825a86c6181SAlex Tomas /* try to merge extents to the left */ 1826a86c6181SAlex Tomas 1827a86c6181SAlex Tomas /* time to correct all indexes above */ 1828a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 1829a86c6181SAlex Tomas if (err) 1830a86c6181SAlex Tomas goto cleanup; 1831a86c6181SAlex Tomas 1832a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path + depth); 1833a86c6181SAlex Tomas 1834a86c6181SAlex Tomas cleanup: 1835a86c6181SAlex Tomas if (npath) { 1836a86c6181SAlex Tomas ext4_ext_drop_refs(npath); 1837a86c6181SAlex Tomas kfree(npath); 1838a86c6181SAlex Tomas } 1839a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 1840a86c6181SAlex Tomas return err; 1841a86c6181SAlex Tomas } 1842a86c6181SAlex Tomas 18431f109d5aSTheodore Ts'o static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, 18446873fa0dSEric Sandeen ext4_lblk_t num, ext_prepare_callback func, 18456873fa0dSEric Sandeen void *cbdata) 18466873fa0dSEric Sandeen { 18476873fa0dSEric Sandeen struct ext4_ext_path *path = NULL; 18486873fa0dSEric Sandeen struct ext4_ext_cache cbex; 18496873fa0dSEric Sandeen struct ext4_extent *ex; 18506873fa0dSEric Sandeen ext4_lblk_t next, start = 0, end = 0; 18516873fa0dSEric Sandeen ext4_lblk_t last = block + num; 18526873fa0dSEric Sandeen int depth, exists, err = 0; 18536873fa0dSEric Sandeen 18546873fa0dSEric Sandeen BUG_ON(func == NULL); 18556873fa0dSEric Sandeen BUG_ON(inode == NULL); 18566873fa0dSEric Sandeen 1857f17722f9SLukas Czerner while (block < last && block != EXT_MAX_BLOCKS) { 18586873fa0dSEric Sandeen num = last - block; 18596873fa0dSEric Sandeen /* find extent for this block */ 1860fab3a549STheodore Ts'o down_read(&EXT4_I(inode)->i_data_sem); 18616873fa0dSEric Sandeen path = ext4_ext_find_extent(inode, block, path); 1862fab3a549STheodore Ts'o up_read(&EXT4_I(inode)->i_data_sem); 18636873fa0dSEric Sandeen if (IS_ERR(path)) { 18646873fa0dSEric Sandeen err = PTR_ERR(path); 18656873fa0dSEric Sandeen path = NULL; 
18666873fa0dSEric Sandeen break; 18676873fa0dSEric Sandeen } 18686873fa0dSEric Sandeen 18696873fa0dSEric Sandeen depth = ext_depth(inode); 1870273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1871273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1872273df556SFrank Mayhar err = -EIO; 1873273df556SFrank Mayhar break; 1874273df556SFrank Mayhar } 18756873fa0dSEric Sandeen ex = path[depth].p_ext; 18766873fa0dSEric Sandeen next = ext4_ext_next_allocated_block(path); 18776873fa0dSEric Sandeen 18786873fa0dSEric Sandeen exists = 0; 18796873fa0dSEric Sandeen if (!ex) { 18806873fa0dSEric Sandeen /* there is no extent yet, so try to allocate 18816873fa0dSEric Sandeen * all requested space */ 18826873fa0dSEric Sandeen start = block; 18836873fa0dSEric Sandeen end = block + num; 18846873fa0dSEric Sandeen } else if (le32_to_cpu(ex->ee_block) > block) { 18856873fa0dSEric Sandeen /* need to allocate space before found extent */ 18866873fa0dSEric Sandeen start = block; 18876873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block); 18886873fa0dSEric Sandeen if (block + num < end) 18896873fa0dSEric Sandeen end = block + num; 18906873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block) 18916873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex)) { 18926873fa0dSEric Sandeen /* need to allocate space after found extent */ 18936873fa0dSEric Sandeen start = block; 18946873fa0dSEric Sandeen end = block + num; 18956873fa0dSEric Sandeen if (end >= next) 18966873fa0dSEric Sandeen end = next; 18976873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block)) { 18986873fa0dSEric Sandeen /* 18996873fa0dSEric Sandeen * some part of requested space is covered 19006873fa0dSEric Sandeen * by found extent 19016873fa0dSEric Sandeen */ 19026873fa0dSEric Sandeen start = block; 19036873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block) 19046873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex); 19056873fa0dSEric Sandeen if (block + num < end) 19066873fa0dSEric Sandeen end = block + num; 19076873fa0dSEric Sandeen exists = 1; 19086873fa0dSEric Sandeen } else { 19096873fa0dSEric Sandeen BUG(); 19106873fa0dSEric Sandeen } 19116873fa0dSEric Sandeen BUG_ON(end <= start); 19126873fa0dSEric Sandeen 19136873fa0dSEric Sandeen if (!exists) { 19146873fa0dSEric Sandeen cbex.ec_block = start; 19156873fa0dSEric Sandeen cbex.ec_len = end - start; 19166873fa0dSEric Sandeen cbex.ec_start = 0; 19176873fa0dSEric Sandeen } else { 19186873fa0dSEric Sandeen cbex.ec_block = le32_to_cpu(ex->ee_block); 19196873fa0dSEric Sandeen cbex.ec_len = ext4_ext_get_actual_len(ex); 1920bf89d16fSTheodore Ts'o cbex.ec_start = ext4_ext_pblock(ex); 19216873fa0dSEric Sandeen } 19226873fa0dSEric Sandeen 1923273df556SFrank Mayhar if (unlikely(cbex.ec_len == 0)) { 1924273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "cbex.ec_len == 0"); 1925273df556SFrank Mayhar err = -EIO; 1926273df556SFrank Mayhar break; 1927273df556SFrank Mayhar } 1928c03f8aa9SLukas Czerner err = func(inode, next, &cbex, ex, cbdata); 19296873fa0dSEric Sandeen ext4_ext_drop_refs(path); 19306873fa0dSEric Sandeen 19316873fa0dSEric Sandeen if (err < 0) 19326873fa0dSEric Sandeen break; 19336873fa0dSEric Sandeen 19346873fa0dSEric Sandeen if (err == EXT_REPEAT) 19356873fa0dSEric Sandeen continue; 19366873fa0dSEric Sandeen else if (err == EXT_BREAK) { 19376873fa0dSEric Sandeen err = 0; 19386873fa0dSEric Sandeen break; 19396873fa0dSEric Sandeen } 19406873fa0dSEric Sandeen 19416873fa0dSEric Sandeen if (ext_depth(inode) != depth) { 19426873fa0dSEric Sandeen /* depth was 
changed. we have to realloc path */ 19436873fa0dSEric Sandeen kfree(path); 19446873fa0dSEric Sandeen path = NULL; 19456873fa0dSEric Sandeen } 19466873fa0dSEric Sandeen 19476873fa0dSEric Sandeen block = cbex.ec_block + cbex.ec_len; 19486873fa0dSEric Sandeen } 19496873fa0dSEric Sandeen 19506873fa0dSEric Sandeen if (path) { 19516873fa0dSEric Sandeen ext4_ext_drop_refs(path); 19526873fa0dSEric Sandeen kfree(path); 19536873fa0dSEric Sandeen } 19546873fa0dSEric Sandeen 19556873fa0dSEric Sandeen return err; 19566873fa0dSEric Sandeen } 19576873fa0dSEric Sandeen 195809b88252SAvantika Mathur static void 1959725d26d3SAneesh Kumar K.V ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, 1960b05e6ae5STheodore Ts'o __u32 len, ext4_fsblk_t start) 1961a86c6181SAlex Tomas { 1962a86c6181SAlex Tomas struct ext4_ext_cache *cex; 1963a86c6181SAlex Tomas BUG_ON(len == 0); 19642ec0ae3aSTheodore Ts'o spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1965d8990240SAditya Kali trace_ext4_ext_put_in_cache(inode, block, len, start); 1966a86c6181SAlex Tomas cex = &EXT4_I(inode)->i_cached_extent; 1967a86c6181SAlex Tomas cex->ec_block = block; 1968a86c6181SAlex Tomas cex->ec_len = len; 1969a86c6181SAlex Tomas cex->ec_start = start; 19702ec0ae3aSTheodore Ts'o spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1971a86c6181SAlex Tomas } 1972a86c6181SAlex Tomas 1973a86c6181SAlex Tomas /* 1974d0d856e8SRandy Dunlap * ext4_ext_put_gap_in_cache: 1975d0d856e8SRandy Dunlap * calculate boundaries of the gap that the requested block fits into 1976a86c6181SAlex Tomas * and cache this gap 1977a86c6181SAlex Tomas */ 197809b88252SAvantika Mathur static void 1979a86c6181SAlex Tomas ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, 1980725d26d3SAneesh Kumar K.V ext4_lblk_t block) 1981a86c6181SAlex Tomas { 1982a86c6181SAlex Tomas int depth = ext_depth(inode); 1983725d26d3SAneesh Kumar K.V unsigned long len; 1984725d26d3SAneesh Kumar K.V ext4_lblk_t lblock; 1985a86c6181SAlex Tomas struct ext4_extent *ex; 1986a86c6181SAlex Tomas 1987a86c6181SAlex Tomas ex = path[depth].p_ext; 1988a86c6181SAlex Tomas if (ex == NULL) { 1989a86c6181SAlex Tomas /* there is no extent yet, so gap is [0;-] */ 1990a86c6181SAlex Tomas lblock = 0; 1991f17722f9SLukas Czerner len = EXT_MAX_BLOCKS; 1992a86c6181SAlex Tomas ext_debug("cache gap(whole file):"); 1993a86c6181SAlex Tomas } else if (block < le32_to_cpu(ex->ee_block)) { 1994a86c6181SAlex Tomas lblock = block; 1995a86c6181SAlex Tomas len = le32_to_cpu(ex->ee_block) - block; 1996bba90743SEric Sandeen ext_debug("cache gap(before): %u [%u:%u]", 1997bba90743SEric Sandeen block, 1998bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 1999bba90743SEric Sandeen ext4_ext_get_actual_len(ex)); 2000a86c6181SAlex Tomas } else if (block >= le32_to_cpu(ex->ee_block) 2001a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex)) { 2002725d26d3SAneesh Kumar K.V ext4_lblk_t next; 2003a86c6181SAlex Tomas lblock = le32_to_cpu(ex->ee_block) 2004a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex); 2005725d26d3SAneesh Kumar K.V 2006725d26d3SAneesh Kumar K.V next = ext4_ext_next_allocated_block(path); 2007bba90743SEric Sandeen ext_debug("cache gap(after): [%u:%u] %u", 2008bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2009bba90743SEric Sandeen ext4_ext_get_actual_len(ex), 2010bba90743SEric Sandeen block); 2011725d26d3SAneesh Kumar K.V BUG_ON(next == lblock); 2012725d26d3SAneesh Kumar K.V len = next - lblock; 2013a86c6181SAlex Tomas } else { 2014a86c6181SAlex Tomas lblock = len = 0; 2015a86c6181SAlex Tomas 
BUG(); 2016a86c6181SAlex Tomas } 2017a86c6181SAlex Tomas 2018bba90743SEric Sandeen ext_debug(" -> %u:%lu\n", lblock, len); 2019b05e6ae5STheodore Ts'o ext4_ext_put_in_cache(inode, lblock, len, 0); 2020a86c6181SAlex Tomas } 2021a86c6181SAlex Tomas 2022b05e6ae5STheodore Ts'o /* 2023b7ca1e8eSRobin Dong * ext4_ext_check_cache() 2024a4bb6b64SAllison Henderson * Checks to see if the given block is in the cache. 2025a4bb6b64SAllison Henderson * If it is, the cached extent is stored in the given 2026a4bb6b64SAllison Henderson * cache extent pointer. If the cached extent is a hole, 2027a4bb6b64SAllison Henderson * this routine should be used instead of 2028a4bb6b64SAllison Henderson * ext4_ext_in_cache if the calling function needs to 2029a4bb6b64SAllison Henderson * know the size of the hole. 2030a4bb6b64SAllison Henderson * 2031a4bb6b64SAllison Henderson * @inode: The files inode 2032a4bb6b64SAllison Henderson * @block: The block to look for in the cache 2033a4bb6b64SAllison Henderson * @ex: Pointer where the cached extent will be stored 2034a4bb6b64SAllison Henderson * if it contains block 2035a4bb6b64SAllison Henderson * 2036b05e6ae5STheodore Ts'o * Return 0 if cache is invalid; 1 if the cache is valid 2037b05e6ae5STheodore Ts'o */ 2038a4bb6b64SAllison Henderson static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block, 2039a4bb6b64SAllison Henderson struct ext4_ext_cache *ex){ 2040a86c6181SAlex Tomas struct ext4_ext_cache *cex; 204177f4135fSVivek Haldar struct ext4_sb_info *sbi; 2042b05e6ae5STheodore Ts'o int ret = 0; 2043a86c6181SAlex Tomas 20442ec0ae3aSTheodore Ts'o /* 20452ec0ae3aSTheodore Ts'o * We borrow i_block_reservation_lock to protect i_cached_extent 20462ec0ae3aSTheodore Ts'o */ 20472ec0ae3aSTheodore Ts'o spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 2048a86c6181SAlex Tomas cex = &EXT4_I(inode)->i_cached_extent; 204977f4135fSVivek Haldar sbi = EXT4_SB(inode->i_sb); 2050a86c6181SAlex Tomas 2051a86c6181SAlex Tomas /* has cache valid data? */ 2052b05e6ae5STheodore Ts'o if (cex->ec_len == 0) 20532ec0ae3aSTheodore Ts'o goto errout; 2054a86c6181SAlex Tomas 2055731eb1a0SAkinobu Mita if (in_range(block, cex->ec_block, cex->ec_len)) { 2056a4bb6b64SAllison Henderson memcpy(ex, cex, sizeof(struct ext4_ext_cache)); 2057bba90743SEric Sandeen ext_debug("%u cached by %u:%u:%llu\n", 2058bba90743SEric Sandeen block, 2059bba90743SEric Sandeen cex->ec_block, cex->ec_len, cex->ec_start); 2060b05e6ae5STheodore Ts'o ret = 1; 2061a86c6181SAlex Tomas } 20622ec0ae3aSTheodore Ts'o errout: 206377f4135fSVivek Haldar if (!ret) 206477f4135fSVivek Haldar sbi->extent_cache_misses++; 206577f4135fSVivek Haldar else 206677f4135fSVivek Haldar sbi->extent_cache_hits++; 2067d8990240SAditya Kali trace_ext4_ext_in_cache(inode, block, ret); 20682ec0ae3aSTheodore Ts'o spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 20692ec0ae3aSTheodore Ts'o return ret; 2070a86c6181SAlex Tomas } 2071a86c6181SAlex Tomas 2072a86c6181SAlex Tomas /* 2073a4bb6b64SAllison Henderson * ext4_ext_in_cache() 2074a4bb6b64SAllison Henderson * Checks to see if the given block is in the cache. 2075a4bb6b64SAllison Henderson * If it is, the cached extent is stored in the given 2076a4bb6b64SAllison Henderson * extent pointer. 
2077a4bb6b64SAllison Henderson * 2078a4bb6b64SAllison Henderson * @inode: The files inode 2079a4bb6b64SAllison Henderson * @block: The block to look for in the cache 2080a4bb6b64SAllison Henderson * @ex: Pointer where the cached extent will be stored 2081a4bb6b64SAllison Henderson * if it contains block 2082a4bb6b64SAllison Henderson * 2083a4bb6b64SAllison Henderson * Return 0 if cache is invalid; 1 if the cache is valid 2084a4bb6b64SAllison Henderson */ 2085a4bb6b64SAllison Henderson static int 2086a4bb6b64SAllison Henderson ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, 2087a4bb6b64SAllison Henderson struct ext4_extent *ex) 2088a4bb6b64SAllison Henderson { 2089a4bb6b64SAllison Henderson struct ext4_ext_cache cex; 2090a4bb6b64SAllison Henderson int ret = 0; 2091a4bb6b64SAllison Henderson 2092a4bb6b64SAllison Henderson if (ext4_ext_check_cache(inode, block, &cex)) { 2093a4bb6b64SAllison Henderson ex->ee_block = cpu_to_le32(cex.ec_block); 2094a4bb6b64SAllison Henderson ext4_ext_store_pblock(ex, cex.ec_start); 2095a4bb6b64SAllison Henderson ex->ee_len = cpu_to_le16(cex.ec_len); 2096a4bb6b64SAllison Henderson ret = 1; 2097a4bb6b64SAllison Henderson } 2098a4bb6b64SAllison Henderson 2099a4bb6b64SAllison Henderson return ret; 2100a4bb6b64SAllison Henderson } 2101a4bb6b64SAllison Henderson 2102a4bb6b64SAllison Henderson 2103a4bb6b64SAllison Henderson /* 2104d0d856e8SRandy Dunlap * ext4_ext_rm_idx: 2105d0d856e8SRandy Dunlap * removes index from the index block. 2106a86c6181SAlex Tomas */ 21071d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2108a86c6181SAlex Tomas struct ext4_ext_path *path) 2109a86c6181SAlex Tomas { 2110a86c6181SAlex Tomas int err; 2111f65e6fbaSAlex Tomas ext4_fsblk_t leaf; 2112a86c6181SAlex Tomas 2113a86c6181SAlex Tomas /* free index block */ 2114a86c6181SAlex Tomas path--; 2115bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx); 2116273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) { 2117273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2118273df556SFrank Mayhar return -EIO; 2119273df556SFrank Mayhar } 21207e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path); 21217e028976SAvantika Mathur if (err) 2122a86c6181SAlex Tomas return err; 21230e1147b0SRobin Dong 21240e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 21250e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 21260e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx); 21270e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len); 21280e1147b0SRobin Dong } 21290e1147b0SRobin Dong 2130e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1); 21317e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path); 21327e028976SAvantika Mathur if (err) 2133a86c6181SAlex Tomas return err; 21342ae02107SMingming Cao ext_debug("index is empty, remove it, free block %llu\n", leaf); 2135d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf); 2136d8990240SAditya Kali 21377dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1, 2138e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2139a86c6181SAlex Tomas return err; 2140a86c6181SAlex Tomas } 2141a86c6181SAlex Tomas 2142a86c6181SAlex Tomas /* 2143ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent: 2144ee12b630SMingming Cao * This routine returns max. credits that needed to insert an extent 2145ee12b630SMingming Cao * to the extent tree. 
2146ee12b630SMingming Cao * When passing the actual path, the caller should calculate credits
2147ee12b630SMingming Cao * under i_data_sem.
2148a86c6181SAlex Tomas */
2149525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2150a86c6181SAlex Tomas struct ext4_ext_path *path)
2151a86c6181SAlex Tomas {
2152a86c6181SAlex Tomas if (path) {
2153ee12b630SMingming Cao int depth = ext_depth(inode);
2154f3bd1f3fSMingming Cao int ret = 0;
2155ee12b630SMingming Cao
2156a86c6181SAlex Tomas /* probably there is space in leaf? */
2157a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2158ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2159ee12b630SMingming Cao
2160ee12b630SMingming Cao /*
2161ee12b630SMingming Cao * There is some space in the leaf, so no
2162ee12b630SMingming Cao * need to account for a leaf block credit
2163ee12b630SMingming Cao *
2164ee12b630SMingming Cao * bitmaps and block group descriptor blocks
2165df3ab170STao Ma * and other metadata blocks still need to be
2166ee12b630SMingming Cao * accounted.
2167ee12b630SMingming Cao */
2168525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */
2169ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
21705887e98bSAneesh Kumar K.V return ret;
2171ee12b630SMingming Cao }
2172ee12b630SMingming Cao }
2173ee12b630SMingming Cao
2174525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks);
2175a86c6181SAlex Tomas }
2176a86c6181SAlex Tomas
2177a86c6181SAlex Tomas /*
2178ee12b630SMingming Cao * How many index/leaf blocks need to change/allocate to modify nrblocks?
2179ee12b630SMingming Cao *
2180ee12b630SMingming Cao * if nrblocks fit in a single extent (chunk flag is 1), then
2181ee12b630SMingming Cao * in the worst case, each tree level's index/leaf needs to be changed;
2182ee12b630SMingming Cao * if the tree splits due to inserting a new extent, then the old tree's
2183ee12b630SMingming Cao * index/leaf blocks need to be updated too
2184ee12b630SMingming Cao *
2185ee12b630SMingming Cao * If the nrblocks are discontiguous, they could cause
2186ee12b630SMingming Cao * the whole tree to split more than once, but this is really rare.
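 *
 * Illustrative example: with a tree of depth 2, a single contiguous
 * chunk (chunk == 1) is charged depth * 2 = 4 index/leaf blocks, while
 * a discontiguous request is charged depth * 3 = 6.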
2187a86c6181SAlex Tomas */ 2188525f4ed8SMingming Cao int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 2189ee12b630SMingming Cao { 2190ee12b630SMingming Cao int index; 2191ee12b630SMingming Cao int depth = ext_depth(inode); 2192a86c6181SAlex Tomas 2193ee12b630SMingming Cao if (chunk) 2194ee12b630SMingming Cao index = depth * 2; 2195ee12b630SMingming Cao else 2196ee12b630SMingming Cao index = depth * 3; 2197a86c6181SAlex Tomas 2198ee12b630SMingming Cao return index; 2199a86c6181SAlex Tomas } 2200a86c6181SAlex Tomas 2201a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2202a86c6181SAlex Tomas struct ext4_extent *ex, 22030aa06000STheodore Ts'o ext4_fsblk_t *partial_cluster, 2204725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to) 2205a86c6181SAlex Tomas { 22060aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2207a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex); 22080aa06000STheodore Ts'o ext4_fsblk_t pblk; 2209e6362609STheodore Ts'o int flags = EXT4_FREE_BLOCKS_FORGET; 2210a86c6181SAlex Tomas 2211c9de560dSAlex Tomas if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2212e6362609STheodore Ts'o flags |= EXT4_FREE_BLOCKS_METADATA; 22130aa06000STheodore Ts'o /* 22140aa06000STheodore Ts'o * For bigalloc file systems, we never free a partial cluster 22150aa06000STheodore Ts'o * at the beginning of the extent. Instead, we make a note 22160aa06000STheodore Ts'o * that we tried freeing the cluster, and check to see if we 22170aa06000STheodore Ts'o * need to free it on a subsequent call to ext4_remove_blocks, 22180aa06000STheodore Ts'o * or at the end of the ext4_truncate() operation. 22190aa06000STheodore Ts'o */ 22200aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 22210aa06000STheodore Ts'o 2222d8990240SAditya Kali trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); 22230aa06000STheodore Ts'o /* 22240aa06000STheodore Ts'o * If we have a partial cluster, and it's different from the 22250aa06000STheodore Ts'o * cluster of the last block, we need to explicitly free the 22260aa06000STheodore Ts'o * partial cluster here. 
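 *
 * Illustrative example (assuming an 8-block cluster size): if an
 * earlier call left cluster 5 recorded in *partial_cluster and the
 * last block of the extent being removed now lies in cluster 7, the
 * saved cluster is not shared with this extent, so it is freed here.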
22270aa06000STheodore Ts'o */ 22280aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - 1; 22290aa06000STheodore Ts'o if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) { 22300aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 22310aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 22320aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 22330aa06000STheodore Ts'o *partial_cluster = 0; 22340aa06000STheodore Ts'o } 22350aa06000STheodore Ts'o 2236a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2237a86c6181SAlex Tomas { 2238a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2239a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock); 2240a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len; 2241a86c6181SAlex Tomas sbi->s_ext_extents++; 2242a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min) 2243a86c6181SAlex Tomas sbi->s_ext_min = ee_len; 2244a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max) 2245a86c6181SAlex Tomas sbi->s_ext_max = ee_len; 2246a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max) 2247a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode); 2248a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock); 2249a86c6181SAlex Tomas } 2250a86c6181SAlex Tomas #endif 2251a86c6181SAlex Tomas if (from >= le32_to_cpu(ex->ee_block) 2252a2df2a63SAmit Arora && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2253a86c6181SAlex Tomas /* tail removal */ 2254725d26d3SAneesh Kumar K.V ext4_lblk_t num; 2255725d26d3SAneesh Kumar K.V 2256a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from; 22570aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num; 22580aa06000STheodore Ts'o ext_debug("free last %u blocks starting %llu\n", num, pblk); 22590aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 22600aa06000STheodore Ts'o /* 22610aa06000STheodore Ts'o * If the block range to be freed didn't start at the 22620aa06000STheodore Ts'o * beginning of a cluster, and we removed the entire 22630aa06000STheodore Ts'o * extent, save the partial cluster here, since we 22640aa06000STheodore Ts'o * might need to delete if we determine that the 22650aa06000STheodore Ts'o * truncate operation has removed all of the blocks in 22660aa06000STheodore Ts'o * the cluster. 
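 *
 * Illustrative example (again with 8-block clusters): if the freed
 * range starts in the middle of cluster 9 and the whole extent was
 * removed, cluster 9 may still be shared with a neighbouring extent,
 * so it is only remembered in *partial_cluster instead of being freed.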
22670aa06000STheodore Ts'o */ 22680aa06000STheodore Ts'o if (pblk & (sbi->s_cluster_ratio - 1) && 22690aa06000STheodore Ts'o (ee_len == num)) 22700aa06000STheodore Ts'o *partial_cluster = EXT4_B2C(sbi, pblk); 22710aa06000STheodore Ts'o else 22720aa06000STheodore Ts'o *partial_cluster = 0; 2273a86c6181SAlex Tomas } else if (from == le32_to_cpu(ex->ee_block) 2274a2df2a63SAmit Arora && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2275d583fb87SAllison Henderson /* head removal */ 2276d583fb87SAllison Henderson ext4_lblk_t num; 2277d583fb87SAllison Henderson ext4_fsblk_t start; 2278d583fb87SAllison Henderson 2279d583fb87SAllison Henderson num = to - from; 2280d583fb87SAllison Henderson start = ext4_ext_pblock(ex); 2281d583fb87SAllison Henderson 2282d583fb87SAllison Henderson ext_debug("free first %u blocks starting %llu\n", num, start); 2283ee90d57eSH Hartley Sweeten ext4_free_blocks(handle, inode, NULL, start, num, flags); 2284d583fb87SAllison Henderson 2285a86c6181SAlex Tomas } else { 2286725d26d3SAneesh Kumar K.V printk(KERN_INFO "strange request: removal(2) " 2287725d26d3SAneesh Kumar K.V "%u-%u from %u:%u\n", 2288a2df2a63SAmit Arora from, to, le32_to_cpu(ex->ee_block), ee_len); 2289a86c6181SAlex Tomas } 2290a86c6181SAlex Tomas return 0; 2291a86c6181SAlex Tomas } 2292a86c6181SAlex Tomas 2293d583fb87SAllison Henderson 2294d583fb87SAllison Henderson /* 2295d583fb87SAllison Henderson * ext4_ext_rm_leaf() Removes the extents associated with the 2296d583fb87SAllison Henderson * blocks appearing between "start" and "end", and splits the extents 2297d583fb87SAllison Henderson * if "start" and "end" appear in the same extent 2298d583fb87SAllison Henderson * 2299d583fb87SAllison Henderson * @handle: The journal handle 2300d583fb87SAllison Henderson * @inode: The files inode 2301d583fb87SAllison Henderson * @path: The path to the leaf 2302d583fb87SAllison Henderson * @start: The first block to remove 2303d583fb87SAllison Henderson * @end: The last block to remove 2304d583fb87SAllison Henderson */ 2305a86c6181SAlex Tomas static int 2306a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 23070aa06000STheodore Ts'o struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster, 23080aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end) 2309a86c6181SAlex Tomas { 23100aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2311a86c6181SAlex Tomas int err = 0, correct_index = 0; 2312a86c6181SAlex Tomas int depth = ext_depth(inode), credits; 2313a86c6181SAlex Tomas struct ext4_extent_header *eh; 2314750c9c47SDmitry Monakhov ext4_lblk_t a, b; 2315725d26d3SAneesh Kumar K.V unsigned num; 2316725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block; 2317a86c6181SAlex Tomas unsigned short ex_ee_len; 2318a2df2a63SAmit Arora unsigned uninitialized = 0; 2319a86c6181SAlex Tomas struct ext4_extent *ex; 2320a86c6181SAlex Tomas 2321c29c0ae7SAlex Tomas /* the header must be checked already in ext4_ext_remove_space() */ 2322725d26d3SAneesh Kumar K.V ext_debug("truncate since %u in leaf\n", start); 2323a86c6181SAlex Tomas if (!path[depth].p_hdr) 2324a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2325a86c6181SAlex Tomas eh = path[depth].p_hdr; 2326273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 2327273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2328273df556SFrank Mayhar return -EIO; 2329273df556SFrank Mayhar } 2330a86c6181SAlex Tomas /* find where to start removing */ 2331a86c6181SAlex Tomas ex = 
EXT_LAST_EXTENT(eh); 2332a86c6181SAlex Tomas 2333a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2334a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2335a86c6181SAlex Tomas 2336d8990240SAditya Kali trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); 2337d8990240SAditya Kali 2338a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) && 2339a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) { 2340a41f2071SAneesh Kumar K.V 2341a41f2071SAneesh Kumar K.V if (ext4_ext_is_uninitialized(ex)) 2342a41f2071SAneesh Kumar K.V uninitialized = 1; 2343a41f2071SAneesh Kumar K.V else 2344a41f2071SAneesh Kumar K.V uninitialized = 0; 2345a41f2071SAneesh Kumar K.V 2346553f9008SMingming ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2347553f9008SMingming uninitialized, ex_ee_len); 2348a86c6181SAlex Tomas path[depth].p_ext = ex; 2349a86c6181SAlex Tomas 2350a86c6181SAlex Tomas a = ex_ee_block > start ? ex_ee_block : start; 2351d583fb87SAllison Henderson b = ex_ee_block+ex_ee_len - 1 < end ? 2352d583fb87SAllison Henderson ex_ee_block+ex_ee_len - 1 : end; 2353a86c6181SAlex Tomas 2354a86c6181SAlex Tomas ext_debug(" border %u:%u\n", a, b); 2355a86c6181SAlex Tomas 2356d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */ 2357d583fb87SAllison Henderson if (end <= ex_ee_block) { 2358d583fb87SAllison Henderson ex--; 2359d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block); 2360d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex); 2361d583fb87SAllison Henderson continue; 2362750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) { 2363750c9c47SDmitry Monakhov EXT4_ERROR_INODE(inode," bad truncate %u:%u\n", 2364d583fb87SAllison Henderson start, end); 2365d583fb87SAllison Henderson err = -EIO; 2366d583fb87SAllison Henderson goto out; 2367a86c6181SAlex Tomas } else if (a != ex_ee_block) { 2368a86c6181SAlex Tomas /* remove tail of the extent */ 2369750c9c47SDmitry Monakhov num = a - ex_ee_block; 2370a86c6181SAlex Tomas } else { 2371a86c6181SAlex Tomas /* remove whole extent: excellent! 
*/
2372a86c6181SAlex Tomas num = 0;
2373d583fb87SAllison Henderson }
237434071da7STheodore Ts'o /*
237534071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group
237634071da7STheodore Ts'o * descriptor) for each block group; assume two block
237734071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for
237834071da7STheodore Ts'o * the worst case
237934071da7STheodore Ts'o */
238034071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2381a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) {
2382a86c6181SAlex Tomas correct_index = 1;
2383a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1;
2384a86c6181SAlex Tomas }
23855aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2386a86c6181SAlex Tomas
2387487caeefSJan Kara err = ext4_ext_truncate_extend_restart(handle, inode, credits);
23889102e4faSShen Feng if (err)
2389a86c6181SAlex Tomas goto out;
2390a86c6181SAlex Tomas
2391a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth);
2392a86c6181SAlex Tomas if (err)
2393a86c6181SAlex Tomas goto out;
2394a86c6181SAlex Tomas
23950aa06000STheodore Ts'o err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
23960aa06000STheodore Ts'o a, b);
2397a86c6181SAlex Tomas if (err)
2398a86c6181SAlex Tomas goto out;
2399a86c6181SAlex Tomas
2400750c9c47SDmitry Monakhov if (num == 0)
2401d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */
2402f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0);
2403a86c6181SAlex Tomas
2404a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num);
2405749269faSAmit Arora /*
2406749269faSAmit Arora * Do not mark uninitialized if all the blocks in the
2407749269faSAmit Arora * extent have been removed.
2408749269faSAmit Arora */
2409749269faSAmit Arora if (uninitialized && num)
2410a2df2a63SAmit Arora ext4_ext_mark_uninitialized(ex);
2411d583fb87SAllison Henderson /*
2412d583fb87SAllison Henderson * If the extent was completely released,
2413d583fb87SAllison Henderson * we need to remove it from the leaf
2414d583fb87SAllison Henderson */
2415d583fb87SAllison Henderson if (num == 0) {
2416f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) {
2417d583fb87SAllison Henderson /*
2418d583fb87SAllison Henderson * For hole punching, we need to scoot all the
2419d583fb87SAllison Henderson * extents up when an extent is removed so that
2420d583fb87SAllison Henderson * we don't have blank extents in the middle
2421d583fb87SAllison Henderson */
2422d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2423d583fb87SAllison Henderson sizeof(struct ext4_extent));
2424d583fb87SAllison Henderson
2425d583fb87SAllison Henderson /* Now get rid of the one at the end */
2426d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0,
2427d583fb87SAllison Henderson sizeof(struct ext4_extent));
2428d583fb87SAllison Henderson }
2429d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1);
24300aa06000STheodore Ts'o } else
24310aa06000STheodore Ts'o *partial_cluster = 0;
2432d583fb87SAllison Henderson
2433750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth);
2434750c9c47SDmitry Monakhov if (err)
2435750c9c47SDmitry Monakhov goto out;
2436750c9c47SDmitry Monakhov
24372ae02107SMingming Cao ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2438bf89d16fSTheodore Ts'o ext4_ext_pblock(ex));
2439a86c6181SAlex Tomas ex--;
2440a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block);
2441a2df2a63SAmit Arora ex_ee_len =
ext4_ext_get_actual_len(ex); 2442a86c6181SAlex Tomas } 2443a86c6181SAlex Tomas 2444a86c6181SAlex Tomas if (correct_index && eh->eh_entries) 2445a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2446a86c6181SAlex Tomas 24470aa06000STheodore Ts'o /* 24480aa06000STheodore Ts'o * If there is still a entry in the leaf node, check to see if 24490aa06000STheodore Ts'o * it references the partial cluster. This is the only place 24500aa06000STheodore Ts'o * where it could; if it doesn't, we can free the cluster. 24510aa06000STheodore Ts'o */ 24520aa06000STheodore Ts'o if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) && 24530aa06000STheodore Ts'o (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != 24540aa06000STheodore Ts'o *partial_cluster)) { 24550aa06000STheodore Ts'o int flags = EXT4_FREE_BLOCKS_FORGET; 24560aa06000STheodore Ts'o 24570aa06000STheodore Ts'o if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 24580aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_METADATA; 24590aa06000STheodore Ts'o 24600aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 24610aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 24620aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 24630aa06000STheodore Ts'o *partial_cluster = 0; 24640aa06000STheodore Ts'o } 24650aa06000STheodore Ts'o 2466a86c6181SAlex Tomas /* if this leaf is free, then we should 2467a86c6181SAlex Tomas * remove it from index block above */ 2468a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2469a86c6181SAlex Tomas err = ext4_ext_rm_idx(handle, inode, path + depth); 2470a86c6181SAlex Tomas 2471a86c6181SAlex Tomas out: 2472a86c6181SAlex Tomas return err; 2473a86c6181SAlex Tomas } 2474a86c6181SAlex Tomas 2475a86c6181SAlex Tomas /* 2476d0d856e8SRandy Dunlap * ext4_ext_more_to_rm: 2477d0d856e8SRandy Dunlap * returns 1 if current index has to be freed (even partial) 2478a86c6181SAlex Tomas */ 247909b88252SAvantika Mathur static int 2480a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path) 2481a86c6181SAlex Tomas { 2482a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL); 2483a86c6181SAlex Tomas 2484a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2485a86c6181SAlex Tomas return 0; 2486a86c6181SAlex Tomas 2487a86c6181SAlex Tomas /* 2488d0d856e8SRandy Dunlap * if truncate on deeper level happened, it wasn't partial, 2489a86c6181SAlex Tomas * so we have to consider current index for truncation 2490a86c6181SAlex Tomas */ 2491a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2492a86c6181SAlex Tomas return 0; 2493a86c6181SAlex Tomas return 1; 2494a86c6181SAlex Tomas } 2495a86c6181SAlex Tomas 2496c6a0371cSAllison Henderson static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) 2497a86c6181SAlex Tomas { 2498a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 2499a86c6181SAlex Tomas int depth = ext_depth(inode); 2500a86c6181SAlex Tomas struct ext4_ext_path *path; 25010aa06000STheodore Ts'o ext4_fsblk_t partial_cluster = 0; 2502a86c6181SAlex Tomas handle_t *handle; 25030617b83fSDmitry Monakhov int i, err; 2504a86c6181SAlex Tomas 2505725d26d3SAneesh Kumar K.V ext_debug("truncate since %u\n", start); 2506a86c6181SAlex Tomas 2507a86c6181SAlex Tomas /* probably first extent we're gonna free will be last in block */ 2508a86c6181SAlex Tomas handle = ext4_journal_start(inode, depth + 1); 2509a86c6181SAlex Tomas if (IS_ERR(handle)) 2510a86c6181SAlex Tomas return PTR_ERR(handle); 2511a86c6181SAlex Tomas 
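/*
 * Note: freeing a leaf may force a transaction restart to get more
 * journal credits; in that case ext4_ext_rm_leaf() comes back with
 * -EAGAIN, the path is dropped in the cleanup at the bottom of this
 * function, and we jump back to the "again" label below to rescan
 * the tree.
 */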
25120617b83fSDmitry Monakhov again: 2513a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 2514a86c6181SAlex Tomas 2515d8990240SAditya Kali trace_ext4_ext_remove_space(inode, start, depth); 2516d8990240SAditya Kali 2517a86c6181SAlex Tomas /* 2518d0d856e8SRandy Dunlap * We start scanning from right side, freeing all the blocks 2519d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise. 2520a86c6181SAlex Tomas */ 25210617b83fSDmitry Monakhov depth = ext_depth(inode); 2522216553c4SJosef Bacik path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); 2523a86c6181SAlex Tomas if (path == NULL) { 2524a86c6181SAlex Tomas ext4_journal_stop(handle); 2525a86c6181SAlex Tomas return -ENOMEM; 2526a86c6181SAlex Tomas } 25270617b83fSDmitry Monakhov path[0].p_depth = depth; 2528a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode); 252956b19868SAneesh Kumar K.V if (ext4_ext_check(inode, path[0].p_hdr, depth)) { 2530a86c6181SAlex Tomas err = -EIO; 2531a86c6181SAlex Tomas goto out; 2532a86c6181SAlex Tomas } 25330617b83fSDmitry Monakhov i = err = 0; 2534a86c6181SAlex Tomas 2535a86c6181SAlex Tomas while (i >= 0 && err == 0) { 2536a86c6181SAlex Tomas if (i == depth) { 2537a86c6181SAlex Tomas /* this is leaf block */ 2538d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 25390aa06000STheodore Ts'o &partial_cluster, start, 25400aa06000STheodore Ts'o EXT_MAX_BLOCKS - 1); 2541d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2542a86c6181SAlex Tomas brelse(path[i].p_bh); 2543a86c6181SAlex Tomas path[i].p_bh = NULL; 2544a86c6181SAlex Tomas i--; 2545a86c6181SAlex Tomas continue; 2546a86c6181SAlex Tomas } 2547a86c6181SAlex Tomas 2548a86c6181SAlex Tomas /* this is index block */ 2549a86c6181SAlex Tomas if (!path[i].p_hdr) { 2550a86c6181SAlex Tomas ext_debug("initialize header\n"); 2551a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2552a86c6181SAlex Tomas } 2553a86c6181SAlex Tomas 2554a86c6181SAlex Tomas if (!path[i].p_idx) { 2555d0d856e8SRandy Dunlap /* this level hasn't been touched yet */ 2556a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2557a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2558a86c6181SAlex Tomas ext_debug("init index ptr: hdr 0x%p, num %d\n", 2559a86c6181SAlex Tomas path[i].p_hdr, 2560a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries)); 2561a86c6181SAlex Tomas } else { 2562d0d856e8SRandy Dunlap /* we were already here, see at next index */ 2563a86c6181SAlex Tomas path[i].p_idx--; 2564a86c6181SAlex Tomas } 2565a86c6181SAlex Tomas 2566a86c6181SAlex Tomas ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2567a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr), 2568a86c6181SAlex Tomas path[i].p_idx); 2569a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) { 2570c29c0ae7SAlex Tomas struct buffer_head *bh; 2571a86c6181SAlex Tomas /* go to the next level */ 25722ae02107SMingming Cao ext_debug("move to level %d (block %llu)\n", 2573bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx)); 2574a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path)); 2575bf89d16fSTheodore Ts'o bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); 2576c29c0ae7SAlex Tomas if (!bh) { 2577a86c6181SAlex Tomas /* should we reset i_size? 
*/ 2578a86c6181SAlex Tomas err = -EIO; 2579a86c6181SAlex Tomas break; 2580a86c6181SAlex Tomas } 2581c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) { 2582c29c0ae7SAlex Tomas err = -EIO; 2583c29c0ae7SAlex Tomas break; 2584c29c0ae7SAlex Tomas } 258556b19868SAneesh Kumar K.V if (ext4_ext_check(inode, ext_block_hdr(bh), 2586c29c0ae7SAlex Tomas depth - i - 1)) { 2587c29c0ae7SAlex Tomas err = -EIO; 2588c29c0ae7SAlex Tomas break; 2589c29c0ae7SAlex Tomas } 2590c29c0ae7SAlex Tomas path[i + 1].p_bh = bh; 2591a86c6181SAlex Tomas 2592d0d856e8SRandy Dunlap /* save actual number of indexes since this 2593d0d856e8SRandy Dunlap * number is changed at the next iteration */ 2594a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2595a86c6181SAlex Tomas i++; 2596a86c6181SAlex Tomas } else { 2597d0d856e8SRandy Dunlap /* we finished processing this index, go up */ 2598a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2599d0d856e8SRandy Dunlap /* index is empty, remove it; 2600a86c6181SAlex Tomas * handle must be already prepared by the 2601a86c6181SAlex Tomas * truncatei_leaf() */ 2602a86c6181SAlex Tomas err = ext4_ext_rm_idx(handle, inode, path + i); 2603a86c6181SAlex Tomas } 2604d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2605a86c6181SAlex Tomas brelse(path[i].p_bh); 2606a86c6181SAlex Tomas path[i].p_bh = NULL; 2607a86c6181SAlex Tomas i--; 2608a86c6181SAlex Tomas ext_debug("return to level %d\n", i); 2609a86c6181SAlex Tomas } 2610a86c6181SAlex Tomas } 2611a86c6181SAlex Tomas 2612d8990240SAditya Kali trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster, 2613d8990240SAditya Kali path->p_hdr->eh_entries); 2614d8990240SAditya Kali 26157b415bf6SAditya Kali /* If we still have something in the partial cluster and we have removed 26167b415bf6SAditya Kali * even the first extent, then we should free the blocks in the partial 26177b415bf6SAditya Kali * cluster as well. 
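 *
 * (Illustrative example, assuming a bigalloc cluster ratio of 16: if the
 * last leaf freed above left partial_cluster pointing at cluster N and no
 * extent remains in the tree, all 16 blocks of cluster N are released by
 * the ext4_free_blocks() call below.)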
*/ 26187b415bf6SAditya Kali if (partial_cluster && path->p_hdr->eh_entries == 0) { 26197b415bf6SAditya Kali int flags = EXT4_FREE_BLOCKS_FORGET; 26207b415bf6SAditya Kali 26217b415bf6SAditya Kali if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 26227b415bf6SAditya Kali flags |= EXT4_FREE_BLOCKS_METADATA; 26237b415bf6SAditya Kali 26247b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL, 26257b415bf6SAditya Kali EXT4_C2B(EXT4_SB(sb), partial_cluster), 26267b415bf6SAditya Kali EXT4_SB(sb)->s_cluster_ratio, flags); 26277b415bf6SAditya Kali partial_cluster = 0; 26287b415bf6SAditya Kali } 26297b415bf6SAditya Kali 2630a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */ 2631a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) { 2632a86c6181SAlex Tomas /* 2633d0d856e8SRandy Dunlap * truncate to zero freed all the tree, 2634d0d856e8SRandy Dunlap * so we need to correct eh_depth 2635a86c6181SAlex Tomas */ 2636a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path); 2637a86c6181SAlex Tomas if (err == 0) { 2638a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0; 2639a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max = 264055ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0)); 2641a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path); 2642a86c6181SAlex Tomas } 2643a86c6181SAlex Tomas } 2644a86c6181SAlex Tomas out: 2645a86c6181SAlex Tomas ext4_ext_drop_refs(path); 2646a86c6181SAlex Tomas kfree(path); 26470617b83fSDmitry Monakhov if (err == -EAGAIN) 26480617b83fSDmitry Monakhov goto again; 2649a86c6181SAlex Tomas ext4_journal_stop(handle); 2650a86c6181SAlex Tomas 2651a86c6181SAlex Tomas return err; 2652a86c6181SAlex Tomas } 2653a86c6181SAlex Tomas 2654a86c6181SAlex Tomas /* 2655a86c6181SAlex Tomas * called at mount time 2656a86c6181SAlex Tomas */ 2657a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb) 2658a86c6181SAlex Tomas { 2659a86c6181SAlex Tomas /* 2660a86c6181SAlex Tomas * possible initialization would be here 2661a86c6181SAlex Tomas */ 2662a86c6181SAlex Tomas 266383982b6fSTheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 266490576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 26654776004fSTheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled"); 2666bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 2667bbf2f9fbSRobert P. J. 
Day printk(", aggressive tests"); 2668a86c6181SAlex Tomas #endif 2669a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 2670a86c6181SAlex Tomas printk(", check binsearch"); 2671a86c6181SAlex Tomas #endif 2672a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2673a86c6181SAlex Tomas printk(", stats"); 2674a86c6181SAlex Tomas #endif 2675a86c6181SAlex Tomas printk("\n"); 267690576c0bSTheodore Ts'o #endif 2677a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2678a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 2679a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30; 2680a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0; 2681a86c6181SAlex Tomas #endif 2682a86c6181SAlex Tomas } 2683a86c6181SAlex Tomas } 2684a86c6181SAlex Tomas 2685a86c6181SAlex Tomas /* 2686a86c6181SAlex Tomas * called at umount time 2687a86c6181SAlex Tomas */ 2688a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb) 2689a86c6181SAlex Tomas { 269083982b6fSTheodore Ts'o if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 2691a86c6181SAlex Tomas return; 2692a86c6181SAlex Tomas 2693a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2694a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 2695a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 2696a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 2697a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents, 2698a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents); 2699a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 2700a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 2701a86c6181SAlex Tomas } 2702a86c6181SAlex Tomas #endif 2703a86c6181SAlex Tomas } 2704a86c6181SAlex Tomas 2705093a088bSAneesh Kumar K.V /* FIXME!! we need to try to merge to left or right after zero-out */ 2706093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 2707093a088bSAneesh Kumar K.V { 27082407518dSLukas Czerner ext4_fsblk_t ee_pblock; 27092407518dSLukas Czerner unsigned int ee_len; 2710b720303dSJing Zhang int ret; 2711093a088bSAneesh Kumar K.V 2712093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 2713bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex); 2714093a088bSAneesh Kumar K.V 2715a107e5a3STheodore Ts'o ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 27162407518dSLukas Czerner if (ret > 0) 27172407518dSLukas Czerner ret = 0; 2718093a088bSAneesh Kumar K.V 27192407518dSLukas Czerner return ret; 2720093a088bSAneesh Kumar K.V } 2721093a088bSAneesh Kumar K.V 272247ea3bb5SYongqiang Yang /* 272347ea3bb5SYongqiang Yang * used by extent splitting. 272447ea3bb5SYongqiang Yang */ 272547ea3bb5SYongqiang Yang #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ 272647ea3bb5SYongqiang Yang due to ENOSPC */ 272747ea3bb5SYongqiang Yang #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ 272847ea3bb5SYongqiang Yang #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ 272947ea3bb5SYongqiang Yang 273047ea3bb5SYongqiang Yang /* 273147ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at given block. 273247ea3bb5SYongqiang Yang * 273347ea3bb5SYongqiang Yang * @handle: the journal handle 273447ea3bb5SYongqiang Yang * @inode: the file inode 273547ea3bb5SYongqiang Yang * @path: the path to the extent 273647ea3bb5SYongqiang Yang * @split: the logical block where the extent is splitted. 
273747ea3bb5SYongqiang Yang * @split_flag: indicates whether the extent could be zeroed out if the split fails, and
273847ea3bb5SYongqiang Yang * the states (init or uninit) of the new extents.
273947ea3bb5SYongqiang Yang * @flags: flags used to insert the new extent into the extent tree.
274047ea3bb5SYongqiang Yang *
274147ea3bb5SYongqiang Yang *
274247ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
274347ea3bb5SYongqiang Yang * of which are determined by split_flag.
274447ea3bb5SYongqiang Yang *
274547ea3bb5SYongqiang Yang * There are two cases:
274647ea3bb5SYongqiang Yang * a> the extent is split into two extents.
274747ea3bb5SYongqiang Yang * b> no split is needed, and the extent is just marked.
274847ea3bb5SYongqiang Yang *
274947ea3bb5SYongqiang Yang * return 0 on success.
275047ea3bb5SYongqiang Yang */
275147ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle,
275247ea3bb5SYongqiang Yang struct inode *inode,
275347ea3bb5SYongqiang Yang struct ext4_ext_path *path,
275447ea3bb5SYongqiang Yang ext4_lblk_t split,
275547ea3bb5SYongqiang Yang int split_flag,
275647ea3bb5SYongqiang Yang int flags)
275747ea3bb5SYongqiang Yang {
275847ea3bb5SYongqiang Yang ext4_fsblk_t newblock;
275947ea3bb5SYongqiang Yang ext4_lblk_t ee_block;
276047ea3bb5SYongqiang Yang struct ext4_extent *ex, newex, orig_ex;
276147ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL;
276247ea3bb5SYongqiang Yang unsigned int ee_len, depth;
276347ea3bb5SYongqiang Yang int err = 0;
276447ea3bb5SYongqiang Yang
276547ea3bb5SYongqiang Yang ext_debug("ext4_split_extent_at: inode %lu, logical "
276647ea3bb5SYongqiang Yang "block %llu\n", inode->i_ino, (unsigned long long)split);
276747ea3bb5SYongqiang Yang
276847ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path);
276947ea3bb5SYongqiang Yang
277047ea3bb5SYongqiang Yang depth = ext_depth(inode);
277147ea3bb5SYongqiang Yang ex = path[depth].p_ext;
277247ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block);
277347ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex);
277447ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex);
277547ea3bb5SYongqiang Yang
277647ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len));
277747ea3bb5SYongqiang Yang
277847ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth);
277947ea3bb5SYongqiang Yang if (err)
278047ea3bb5SYongqiang Yang goto out;
278147ea3bb5SYongqiang Yang
278247ea3bb5SYongqiang Yang if (split == ee_block) {
278347ea3bb5SYongqiang Yang /*
278447ea3bb5SYongqiang Yang * case b: block @split is the block that the extent begins with,
278547ea3bb5SYongqiang Yang * so we just change the state of the extent, and splitting
278647ea3bb5SYongqiang Yang * is not needed.
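 *
 * (Illustrative example: asking to split an uninitialized extent covering
 * logical blocks 100-119 at block 100 leaves its boundaries untouched;
 * only the initialized/uninitialized state is updated according to
 * split_flag.)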
278747ea3bb5SYongqiang Yang */
278847ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2)
278947ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex);
279047ea3bb5SYongqiang Yang else
279147ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex);
279247ea3bb5SYongqiang Yang
279347ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
279447ea3bb5SYongqiang Yang ext4_ext_try_to_merge(inode, path, ex);
279547ea3bb5SYongqiang Yang
279647ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth);
279747ea3bb5SYongqiang Yang goto out;
279847ea3bb5SYongqiang Yang }
279947ea3bb5SYongqiang Yang
280047ea3bb5SYongqiang Yang /* case a */
280147ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex));
280247ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block);
280347ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT1)
280447ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex);
280547ea3bb5SYongqiang Yang
280647ea3bb5SYongqiang Yang /*
280747ea3bb5SYongqiang Yang * The path may lead to a new leaf, not to the original leaf any more,
280847ea3bb5SYongqiang Yang * after ext4_ext_insert_extent() returns.
280947ea3bb5SYongqiang Yang */
281047ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth);
281147ea3bb5SYongqiang Yang if (err)
281247ea3bb5SYongqiang Yang goto fix_extent_len;
281347ea3bb5SYongqiang Yang
281447ea3bb5SYongqiang Yang ex2 = &newex;
281547ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split);
281647ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
281747ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock);
281847ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2)
281947ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex2);
282047ea3bb5SYongqiang Yang
282147ea3bb5SYongqiang Yang err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
282247ea3bb5SYongqiang Yang if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
282347ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex);
282447ea3bb5SYongqiang Yang if (err)
282547ea3bb5SYongqiang Yang goto fix_extent_len;
282647ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */
282747ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(ee_len);
282847ea3bb5SYongqiang Yang ext4_ext_try_to_merge(inode, path, ex);
282947ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth);
283047ea3bb5SYongqiang Yang goto out;
283147ea3bb5SYongqiang Yang } else if (err)
283247ea3bb5SYongqiang Yang goto fix_extent_len;
283347ea3bb5SYongqiang Yang
283447ea3bb5SYongqiang Yang out:
283547ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path);
283647ea3bb5SYongqiang Yang return err;
283747ea3bb5SYongqiang Yang
283847ea3bb5SYongqiang Yang fix_extent_len:
283947ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len;
284047ea3bb5SYongqiang Yang ext4_ext_dirty(handle, inode, path + depth);
284147ea3bb5SYongqiang Yang return err;
284247ea3bb5SYongqiang Yang }
284347ea3bb5SYongqiang Yang
284447ea3bb5SYongqiang Yang /*
284547ea3bb5SYongqiang Yang * ext4_split_extent() splits an extent and marks the extent which is covered
284647ea3bb5SYongqiang Yang * by @map as split_flag indicates
284747ea3bb5SYongqiang Yang *
284847ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (up to three)
284947ea3bb5SYongqiang Yang * There are three possibilities:
285047ea3bb5SYongqiang Yang * a> There is no split required
285147ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the
extent 285247ea3bb5SYongqiang Yang * c> Splits in three extents: Somone is splitting in middle of the extent 285347ea3bb5SYongqiang Yang * 285447ea3bb5SYongqiang Yang */ 285547ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle, 285647ea3bb5SYongqiang Yang struct inode *inode, 285747ea3bb5SYongqiang Yang struct ext4_ext_path *path, 285847ea3bb5SYongqiang Yang struct ext4_map_blocks *map, 285947ea3bb5SYongqiang Yang int split_flag, 286047ea3bb5SYongqiang Yang int flags) 286147ea3bb5SYongqiang Yang { 286247ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 286347ea3bb5SYongqiang Yang struct ext4_extent *ex; 286447ea3bb5SYongqiang Yang unsigned int ee_len, depth; 286547ea3bb5SYongqiang Yang int err = 0; 286647ea3bb5SYongqiang Yang int uninitialized; 286747ea3bb5SYongqiang Yang int split_flag1, flags1; 286847ea3bb5SYongqiang Yang 286947ea3bb5SYongqiang Yang depth = ext_depth(inode); 287047ea3bb5SYongqiang Yang ex = path[depth].p_ext; 287147ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 287247ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 287347ea3bb5SYongqiang Yang uninitialized = ext4_ext_is_uninitialized(ex); 287447ea3bb5SYongqiang Yang 287547ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) { 287647ea3bb5SYongqiang Yang split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? 287747ea3bb5SYongqiang Yang EXT4_EXT_MAY_ZEROOUT : 0; 287847ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 287947ea3bb5SYongqiang Yang if (uninitialized) 288047ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1 | 288147ea3bb5SYongqiang Yang EXT4_EXT_MARK_UNINIT2; 288247ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 288347ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1); 288493917411SYongqiang Yang if (err) 288593917411SYongqiang Yang goto out; 288647ea3bb5SYongqiang Yang } 288747ea3bb5SYongqiang Yang 288847ea3bb5SYongqiang Yang ext4_ext_drop_refs(path); 288947ea3bb5SYongqiang Yang path = ext4_ext_find_extent(inode, map->m_lblk, path); 289047ea3bb5SYongqiang Yang if (IS_ERR(path)) 289147ea3bb5SYongqiang Yang return PTR_ERR(path); 289247ea3bb5SYongqiang Yang 289347ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) { 289447ea3bb5SYongqiang Yang split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? 289547ea3bb5SYongqiang Yang EXT4_EXT_MAY_ZEROOUT : 0; 289647ea3bb5SYongqiang Yang if (uninitialized) 289747ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1; 289847ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 289947ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT2; 290047ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 290147ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags); 290247ea3bb5SYongqiang Yang if (err) 290347ea3bb5SYongqiang Yang goto out; 290447ea3bb5SYongqiang Yang } 290547ea3bb5SYongqiang Yang 290647ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 290747ea3bb5SYongqiang Yang out: 290847ea3bb5SYongqiang Yang return err ? err : map->m_len; 290947ea3bb5SYongqiang Yang } 291047ea3bb5SYongqiang Yang 29113977c965SAneesh Kumar K.V #define EXT4_EXT_ZERO_LEN 7 291256055d3aSAmit Arora /* 2913e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write 291456055d3aSAmit Arora * to an uninitialized extent. It may result in splitting the uninitialized 291556055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two 291656055d3aSAmit Arora * uninitialized). 
291756055d3aSAmit Arora * There are three possibilities: 291856055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized 291956055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent 292056055d3aSAmit Arora * c> Splits in three extents: Somone is writing in middle of the extent 29216f91bc5fSEric Gouriou * 29226f91bc5fSEric Gouriou * Pre-conditions: 29236f91bc5fSEric Gouriou * - The extent pointed to by 'path' is uninitialized. 29246f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset 29256f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 29266f91bc5fSEric Gouriou * 29276f91bc5fSEric Gouriou * Post-conditions on success: 29286f91bc5fSEric Gouriou * - the returned value is the number of blocks beyond map->l_lblk 29296f91bc5fSEric Gouriou * that are allocated and initialized. 29306f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len. 293156055d3aSAmit Arora */ 2932725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle, 2933725d26d3SAneesh Kumar K.V struct inode *inode, 2934e35fd660STheodore Ts'o struct ext4_map_blocks *map, 2935e35fd660STheodore Ts'o struct ext4_ext_path *path) 293656055d3aSAmit Arora { 29376f91bc5fSEric Gouriou struct ext4_extent_header *eh; 2938667eff35SYongqiang Yang struct ext4_map_blocks split_map; 2939667eff35SYongqiang Yang struct ext4_extent zero_ex; 2940667eff35SYongqiang Yang struct ext4_extent *ex; 294121ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block; 2942f85b287aSDan Carpenter unsigned int ee_len, depth; 2943f85b287aSDan Carpenter int allocated; 294456055d3aSAmit Arora int err = 0; 2945667eff35SYongqiang Yang int split_flag = 0; 294621ca087aSDmitry Monakhov 294721ca087aSDmitry Monakhov ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 294821ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 2949e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len); 295021ca087aSDmitry Monakhov 295121ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 295221ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 2953e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len) 2954e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len; 295556055d3aSAmit Arora 295656055d3aSAmit Arora depth = ext_depth(inode); 29576f91bc5fSEric Gouriou eh = path[depth].p_hdr; 295856055d3aSAmit Arora ex = path[depth].p_ext; 295956055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block); 296056055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex); 2961e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 296221ca087aSDmitry Monakhov 29636f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 29646f91bc5fSEric Gouriou 29656f91bc5fSEric Gouriou /* Pre-conditions */ 29666f91bc5fSEric Gouriou BUG_ON(!ext4_ext_is_uninitialized(ex)); 29676f91bc5fSEric Gouriou BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 29686f91bc5fSEric Gouriou BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len); 29696f91bc5fSEric Gouriou 29706f91bc5fSEric Gouriou /* 29716f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently 29726f91bc5fSEric Gouriou * uninitialized extent to its left neighbor. This is much cheaper 29736f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly 29746f91bc5fSEric Gouriou * memmove() calls. 
This is the common case in steady state for 29756f91bc5fSEric Gouriou * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append 29766f91bc5fSEric Gouriou * writes. 29776f91bc5fSEric Gouriou * 29786f91bc5fSEric Gouriou * Limitations of the current logic: 29796f91bc5fSEric Gouriou * - L1: we only deal with writes at the start of the extent. 29806f91bc5fSEric Gouriou * The approach could be extended to writes at the end 29816f91bc5fSEric Gouriou * of the extent but this scenario was deemed less common. 29826f91bc5fSEric Gouriou * - L2: we do not deal with writes covering the whole extent. 29836f91bc5fSEric Gouriou * This would require removing the extent if the transfer 29846f91bc5fSEric Gouriou * is possible. 29856f91bc5fSEric Gouriou * - L3: we only attempt to merge with an extent stored in the 29866f91bc5fSEric Gouriou * same extent tree node. 29876f91bc5fSEric Gouriou */ 29886f91bc5fSEric Gouriou if ((map->m_lblk == ee_block) && /*L1*/ 29896f91bc5fSEric Gouriou (map->m_len < ee_len) && /*L2*/ 29906f91bc5fSEric Gouriou (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/ 29916f91bc5fSEric Gouriou struct ext4_extent *prev_ex; 29926f91bc5fSEric Gouriou ext4_lblk_t prev_lblk; 29936f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk; 29946f91bc5fSEric Gouriou unsigned int prev_len, write_len; 29956f91bc5fSEric Gouriou 29966f91bc5fSEric Gouriou prev_ex = ex - 1; 29976f91bc5fSEric Gouriou prev_lblk = le32_to_cpu(prev_ex->ee_block); 29986f91bc5fSEric Gouriou prev_len = ext4_ext_get_actual_len(prev_ex); 29996f91bc5fSEric Gouriou prev_pblk = ext4_ext_pblock(prev_ex); 30006f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex); 30016f91bc5fSEric Gouriou write_len = map->m_len; 30026f91bc5fSEric Gouriou 30036f91bc5fSEric Gouriou /* 30046f91bc5fSEric Gouriou * A transfer of blocks from 'ex' to 'prev_ex' is allowed 30056f91bc5fSEric Gouriou * upon those conditions: 30066f91bc5fSEric Gouriou * - C1: prev_ex is initialized, 30076f91bc5fSEric Gouriou * - C2: prev_ex is logically abutting ex, 30086f91bc5fSEric Gouriou * - C3: prev_ex is physically abutting ex, 30096f91bc5fSEric Gouriou * - C4: prev_ex can receive the additional blocks without 30106f91bc5fSEric Gouriou * overflowing the (initialized) length limit. 
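 *
 * (Illustrative example: suppose prev_ex is an initialized extent covering
 * logical blocks 100-107 at physical blocks P..P+7, and ex is an
 * uninitialized extent starting at logical 108 / physical P+8. A 4-block
 * write at logical 108 then satisfies C1-C4, so the code below extends
 * prev_ex to cover 100-111 and shifts the start of ex forward by those
 * 4 blocks.)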
30116f91bc5fSEric Gouriou */ 30126f91bc5fSEric Gouriou if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/ 30136f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/ 30146f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 30156f91bc5fSEric Gouriou (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/ 30166f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth); 30176f91bc5fSEric Gouriou if (err) 30186f91bc5fSEric Gouriou goto out; 30196f91bc5fSEric Gouriou 30206f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode, 30216f91bc5fSEric Gouriou map, ex, prev_ex); 30226f91bc5fSEric Gouriou 30236f91bc5fSEric Gouriou /* Shift the start of ex by 'write_len' blocks */ 30246f91bc5fSEric Gouriou ex->ee_block = cpu_to_le32(ee_block + write_len); 30256f91bc5fSEric Gouriou ext4_ext_store_pblock(ex, ee_pblk + write_len); 30266f91bc5fSEric Gouriou ex->ee_len = cpu_to_le16(ee_len - write_len); 30276f91bc5fSEric Gouriou ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 30286f91bc5fSEric Gouriou 30296f91bc5fSEric Gouriou /* Extend prev_ex by 'write_len' blocks */ 30306f91bc5fSEric Gouriou prev_ex->ee_len = cpu_to_le16(prev_len + write_len); 30316f91bc5fSEric Gouriou 30326f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */ 30336f91bc5fSEric Gouriou ext4_ext_dirty(handle, inode, path + depth); 30346f91bc5fSEric Gouriou 30356f91bc5fSEric Gouriou /* Update path to point to the right extent */ 30366f91bc5fSEric Gouriou path[depth].p_ext = prev_ex; 30376f91bc5fSEric Gouriou 30386f91bc5fSEric Gouriou /* Result: number of initialized blocks past m_lblk */ 30396f91bc5fSEric Gouriou allocated = write_len; 30406f91bc5fSEric Gouriou goto out; 30416f91bc5fSEric Gouriou } 30426f91bc5fSEric Gouriou } 30436f91bc5fSEric Gouriou 3044667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block); 304521ca087aSDmitry Monakhov /* 304621ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 304721ca087aSDmitry Monakhov * zeroout only if extent is fully insde i_size or new_size. 304821ca087aSDmitry Monakhov */ 3049667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 305021ca087aSDmitry Monakhov 30513977c965SAneesh Kumar K.V /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */ 3052667eff35SYongqiang Yang if (ee_len <= 2*EXT4_EXT_ZERO_LEN && 3053667eff35SYongqiang Yang (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3054667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, ex); 30553977c965SAneesh Kumar K.V if (err) 305656055d3aSAmit Arora goto out; 30579df5643aSAneesh Kumar K.V 30589df5643aSAneesh Kumar K.V err = ext4_ext_get_access(handle, inode, path + depth); 30599df5643aSAneesh Kumar K.V if (err) 30609df5643aSAneesh Kumar K.V goto out; 3061667eff35SYongqiang Yang ext4_ext_mark_initialized(ex); 3062667eff35SYongqiang Yang ext4_ext_try_to_merge(inode, path, ex); 306356055d3aSAmit Arora err = ext4_ext_dirty(handle, inode, path + depth); 306456055d3aSAmit Arora goto out; 3065667eff35SYongqiang Yang } 3066093a088bSAneesh Kumar K.V 3067667eff35SYongqiang Yang /* 3068667eff35SYongqiang Yang * four cases: 3069667eff35SYongqiang Yang * 1. split the extent into three extents. 3070667eff35SYongqiang Yang * 2. split the extent into two extents, zeroout the first half. 3071667eff35SYongqiang Yang * 3. split the extent into two extents, zeroout the second half. 3072667eff35SYongqiang Yang * 4. split the extent into two extents with out zeroout. 
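 *
 * (Roughly: when the uninitialized area around the write is small enough
 * and zeroout is allowed, cases 2 and 3 zero out the head or the tail of
 * the extent instead of creating an extra extent; a write landing in the
 * middle of a large uninitialized extent falls back to case 1 and yields
 * uninitialized / initialized / uninitialized extents.)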
3073667eff35SYongqiang Yang */
3074667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk;
3075667eff35SYongqiang Yang split_map.m_len = map->m_len;
3076667eff35SYongqiang Yang
3077667eff35SYongqiang Yang if (allocated > map->m_len) {
3078667eff35SYongqiang Yang if (allocated <= EXT4_EXT_ZERO_LEN &&
3079667eff35SYongqiang Yang (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3080667eff35SYongqiang Yang /* case 3 */
3081667eff35SYongqiang Yang zero_ex.ee_block =
30829b940f8eSAllison Henderson cpu_to_le32(map->m_lblk);
30839b940f8eSAllison Henderson zero_ex.ee_len = cpu_to_le16(allocated);
3084667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex,
3085667eff35SYongqiang Yang ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3086667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex);
3087667eff35SYongqiang Yang if (err)
3088667eff35SYongqiang Yang goto out;
3089667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk;
3090667eff35SYongqiang Yang split_map.m_len = allocated;
3091667eff35SYongqiang Yang } else if ((map->m_lblk - ee_block + map->m_len <
3092667eff35SYongqiang Yang EXT4_EXT_ZERO_LEN) &&
3093667eff35SYongqiang Yang (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3094667eff35SYongqiang Yang /* case 2 */
3095667eff35SYongqiang Yang if (map->m_lblk != ee_block) {
3096667eff35SYongqiang Yang zero_ex.ee_block = ex->ee_block;
3097667eff35SYongqiang Yang zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3098667eff35SYongqiang Yang ee_block);
3099667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex,
3100667eff35SYongqiang Yang ext4_ext_pblock(ex));
3101667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex);
3102667eff35SYongqiang Yang if (err)
3103667eff35SYongqiang Yang goto out;
3104667eff35SYongqiang Yang }
3105667eff35SYongqiang Yang
3106667eff35SYongqiang Yang split_map.m_lblk = ee_block;
31079b940f8eSAllison Henderson split_map.m_len = map->m_lblk - ee_block + map->m_len;
31089b940f8eSAllison Henderson allocated = map->m_len;
3109667eff35SYongqiang Yang }
3110667eff35SYongqiang Yang }
3111667eff35SYongqiang Yang
3112667eff35SYongqiang Yang allocated = ext4_split_extent(handle, inode, path,
3113667eff35SYongqiang Yang &split_map, split_flag, 0);
3114667eff35SYongqiang Yang if (allocated < 0)
3115667eff35SYongqiang Yang err = allocated;
3116667eff35SYongqiang Yang
3117667eff35SYongqiang Yang out:
3118667eff35SYongqiang Yang return err ? err : allocated;
311956055d3aSAmit Arora }
312056055d3aSAmit Arora
3121c278bfecSAneesh Kumar K.V /*
3122e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() from
31230031462bSMingming Cao * ext4_get_blocks_dio_write() when DIO is used to write
31240031462bSMingming Cao * to an uninitialized extent.
31250031462bSMingming Cao *
3126fd018fe8SPaul Bolle * Writing to an uninitialized extent may result in splitting the uninitialized
3127b595076aSUwe Kleine-König * extent into multiple initialized/uninitialized extents (up to three)
31280031462bSMingming Cao * There are three possibilities:
31290031462bSMingming Cao * a> There is no split required: Entire extent should be uninitialized
31300031462bSMingming Cao * b> Splits in two extents: Write is happening at either end of the extent
31310031462bSMingming Cao * c> Splits in three extents: Someone is writing in the middle of the extent
31320031462bSMingming Cao *
31330031462bSMingming Cao * One or more index blocks may be needed if the extent tree grows after
3134b595076aSUwe Kleine-König * the uninitialized extent split.
To prevent ENOSPC from occurring when the IO
31350031462bSMingming Cao * completes, we need to split the uninitialized extent before DIO submits
3136421f91d2SUwe Kleine-König * the IO. The uninitialized extent involved at this time will be split
31370031462bSMingming Cao * into (at most) three uninitialized extents. After the IO completes, the part
31380031462bSMingming Cao * that was filled will be converted to initialized by the end_io callback
31390031462bSMingming Cao * via ext4_convert_unwritten_extents().
3140ba230c3fSMingming *
3141ba230c3fSMingming * Returns the size of the uninitialized extent to be written on success.
31420031462bSMingming Cao */
31430031462bSMingming Cao static int ext4_split_unwritten_extents(handle_t *handle,
31440031462bSMingming Cao struct inode *inode,
3145e35fd660STheodore Ts'o struct ext4_map_blocks *map,
31460031462bSMingming Cao struct ext4_ext_path *path,
31470031462bSMingming Cao int flags)
31480031462bSMingming Cao {
3149667eff35SYongqiang Yang ext4_lblk_t eof_block;
3150667eff35SYongqiang Yang ext4_lblk_t ee_block;
3151667eff35SYongqiang Yang struct ext4_extent *ex;
3152667eff35SYongqiang Yang unsigned int ee_len;
3153667eff35SYongqiang Yang int split_flag = 0, depth;
31540031462bSMingming Cao
315521ca087aSDmitry Monakhov ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
315621ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino,
3157e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len);
315821ca087aSDmitry Monakhov
315921ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
316021ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits;
3161e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len)
3162e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len;
31630031462bSMingming Cao /*
316421ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit
316521ca087aSDmitry Monakhov * zeroout only if extent is fully inside i_size or new_size.
316621ca087aSDmitry Monakhov */
3167667eff35SYongqiang Yang depth = ext_depth(inode);
31680031462bSMingming Cao ex = path[depth].p_ext;
3169667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block);
3170667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex);
31710031462bSMingming Cao
3172667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ?
EXT4_EXT_MAY_ZEROOUT : 0; 3173667eff35SYongqiang Yang split_flag |= EXT4_EXT_MARK_UNINIT2; 31740031462bSMingming Cao 3175667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO; 3176667eff35SYongqiang Yang return ext4_split_extent(handle, inode, path, map, split_flag, flags); 31770031462bSMingming Cao } 3178197217a5SYongqiang Yang 3179c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle, 31800031462bSMingming Cao struct inode *inode, 31810031462bSMingming Cao struct ext4_ext_path *path) 31820031462bSMingming Cao { 31830031462bSMingming Cao struct ext4_extent *ex; 31840031462bSMingming Cao int depth; 31850031462bSMingming Cao int err = 0; 31860031462bSMingming Cao 31870031462bSMingming Cao depth = ext_depth(inode); 31880031462bSMingming Cao ex = path[depth].p_ext; 31890031462bSMingming Cao 3190197217a5SYongqiang Yang ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3191197217a5SYongqiang Yang "block %llu, max_blocks %u\n", inode->i_ino, 3192197217a5SYongqiang Yang (unsigned long long)le32_to_cpu(ex->ee_block), 3193197217a5SYongqiang Yang ext4_ext_get_actual_len(ex)); 3194197217a5SYongqiang Yang 31950031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth); 31960031462bSMingming Cao if (err) 31970031462bSMingming Cao goto out; 31980031462bSMingming Cao /* first mark the extent as initialized */ 31990031462bSMingming Cao ext4_ext_mark_initialized(ex); 32000031462bSMingming Cao 3201197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because 3202197217a5SYongqiang Yang * borders are not changed 32030031462bSMingming Cao */ 3204197217a5SYongqiang Yang ext4_ext_try_to_merge(inode, path, ex); 3205197217a5SYongqiang Yang 32060031462bSMingming Cao /* Mark modified extent as dirty */ 32070031462bSMingming Cao err = ext4_ext_dirty(handle, inode, path + depth); 32080031462bSMingming Cao out: 32090031462bSMingming Cao ext4_ext_show_leaf(inode, path); 32100031462bSMingming Cao return err; 32110031462bSMingming Cao } 32120031462bSMingming Cao 3213515f41c3SAneesh Kumar K.V static void unmap_underlying_metadata_blocks(struct block_device *bdev, 3214515f41c3SAneesh Kumar K.V sector_t block, int count) 3215515f41c3SAneesh Kumar K.V { 3216515f41c3SAneesh Kumar K.V int i; 3217515f41c3SAneesh Kumar K.V for (i = 0; i < count; i++) 3218515f41c3SAneesh Kumar K.V unmap_underlying_metadata(bdev, block + i); 3219515f41c3SAneesh Kumar K.V } 3220515f41c3SAneesh Kumar K.V 322158590b06STheodore Ts'o /* 322258590b06STheodore Ts'o * Handle EOFBLOCKS_FL flag, clearing it if necessary 322358590b06STheodore Ts'o */ 322458590b06STheodore Ts'o static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3225d002ebf1SEric Sandeen ext4_lblk_t lblk, 322658590b06STheodore Ts'o struct ext4_ext_path *path, 322758590b06STheodore Ts'o unsigned int len) 322858590b06STheodore Ts'o { 322958590b06STheodore Ts'o int i, depth; 323058590b06STheodore Ts'o struct ext4_extent_header *eh; 323165922cb5SSergey Senozhatsky struct ext4_extent *last_ex; 323258590b06STheodore Ts'o 323358590b06STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 323458590b06STheodore Ts'o return 0; 323558590b06STheodore Ts'o 323658590b06STheodore Ts'o depth = ext_depth(inode); 323758590b06STheodore Ts'o eh = path[depth].p_hdr; 323858590b06STheodore Ts'o 323958590b06STheodore Ts'o if (unlikely(!eh->eh_entries)) { 324058590b06STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and " 324158590b06STheodore Ts'o "EOFBLOCKS_FL set"); 
324258590b06STheodore Ts'o return -EIO; 324358590b06STheodore Ts'o } 324458590b06STheodore Ts'o last_ex = EXT_LAST_EXTENT(eh); 324558590b06STheodore Ts'o /* 324658590b06STheodore Ts'o * We should clear the EOFBLOCKS_FL flag if we are writing the 324758590b06STheodore Ts'o * last block in the last extent in the file. We test this by 324858590b06STheodore Ts'o * first checking to see if the caller to 324958590b06STheodore Ts'o * ext4_ext_get_blocks() was interested in the last block (or 325058590b06STheodore Ts'o * a block beyond the last block) in the current extent. If 325158590b06STheodore Ts'o * this turns out to be false, we can bail out from this 325258590b06STheodore Ts'o * function immediately. 325358590b06STheodore Ts'o */ 3254d002ebf1SEric Sandeen if (lblk + len < le32_to_cpu(last_ex->ee_block) + 325558590b06STheodore Ts'o ext4_ext_get_actual_len(last_ex)) 325658590b06STheodore Ts'o return 0; 325758590b06STheodore Ts'o /* 325858590b06STheodore Ts'o * If the caller does appear to be planning to write at or 325958590b06STheodore Ts'o * beyond the end of the current extent, we then test to see 326058590b06STheodore Ts'o * if the current extent is the last extent in the file, by 326158590b06STheodore Ts'o * checking to make sure it was reached via the rightmost node 326258590b06STheodore Ts'o * at each level of the tree. 326358590b06STheodore Ts'o */ 326458590b06STheodore Ts'o for (i = depth-1; i >= 0; i--) 326558590b06STheodore Ts'o if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 326658590b06STheodore Ts'o return 0; 326758590b06STheodore Ts'o ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 326858590b06STheodore Ts'o return ext4_mark_inode_dirty(handle, inode); 326958590b06STheodore Ts'o } 327058590b06STheodore Ts'o 32717b415bf6SAditya Kali /** 32727b415bf6SAditya Kali * ext4_find_delalloc_range: find delayed allocated block in the given range. 32737b415bf6SAditya Kali * 32747b415bf6SAditya Kali * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns 32757b415bf6SAditya Kali * whether there are any buffers marked for delayed allocation. It returns '1' 32767b415bf6SAditya Kali * on the first delalloc'ed buffer head found. If no buffer head in the given 32777b415bf6SAditya Kali * range is marked for delalloc, it returns 0. 32787b415bf6SAditya Kali * lblk_start should always be <= lblk_end. 32797b415bf6SAditya Kali * search_hint_reverse is to indicate that searching in reverse from lblk_end to 32807b415bf6SAditya Kali * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed 32817b415bf6SAditya Kali * block sooner). This is useful when blocks are truncated sequentially from 32827b415bf6SAditya Kali * lblk_start towards lblk_end. 
32837b415bf6SAditya Kali */ 32847b415bf6SAditya Kali static int ext4_find_delalloc_range(struct inode *inode, 32857b415bf6SAditya Kali ext4_lblk_t lblk_start, 32867b415bf6SAditya Kali ext4_lblk_t lblk_end, 32877b415bf6SAditya Kali int search_hint_reverse) 32887b415bf6SAditya Kali { 32897b415bf6SAditya Kali struct address_space *mapping = inode->i_mapping; 32907b415bf6SAditya Kali struct buffer_head *head, *bh = NULL; 32917b415bf6SAditya Kali struct page *page; 32927b415bf6SAditya Kali ext4_lblk_t i, pg_lblk; 32937b415bf6SAditya Kali pgoff_t index; 32947b415bf6SAditya Kali 32957b415bf6SAditya Kali /* reverse search wont work if fs block size is less than page size */ 32967b415bf6SAditya Kali if (inode->i_blkbits < PAGE_CACHE_SHIFT) 32977b415bf6SAditya Kali search_hint_reverse = 0; 32987b415bf6SAditya Kali 32997b415bf6SAditya Kali if (search_hint_reverse) 33007b415bf6SAditya Kali i = lblk_end; 33017b415bf6SAditya Kali else 33027b415bf6SAditya Kali i = lblk_start; 33037b415bf6SAditya Kali 33047b415bf6SAditya Kali index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 33057b415bf6SAditya Kali 33067b415bf6SAditya Kali while ((i >= lblk_start) && (i <= lblk_end)) { 33077b415bf6SAditya Kali page = find_get_page(mapping, index); 33085356f261SAditya Kali if (!page) 33097b415bf6SAditya Kali goto nextpage; 33107b415bf6SAditya Kali 33117b415bf6SAditya Kali if (!page_has_buffers(page)) 33127b415bf6SAditya Kali goto nextpage; 33137b415bf6SAditya Kali 33147b415bf6SAditya Kali head = page_buffers(page); 33157b415bf6SAditya Kali if (!head) 33167b415bf6SAditya Kali goto nextpage; 33177b415bf6SAditya Kali 33187b415bf6SAditya Kali bh = head; 33197b415bf6SAditya Kali pg_lblk = index << (PAGE_CACHE_SHIFT - 33207b415bf6SAditya Kali inode->i_blkbits); 33217b415bf6SAditya Kali do { 33227b415bf6SAditya Kali if (unlikely(pg_lblk < lblk_start)) { 33237b415bf6SAditya Kali /* 33247b415bf6SAditya Kali * This is possible when fs block size is less 33257b415bf6SAditya Kali * than page size and our cluster starts/ends in 33267b415bf6SAditya Kali * middle of the page. So we need to skip the 33277b415bf6SAditya Kali * initial few blocks till we reach the 'lblk' 33287b415bf6SAditya Kali */ 33297b415bf6SAditya Kali pg_lblk++; 33307b415bf6SAditya Kali continue; 33317b415bf6SAditya Kali } 33327b415bf6SAditya Kali 33335356f261SAditya Kali /* Check if the buffer is delayed allocated and that it 33345356f261SAditya Kali * is not yet mapped. (when da-buffers are mapped during 33355356f261SAditya Kali * their writeout, their da_mapped bit is set.) 33365356f261SAditya Kali */ 33375356f261SAditya Kali if (buffer_delay(bh) && !buffer_da_mapped(bh)) { 33387b415bf6SAditya Kali page_cache_release(page); 3339d8990240SAditya Kali trace_ext4_find_delalloc_range(inode, 3340d8990240SAditya Kali lblk_start, lblk_end, 3341d8990240SAditya Kali search_hint_reverse, 3342d8990240SAditya Kali 1, i); 33437b415bf6SAditya Kali return 1; 33447b415bf6SAditya Kali } 33457b415bf6SAditya Kali if (search_hint_reverse) 33467b415bf6SAditya Kali i--; 33477b415bf6SAditya Kali else 33487b415bf6SAditya Kali i++; 33497b415bf6SAditya Kali } while ((i >= lblk_start) && (i <= lblk_end) && 33507b415bf6SAditya Kali ((bh = bh->b_this_page) != head)); 33517b415bf6SAditya Kali nextpage: 33527b415bf6SAditya Kali if (page) 33537b415bf6SAditya Kali page_cache_release(page); 33547b415bf6SAditya Kali /* 33557b415bf6SAditya Kali * Move to next page. 'i' will be the first lblk in the next 33567b415bf6SAditya Kali * page. 
33577b415bf6SAditya Kali */ 33587b415bf6SAditya Kali if (search_hint_reverse) 33597b415bf6SAditya Kali index--; 33607b415bf6SAditya Kali else 33617b415bf6SAditya Kali index++; 33627b415bf6SAditya Kali i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 33637b415bf6SAditya Kali } 33647b415bf6SAditya Kali 3365d8990240SAditya Kali trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end, 3366d8990240SAditya Kali search_hint_reverse, 0, 0); 33677b415bf6SAditya Kali return 0; 33687b415bf6SAditya Kali } 33697b415bf6SAditya Kali 33707b415bf6SAditya Kali int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk, 33717b415bf6SAditya Kali int search_hint_reverse) 33727b415bf6SAditya Kali { 33737b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 33747b415bf6SAditya Kali ext4_lblk_t lblk_start, lblk_end; 33757b415bf6SAditya Kali lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); 33767b415bf6SAditya Kali lblk_end = lblk_start + sbi->s_cluster_ratio - 1; 33777b415bf6SAditya Kali 33787b415bf6SAditya Kali return ext4_find_delalloc_range(inode, lblk_start, lblk_end, 33797b415bf6SAditya Kali search_hint_reverse); 33807b415bf6SAditya Kali } 33817b415bf6SAditya Kali 33827b415bf6SAditya Kali /** 33837b415bf6SAditya Kali * Determines how many complete clusters (out of those specified by the 'map') 33847b415bf6SAditya Kali * are under delalloc and were reserved quota for. 33857b415bf6SAditya Kali * This function is called when we are writing out the blocks that were 33867b415bf6SAditya Kali * originally written with their allocation delayed, but then the space was 33877b415bf6SAditya Kali * allocated using fallocate() before the delayed allocation could be resolved. 33887b415bf6SAditya Kali * The cases to look for are: 33897b415bf6SAditya Kali * ('=' indicated delayed allocated blocks 33907b415bf6SAditya Kali * '-' indicates non-delayed allocated blocks) 33917b415bf6SAditya Kali * (a) partial clusters towards beginning and/or end outside of allocated range 33927b415bf6SAditya Kali * are not delalloc'ed. 33937b415bf6SAditya Kali * Ex: 33947b415bf6SAditya Kali * |----c---=|====c====|====c====|===-c----| 33957b415bf6SAditya Kali * |++++++ allocated ++++++| 33967b415bf6SAditya Kali * ==> 4 complete clusters in above example 33977b415bf6SAditya Kali * 33987b415bf6SAditya Kali * (b) partial cluster (outside of allocated range) towards either end is 33997b415bf6SAditya Kali * marked for delayed allocation. In this case, we will exclude that 34007b415bf6SAditya Kali * cluster. 34017b415bf6SAditya Kali * Ex: 34027b415bf6SAditya Kali * |----====c========|========c========| 34037b415bf6SAditya Kali * |++++++ allocated ++++++| 34047b415bf6SAditya Kali * ==> 1 complete clusters in above example 34057b415bf6SAditya Kali * 34067b415bf6SAditya Kali * Ex: 34077b415bf6SAditya Kali * |================c================| 34087b415bf6SAditya Kali * |++++++ allocated ++++++| 34097b415bf6SAditya Kali * ==> 0 complete clusters in above example 34107b415bf6SAditya Kali * 34117b415bf6SAditya Kali * The ext4_da_update_reserve_space will be called only if we 34127b415bf6SAditya Kali * determine here that there were some "entire" clusters that span 34137b415bf6SAditya Kali * this 'allocated' range. 34147b415bf6SAditya Kali * In the non-bigalloc case, this function will just end up returning num_blks 34157b415bf6SAditya Kali * without ever calling ext4_find_delalloc_range. 
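 *
 * (With the default, non-bigalloc layout s_cluster_ratio is 1, so every
 * block is its own cluster: both c_offset checks below come out zero and
 * the function simply returns num_blks.)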
34167b415bf6SAditya Kali */ 34177b415bf6SAditya Kali static unsigned int 34187b415bf6SAditya Kali get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, 34197b415bf6SAditya Kali unsigned int num_blks) 34207b415bf6SAditya Kali { 34217b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 34227b415bf6SAditya Kali ext4_lblk_t alloc_cluster_start, alloc_cluster_end; 34237b415bf6SAditya Kali ext4_lblk_t lblk_from, lblk_to, c_offset; 34247b415bf6SAditya Kali unsigned int allocated_clusters = 0; 34257b415bf6SAditya Kali 34267b415bf6SAditya Kali alloc_cluster_start = EXT4_B2C(sbi, lblk_start); 34277b415bf6SAditya Kali alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1); 34287b415bf6SAditya Kali 34297b415bf6SAditya Kali /* max possible clusters for this allocation */ 34307b415bf6SAditya Kali allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1; 34317b415bf6SAditya Kali 3432d8990240SAditya Kali trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); 3433d8990240SAditya Kali 34347b415bf6SAditya Kali /* Check towards left side */ 34357b415bf6SAditya Kali c_offset = lblk_start & (sbi->s_cluster_ratio - 1); 34367b415bf6SAditya Kali if (c_offset) { 34377b415bf6SAditya Kali lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); 34387b415bf6SAditya Kali lblk_to = lblk_from + c_offset - 1; 34397b415bf6SAditya Kali 34407b415bf6SAditya Kali if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0)) 34417b415bf6SAditya Kali allocated_clusters--; 34427b415bf6SAditya Kali } 34437b415bf6SAditya Kali 34447b415bf6SAditya Kali /* Now check towards right. */ 34457b415bf6SAditya Kali c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); 34467b415bf6SAditya Kali if (allocated_clusters && c_offset) { 34477b415bf6SAditya Kali lblk_from = lblk_start + num_blks; 34487b415bf6SAditya Kali lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; 34497b415bf6SAditya Kali 34507b415bf6SAditya Kali if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0)) 34517b415bf6SAditya Kali allocated_clusters--; 34527b415bf6SAditya Kali } 34537b415bf6SAditya Kali 34547b415bf6SAditya Kali return allocated_clusters; 34557b415bf6SAditya Kali } 34567b415bf6SAditya Kali 34570031462bSMingming Cao static int 34580031462bSMingming Cao ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, 3459e35fd660STheodore Ts'o struct ext4_map_blocks *map, 34600031462bSMingming Cao struct ext4_ext_path *path, int flags, 3461e35fd660STheodore Ts'o unsigned int allocated, ext4_fsblk_t newblock) 34620031462bSMingming Cao { 34630031462bSMingming Cao int ret = 0; 34640031462bSMingming Cao int err = 0; 34658d5d02e6SMingming Cao ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 34660031462bSMingming Cao 34670031462bSMingming Cao ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical" 34680031462bSMingming Cao "block %llu, max_blocks %u, flags %d, allocated %u", 3469e35fd660STheodore Ts'o inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, 34700031462bSMingming Cao flags, allocated); 34710031462bSMingming Cao ext4_ext_show_leaf(inode, path); 34720031462bSMingming Cao 3473d8990240SAditya Kali trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated, 3474d8990240SAditya Kali newblock); 3475d8990240SAditya Kali 3476c7064ef1SJiaying Zhang /* get_block() before submit the IO, split the extent */ 3477744692dcSJiaying Zhang if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3478e35fd660STheodore Ts'o ret = ext4_split_unwritten_extents(handle, inode, map, 
3479e35fd660STheodore Ts'o path, flags); 34805f524950SMingming /* 34815f524950SMingming * Flag the inode(non aio case) or end_io struct (aio case) 348225985edcSLucas De Marchi * that this IO needs to conversion to written when IO is 34835f524950SMingming * completed 34845f524950SMingming */ 3485b3ff0569STao Ma if (io) { 3486b3ff0569STao Ma if (!(io->flag & EXT4_IO_END_UNWRITTEN)) { 3487bd2d0210STheodore Ts'o io->flag = EXT4_IO_END_UNWRITTEN; 3488e9e3bcecSEric Sandeen atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); 3489b3ff0569STao Ma } 3490e9e3bcecSEric Sandeen } else 349119f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3492744692dcSJiaying Zhang if (ext4_should_dioread_nolock(inode)) 3493e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNINIT; 34940031462bSMingming Cao goto out; 34950031462bSMingming Cao } 3496c7064ef1SJiaying Zhang /* IO end_io complete, convert the filled extent to written */ 3497744692dcSJiaying Zhang if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 3498c7064ef1SJiaying Zhang ret = ext4_convert_unwritten_extents_endio(handle, inode, 34990031462bSMingming Cao path); 350058590b06STheodore Ts'o if (ret >= 0) { 3501b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 3502d002ebf1SEric Sandeen err = check_eofblocks_fl(handle, inode, map->m_lblk, 3503d002ebf1SEric Sandeen path, map->m_len); 350458590b06STheodore Ts'o } else 350558590b06STheodore Ts'o err = ret; 35060031462bSMingming Cao goto out2; 35070031462bSMingming Cao } 35080031462bSMingming Cao /* buffered IO case */ 35090031462bSMingming Cao /* 35100031462bSMingming Cao * repeat fallocate creation request 35110031462bSMingming Cao * we already have an unwritten extent 35120031462bSMingming Cao */ 35130031462bSMingming Cao if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) 35140031462bSMingming Cao goto map_out; 35150031462bSMingming Cao 35160031462bSMingming Cao /* buffered READ or buffered write_begin() lookup */ 35170031462bSMingming Cao if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 35180031462bSMingming Cao /* 35190031462bSMingming Cao * We have blocks reserved already. We 35200031462bSMingming Cao * return allocated blocks so that delalloc 35210031462bSMingming Cao * won't do block reservation for us. But 35220031462bSMingming Cao * the buffer head will be unmapped so that 35230031462bSMingming Cao * a read from the block returns 0s. 35240031462bSMingming Cao */ 3525e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNWRITTEN; 35260031462bSMingming Cao goto out1; 35270031462bSMingming Cao } 35280031462bSMingming Cao 35290031462bSMingming Cao /* buffered write, writepage time, convert*/ 3530e35fd660STheodore Ts'o ret = ext4_ext_convert_to_initialized(handle, inode, map, path); 3531a4e5d88bSDmitry Monakhov if (ret >= 0) 3532b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1); 35330031462bSMingming Cao out: 35340031462bSMingming Cao if (ret <= 0) { 35350031462bSMingming Cao err = ret; 35360031462bSMingming Cao goto out2; 35370031462bSMingming Cao } else 35380031462bSMingming Cao allocated = ret; 3539e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW; 3540515f41c3SAneesh Kumar K.V /* 3541515f41c3SAneesh Kumar K.V * if we allocated more blocks than requested 3542515f41c3SAneesh Kumar K.V * we need to make sure we unmap the extra block 3543515f41c3SAneesh Kumar K.V * allocated. The actual needed block will get 3544515f41c3SAneesh Kumar K.V * unmapped later when we find the buffer_head marked 3545515f41c3SAneesh Kumar K.V * new. 
3546515f41c3SAneesh Kumar K.V */ 3547e35fd660STheodore Ts'o if (allocated > map->m_len) { 3548515f41c3SAneesh Kumar K.V unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, 3549e35fd660STheodore Ts'o newblock + map->m_len, 3550e35fd660STheodore Ts'o allocated - map->m_len); 3551e35fd660STheodore Ts'o allocated = map->m_len; 3552515f41c3SAneesh Kumar K.V } 35535f634d06SAneesh Kumar K.V 35545f634d06SAneesh Kumar K.V /* 35555f634d06SAneesh Kumar K.V * If we have done fallocate with the offset that is already 35565f634d06SAneesh Kumar K.V * delayed allocated, we would have block reservation 35575f634d06SAneesh Kumar K.V * and quota reservation done in the delayed write path. 35585f634d06SAneesh Kumar K.V * But fallocate would have already updated quota and block 35595f634d06SAneesh Kumar K.V * count for this offset. So cancel these reservation 35605f634d06SAneesh Kumar K.V */ 35617b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { 35627b415bf6SAditya Kali unsigned int reserved_clusters; 35637b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode, 35647b415bf6SAditya Kali map->m_lblk, map->m_len); 35657b415bf6SAditya Kali if (reserved_clusters) 35667b415bf6SAditya Kali ext4_da_update_reserve_space(inode, 35677b415bf6SAditya Kali reserved_clusters, 35687b415bf6SAditya Kali 0); 35697b415bf6SAditya Kali } 35705f634d06SAneesh Kumar K.V 35710031462bSMingming Cao map_out: 3572e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED; 3573a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { 3574a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, path, 3575a4e5d88bSDmitry Monakhov map->m_len); 3576a4e5d88bSDmitry Monakhov if (err < 0) 3577a4e5d88bSDmitry Monakhov goto out2; 3578a4e5d88bSDmitry Monakhov } 35790031462bSMingming Cao out1: 3580e35fd660STheodore Ts'o if (allocated > map->m_len) 3581e35fd660STheodore Ts'o allocated = map->m_len; 35820031462bSMingming Cao ext4_ext_show_leaf(inode, path); 3583e35fd660STheodore Ts'o map->m_pblk = newblock; 3584e35fd660STheodore Ts'o map->m_len = allocated; 35850031462bSMingming Cao out2: 35860031462bSMingming Cao if (path) { 35870031462bSMingming Cao ext4_ext_drop_refs(path); 35880031462bSMingming Cao kfree(path); 35890031462bSMingming Cao } 35900031462bSMingming Cao return err ? err : allocated; 35910031462bSMingming Cao } 359258590b06STheodore Ts'o 35930031462bSMingming Cao /* 35944d33b1efSTheodore Ts'o * get_implied_cluster_alloc - check to see if the requested 35954d33b1efSTheodore Ts'o * allocation (in the map structure) overlaps with a cluster already 35964d33b1efSTheodore Ts'o * allocated in an extent. 3597d8990240SAditya Kali * @sb The filesystem superblock structure 35984d33b1efSTheodore Ts'o * @map The requested lblk->pblk mapping 35994d33b1efSTheodore Ts'o * @ex The extent structure which might contain an implied 36004d33b1efSTheodore Ts'o * cluster allocation 36014d33b1efSTheodore Ts'o * 36024d33b1efSTheodore Ts'o * This function is called by ext4_ext_map_blocks() after we failed to 36034d33b1efSTheodore Ts'o * find blocks that were already in the inode's extent tree. Hence, 36044d33b1efSTheodore Ts'o * we know that the beginning of the requested region cannot overlap 36054d33b1efSTheodore Ts'o * the extent from the inode's extent tree. There are three cases we 36064d33b1efSTheodore Ts'o * want to catch. 
The first is this case: 36074d33b1efSTheodore Ts'o * 36084d33b1efSTheodore Ts'o * |--- cluster # N--| 36094d33b1efSTheodore Ts'o * |--- extent ---| |---- requested region ---| 36104d33b1efSTheodore Ts'o * |==========| 36114d33b1efSTheodore Ts'o * 36124d33b1efSTheodore Ts'o * The second case that we need to test for is this one: 36134d33b1efSTheodore Ts'o * 36144d33b1efSTheodore Ts'o * |--------- cluster # N ----------------| 36154d33b1efSTheodore Ts'o * |--- requested region --| |------- extent ----| 36164d33b1efSTheodore Ts'o * |=======================| 36174d33b1efSTheodore Ts'o * 36184d33b1efSTheodore Ts'o * The third case is when the requested region lies between two extents 36194d33b1efSTheodore Ts'o * within the same cluster: 36204d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 36214d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 36224d33b1efSTheodore Ts'o * |------ requested region ------| 36234d33b1efSTheodore Ts'o * |================| 36244d33b1efSTheodore Ts'o * 36254d33b1efSTheodore Ts'o * In each of the above cases, we need to set the map->m_pblk and 36264d33b1efSTheodore Ts'o * map->m_len so it corresponds to the return the extent labelled as 36274d33b1efSTheodore Ts'o * "|====|" from cluster #N, since it is already in use for data in 36284d33b1efSTheodore Ts'o * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to 36294d33b1efSTheodore Ts'o * signal to ext4_ext_map_blocks() that map->m_pblk should be treated 36304d33b1efSTheodore Ts'o * as a new "allocated" block region. Otherwise, we will return 0 and 36314d33b1efSTheodore Ts'o * ext4_ext_map_blocks() will then allocate one or more new clusters 36324d33b1efSTheodore Ts'o * by calling ext4_mb_new_blocks(). 36334d33b1efSTheodore Ts'o */ 3634d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb, 36354d33b1efSTheodore Ts'o struct ext4_map_blocks *map, 36364d33b1efSTheodore Ts'o struct ext4_extent *ex, 36374d33b1efSTheodore Ts'o struct ext4_ext_path *path) 36384d33b1efSTheodore Ts'o { 3639d8990240SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(sb); 36404d33b1efSTheodore Ts'o ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 36414d33b1efSTheodore Ts'o ext4_lblk_t ex_cluster_start, ex_cluster_end; 36424d33b1efSTheodore Ts'o ext4_lblk_t rr_cluster_start, rr_cluster_end; 36434d33b1efSTheodore Ts'o ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 36444d33b1efSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 36454d33b1efSTheodore Ts'o unsigned short ee_len = ext4_ext_get_actual_len(ex); 36464d33b1efSTheodore Ts'o 36474d33b1efSTheodore Ts'o /* The extent passed in that we are trying to match */ 36484d33b1efSTheodore Ts'o ex_cluster_start = EXT4_B2C(sbi, ee_block); 36494d33b1efSTheodore Ts'o ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1); 36504d33b1efSTheodore Ts'o 36514d33b1efSTheodore Ts'o /* The requested region passed into ext4_map_blocks() */ 36524d33b1efSTheodore Ts'o rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); 36534d33b1efSTheodore Ts'o rr_cluster_end = EXT4_B2C(sbi, map->m_lblk + map->m_len - 1); 36544d33b1efSTheodore Ts'o 36554d33b1efSTheodore Ts'o if ((rr_cluster_start == ex_cluster_end) || 36564d33b1efSTheodore Ts'o (rr_cluster_start == ex_cluster_start)) { 36574d33b1efSTheodore Ts'o if (rr_cluster_start == ex_cluster_end) 36584d33b1efSTheodore Ts'o ee_start += ee_len - 1; 36594d33b1efSTheodore Ts'o map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + 36604d33b1efSTheodore Ts'o c_offset; 36614d33b1efSTheodore Ts'o map->m_len 
= min(map->m_len, 36624d33b1efSTheodore Ts'o (unsigned) sbi->s_cluster_ratio - c_offset); 36634d33b1efSTheodore Ts'o /* 36644d33b1efSTheodore Ts'o * Check for and handle this case: 36654d33b1efSTheodore Ts'o * 36664d33b1efSTheodore Ts'o * |--------- cluster # N-------------| 36674d33b1efSTheodore Ts'o * |------- extent ----| 36684d33b1efSTheodore Ts'o * |--- requested region ---| 36694d33b1efSTheodore Ts'o * |===========| 36704d33b1efSTheodore Ts'o */ 36714d33b1efSTheodore Ts'o 36724d33b1efSTheodore Ts'o if (map->m_lblk < ee_block) 36734d33b1efSTheodore Ts'o map->m_len = min(map->m_len, ee_block - map->m_lblk); 36744d33b1efSTheodore Ts'o 36754d33b1efSTheodore Ts'o /* 36764d33b1efSTheodore Ts'o * Check for the case where there is already another allocated 36774d33b1efSTheodore Ts'o * block to the right of 'ex' but before the end of the cluster. 36784d33b1efSTheodore Ts'o * 36794d33b1efSTheodore Ts'o * |------------- cluster # N-------------| 36804d33b1efSTheodore Ts'o * |----- ex -----| |---- ex_right ----| 36814d33b1efSTheodore Ts'o * |------ requested region ------| 36824d33b1efSTheodore Ts'o * |================| 36834d33b1efSTheodore Ts'o */ 36844d33b1efSTheodore Ts'o if (map->m_lblk > ee_block) { 36854d33b1efSTheodore Ts'o ext4_lblk_t next = ext4_ext_next_allocated_block(path); 36864d33b1efSTheodore Ts'o map->m_len = min(map->m_len, next - map->m_lblk); 36874d33b1efSTheodore Ts'o } 3688d8990240SAditya Kali 3689d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); 36904d33b1efSTheodore Ts'o return 1; 36914d33b1efSTheodore Ts'o } 3692d8990240SAditya Kali 3693d8990240SAditya Kali trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); 36944d33b1efSTheodore Ts'o return 0; 36954d33b1efSTheodore Ts'o } 36964d33b1efSTheodore Ts'o 36974d33b1efSTheodore Ts'o 36984d33b1efSTheodore Ts'o /* 3699f5ab0d1fSMingming Cao * Block allocation/map/preallocation routine for extents based files 3700f5ab0d1fSMingming Cao * 3701f5ab0d1fSMingming Cao * 3702c278bfecSAneesh Kumar K.V * Need to be called with 37030e855ac8SAneesh Kumar K.V * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block 37040e855ac8SAneesh Kumar K.V * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem) 3705f5ab0d1fSMingming Cao * 3706f5ab0d1fSMingming Cao * return > 0, number of of blocks already mapped/allocated 3707f5ab0d1fSMingming Cao * if create == 0 and these are pre-allocated blocks 3708f5ab0d1fSMingming Cao * buffer head is unmapped 3709f5ab0d1fSMingming Cao * otherwise blocks are mapped 3710f5ab0d1fSMingming Cao * 3711f5ab0d1fSMingming Cao * return = 0, if plain look up failed (blocks have not been allocated) 3712f5ab0d1fSMingming Cao * buffer head is unmapped 3713f5ab0d1fSMingming Cao * 3714f5ab0d1fSMingming Cao * return < 0, error case. 
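 *
 * A rough sketch of how a caller is expected to pair the return value with
 * the map fields (illustrative only, not a caller from this file):
 *
 *	ret = ext4_ext_map_blocks(handle, inode, &map, flags);
 *	if (ret > 0)	ret blocks are described by map->m_pblk/m_len/m_flags
 *	if (ret == 0)	hole; nothing was allocated (create == 0)
 *	if (ret < 0)	error, e.g. -EIO on a corrupted extent tree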
3715c278bfecSAneesh Kumar K.V */ 3716e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 3717e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 3718a86c6181SAlex Tomas { 3719a86c6181SAlex Tomas struct ext4_ext_path *path = NULL; 37204d33b1efSTheodore Ts'o struct ext4_extent newex, *ex, *ex2; 37214d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 37220562e0baSJiaying Zhang ext4_fsblk_t newblock = 0; 37234d33b1efSTheodore Ts'o int free_on_err = 0, err = 0, depth, ret; 37244d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0; 37257b415bf6SAditya Kali unsigned int allocated_clusters = 0, reserved_clusters = 0; 3726e861304bSAllison Henderson unsigned int punched_out = 0; 3727e861304bSAllison Henderson unsigned int result = 0; 3728c9de560dSAlex Tomas struct ext4_allocation_request ar; 37298d5d02e6SMingming Cao ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 37304d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset; 3731e861304bSAllison Henderson struct ext4_map_blocks punch_map; 3732a86c6181SAlex Tomas 373384fe3befSMingming ext_debug("blocks %u/%u requested for inode %lu\n", 3734e35fd660STheodore Ts'o map->m_lblk, map->m_len, inode->i_ino); 37350562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 3736a86c6181SAlex Tomas 3737a86c6181SAlex Tomas /* check in cache */ 3738015861baSRobin Dong if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) && 3739015861baSRobin Dong ext4_ext_in_cache(inode, map->m_lblk, &newex)) { 3740b05e6ae5STheodore Ts'o if (!newex.ee_start_lo && !newex.ee_start_hi) { 37417b415bf6SAditya Kali if ((sbi->s_cluster_ratio > 1) && 37427b415bf6SAditya Kali ext4_find_delalloc_cluster(inode, map->m_lblk, 0)) 37437b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 37447b415bf6SAditya Kali 3745c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 374656055d3aSAmit Arora /* 374756055d3aSAmit Arora * block isn't allocated yet and 374856055d3aSAmit Arora * user doesn't want to allocate it 374956055d3aSAmit Arora */ 3750a86c6181SAlex Tomas goto out2; 3751a86c6181SAlex Tomas } 3752a86c6181SAlex Tomas /* we should allocate requested block */ 3753b05e6ae5STheodore Ts'o } else { 3754a86c6181SAlex Tomas /* block is already allocated */ 37557b415bf6SAditya Kali if (sbi->s_cluster_ratio > 1) 37567b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 3757e35fd660STheodore Ts'o newblock = map->m_lblk 3758a86c6181SAlex Tomas - le32_to_cpu(newex.ee_block) 3759bf89d16fSTheodore Ts'o + ext4_ext_pblock(&newex); 3760d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 3761b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex) - 3762e35fd660STheodore Ts'o (map->m_lblk - le32_to_cpu(newex.ee_block)); 3763a86c6181SAlex Tomas goto out; 3764a86c6181SAlex Tomas } 3765a86c6181SAlex Tomas } 3766a86c6181SAlex Tomas 3767a86c6181SAlex Tomas /* find extent for this block */ 3768e35fd660STheodore Ts'o path = ext4_ext_find_extent(inode, map->m_lblk, NULL); 3769a86c6181SAlex Tomas if (IS_ERR(path)) { 3770a86c6181SAlex Tomas err = PTR_ERR(path); 3771a86c6181SAlex Tomas path = NULL; 3772a86c6181SAlex Tomas goto out2; 3773a86c6181SAlex Tomas } 3774a86c6181SAlex Tomas 3775a86c6181SAlex Tomas depth = ext_depth(inode); 3776a86c6181SAlex Tomas 3777a86c6181SAlex Tomas /* 3778d0d856e8SRandy Dunlap * consistent leaf must not be empty; 3779d0d856e8SRandy Dunlap * this situation is possible, though, _during_ tree modification; 3780a86c6181SAlex Tomas * this is why 
assert can't be put in ext4_ext_find_extent() 3781a86c6181SAlex Tomas */ 3782273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 3783273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address " 3784f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld", 3785f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth, 3786f70f362bSTheodore Ts'o path[depth].p_block); 3787034fb4c9SSurbhi Palande err = -EIO; 3788034fb4c9SSurbhi Palande goto out2; 3789034fb4c9SSurbhi Palande } 3790a86c6181SAlex Tomas 37917e028976SAvantika Mathur ex = path[depth].p_ext; 37927e028976SAvantika Mathur if (ex) { 3793725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 3794bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 3795a2df2a63SAmit Arora unsigned short ee_len; 3796471d4011SSuparna Bhattacharya 3797471d4011SSuparna Bhattacharya /* 3798471d4011SSuparna Bhattacharya * Uninitialized extents are treated as holes, except that 379956055d3aSAmit Arora * we split out initialized portions during a write. 3800471d4011SSuparna Bhattacharya */ 3801a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex); 3802d8990240SAditya Kali 3803d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 3804d8990240SAditya Kali 3805d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */ 3806e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) { 38070aa06000STheodore Ts'o ext4_fsblk_t partial_cluster = 0; 38080aa06000STheodore Ts'o 3809e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start; 3810d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 3811e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 3812e35fd660STheodore Ts'o ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 3813a86c6181SAlex Tomas ee_block, ee_len, newblock); 381456055d3aSAmit Arora 3815e861304bSAllison Henderson if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) { 3816e861304bSAllison Henderson /* 3817e861304bSAllison Henderson * Do not put uninitialized extent 3818e861304bSAllison Henderson * in the cache 3819e861304bSAllison Henderson */ 382056055d3aSAmit Arora if (!ext4_ext_is_uninitialized(ex)) { 3821a2df2a63SAmit Arora ext4_ext_put_in_cache(inode, ee_block, 3822b05e6ae5STheodore Ts'o ee_len, ee_start); 3823a86c6181SAlex Tomas goto out; 3824a86c6181SAlex Tomas } 3825e861304bSAllison Henderson ret = ext4_ext_handle_uninitialized_extents( 3826e861304bSAllison Henderson handle, inode, map, path, flags, 3827e861304bSAllison Henderson allocated, newblock); 38280031462bSMingming Cao return ret; 382956055d3aSAmit Arora } 3830e861304bSAllison Henderson 3831e861304bSAllison Henderson /* 3832e861304bSAllison Henderson * Punch out the map length, but only to the 3833e861304bSAllison Henderson * end of the extent 3834e861304bSAllison Henderson */ 3835e861304bSAllison Henderson punched_out = allocated < map->m_len ? 
3836e861304bSAllison Henderson allocated : map->m_len; 3837e861304bSAllison Henderson 3838e861304bSAllison Henderson /* 3839e861304bSAllison Henderson * Sense extents need to be converted to 3840e861304bSAllison Henderson * uninitialized, they must fit in an 3841e861304bSAllison Henderson * uninitialized extent 3842e861304bSAllison Henderson */ 3843e861304bSAllison Henderson if (punched_out > EXT_UNINIT_MAX_LEN) 3844e861304bSAllison Henderson punched_out = EXT_UNINIT_MAX_LEN; 3845e861304bSAllison Henderson 3846e861304bSAllison Henderson punch_map.m_lblk = map->m_lblk; 3847e861304bSAllison Henderson punch_map.m_pblk = newblock; 3848e861304bSAllison Henderson punch_map.m_len = punched_out; 3849e861304bSAllison Henderson punch_map.m_flags = 0; 3850e861304bSAllison Henderson 3851e861304bSAllison Henderson /* Check to see if the extent needs to be split */ 3852e861304bSAllison Henderson if (punch_map.m_len != ee_len || 3853e861304bSAllison Henderson punch_map.m_lblk != ee_block) { 3854e861304bSAllison Henderson 3855e861304bSAllison Henderson ret = ext4_split_extent(handle, inode, 3856e861304bSAllison Henderson path, &punch_map, 0, 3857e861304bSAllison Henderson EXT4_GET_BLOCKS_PUNCH_OUT_EXT | 3858e861304bSAllison Henderson EXT4_GET_BLOCKS_PRE_IO); 3859e861304bSAllison Henderson 3860e861304bSAllison Henderson if (ret < 0) { 3861e861304bSAllison Henderson err = ret; 3862e861304bSAllison Henderson goto out2; 3863e861304bSAllison Henderson } 3864e861304bSAllison Henderson /* 3865e861304bSAllison Henderson * find extent for the block at 3866e861304bSAllison Henderson * the start of the hole 3867e861304bSAllison Henderson */ 3868e861304bSAllison Henderson ext4_ext_drop_refs(path); 3869e861304bSAllison Henderson kfree(path); 3870e861304bSAllison Henderson 3871e861304bSAllison Henderson path = ext4_ext_find_extent(inode, 3872e861304bSAllison Henderson map->m_lblk, NULL); 3873e861304bSAllison Henderson if (IS_ERR(path)) { 3874e861304bSAllison Henderson err = PTR_ERR(path); 3875e861304bSAllison Henderson path = NULL; 3876e861304bSAllison Henderson goto out2; 3877e861304bSAllison Henderson } 3878e861304bSAllison Henderson 3879e861304bSAllison Henderson depth = ext_depth(inode); 3880e861304bSAllison Henderson ex = path[depth].p_ext; 3881e861304bSAllison Henderson ee_len = ext4_ext_get_actual_len(ex); 3882e861304bSAllison Henderson ee_block = le32_to_cpu(ex->ee_block); 3883e861304bSAllison Henderson ee_start = ext4_ext_pblock(ex); 3884e861304bSAllison Henderson 3885e861304bSAllison Henderson } 3886e861304bSAllison Henderson 3887e861304bSAllison Henderson ext4_ext_mark_uninitialized(ex); 3888e861304bSAllison Henderson 3889f7d0d379SAllison Henderson ext4_ext_invalidate_cache(inode); 3890f7d0d379SAllison Henderson 3891f7d0d379SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 38920aa06000STheodore Ts'o &partial_cluster, map->m_lblk, 38930aa06000STheodore Ts'o map->m_lblk + punched_out); 3894f7d0d379SAllison Henderson 3895f7d0d379SAllison Henderson if (!err && path->p_hdr->eh_entries == 0) { 3896f7d0d379SAllison Henderson /* 3897f7d0d379SAllison Henderson * Punch hole freed all of this sub tree, 3898f7d0d379SAllison Henderson * so we need to correct eh_depth 3899f7d0d379SAllison Henderson */ 3900f7d0d379SAllison Henderson err = ext4_ext_get_access(handle, inode, path); 3901f7d0d379SAllison Henderson if (err == 0) { 3902f7d0d379SAllison Henderson ext_inode_hdr(inode)->eh_depth = 0; 3903f7d0d379SAllison Henderson ext_inode_hdr(inode)->eh_max = 3904f7d0d379SAllison Henderson 
cpu_to_le16(ext4_ext_space_root( 3905f7d0d379SAllison Henderson inode, 0)); 3906f7d0d379SAllison Henderson 3907f7d0d379SAllison Henderson err = ext4_ext_dirty( 3908f7d0d379SAllison Henderson handle, inode, path); 3909f7d0d379SAllison Henderson } 3910f7d0d379SAllison Henderson } 3911e861304bSAllison Henderson 3912e861304bSAllison Henderson goto out2; 3913e861304bSAllison Henderson } 3914a86c6181SAlex Tomas } 3915a86c6181SAlex Tomas 39167b415bf6SAditya Kali if ((sbi->s_cluster_ratio > 1) && 39177b415bf6SAditya Kali ext4_find_delalloc_cluster(inode, map->m_lblk, 0)) 39187b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 39197b415bf6SAditya Kali 3920a86c6181SAlex Tomas /* 3921d0d856e8SRandy Dunlap * requested block isn't allocated yet; 3922a86c6181SAlex Tomas * we couldn't try to create block if create flag is zero 3923a86c6181SAlex Tomas */ 3924c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 392556055d3aSAmit Arora /* 392656055d3aSAmit Arora * put just found gap into cache to speed up 392756055d3aSAmit Arora * subsequent requests 392856055d3aSAmit Arora */ 3929e35fd660STheodore Ts'o ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); 3930a86c6181SAlex Tomas goto out2; 3931a86c6181SAlex Tomas } 39324d33b1efSTheodore Ts'o 3933a86c6181SAlex Tomas /* 3934c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation. 3935a86c6181SAlex Tomas */ 39367b415bf6SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 39374d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk); 39384d33b1efSTheodore Ts'o cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 39394d33b1efSTheodore Ts'o 39404d33b1efSTheodore Ts'o /* 39414d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned 39424d33b1efSTheodore Ts'o * by ext4_ext_find_extent() implies a cluster we can use. 39434d33b1efSTheodore Ts'o */ 39444d33b1efSTheodore Ts'o if (cluster_offset && ex && 3945d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 39464d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 39474d33b1efSTheodore Ts'o newblock = map->m_pblk; 39487b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 39494d33b1efSTheodore Ts'o goto got_allocated_blocks; 39504d33b1efSTheodore Ts'o } 3951a86c6181SAlex Tomas 3952c9de560dSAlex Tomas /* find neighbour allocated blocks */ 3953e35fd660STheodore Ts'o ar.lleft = map->m_lblk; 3954c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 3955c9de560dSAlex Tomas if (err) 3956c9de560dSAlex Tomas goto out2; 3957e35fd660STheodore Ts'o ar.lright = map->m_lblk; 39584d33b1efSTheodore Ts'o ex2 = NULL; 39594d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 3960c9de560dSAlex Tomas if (err) 3961c9de560dSAlex Tomas goto out2; 396225d14f98SAmit Arora 39634d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a 39644d33b1efSTheodore Ts'o * cluster we can use. 
*/ 39654d33b1efSTheodore Ts'o if ((sbi->s_cluster_ratio > 1) && ex2 && 3966d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 39674d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 39684d33b1efSTheodore Ts'o newblock = map->m_pblk; 39697b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 39704d33b1efSTheodore Ts'o goto got_allocated_blocks; 39714d33b1efSTheodore Ts'o } 39724d33b1efSTheodore Ts'o 3973749269faSAmit Arora /* 3974749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in 3975749269faSAmit Arora * a single extent. For an initialized extent this limit is 3976749269faSAmit Arora * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 3977749269faSAmit Arora * EXT_UNINIT_MAX_LEN. 3978749269faSAmit Arora */ 3979e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN && 3980c2177057STheodore Ts'o !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3981e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN; 3982e35fd660STheodore Ts'o else if (map->m_len > EXT_UNINIT_MAX_LEN && 3983c2177057STheodore Ts'o (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3984e35fd660STheodore Ts'o map->m_len = EXT_UNINIT_MAX_LEN; 3985749269faSAmit Arora 3986e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 3987e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len); 39884d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path); 398925d14f98SAmit Arora if (err) 3990b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 399125d14f98SAmit Arora else 3992e35fd660STheodore Ts'o allocated = map->m_len; 3993c9de560dSAlex Tomas 3994c9de560dSAlex Tomas /* allocate new block */ 3995c9de560dSAlex Tomas ar.inode = inode; 3996e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 3997e35fd660STheodore Ts'o ar.logical = map->m_lblk; 39984d33b1efSTheodore Ts'o /* 39994d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster 40004d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a 40014d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the 40024d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is 40034d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc() 40044d33b1efSTheodore Ts'o * work correctly. 
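 *
 * For example (hypothetical numbers), with s_cluster_ratio = 4,
 * map->m_lblk = 10 and allocated = 3: offset = 10 & 3 = 2, the request is
 * widened to EXT4_NUM_B2C(sbi, 2 + 3) = 2 full clusters, and ar.goal and
 * ar.logical are both pulled back by 2 blocks onto the cluster boundary.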
40054d33b1efSTheodore Ts'o */ 40064d33b1efSTheodore Ts'o offset = map->m_lblk & (sbi->s_cluster_ratio - 1); 40074d33b1efSTheodore Ts'o ar.len = EXT4_NUM_B2C(sbi, offset+allocated); 40084d33b1efSTheodore Ts'o ar.goal -= offset; 40094d33b1efSTheodore Ts'o ar.logical -= offset; 4010c9de560dSAlex Tomas if (S_ISREG(inode->i_mode)) 4011c9de560dSAlex Tomas ar.flags = EXT4_MB_HINT_DATA; 4012c9de560dSAlex Tomas else 4013c9de560dSAlex Tomas /* disable in-core preallocation for non-regular files */ 4014c9de560dSAlex Tomas ar.flags = 0; 4015556b27abSVivek Haldar if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4016556b27abSVivek Haldar ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4017c9de560dSAlex Tomas newblock = ext4_mb_new_blocks(handle, &ar, &err); 4018a86c6181SAlex Tomas if (!newblock) 4019a86c6181SAlex Tomas goto out2; 402084fe3befSMingming ext_debug("allocate new block: goal %llu, found %llu/%u\n", 4021498e5f24STheodore Ts'o ar.goal, newblock, allocated); 40224d33b1efSTheodore Ts'o free_on_err = 1; 40237b415bf6SAditya Kali allocated_clusters = ar.len; 40244d33b1efSTheodore Ts'o ar.len = EXT4_C2B(sbi, ar.len) - offset; 40254d33b1efSTheodore Ts'o if (ar.len > allocated) 40264d33b1efSTheodore Ts'o ar.len = allocated; 4027a86c6181SAlex Tomas 40284d33b1efSTheodore Ts'o got_allocated_blocks: 4029a86c6181SAlex Tomas /* try to insert new extent into found leaf and return */ 40304d33b1efSTheodore Ts'o ext4_ext_store_pblock(&newex, newblock + offset); 4031c9de560dSAlex Tomas newex.ee_len = cpu_to_le16(ar.len); 40328d5d02e6SMingming Cao /* Mark uninitialized */ 40338d5d02e6SMingming Cao if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){ 4034a2df2a63SAmit Arora ext4_ext_mark_uninitialized(&newex); 40358d5d02e6SMingming Cao /* 4036744692dcSJiaying Zhang * io_end structure was created for every IO write to an 403725985edcSLucas De Marchi * uninitialized extent. To avoid unnecessary conversion, 4038744692dcSJiaying Zhang * here we flag the IO that really needs the conversion. 40395f524950SMingming * For non asycn direct IO case, flag the inode state 404025985edcSLucas De Marchi * that we need to perform conversion when IO is done. 40418d5d02e6SMingming Cao */ 4042744692dcSJiaying Zhang if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 4043b3ff0569STao Ma if (io) { 4044b3ff0569STao Ma if (!(io->flag & EXT4_IO_END_UNWRITTEN)) { 4045bd2d0210STheodore Ts'o io->flag = EXT4_IO_END_UNWRITTEN; 4046e9e3bcecSEric Sandeen atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); 4047b3ff0569STao Ma } 4048e9e3bcecSEric Sandeen } else 404919f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, 405019f5fb7aSTheodore Ts'o EXT4_STATE_DIO_UNWRITTEN); 40515f524950SMingming } 4052744692dcSJiaying Zhang if (ext4_should_dioread_nolock(inode)) 4053e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_UNINIT; 40548d5d02e6SMingming Cao } 4055c8d46e41SJiaying Zhang 4056a4e5d88bSDmitry Monakhov err = 0; 4057a4e5d88bSDmitry Monakhov if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) 4058a4e5d88bSDmitry Monakhov err = check_eofblocks_fl(handle, inode, map->m_lblk, 4059a4e5d88bSDmitry Monakhov path, ar.len); 4060575a1d4bSJiaying Zhang if (!err) 4061575a1d4bSJiaying Zhang err = ext4_ext_insert_extent(handle, inode, path, 4062575a1d4bSJiaying Zhang &newex, flags); 40634d33b1efSTheodore Ts'o if (err && free_on_err) { 40647132de74SMaxim Patlasov int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? 
40657132de74SMaxim Patlasov EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4066315054f0SAlex Tomas /* free data blocks we just allocated */
4067c9de560dSAlex Tomas /* not a good idea to call discard here directly,
4068c9de560dSAlex Tomas * but otherwise we'd need to call it every free() */
4069c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode);
40707dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
40717132de74SMaxim Patlasov ext4_ext_get_actual_len(&newex), fb_flags);
4072a86c6181SAlex Tomas goto out2;
4073315054f0SAlex Tomas }
4074a86c6181SAlex Tomas
4075a86c6181SAlex Tomas /* previous routine could use block we allocated */
4076bf89d16fSTheodore Ts'o newblock = ext4_ext_pblock(&newex);
4077b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex);
4078e35fd660STheodore Ts'o if (allocated > map->m_len)
4079e35fd660STheodore Ts'o allocated = map->m_len;
4080e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_NEW;
4081a86c6181SAlex Tomas
4082b436b9beSJan Kara /*
40835f634d06SAneesh Kumar K.V * Update reserved blocks/metadata blocks after successful
40845f634d06SAneesh Kumar K.V * block allocation which had been deferred till now.
40855f634d06SAneesh Kumar K.V */
40867b415bf6SAditya Kali if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
40877b415bf6SAditya Kali /*
40887b415bf6SAditya Kali * Check how many clusters we had reserved for this allocated range.
40897b415bf6SAditya Kali */
40907b415bf6SAditya Kali reserved_clusters = get_reserved_cluster_alloc(inode,
40917b415bf6SAditya Kali map->m_lblk, allocated);
40927b415bf6SAditya Kali if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
40937b415bf6SAditya Kali if (reserved_clusters) {
40947b415bf6SAditya Kali /*
40957b415bf6SAditya Kali * We have clusters reserved for this range.
40967b415bf6SAditya Kali * But since we are not doing actual allocation
40977b415bf6SAditya Kali * and are simply using blocks from a previously
40987b415bf6SAditya Kali * allocated cluster, we should release the
40997b415bf6SAditya Kali * reservation and not claim quota.
41007b415bf6SAditya Kali */
41017b415bf6SAditya Kali ext4_da_update_reserve_space(inode,
41027b415bf6SAditya Kali reserved_clusters, 0);
41037b415bf6SAditya Kali }
41047b415bf6SAditya Kali } else {
41057b415bf6SAditya Kali BUG_ON(allocated_clusters < reserved_clusters);
41067b415bf6SAditya Kali /* We will claim quota for all newly allocated blocks. */
41077b415bf6SAditya Kali ext4_da_update_reserve_space(inode, allocated_clusters,
41087b415bf6SAditya Kali 1);
41097b415bf6SAditya Kali if (reserved_clusters < allocated_clusters) {
41105356f261SAditya Kali struct ext4_inode_info *ei = EXT4_I(inode);
41117b415bf6SAditya Kali int reservation = allocated_clusters -
41127b415bf6SAditya Kali reserved_clusters;
41137b415bf6SAditya Kali /*
41147b415bf6SAditya Kali * It seems we have claimed a few clusters outside of
41157b415bf6SAditya Kali * the range of this allocation. We should give
41167b415bf6SAditya Kali * them back to the reservation pool. This can
41177b415bf6SAditya Kali * happen in the following case:
41187b415bf6SAditya Kali *
41197b415bf6SAditya Kali * * Suppose s_cluster_ratio is 4 (i.e., each
41207b415bf6SAditya Kali * cluster has 4 blocks). Thus, the clusters
41217b415bf6SAditya Kali * are [0-3],[4-7],[8-11]...
41227b415bf6SAditya Kali * * First comes a delayed allocation write for
41237b415bf6SAditya Kali * logical blocks 10 & 11. Since there were no
41247b415bf6SAditya Kali * previous delayed allocated blocks in the
41257b415bf6SAditya Kali * range [8-11], we would reserve 1 cluster
41267b415bf6SAditya Kali * for this write.
41277b415bf6SAditya Kali * * Next comes a write for logical blocks 3 to 8.
41287b415bf6SAditya Kali * In this case, we will reserve 2 clusters
41297b415bf6SAditya Kali * (for [0-3] and [4-7]; and not for [8-11], as
41307b415bf6SAditya Kali * that range already has delayed allocated blocks).
41317b415bf6SAditya Kali * Thus, total reserved clusters now becomes 3.
41327b415bf6SAditya Kali * * Now, during the delayed allocation writeout
41337b415bf6SAditya Kali * time, we will first write blocks [3-8] and
41347b415bf6SAditya Kali * allocate 3 clusters for writing these
41357b415bf6SAditya Kali * blocks. Also, we would claim all these
41367b415bf6SAditya Kali * three clusters above.
41377b415bf6SAditya Kali * * Now when we come here to write out the
41387b415bf6SAditya Kali * blocks [10-11], we would expect to claim
41397b415bf6SAditya Kali * the reservation of 1 cluster we had made
41407b415bf6SAditya Kali * (and we would claim it, since there are no
41417b415bf6SAditya Kali * more delayed allocated blocks in the range
41427b415bf6SAditya Kali * [8-11]). But our reserved cluster count had
41437b415bf6SAditya Kali * already gone to 0.
41447b415bf6SAditya Kali *
41457b415bf6SAditya Kali * Thus, at step 4 above, when we determine
41467b415bf6SAditya Kali * that there are still some unwritten delayed
41477b415bf6SAditya Kali * allocated blocks outside of our current
41487b415bf6SAditya Kali * block range, we should increment the
41497b415bf6SAditya Kali * reserved clusters count so that when the
41507b415bf6SAditya Kali * remaining blocks finally get written, we
41517b415bf6SAditya Kali * can claim them.
41527b415bf6SAditya Kali */
41535356f261SAditya Kali dquot_reserve_block(inode,
41545356f261SAditya Kali EXT4_C2B(sbi, reservation));
41555356f261SAditya Kali spin_lock(&ei->i_block_reservation_lock);
41565356f261SAditya Kali ei->i_reserved_data_blocks += reservation;
41575356f261SAditya Kali spin_unlock(&ei->i_block_reservation_lock);
41587b415bf6SAditya Kali }
41597b415bf6SAditya Kali }
41607b415bf6SAditya Kali }
41615f634d06SAneesh Kumar K.V
41625f634d06SAneesh Kumar K.V /*
4163b436b9beSJan Kara * Cache the extent and update transaction to commit on fdatasync only
4164b436b9beSJan Kara * when it is _not_ an uninitialized extent.
4165b436b9beSJan Kara */
4166b436b9beSJan Kara if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4167b05e6ae5STheodore Ts'o ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4168b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 1);
4169b436b9beSJan Kara } else
4170b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0);
4171a86c6181SAlex Tomas out:
4172e35fd660STheodore Ts'o if (allocated > map->m_len)
4173e35fd660STheodore Ts'o allocated = map->m_len;
4174a86c6181SAlex Tomas ext4_ext_show_leaf(inode, path);
4175e35fd660STheodore Ts'o map->m_flags |= EXT4_MAP_MAPPED;
4176e35fd660STheodore Ts'o map->m_pblk = newblock;
4177e35fd660STheodore Ts'o map->m_len = allocated;
4178a86c6181SAlex Tomas out2:
4179a86c6181SAlex Tomas if (path) {
4180a86c6181SAlex Tomas ext4_ext_drop_refs(path);
4181a86c6181SAlex Tomas kfree(path);
4182a86c6181SAlex Tomas }
41830562e0baSJiaying Zhang trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
41840562e0baSJiaying Zhang newblock, map->m_len, err ?
err : allocated); 4185e861304bSAllison Henderson 4186e861304bSAllison Henderson result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ? 4187e861304bSAllison Henderson punched_out : allocated; 4188e861304bSAllison Henderson 4189e861304bSAllison Henderson return err ? err : result; 4190a86c6181SAlex Tomas } 4191a86c6181SAlex Tomas 4192cf108bcaSJan Kara void ext4_ext_truncate(struct inode *inode) 4193a86c6181SAlex Tomas { 4194a86c6181SAlex Tomas struct address_space *mapping = inode->i_mapping; 4195a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 4196725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 4197a86c6181SAlex Tomas handle_t *handle; 4198189e868fSAllison Henderson loff_t page_len; 4199a86c6181SAlex Tomas int err = 0; 4200a86c6181SAlex Tomas 4201a86c6181SAlex Tomas /* 42023889fd57SJiaying Zhang * finish any pending end_io work so we won't run the risk of 42033889fd57SJiaying Zhang * converting any truncated blocks to initialized later 42043889fd57SJiaying Zhang */ 42053889fd57SJiaying Zhang ext4_flush_completed_IO(inode); 42063889fd57SJiaying Zhang 42073889fd57SJiaying Zhang /* 4208a86c6181SAlex Tomas * probably first extent we're gonna free will be last in block 4209a86c6181SAlex Tomas */ 4210f3bd1f3fSMingming Cao err = ext4_writepage_trans_blocks(inode); 4211a86c6181SAlex Tomas handle = ext4_journal_start(inode, err); 4212cf108bcaSJan Kara if (IS_ERR(handle)) 4213a86c6181SAlex Tomas return; 4214a86c6181SAlex Tomas 4215189e868fSAllison Henderson if (inode->i_size % PAGE_CACHE_SIZE != 0) { 4216189e868fSAllison Henderson page_len = PAGE_CACHE_SIZE - 4217189e868fSAllison Henderson (inode->i_size & (PAGE_CACHE_SIZE - 1)); 4218189e868fSAllison Henderson 4219189e868fSAllison Henderson err = ext4_discard_partial_page_buffers(handle, 4220189e868fSAllison Henderson mapping, inode->i_size, page_len, 0); 4221189e868fSAllison Henderson 4222189e868fSAllison Henderson if (err) 4223189e868fSAllison Henderson goto out_stop; 4224189e868fSAllison Henderson } 4225a86c6181SAlex Tomas 42269ddfc3dcSJan Kara if (ext4_orphan_add(handle, inode)) 42279ddfc3dcSJan Kara goto out_stop; 42289ddfc3dcSJan Kara 42290e855ac8SAneesh Kumar K.V down_write(&EXT4_I(inode)->i_data_sem); 4230a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 4231a86c6181SAlex Tomas 4232c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 4233c9de560dSAlex Tomas 4234a86c6181SAlex Tomas /* 4235d0d856e8SRandy Dunlap * TODO: optimization is possible here. 4236d0d856e8SRandy Dunlap * Probably we need not scan at all, 4237d0d856e8SRandy Dunlap * because page truncation is enough. 4238a86c6181SAlex Tomas */ 4239a86c6181SAlex Tomas 4240a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */ 4241a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size; 4242a86c6181SAlex Tomas ext4_mark_inode_dirty(handle, inode); 4243a86c6181SAlex Tomas 4244a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1) 4245a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb); 4246c6a0371cSAllison Henderson err = ext4_ext_remove_space(inode, last_block); 4247a86c6181SAlex Tomas 4248a86c6181SAlex Tomas /* In a multi-transaction truncate, we only make the final 424956055d3aSAmit Arora * transaction synchronous. 
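 * Earlier transactions in the truncate are covered by the orphan record
 * added above: if we crash part-way through, orphan processing on the
 * next mount finishes the truncate for us.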
425056055d3aSAmit Arora */ 4251a86c6181SAlex Tomas if (IS_SYNC(inode)) 42520390131bSFrank Mayhar ext4_handle_sync(handle); 4253a86c6181SAlex Tomas 42549ddfc3dcSJan Kara up_write(&EXT4_I(inode)->i_data_sem); 4255f6d2f6b3SEric Gouriou 4256f6d2f6b3SEric Gouriou out_stop: 4257a86c6181SAlex Tomas /* 4258d0d856e8SRandy Dunlap * If this was a simple ftruncate() and the file will remain alive, 4259a86c6181SAlex Tomas * then we need to clear up the orphan record which we created above. 4260a86c6181SAlex Tomas * However, if this was a real unlink then we were called by 4261a86c6181SAlex Tomas * ext4_delete_inode(), and we allow that function to clean up the 4262a86c6181SAlex Tomas * orphan info for us. 4263a86c6181SAlex Tomas */ 4264a86c6181SAlex Tomas if (inode->i_nlink) 4265a86c6181SAlex Tomas ext4_orphan_del(handle, inode); 4266a86c6181SAlex Tomas 4267ef737728SSolofo Ramangalahy inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4268ef737728SSolofo Ramangalahy ext4_mark_inode_dirty(handle, inode); 4269a86c6181SAlex Tomas ext4_journal_stop(handle); 4270a86c6181SAlex Tomas } 4271a86c6181SAlex Tomas 4272fd28784aSAneesh Kumar K.V static void ext4_falloc_update_inode(struct inode *inode, 4273fd28784aSAneesh Kumar K.V int mode, loff_t new_size, int update_ctime) 4274fd28784aSAneesh Kumar K.V { 4275fd28784aSAneesh Kumar K.V struct timespec now; 4276fd28784aSAneesh Kumar K.V 4277fd28784aSAneesh Kumar K.V if (update_ctime) { 4278fd28784aSAneesh Kumar K.V now = current_fs_time(inode->i_sb); 4279fd28784aSAneesh Kumar K.V if (!timespec_equal(&inode->i_ctime, &now)) 4280fd28784aSAneesh Kumar K.V inode->i_ctime = now; 4281fd28784aSAneesh Kumar K.V } 4282fd28784aSAneesh Kumar K.V /* 4283fd28784aSAneesh Kumar K.V * Update only when preallocation was requested beyond 4284fd28784aSAneesh Kumar K.V * the file size. 4285fd28784aSAneesh Kumar K.V */ 4286cf17fea6SAneesh Kumar K.V if (!(mode & FALLOC_FL_KEEP_SIZE)) { 4287cf17fea6SAneesh Kumar K.V if (new_size > i_size_read(inode)) 4288fd28784aSAneesh Kumar K.V i_size_write(inode, new_size); 4289cf17fea6SAneesh Kumar K.V if (new_size > EXT4_I(inode)->i_disksize) 4290cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_size); 4291c8d46e41SJiaying Zhang } else { 4292c8d46e41SJiaying Zhang /* 4293c8d46e41SJiaying Zhang * Mark that we allocate beyond EOF so the subsequent truncate 4294c8d46e41SJiaying Zhang * can proceed even if the new size is the same as i_size. 4295c8d46e41SJiaying Zhang */ 4296c8d46e41SJiaying Zhang if (new_size > i_size_read(inode)) 429712e9b892SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4298fd28784aSAneesh Kumar K.V } 4299fd28784aSAneesh Kumar K.V 4300fd28784aSAneesh Kumar K.V } 4301fd28784aSAneesh Kumar K.V 4302a2df2a63SAmit Arora /* 43032fe17c10SChristoph Hellwig * preallocate space for a file. This implements ext4's fallocate file 4304a2df2a63SAmit Arora * operation, which gets called from sys_fallocate system call. 4305a2df2a63SAmit Arora * For block-mapped files, posix_fallocate should fall back to the method 4306a2df2a63SAmit Arora * of writing zeroes to the required new blocks (the same behavior which is 4307a2df2a63SAmit Arora * expected for file systems which do not support fallocate() system call). 
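 *
 * Worked example of the block-count math used below (numbers are purely
 * illustrative): with a 4096-byte block size (blkbits = 12), offset = 3072
 * and len = 2048, the request touches logical blocks 0 and 1, so
 * map.m_lblk = 3072 >> 12 = 0 and max_blocks = (EXT4_BLOCK_ALIGN(5120, 12)
 * >> 12) - 0 = 2, even though len >> blkbits alone would give 0.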
4308a2df2a63SAmit Arora */ 43092fe17c10SChristoph Hellwig long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) 4310a2df2a63SAmit Arora { 43112fe17c10SChristoph Hellwig struct inode *inode = file->f_path.dentry->d_inode; 4312a2df2a63SAmit Arora handle_t *handle; 4313fd28784aSAneesh Kumar K.V loff_t new_size; 4314498e5f24STheodore Ts'o unsigned int max_blocks; 4315a2df2a63SAmit Arora int ret = 0; 4316a2df2a63SAmit Arora int ret2 = 0; 4317a2df2a63SAmit Arora int retries = 0; 4318a4e5d88bSDmitry Monakhov int flags; 43192ed88685STheodore Ts'o struct ext4_map_blocks map; 4320a2df2a63SAmit Arora unsigned int credits, blkbits = inode->i_blkbits; 4321a2df2a63SAmit Arora 4322a2df2a63SAmit Arora /* 4323a2df2a63SAmit Arora * currently supporting (pre)allocate mode for extent-based 4324a2df2a63SAmit Arora * files _only_ 4325a2df2a63SAmit Arora */ 432612e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 4327a2df2a63SAmit Arora return -EOPNOTSUPP; 4328a2df2a63SAmit Arora 4329a4bb6b64SAllison Henderson /* Return error if mode is not supported */ 4330a4bb6b64SAllison Henderson if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 4331a4bb6b64SAllison Henderson return -EOPNOTSUPP; 4332a4bb6b64SAllison Henderson 4333a4bb6b64SAllison Henderson if (mode & FALLOC_FL_PUNCH_HOLE) 4334a4bb6b64SAllison Henderson return ext4_punch_hole(file, offset, len); 4335a4bb6b64SAllison Henderson 43360562e0baSJiaying Zhang trace_ext4_fallocate_enter(inode, offset, len, mode); 43372ed88685STheodore Ts'o map.m_lblk = offset >> blkbits; 4338fd28784aSAneesh Kumar K.V /* 4339fd28784aSAneesh Kumar K.V * We can't just convert len to max_blocks because 4340fd28784aSAneesh Kumar K.V * If blocksize = 4096 offset = 3072 and len = 2048 4341fd28784aSAneesh Kumar K.V */ 4342a2df2a63SAmit Arora max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 43432ed88685STheodore Ts'o - map.m_lblk; 4344a2df2a63SAmit Arora /* 4345f3bd1f3fSMingming Cao * credits to insert 1 extent into extent tree 4346a2df2a63SAmit Arora */ 4347f3bd1f3fSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks); 434855bd725aSAneesh Kumar K.V mutex_lock(&inode->i_mutex); 43496d19c42bSNikanth Karthikesan ret = inode_newsize_ok(inode, (len + offset)); 43506d19c42bSNikanth Karthikesan if (ret) { 43516d19c42bSNikanth Karthikesan mutex_unlock(&inode->i_mutex); 43520562e0baSJiaying Zhang trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); 43536d19c42bSNikanth Karthikesan return ret; 43546d19c42bSNikanth Karthikesan } 4355a4e5d88bSDmitry Monakhov flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT | 4356a4e5d88bSDmitry Monakhov EXT4_GET_BLOCKS_NO_NORMALIZE; 4357a4e5d88bSDmitry Monakhov if (mode & FALLOC_FL_KEEP_SIZE) 4358a4e5d88bSDmitry Monakhov flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 4359a2df2a63SAmit Arora retry: 4360a2df2a63SAmit Arora while (ret >= 0 && ret < max_blocks) { 43612ed88685STheodore Ts'o map.m_lblk = map.m_lblk + ret; 43622ed88685STheodore Ts'o map.m_len = max_blocks = max_blocks - ret; 4363a2df2a63SAmit Arora handle = ext4_journal_start(inode, credits); 4364a2df2a63SAmit Arora if (IS_ERR(handle)) { 4365a2df2a63SAmit Arora ret = PTR_ERR(handle); 4366a2df2a63SAmit Arora break; 4367a2df2a63SAmit Arora } 4368a4e5d88bSDmitry Monakhov ret = ext4_map_blocks(handle, inode, &map, flags); 4369221879c9SAneesh Kumar K.V if (ret <= 0) { 43702c98615dSAneesh Kumar K.V #ifdef EXT4FS_DEBUG 43712c98615dSAneesh Kumar K.V WARN_ON(ret <= 0); 4372e35fd660STheodore Ts'o printk(KERN_ERR "%s: ext4_ext_map_blocks " 
43732c98615dSAneesh Kumar K.V "returned error inode#%lu, block=%u, " 43749fd9784cSThadeu Lima de Souza Cascardo "max_blocks=%u", __func__, 4375a6371b63SKazuya Mio inode->i_ino, map.m_lblk, max_blocks); 43762c98615dSAneesh Kumar K.V #endif 4377a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode); 4378a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4379a2df2a63SAmit Arora break; 4380a2df2a63SAmit Arora } 43812ed88685STheodore Ts'o if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len, 4382fd28784aSAneesh Kumar K.V blkbits) >> blkbits)) 4383fd28784aSAneesh Kumar K.V new_size = offset + len; 4384fd28784aSAneesh Kumar K.V else 438529ae07b7SUtako Kusaka new_size = ((loff_t) map.m_lblk + ret) << blkbits; 4386a2df2a63SAmit Arora 4387fd28784aSAneesh Kumar K.V ext4_falloc_update_inode(inode, mode, new_size, 43882ed88685STheodore Ts'o (map.m_flags & EXT4_MAP_NEW)); 4389a2df2a63SAmit Arora ext4_mark_inode_dirty(handle, inode); 4390a2df2a63SAmit Arora ret2 = ext4_journal_stop(handle); 4391a2df2a63SAmit Arora if (ret2) 4392a2df2a63SAmit Arora break; 4393a2df2a63SAmit Arora } 4394fd28784aSAneesh Kumar K.V if (ret == -ENOSPC && 4395fd28784aSAneesh Kumar K.V ext4_should_retry_alloc(inode->i_sb, &retries)) { 4396fd28784aSAneesh Kumar K.V ret = 0; 4397a2df2a63SAmit Arora goto retry; 4398a2df2a63SAmit Arora } 439955bd725aSAneesh Kumar K.V mutex_unlock(&inode->i_mutex); 44000562e0baSJiaying Zhang trace_ext4_fallocate_exit(inode, offset, max_blocks, 44010562e0baSJiaying Zhang ret > 0 ? ret2 : ret); 4402a2df2a63SAmit Arora return ret > 0 ? ret2 : ret; 4403a2df2a63SAmit Arora } 44046873fa0dSEric Sandeen 44056873fa0dSEric Sandeen /* 44060031462bSMingming Cao * This function convert a range of blocks to written extents 44070031462bSMingming Cao * The caller of this function will pass the start offset and the size. 44080031462bSMingming Cao * all unwritten extents within this range will be converted to 44090031462bSMingming Cao * written extents. 44100031462bSMingming Cao * 44110031462bSMingming Cao * This function is called from the direct IO end io call back 44120031462bSMingming Cao * function, to convert the fallocated extents after IO is completed. 4413109f5565SMingming * Returns 0 on success. 
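 * The conversion is done piecemeal: each loop iteration below starts a
 * fresh transaction with enough credits to insert one extent and calls
 * ext4_map_blocks() with EXT4_GET_BLOCKS_IO_CONVERT_EXT for whatever part
 * of the range is still unconverted, so a typical caller simply passes the
 * byte range covered by the just-completed IO.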
44140031462bSMingming Cao */ 44150031462bSMingming Cao int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, 4416a1de02dcSEric Sandeen ssize_t len) 44170031462bSMingming Cao { 44180031462bSMingming Cao handle_t *handle; 44190031462bSMingming Cao unsigned int max_blocks; 44200031462bSMingming Cao int ret = 0; 44210031462bSMingming Cao int ret2 = 0; 44222ed88685STheodore Ts'o struct ext4_map_blocks map; 44230031462bSMingming Cao unsigned int credits, blkbits = inode->i_blkbits; 44240031462bSMingming Cao 44252ed88685STheodore Ts'o map.m_lblk = offset >> blkbits; 44260031462bSMingming Cao /* 44270031462bSMingming Cao * We can't just convert len to max_blocks because 44280031462bSMingming Cao * If blocksize = 4096 offset = 3072 and len = 2048 44290031462bSMingming Cao */ 44302ed88685STheodore Ts'o max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) - 44312ed88685STheodore Ts'o map.m_lblk); 44320031462bSMingming Cao /* 44330031462bSMingming Cao * credits to insert 1 extent into extent tree 44340031462bSMingming Cao */ 44350031462bSMingming Cao credits = ext4_chunk_trans_blocks(inode, max_blocks); 44360031462bSMingming Cao while (ret >= 0 && ret < max_blocks) { 44372ed88685STheodore Ts'o map.m_lblk += ret; 44382ed88685STheodore Ts'o map.m_len = (max_blocks -= ret); 44390031462bSMingming Cao handle = ext4_journal_start(inode, credits); 44400031462bSMingming Cao if (IS_ERR(handle)) { 44410031462bSMingming Cao ret = PTR_ERR(handle); 44420031462bSMingming Cao break; 44430031462bSMingming Cao } 44442ed88685STheodore Ts'o ret = ext4_map_blocks(handle, inode, &map, 4445c7064ef1SJiaying Zhang EXT4_GET_BLOCKS_IO_CONVERT_EXT); 44460031462bSMingming Cao if (ret <= 0) { 44470031462bSMingming Cao WARN_ON(ret <= 0); 4448e35fd660STheodore Ts'o printk(KERN_ERR "%s: ext4_ext_map_blocks " 44490031462bSMingming Cao "returned error inode#%lu, block=%u, " 44500031462bSMingming Cao "max_blocks=%u", __func__, 44512ed88685STheodore Ts'o inode->i_ino, map.m_lblk, map.m_len); 44520031462bSMingming Cao } 44530031462bSMingming Cao ext4_mark_inode_dirty(handle, inode); 44540031462bSMingming Cao ret2 = ext4_journal_stop(handle); 44550031462bSMingming Cao if (ret <= 0 || ret2 ) 44560031462bSMingming Cao break; 44570031462bSMingming Cao } 44580031462bSMingming Cao return ret > 0 ? ret2 : ret; 44590031462bSMingming Cao } 44606d9c85ebSYongqiang Yang 44610031462bSMingming Cao /* 44626873fa0dSEric Sandeen * Callback function called for each extent to gather FIEMAP information. 
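 * The extent being reported arrives in @newex (an ext4_ext_cache entry, so
 * ec_start == 0 means the range is a hole or still delayed-allocated), and
 * @data carries the caller's struct fiemap_extent_info; returning
 * EXT_CONTINUE tells the extent walker to keep iterating.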
44636873fa0dSEric Sandeen */ 4464c03f8aa9SLukas Czerner static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, 44656873fa0dSEric Sandeen struct ext4_ext_cache *newex, struct ext4_extent *ex, 44666873fa0dSEric Sandeen void *data) 44676873fa0dSEric Sandeen { 44686873fa0dSEric Sandeen __u64 logical; 44696873fa0dSEric Sandeen __u64 physical; 44706873fa0dSEric Sandeen __u64 length; 44716873fa0dSEric Sandeen __u32 flags = 0; 44726d9c85ebSYongqiang Yang int ret = 0; 44736d9c85ebSYongqiang Yang struct fiemap_extent_info *fieinfo = data; 44746d9c85ebSYongqiang Yang unsigned char blksize_bits; 44756873fa0dSEric Sandeen 44766d9c85ebSYongqiang Yang blksize_bits = inode->i_sb->s_blocksize_bits; 44776873fa0dSEric Sandeen logical = (__u64)newex->ec_block << blksize_bits; 44786873fa0dSEric Sandeen 4479b05e6ae5STheodore Ts'o if (newex->ec_start == 0) { 44806d9c85ebSYongqiang Yang /* 44816d9c85ebSYongqiang Yang * No extent in extent-tree contains block @newex->ec_start, 44826d9c85ebSYongqiang Yang * then the block may stay in 1)a hole or 2)delayed-extent. 44836d9c85ebSYongqiang Yang * 44846d9c85ebSYongqiang Yang * Holes or delayed-extents are processed as follows. 44856d9c85ebSYongqiang Yang * 1. lookup dirty pages with specified range in pagecache. 44866d9c85ebSYongqiang Yang * If no page is got, then there is no delayed-extent and 44876d9c85ebSYongqiang Yang * return with EXT_CONTINUE. 44886d9c85ebSYongqiang Yang * 2. find the 1st mapped buffer, 44896d9c85ebSYongqiang Yang * 3. check if the mapped buffer is both in the request range 44906d9c85ebSYongqiang Yang * and a delayed buffer. If not, there is no delayed-extent, 44916d9c85ebSYongqiang Yang * then return. 44926d9c85ebSYongqiang Yang * 4. a delayed-extent is found, the extent will be collected. 44936d9c85ebSYongqiang Yang */ 44946d9c85ebSYongqiang Yang ext4_lblk_t end = 0; 44956d9c85ebSYongqiang Yang pgoff_t last_offset; 44966873fa0dSEric Sandeen pgoff_t offset; 44976d9c85ebSYongqiang Yang pgoff_t index; 4498b221349fSYongqiang Yang pgoff_t start_index = 0; 44996d9c85ebSYongqiang Yang struct page **pages = NULL; 45006873fa0dSEric Sandeen struct buffer_head *bh = NULL; 45016d9c85ebSYongqiang Yang struct buffer_head *head = NULL; 45026d9c85ebSYongqiang Yang unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *); 45036d9c85ebSYongqiang Yang 45046d9c85ebSYongqiang Yang pages = kmalloc(PAGE_SIZE, GFP_KERNEL); 45056d9c85ebSYongqiang Yang if (pages == NULL) 45066d9c85ebSYongqiang Yang return -ENOMEM; 45076873fa0dSEric Sandeen 45086873fa0dSEric Sandeen offset = logical >> PAGE_SHIFT; 45096d9c85ebSYongqiang Yang repeat: 45106d9c85ebSYongqiang Yang last_offset = offset; 45116d9c85ebSYongqiang Yang head = NULL; 45126d9c85ebSYongqiang Yang ret = find_get_pages_tag(inode->i_mapping, &offset, 45136d9c85ebSYongqiang Yang PAGECACHE_TAG_DIRTY, nr_pages, pages); 45146873fa0dSEric Sandeen 45156d9c85ebSYongqiang Yang if (!(flags & FIEMAP_EXTENT_DELALLOC)) { 45166d9c85ebSYongqiang Yang /* First time, try to find a mapped buffer. */ 45176d9c85ebSYongqiang Yang if (ret == 0) { 45186d9c85ebSYongqiang Yang out: 45196d9c85ebSYongqiang Yang for (index = 0; index < ret; index++) 45206d9c85ebSYongqiang Yang page_cache_release(pages[index]); 45216d9c85ebSYongqiang Yang /* just a hole. 
			index = 0;

next_page:
			/* Try to find the 1st mapped buffer. */
			end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
				blksize_bits;
			if (!page_has_buffers(pages[index]))
				goto out;
			head = page_buffers(pages[index]);
			if (!head)
				goto out;

			index++;
			bh = head;
			do {
				if (end >= newex->ec_block +
				    newex->ec_len)
					/* The buffer is out of
					 * the request range.
					 */
					goto out;

				if (buffer_mapped(bh) &&
				    end >= newex->ec_block) {
					start_index = index - 1;
					/* get the 1st mapped buffer. */
					goto found_mapped_buffer;
				}

				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			/*
			 * No mapped buffer was found in the request range in
			 * this page; look up the next page.
			 */
			if (index >= ret) {
				/* There is no page left, but we need to limit
				 * newex->ec_len.
				 */
				newex->ec_len = end - newex->ec_block;
				goto out;
			}
			goto next_page;
		} else {
			/* Find contiguous delayed buffers. */
			if (ret > 0 && pages[0]->index == last_offset)
				head = page_buffers(pages[0]);
			bh = head;
			index = 1;
			start_index = 0;
		}

found_mapped_buffer:
		if (bh != NULL && buffer_delay(bh)) {
			/* 1st or contiguous delayed buffer found. */
			if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
				/*
				 * First delayed buffer found; record
				 * the start of the extent.
				 */
				flags |= FIEMAP_EXTENT_DELALLOC;
				newex->ec_block = end;
				logical = (__u64)end << blksize_bits;
			}
			/* Find contiguous delayed buffers. */
			do {
				if (!buffer_delay(bh))
					goto found_delayed_extent;
				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			for (; index < ret; index++) {
				if (!page_has_buffers(pages[index])) {
					bh = NULL;
					break;
				}
				head = page_buffers(pages[index]);
				if (!head) {
					bh = NULL;
					break;
				}

				if (pages[index]->index !=
				    pages[start_index]->index + index
				    - start_index) {
					/* Pages are not contiguous. */
					bh = NULL;
					break;
				}
				bh = head;
				do {
					if (!buffer_delay(bh))
						/* Delayed-extent ends. */
						goto found_delayed_extent;
					bh = bh->b_this_page;
					end++;
				} while (bh != head);
			}
		} else if (!(flags & FIEMAP_EXTENT_DELALLOC))
			/* a hole found. */
			goto out;

found_delayed_extent:
		newex->ec_len = min(end - newex->ec_block,
				    (ext4_lblk_t)EXT_INIT_MAX_LEN);
		if (ret == nr_pages && bh != NULL &&
		    newex->ec_len < EXT_INIT_MAX_LEN &&
		    buffer_delay(bh)) {
			/* Have not collected an extent and continue. */
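			/*
			 * We used up a full batch of pages and the last
			 * buffer seen is still delayed, so the delayed extent
			 * may continue beyond this batch: drop the page
			 * references and rescan from the next offset.
			 */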
			for (index = 0; index < ret; index++)
				page_cache_release(pages[index]);
			goto repeat;
		}

		for (index = 0; index < ret; index++)
			page_cache_release(pages[index]);
		kfree(pages);
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length = (__u64)newex->ec_len << blksize_bits;

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	if (next == EXT_MAX_BLOCKS)
		flags |= FIEMAP_EXTENT_LAST;

	ret = fiemap_fill_next_extent(fieinfo, logical, physical,
					length, flags);
	if (ret < 0)
		return ret;
	if (ret == 1)
		return EXT_BREAK;
	return EXT_CONTINUE;
}
/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}
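
/*
 * Illustrative numbers for the in-inode case above (typical values, not
 * taken from this file): with a 256-byte on-disk inode and
 * i_extra_isize = 32, the xattr area starts EXT4_GOOD_OLD_INODE_SIZE + 32 =
 * 160 bytes into the inode, so length = 256 - 160 = 96 bytes.
 */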

/*
 * ext4_ext_punch_hole
 *
 * Punches a hole of "length" bytes in a file starting
 * at byte "offset".
 *
 * @file: The file to punch a hole in
 * @offset: The starting byte offset of the hole
 * @length: The length of the hole
 *
 * Returns 0 on success or a negative error code on failure.
 */
int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ext4_ext_cache cache_ex;
	ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
	struct address_space *mapping = inode->i_mapping;
	struct ext4_map_blocks map;
	handle_t *handle;
	loff_t first_page, last_page, page_len;
	loff_t first_page_offset, last_page_offset;
	int ret, credits, blocks_released, err = 0;

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		return 0;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
		   offset;
	}

	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	last_page = (offset + length) >> PAGE_CACHE_SHIFT;

	first_page_offset = first_page << PAGE_CACHE_SHIFT;
	last_page_offset = last_page << PAGE_CACHE_SHIFT;
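
	/*
	 * Worked example (illustrative): with 4096-byte blocks and pages,
	 * offset = 1000 and length = 10000 give first_block = 1,
	 * last_block = 2, first_page_offset = 4096 and
	 * last_page_offset = 8192, so the page covering bytes 4096-8191 is
	 * dropped from the page cache while byte ranges 1000-4095 and
	 * 8192-10999 are zeroed in place further below.
	 */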

	/*
	 * Write out all dirty pages to avoid race conditions,
	 * then release them.
	 */
	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		err = filemap_write_and_wait_range(mapping,
			offset, offset + length - 1);

		if (err)
			return err;
	}

	/* Now release the pages */
	if (last_page_offset > first_page_offset) {
		truncate_inode_pages_range(mapping, first_page_offset,
					   last_page_offset - 1);
	}

	/* finish any pending end_io work */
	ext4_flush_completed_IO(inode);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_orphan_add(handle, inode);
	if (err)
		goto out;

	/*
	 * Now we need to zero out the non-page-aligned data in the
	 * pages at the start and tail of the hole, and unmap the buffer
	 * heads for the block-aligned regions of the page that were
	 * completely zeroed.
	 */
4785a4bb6b64SAllison Henderson */ 4786ba06208aSAllison Henderson if (first_page > last_page) { 4787ba06208aSAllison Henderson /* 4788ba06208aSAllison Henderson * If the file space being truncated is contained within a page 4789ba06208aSAllison Henderson * just zero out and unmap the middle of that page 4790ba06208aSAllison Henderson */ 4791ba06208aSAllison Henderson err = ext4_discard_partial_page_buffers(handle, 4792ba06208aSAllison Henderson mapping, offset, length, 0); 4793a4bb6b64SAllison Henderson 4794ba06208aSAllison Henderson if (err) 4795ba06208aSAllison Henderson goto out; 4796ba06208aSAllison Henderson } else { 4797ba06208aSAllison Henderson /* 4798ba06208aSAllison Henderson * zero out and unmap the partial page that contains 4799ba06208aSAllison Henderson * the start of the hole 4800ba06208aSAllison Henderson */ 4801ba06208aSAllison Henderson page_len = first_page_offset - offset; 4802ba06208aSAllison Henderson if (page_len > 0) { 4803ba06208aSAllison Henderson err = ext4_discard_partial_page_buffers(handle, mapping, 4804ba06208aSAllison Henderson offset, page_len, 0); 4805ba06208aSAllison Henderson if (err) 4806ba06208aSAllison Henderson goto out; 4807ba06208aSAllison Henderson } 4808ba06208aSAllison Henderson 4809ba06208aSAllison Henderson /* 4810ba06208aSAllison Henderson * zero out and unmap the partial page that contains 4811ba06208aSAllison Henderson * the end of the hole 4812ba06208aSAllison Henderson */ 4813ba06208aSAllison Henderson page_len = offset + length - last_page_offset; 4814ba06208aSAllison Henderson if (page_len > 0) { 4815ba06208aSAllison Henderson err = ext4_discard_partial_page_buffers(handle, mapping, 4816ba06208aSAllison Henderson last_page_offset, page_len, 0); 4817ba06208aSAllison Henderson if (err) 4818ba06208aSAllison Henderson goto out; 4819a4bb6b64SAllison Henderson } 4820a4bb6b64SAllison Henderson } 4821a4bb6b64SAllison Henderson 48222be4751bSAllison Henderson 48232be4751bSAllison Henderson /* 48242be4751bSAllison Henderson * If i_size is contained in the last page, we need to 48252be4751bSAllison Henderson * unmap and zero the partial page after i_size 48262be4751bSAllison Henderson */ 48272be4751bSAllison Henderson if (inode->i_size >> PAGE_CACHE_SHIFT == last_page && 48282be4751bSAllison Henderson inode->i_size % PAGE_CACHE_SIZE != 0) { 48292be4751bSAllison Henderson 48302be4751bSAllison Henderson page_len = PAGE_CACHE_SIZE - 48312be4751bSAllison Henderson (inode->i_size & (PAGE_CACHE_SIZE - 1)); 48322be4751bSAllison Henderson 48332be4751bSAllison Henderson if (page_len > 0) { 48342be4751bSAllison Henderson err = ext4_discard_partial_page_buffers(handle, 48352be4751bSAllison Henderson mapping, inode->i_size, page_len, 0); 48362be4751bSAllison Henderson 48372be4751bSAllison Henderson if (err) 48382be4751bSAllison Henderson goto out; 48392be4751bSAllison Henderson } 48402be4751bSAllison Henderson } 48412be4751bSAllison Henderson 4842a4bb6b64SAllison Henderson /* If there are no blocks to remove, return now */ 4843a4bb6b64SAllison Henderson if (first_block >= last_block) 4844a4bb6b64SAllison Henderson goto out; 4845a4bb6b64SAllison Henderson 4846a4bb6b64SAllison Henderson down_write(&EXT4_I(inode)->i_data_sem); 4847a4bb6b64SAllison Henderson ext4_ext_invalidate_cache(inode); 4848a4bb6b64SAllison Henderson ext4_discard_preallocations(inode); 4849a4bb6b64SAllison Henderson 4850a4bb6b64SAllison Henderson /* 4851a4bb6b64SAllison Henderson * Loop over all the blocks and identify blocks 4852a4bb6b64SAllison Henderson * that need to be punched out 
	iblock = first_block;
	blocks_released = 0;
	while (iblock < last_block) {
		max_blocks = last_block - iblock;
		num_blocks = 1;
		memset(&map, 0, sizeof(map));
		map.m_lblk = iblock;
		map.m_len = max_blocks;
		ret = ext4_ext_map_blocks(handle, inode, &map,
			EXT4_GET_BLOCKS_PUNCH_OUT_EXT);

		if (ret > 0) {
			blocks_released += ret;
			num_blocks = ret;
		} else if (ret == 0) {
			/*
			 * If map blocks could not find the block,
			 * then it is in a hole.  If the hole was
			 * not already cached, then map blocks should
			 * have put it in the cache, so we can get the
			 * hole out of the cache.
			 */
			memset(&cache_ex, 0, sizeof(cache_ex));
			if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
				!cache_ex.ec_start) {

				/* The hole is cached */
				num_blocks = cache_ex.ec_block +
					cache_ex.ec_len - iblock;

			} else {
				/* The block could not be identified */
				err = -EIO;
				break;
			}
		} else {
			/* Map blocks error */
			err = ret;
			break;
		}

		if (num_blocks == 0) {
			/* This condition should never happen */
			ext_debug("Block lookup failed");
			err = -EIO;
			break;
		}

		iblock += num_blocks;
	}

	if (blocks_released > 0) {
		ext4_ext_invalidate_cache(inode);
		ext4_discard_preallocations(inode);
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	up_write(&EXT4_I(inode)->i_data_sem);

out:
	ext4_orphan_del(handle, inode);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
	return err;
}
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCKS)
			last_blk = EXT_MAX_BLOCKS - 1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information.
		 * ext4_ext_fiemap_cb will push extents back to user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					  ext4_ext_fiemap_cb, fieinfo);
	}

	return error;
}
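
/*
 * Note: ext4_fiemap() is not called from this file; in kernels of this
 * vintage it is wired up elsewhere as the ->fiemap inode operation and is
 * reached through the FIEMAP ioctl.
 */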