/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}
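/*
 * Example: a handle with 4 buffer credits left cannot cover needed == 8,
 * so ext4_journal_extend() is asked for more; if the running transaction
 * cannot grow (err > 0), the transaction is restarted instead, and the
 * -EAGAIN return signals the caller to retry the operation under the
 * fresh transaction.
 */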
/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
		__ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
			    handle_t *handle, struct inode *inode,
			    struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}
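/*
 * Example: if the nearest extent in the path maps logical block 100 to
 * physical block 5000, a lookup for logical block 164 suggests the goal
 * 5064, and a lookup for logical block 40 suggests 4940 -- the goal keeps
 * the same logical-to-physical offset as the neighbouring extent, so a
 * later merge stays possible.
 */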
/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}
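/*
 * For illustration, assuming a 4KiB block size: struct ext4_extent_header,
 * struct ext4_extent and struct ext4_extent_idx are all 12 bytes, so a
 * full tree block holds (4096 - 12) / 12 = 340 extents or indexes, while
 * the 60-byte i_data root holds (60 - 12) / 12 = 4.  The AGGRESSIVE_TEST
 * caps above artificially shrink these limits so that deep trees can be
 * exercised with small files.
 */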
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
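/*
 * Worked example (again assuming 4KiB blocks, so idxs == 340): while a
 * file grows one delayed-allocation block at a time, most blocks are
 * charged no metadata at all; every 340th contiguous block is charged one
 * extra block for a new leaf-level index, every 340**2-th block one more
 * for the level above, and so on.  The first block after a discontinuity
 * falls through to the pessimistic ext_depth(inode) + 1 estimate.
 */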
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}
#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;


	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
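/*
 * Example: with index entries covering logical blocks {0, 100, 300}, a
 * search for block 150 starts with l at the entry for 100 (first + 1)
 * and ends with path->p_idx == l - 1, i.e. the entry for 100 -- the
 * rightmost index whose ei_block is <= the target.  Searching for block
 * 50 leaves p_idx at the first entry, which is never compared because a
 * valid first index covers everything below the second.
 */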
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
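/*
 * Sketch of the result for a depth-2 tree: path[0] describes the root in
 * i_data (p_bh == NULL, p_idx set by the index binsearch), path[1] an
 * interior index block (its buffer pinned by sb_getblk/bh_submit_read
 * above), and path[2] the leaf, where p_ext points at the closest extent
 * for @block, or is NULL for an empty leaf.
 */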
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}

	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EIO;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
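/*
 * Example: in an index block holding entries for blocks {10, 50, 90} with
 * curp->p_idx at the entry for 50, inserting logical 70 takes the "insert
 * after" branch -- the {90} entry is shifted one slot right by memmove()
 * and the new index lands in the freed slot, keeping the array sorted.
 */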
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we break processing and mark the
	 * filesystem read-only.  The index won't be inserted and
	 * the tree will be left in a consistent state.  The next
	 * mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}
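/*
 * Worked example: for a depth-2 tree whose only free index slot is in the
 * root (at == 0), ext4_ext_split() allocates depth - at == 2 blocks: a
 * new leaf plus k == 1 intermediate index block.  Extents to the right of
 * the split point move into the new leaf, the level-1 indexes to their
 * right move into the new index block, and a root-level index for @border
 * is finally inserted at path[0] by ext4_ext_insert_index().
 */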
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_extent *newext)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data,
		sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}
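/*
 * Example: growing a depth-0 tree copies the root extents from i_data
 * into the freshly allocated block, then rewrites the root as an index
 * node with a single entry whose ei_block matches the first copied extent
 * and whose pointer is the new block; eh_depth goes from 0 to 1.  (With
 * the usual 60-byte i_data root, that is at most four extents, as above.)
 */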
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for a free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
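/*
 * Note on termination: each pass through "repeat" either finds a level
 * with a free index slot (and splits there), or grows the tree by one
 * level.  After a grow from depth >= 1, the new root holds exactly one
 * index, so the next iteration is guaranteed to find a free slot at the
 * root and split at i == 0.
 */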
/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually the extent in the path covers blocks smaller
	 * than *logical, but it can be that the extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}
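/*
 * Example: with a single extent mapping logical blocks 100..107 to
 * physical 5000..5007, a caller probing a hole at *logical == 200 gets
 * back *logical == 107 and *phys == 5007 -- the last allocated block to
 * the left.  Probing *logical == 50 instead returns 0 with *phys == 0,
 * since nothing is allocated below it.
 */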
ext4_ext_search_left(struct inode *inode, 12001f109d5aSTheodore Ts'o struct ext4_ext_path *path, 12011988b51eSAlex Tomas ext4_lblk_t *logical, ext4_fsblk_t *phys) 12021988b51eSAlex Tomas { 12031988b51eSAlex Tomas struct ext4_extent_idx *ix; 12041988b51eSAlex Tomas struct ext4_extent *ex; 1205b939e376SAneesh Kumar K.V int depth, ee_len; 12061988b51eSAlex Tomas 1207273df556SFrank Mayhar if (unlikely(path == NULL)) { 1208273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1209273df556SFrank Mayhar return -EIO; 1210273df556SFrank Mayhar } 12111988b51eSAlex Tomas depth = path->p_depth; 12121988b51eSAlex Tomas *phys = 0; 12131988b51eSAlex Tomas 12141988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 12151988b51eSAlex Tomas return 0; 12161988b51eSAlex Tomas 12171988b51eSAlex Tomas /* usually the extent in the path covers blocks smaller 12181988b51eSAlex Tomas * than *logical, but it can happen that the extent is the 12191988b51eSAlex Tomas * first one in the file */ 12201988b51eSAlex Tomas 12211988b51eSAlex Tomas ex = path[depth].p_ext; 1222b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 12231988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1224273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1225273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1226273df556SFrank Mayhar "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!", 1227273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block)); 1228273df556SFrank Mayhar return -EIO; 1229273df556SFrank Mayhar } 12301988b51eSAlex Tomas while (--depth >= 0) { 12311988b51eSAlex Tomas ix = path[depth].p_idx; 1232273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1233273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1234273df556SFrank Mayhar "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!", 12356ee3b212STao Ma ix != NULL ? le32_to_cpu(ix->ei_block) : 0, 1236273df556SFrank Mayhar EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
12376ee3b212STao Ma le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0, 1238273df556SFrank Mayhar depth); 1239273df556SFrank Mayhar return -EIO; 1240273df556SFrank Mayhar } 12411988b51eSAlex Tomas } 12421988b51eSAlex Tomas return 0; 12431988b51eSAlex Tomas } 12441988b51eSAlex Tomas 1245273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1246273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1247273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1248273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1249273df556SFrank Mayhar return -EIO; 1250273df556SFrank Mayhar } 12511988b51eSAlex Tomas 1252b939e376SAneesh Kumar K.V *logical = le32_to_cpu(ex->ee_block) + ee_len - 1; 1253bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex) + ee_len - 1; 12541988b51eSAlex Tomas return 0; 12551988b51eSAlex Tomas } 12561988b51eSAlex Tomas 12571988b51eSAlex Tomas /* 12581988b51eSAlex Tomas * searches for the closest allocated block to the right of *logical 12591988b51eSAlex Tomas * and returns it at @logical + its physical address at @phys; 1260df3ab170STao Ma * if *logical is the largest allocated block, the function 12611988b51eSAlex Tomas * returns 0 at @phys; 12621988b51eSAlex Tomas * the return value contains 0 (success) or an error code 12631988b51eSAlex Tomas */ 12641f109d5aSTheodore Ts'o static int ext4_ext_search_right(struct inode *inode, 12651f109d5aSTheodore Ts'o struct ext4_ext_path *path, 12664d33b1efSTheodore Ts'o ext4_lblk_t *logical, ext4_fsblk_t *phys, 12674d33b1efSTheodore Ts'o struct ext4_extent **ret_ex) 12681988b51eSAlex Tomas { 12691988b51eSAlex Tomas struct buffer_head *bh = NULL; 12701988b51eSAlex Tomas struct ext4_extent_header *eh; 12711988b51eSAlex Tomas struct ext4_extent_idx *ix; 12721988b51eSAlex Tomas struct ext4_extent *ex; 12731988b51eSAlex Tomas ext4_fsblk_t block; 1274395a87bfSEric Sandeen int depth; /* Note, NOT eh_depth; depth from top of tree */ 1275395a87bfSEric Sandeen int ee_len; 12761988b51eSAlex Tomas 1277273df556SFrank Mayhar if (unlikely(path == NULL)) { 1278273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical); 1279273df556SFrank Mayhar return -EIO; 1280273df556SFrank Mayhar } 12811988b51eSAlex Tomas depth = path->p_depth; 12821988b51eSAlex Tomas *phys = 0; 12831988b51eSAlex Tomas 12841988b51eSAlex Tomas if (depth == 0 && path->p_ext == NULL) 12851988b51eSAlex Tomas return 0; 12861988b51eSAlex Tomas 12871988b51eSAlex Tomas /* usually the extent in the path covers blocks smaller 12881988b51eSAlex Tomas * than *logical, but it can happen that the extent is the 12891988b51eSAlex Tomas * first one in the file */ 12901988b51eSAlex Tomas 12911988b51eSAlex Tomas ex = path[depth].p_ext; 1292b939e376SAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 12931988b51eSAlex Tomas if (*logical < le32_to_cpu(ex->ee_block)) { 1294273df556SFrank Mayhar if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) { 1295273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1296273df556SFrank Mayhar "first_extent(path[%d].p_hdr) != ex", 1297273df556SFrank Mayhar depth); 1298273df556SFrank Mayhar return -EIO; 1299273df556SFrank Mayhar } 13001988b51eSAlex Tomas while (--depth >= 0) { 13011988b51eSAlex Tomas ix = path[depth].p_idx; 1302273df556SFrank Mayhar if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) { 1303273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1304273df556SFrank Mayhar "ix != EXT_FIRST_INDEX *logical %d!", 1305273df556SFrank Mayhar *logical); 1306273df556SFrank Mayhar return -EIO; 1307273df556SFrank
Mayhar } 13081988b51eSAlex Tomas } 13094d33b1efSTheodore Ts'o goto found_extent; 13101988b51eSAlex Tomas } 13111988b51eSAlex Tomas 1312273df556SFrank Mayhar if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) { 1313273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1314273df556SFrank Mayhar "logical %d < ee_block %d + ee_len %d!", 1315273df556SFrank Mayhar *logical, le32_to_cpu(ex->ee_block), ee_len); 1316273df556SFrank Mayhar return -EIO; 1317273df556SFrank Mayhar } 13181988b51eSAlex Tomas 13191988b51eSAlex Tomas if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) { 13201988b51eSAlex Tomas /* next allocated block in this leaf */ 13211988b51eSAlex Tomas ex++; 13224d33b1efSTheodore Ts'o goto found_extent; 13231988b51eSAlex Tomas } 13241988b51eSAlex Tomas 13251988b51eSAlex Tomas /* go up and search for index to the right */ 13261988b51eSAlex Tomas while (--depth >= 0) { 13271988b51eSAlex Tomas ix = path[depth].p_idx; 13281988b51eSAlex Tomas if (ix != EXT_LAST_INDEX(path[depth].p_hdr)) 132925f1ee3aSWu Fengguang goto got_index; 13301988b51eSAlex Tomas } 13311988b51eSAlex Tomas 133225f1ee3aSWu Fengguang /* we've gone up to the root and found no index to the right */ 13331988b51eSAlex Tomas return 0; 13341988b51eSAlex Tomas 133525f1ee3aSWu Fengguang got_index: 13361988b51eSAlex Tomas /* we've found index to the right, let's 13371988b51eSAlex Tomas * follow it and find the closest allocated 13381988b51eSAlex Tomas * block to the right */ 13391988b51eSAlex Tomas ix++; 1340bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 13411988b51eSAlex Tomas while (++depth < path->p_depth) { 13421988b51eSAlex Tomas bh = sb_bread(inode->i_sb, block); 13431988b51eSAlex Tomas if (bh == NULL) 13441988b51eSAlex Tomas return -EIO; 13451988b51eSAlex Tomas eh = ext_block_hdr(bh); 1346395a87bfSEric Sandeen /* subtract from p_depth to get proper eh_depth */ 134756b19868SAneesh Kumar K.V if (ext4_ext_check(inode, eh, path->p_depth - depth)) { 13481988b51eSAlex Tomas put_bh(bh); 13491988b51eSAlex Tomas return -EIO; 13501988b51eSAlex Tomas } 13511988b51eSAlex Tomas ix = EXT_FIRST_INDEX(eh); 1352bf89d16fSTheodore Ts'o block = ext4_idx_pblock(ix); 13531988b51eSAlex Tomas put_bh(bh); 13541988b51eSAlex Tomas } 13551988b51eSAlex Tomas 13561988b51eSAlex Tomas bh = sb_bread(inode->i_sb, block); 13571988b51eSAlex Tomas if (bh == NULL) 13581988b51eSAlex Tomas return -EIO; 13591988b51eSAlex Tomas eh = ext_block_hdr(bh); 136056b19868SAneesh Kumar K.V if (ext4_ext_check(inode, eh, path->p_depth - depth)) { 13611988b51eSAlex Tomas put_bh(bh); 13621988b51eSAlex Tomas return -EIO; 13631988b51eSAlex Tomas } 13641988b51eSAlex Tomas ex = EXT_FIRST_EXTENT(eh); 13654d33b1efSTheodore Ts'o found_extent: 13661988b51eSAlex Tomas *logical = le32_to_cpu(ex->ee_block); 1367bf89d16fSTheodore Ts'o *phys = ext4_ext_pblock(ex); 13684d33b1efSTheodore Ts'o *ret_ex = ex; 13694d33b1efSTheodore Ts'o if (bh) 13701988b51eSAlex Tomas put_bh(bh); 13711988b51eSAlex Tomas return 0; 13721988b51eSAlex Tomas } 13731988b51eSAlex Tomas 13741988b51eSAlex Tomas /* 1375d0d856e8SRandy Dunlap * ext4_ext_next_allocated_block: 1376f17722f9SLukas Czerner * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. 1377d0d856e8SRandy Dunlap * NOTE: it considers block number from index entry as 1378d0d856e8SRandy Dunlap * allocated block. Thus, index entries have to be consistent 1379d0d856e8SRandy Dunlap * with leaves. 
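 *
 * For illustration only (hypothetical caller; path refcounting and
 * error handling elided), scanning every allocated region of a file
 * could be driven like this:
 *
 *	ext4_lblk_t blk = 0;
 *	while (blk != EXT_MAX_BLOCKS) {
 *		path = ext4_ext_find_extent(inode, blk, path);
 *		blk = ext4_ext_next_allocated_block(path);
 *	}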
1380a86c6181SAlex Tomas */ 1381725d26d3SAneesh Kumar K.V static ext4_lblk_t 1382a86c6181SAlex Tomas ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1383a86c6181SAlex Tomas { 1384a86c6181SAlex Tomas int depth; 1385a86c6181SAlex Tomas 1386a86c6181SAlex Tomas BUG_ON(path == NULL); 1387a86c6181SAlex Tomas depth = path->p_depth; 1388a86c6181SAlex Tomas 1389a86c6181SAlex Tomas if (depth == 0 && path->p_ext == NULL) 1390f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1391a86c6181SAlex Tomas 1392a86c6181SAlex Tomas while (depth >= 0) { 1393a86c6181SAlex Tomas if (depth == path->p_depth) { 1394a86c6181SAlex Tomas /* leaf */ 13956f8ff537SCurt Wohlgemuth if (path[depth].p_ext && 13966f8ff537SCurt Wohlgemuth path[depth].p_ext != 1397a86c6181SAlex Tomas EXT_LAST_EXTENT(path[depth].p_hdr)) 1398a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_ext[1].ee_block); 1399a86c6181SAlex Tomas } else { 1400a86c6181SAlex Tomas /* index */ 1401a86c6181SAlex Tomas if (path[depth].p_idx != 1402a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1403a86c6181SAlex Tomas return le32_to_cpu(path[depth].p_idx[1].ei_block); 1404a86c6181SAlex Tomas } 1405a86c6181SAlex Tomas depth--; 1406a86c6181SAlex Tomas } 1407a86c6181SAlex Tomas 1408f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1409a86c6181SAlex Tomas } 1410a86c6181SAlex Tomas 1411a86c6181SAlex Tomas /* 1412d0d856e8SRandy Dunlap * ext4_ext_next_leaf_block: 1413f17722f9SLukas Czerner * returns first allocated block from next leaf or EXT_MAX_BLOCKS 1414a86c6181SAlex Tomas */ 14155718789dSRobin Dong static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path) 1416a86c6181SAlex Tomas { 1417a86c6181SAlex Tomas int depth; 1418a86c6181SAlex Tomas 1419a86c6181SAlex Tomas BUG_ON(path == NULL); 1420a86c6181SAlex Tomas depth = path->p_depth; 1421a86c6181SAlex Tomas 1422a86c6181SAlex Tomas /* zero-tree has no leaf blocks at all */ 1423a86c6181SAlex Tomas if (depth == 0) 1424f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1425a86c6181SAlex Tomas 1426a86c6181SAlex Tomas /* go to index block */ 1427a86c6181SAlex Tomas depth--; 1428a86c6181SAlex Tomas 1429a86c6181SAlex Tomas while (depth >= 0) { 1430a86c6181SAlex Tomas if (path[depth].p_idx != 1431a86c6181SAlex Tomas EXT_LAST_INDEX(path[depth].p_hdr)) 1432725d26d3SAneesh Kumar K.V return (ext4_lblk_t) 1433725d26d3SAneesh Kumar K.V le32_to_cpu(path[depth].p_idx[1].ei_block); 1434a86c6181SAlex Tomas depth--; 1435a86c6181SAlex Tomas } 1436a86c6181SAlex Tomas 1437f17722f9SLukas Czerner return EXT_MAX_BLOCKS; 1438a86c6181SAlex Tomas } 1439a86c6181SAlex Tomas 1440a86c6181SAlex Tomas /* 1441d0d856e8SRandy Dunlap * ext4_ext_correct_indexes: 1442d0d856e8SRandy Dunlap * if leaf gets modified and modified extent is first in the leaf, 1443d0d856e8SRandy Dunlap * then we have to correct all indexes above. 1444a86c6181SAlex Tomas * TODO: do we need to correct tree in all cases? 
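 *
 * For example (illustrative numbers): if the first extent of a leaf
 * changes its ee_block from 100 to 96, every index entry on the path
 * down to that leaf still carries 100 and must be rewritten:
 *
 *	index level:  (100 -> leaf)      becomes  (96 -> leaf)
 *	leaf level:   first extent @100  becomes  first extent @96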
1445a86c6181SAlex Tomas */ 14461d03ec98SAneesh Kumar K.V static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode, 1447a86c6181SAlex Tomas struct ext4_ext_path *path) 1448a86c6181SAlex Tomas { 1449a86c6181SAlex Tomas struct ext4_extent_header *eh; 1450a86c6181SAlex Tomas int depth = ext_depth(inode); 1451a86c6181SAlex Tomas struct ext4_extent *ex; 1452a86c6181SAlex Tomas __le32 border; 1453a86c6181SAlex Tomas int k, err = 0; 1454a86c6181SAlex Tomas 1455a86c6181SAlex Tomas eh = path[depth].p_hdr; 1456a86c6181SAlex Tomas ex = path[depth].p_ext; 1457273df556SFrank Mayhar 1458273df556SFrank Mayhar if (unlikely(ex == NULL || eh == NULL)) { 1459273df556SFrank Mayhar EXT4_ERROR_INODE(inode, 1460273df556SFrank Mayhar "ex %p == NULL or eh %p == NULL", ex, eh); 1461273df556SFrank Mayhar return -EIO; 1462273df556SFrank Mayhar } 1463a86c6181SAlex Tomas 1464a86c6181SAlex Tomas if (depth == 0) { 1465a86c6181SAlex Tomas /* there is no tree at all */ 1466a86c6181SAlex Tomas return 0; 1467a86c6181SAlex Tomas } 1468a86c6181SAlex Tomas 1469a86c6181SAlex Tomas if (ex != EXT_FIRST_EXTENT(eh)) { 1470a86c6181SAlex Tomas /* we correct tree if first leaf got modified only */ 1471a86c6181SAlex Tomas return 0; 1472a86c6181SAlex Tomas } 1473a86c6181SAlex Tomas 1474a86c6181SAlex Tomas /* 1475d0d856e8SRandy Dunlap * TODO: we need correction if border is smaller than current one 1476a86c6181SAlex Tomas */ 1477a86c6181SAlex Tomas k = depth - 1; 1478a86c6181SAlex Tomas border = path[depth].p_ext->ee_block; 14797e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 14807e028976SAvantika Mathur if (err) 1481a86c6181SAlex Tomas return err; 1482a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 14837e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 14847e028976SAvantika Mathur if (err) 1485a86c6181SAlex Tomas return err; 1486a86c6181SAlex Tomas 1487a86c6181SAlex Tomas while (k--) { 1488a86c6181SAlex Tomas /* change all left-side indexes */ 1489a86c6181SAlex Tomas if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr)) 1490a86c6181SAlex Tomas break; 14917e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + k); 14927e028976SAvantika Mathur if (err) 1493a86c6181SAlex Tomas break; 1494a86c6181SAlex Tomas path[k].p_idx->ei_block = border; 14957e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path + k); 14967e028976SAvantika Mathur if (err) 1497a86c6181SAlex Tomas break; 1498a86c6181SAlex Tomas } 1499a86c6181SAlex Tomas 1500a86c6181SAlex Tomas return err; 1501a86c6181SAlex Tomas } 1502a86c6181SAlex Tomas 1503748de673SAkira Fujita int 1504a86c6181SAlex Tomas ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, 1505a86c6181SAlex Tomas struct ext4_extent *ex2) 1506a86c6181SAlex Tomas { 1507749269faSAmit Arora unsigned short ext1_ee_len, ext2_ee_len, max_len; 1508a2df2a63SAmit Arora 1509a2df2a63SAmit Arora /* 1510a2df2a63SAmit Arora * Make sure that either both extents are uninitialized, or 1511a2df2a63SAmit Arora * both are _not_. 
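 * (For instance, an uninitialized extent is never glued onto an
 * initialized neighbour, even if their logical and physical ranges
 * line up perfectly.)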
1512a2df2a63SAmit Arora */ 1513a2df2a63SAmit Arora if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) 1514a2df2a63SAmit Arora return 0; 1515a2df2a63SAmit Arora 1516749269faSAmit Arora if (ext4_ext_is_uninitialized(ex1)) 1517749269faSAmit Arora max_len = EXT_UNINIT_MAX_LEN; 1518749269faSAmit Arora else 1519749269faSAmit Arora max_len = EXT_INIT_MAX_LEN; 1520749269faSAmit Arora 1521a2df2a63SAmit Arora ext1_ee_len = ext4_ext_get_actual_len(ex1); 1522a2df2a63SAmit Arora ext2_ee_len = ext4_ext_get_actual_len(ex2); 1523a2df2a63SAmit Arora 1524a2df2a63SAmit Arora if (le32_to_cpu(ex1->ee_block) + ext1_ee_len != 152563f57933SAndrew Morton le32_to_cpu(ex2->ee_block)) 1526a86c6181SAlex Tomas return 0; 1527a86c6181SAlex Tomas 1528471d4011SSuparna Bhattacharya /* 1529471d4011SSuparna Bhattacharya * To allow future support for preallocated extents to be added 1530471d4011SSuparna Bhattacharya * as an RO_COMPAT feature, refuse to merge two extents if 1531d0d856e8SRandy Dunlap * this can result in the top bit of ee_len being set. 1532471d4011SSuparna Bhattacharya */ 1533749269faSAmit Arora if (ext1_ee_len + ext2_ee_len > max_len) 1534471d4011SSuparna Bhattacharya return 0; 1535bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 1536b939e376SAneesh Kumar K.V if (ext1_ee_len >= 4) 1537a86c6181SAlex Tomas return 0; 1538a86c6181SAlex Tomas #endif 1539a86c6181SAlex Tomas 1540bf89d16fSTheodore Ts'o if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2)) 1541a86c6181SAlex Tomas return 1; 1542a86c6181SAlex Tomas return 0; 1543a86c6181SAlex Tomas } 1544a86c6181SAlex Tomas 1545a86c6181SAlex Tomas /* 154656055d3aSAmit Arora * This function tries to merge the "ex" extent to the next extent in the tree. 154756055d3aSAmit Arora * It always tries to merge towards the right. If you want to merge towards 154856055d3aSAmit Arora * the left, pass "ex - 1" as the argument instead of "ex". 154956055d3aSAmit Arora * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns 155056055d3aSAmit Arora * 1 if they got merged. 155156055d3aSAmit Arora */ 1552197217a5SYongqiang Yang static int ext4_ext_try_to_merge_right(struct inode *inode, 155356055d3aSAmit Arora struct ext4_ext_path *path, 155456055d3aSAmit Arora struct ext4_extent *ex) 155556055d3aSAmit Arora { 155656055d3aSAmit Arora struct ext4_extent_header *eh; 155756055d3aSAmit Arora unsigned int depth, len; 155856055d3aSAmit Arora int merge_done = 0; 155956055d3aSAmit Arora int uninitialized = 0; 156056055d3aSAmit Arora 156156055d3aSAmit Arora depth = ext_depth(inode); 156256055d3aSAmit Arora BUG_ON(path[depth].p_hdr == NULL); 156356055d3aSAmit Arora eh = path[depth].p_hdr; 156456055d3aSAmit Arora 156556055d3aSAmit Arora while (ex < EXT_LAST_EXTENT(eh)) { 156656055d3aSAmit Arora if (!ext4_can_extents_be_merged(inode, ex, ex + 1)) 156756055d3aSAmit Arora break; 156856055d3aSAmit Arora /* merge with next extent!
*/ 156956055d3aSAmit Arora if (ext4_ext_is_uninitialized(ex)) 157056055d3aSAmit Arora uninitialized = 1; 157156055d3aSAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 157256055d3aSAmit Arora + ext4_ext_get_actual_len(ex + 1)); 157356055d3aSAmit Arora if (uninitialized) 157456055d3aSAmit Arora ext4_ext_mark_uninitialized(ex); 157556055d3aSAmit Arora 157656055d3aSAmit Arora if (ex + 1 < EXT_LAST_EXTENT(eh)) { 157756055d3aSAmit Arora len = (EXT_LAST_EXTENT(eh) - ex - 1) 157856055d3aSAmit Arora * sizeof(struct ext4_extent); 157956055d3aSAmit Arora memmove(ex + 1, ex + 2, len); 158056055d3aSAmit Arora } 1581e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, -1); 158256055d3aSAmit Arora merge_done = 1; 158356055d3aSAmit Arora WARN_ON(eh->eh_entries == 0); 158456055d3aSAmit Arora if (!eh->eh_entries) 158524676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!"); 158656055d3aSAmit Arora } 158756055d3aSAmit Arora 158856055d3aSAmit Arora return merge_done; 158956055d3aSAmit Arora } 159056055d3aSAmit Arora 159156055d3aSAmit Arora /* 1592197217a5SYongqiang Yang * This function tries to merge the @ex extent to neighbours in the tree. 1593197217a5SYongqiang Yang * Returns 1 if @ex was merged with its right neighbour (tried only when no merge to the left happened), else 0. 1594197217a5SYongqiang Yang */ 1595197217a5SYongqiang Yang static int ext4_ext_try_to_merge(struct inode *inode, 1596197217a5SYongqiang Yang struct ext4_ext_path *path, 1597197217a5SYongqiang Yang struct ext4_extent *ex) { 1598197217a5SYongqiang Yang struct ext4_extent_header *eh; 1599197217a5SYongqiang Yang unsigned int depth; 1600197217a5SYongqiang Yang int merge_done = 0; 1601197217a5SYongqiang Yang int ret = 0; 1602197217a5SYongqiang Yang 1603197217a5SYongqiang Yang depth = ext_depth(inode); 1604197217a5SYongqiang Yang BUG_ON(path[depth].p_hdr == NULL); 1605197217a5SYongqiang Yang eh = path[depth].p_hdr; 1606197217a5SYongqiang Yang 1607197217a5SYongqiang Yang if (ex > EXT_FIRST_EXTENT(eh)) 1608197217a5SYongqiang Yang merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); 1609197217a5SYongqiang Yang 1610197217a5SYongqiang Yang if (!merge_done) 1611197217a5SYongqiang Yang ret = ext4_ext_try_to_merge_right(inode, path, ex); 1612197217a5SYongqiang Yang 1613197217a5SYongqiang Yang return ret; 1614197217a5SYongqiang Yang } 1615197217a5SYongqiang Yang 1616197217a5SYongqiang Yang /* 161725d14f98SAmit Arora * check if a portion of the "newext" extent overlaps with an 161825d14f98SAmit Arora * existing extent. 161925d14f98SAmit Arora * 162025d14f98SAmit Arora * If there is an overlap discovered, it updates the length of the newext 162125d14f98SAmit Arora * such that there will be no overlap, and then returns 1. 162225d14f98SAmit Arora * If there is no overlap found, it returns 0.
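 *
 * Example (hypothetical numbers): if newext covers logical blocks
 * 100..119 and an existing extent already starts at block 110, then
 * newext->ee_len is trimmed so that newext covers 100..109 and 1 is
 * returned.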
162325d14f98SAmit Arora */ 16244d33b1efSTheodore Ts'o static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, 16254d33b1efSTheodore Ts'o struct inode *inode, 162625d14f98SAmit Arora struct ext4_extent *newext, 162725d14f98SAmit Arora struct ext4_ext_path *path) 162825d14f98SAmit Arora { 1629725d26d3SAneesh Kumar K.V ext4_lblk_t b1, b2; 163025d14f98SAmit Arora unsigned int depth, len1; 163125d14f98SAmit Arora unsigned int ret = 0; 163225d14f98SAmit Arora 163325d14f98SAmit Arora b1 = le32_to_cpu(newext->ee_block); 1634a2df2a63SAmit Arora len1 = ext4_ext_get_actual_len(newext); 163525d14f98SAmit Arora depth = ext_depth(inode); 163625d14f98SAmit Arora if (!path[depth].p_ext) 163725d14f98SAmit Arora goto out; 163825d14f98SAmit Arora b2 = le32_to_cpu(path[depth].p_ext->ee_block); 16394d33b1efSTheodore Ts'o b2 &= ~(sbi->s_cluster_ratio - 1); 164025d14f98SAmit Arora 164125d14f98SAmit Arora /* 164225d14f98SAmit Arora * get the next allocated block if the extent in the path 164325d14f98SAmit Arora * is before the requested block(s) 164425d14f98SAmit Arora */ 164525d14f98SAmit Arora if (b2 < b1) { 164625d14f98SAmit Arora b2 = ext4_ext_next_allocated_block(path); 1647f17722f9SLukas Czerner if (b2 == EXT_MAX_BLOCKS) 164825d14f98SAmit Arora goto out; 16494d33b1efSTheodore Ts'o b2 &= ~(sbi->s_cluster_ratio - 1); 165025d14f98SAmit Arora } 165125d14f98SAmit Arora 1652725d26d3SAneesh Kumar K.V /* check for wrap through zero on extent logical start block */ 165325d14f98SAmit Arora if (b1 + len1 < b1) { 1654f17722f9SLukas Czerner len1 = EXT_MAX_BLOCKS - b1; 165525d14f98SAmit Arora newext->ee_len = cpu_to_le16(len1); 165625d14f98SAmit Arora ret = 1; 165725d14f98SAmit Arora } 165825d14f98SAmit Arora 165925d14f98SAmit Arora /* check for overlap */ 166025d14f98SAmit Arora if (b1 + len1 > b2) { 166125d14f98SAmit Arora newext->ee_len = cpu_to_le16(b2 - b1); 166225d14f98SAmit Arora ret = 1; 166325d14f98SAmit Arora } 166425d14f98SAmit Arora out: 166525d14f98SAmit Arora return ret; 166625d14f98SAmit Arora } 166725d14f98SAmit Arora 166825d14f98SAmit Arora /* 1669d0d856e8SRandy Dunlap * ext4_ext_insert_extent: 1670d0d856e8SRandy Dunlap * tries to merge the requested extent into the existing extent or 1671d0d856e8SRandy Dunlap * inserts the requested extent as a new one into the tree, 1672d0d856e8SRandy Dunlap * creating a new leaf in the no-space case.
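 *
 * A minimal caller sketch ("lblk", "pblk" and "len" are hypothetical
 * locals; the path comes from ext4_ext_find_extent() and error
 * handling is elided):
 *
 *	newext.ee_block = cpu_to_le32(lblk);
 *	ext4_ext_store_pblock(&newext, pblk);
 *	newext.ee_len = cpu_to_le16(len);
 *	err = ext4_ext_insert_extent(handle, inode, path, &newext, 0);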
1673a86c6181SAlex Tomas */ 1674a86c6181SAlex Tomas int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1675a86c6181SAlex Tomas struct ext4_ext_path *path, 16760031462bSMingming Cao struct ext4_extent *newext, int flag) 1677a86c6181SAlex Tomas { 1678a86c6181SAlex Tomas struct ext4_extent_header *eh; 1679a86c6181SAlex Tomas struct ext4_extent *ex, *fex; 1680a86c6181SAlex Tomas struct ext4_extent *nearex; /* nearest extent */ 1681a86c6181SAlex Tomas struct ext4_ext_path *npath = NULL; 1682725d26d3SAneesh Kumar K.V int depth, len, err; 1683725d26d3SAneesh Kumar K.V ext4_lblk_t next; 1684a2df2a63SAmit Arora unsigned uninitialized = 0; 168555f020dbSAllison Henderson int flags = 0; 1686a86c6181SAlex Tomas 1687273df556SFrank Mayhar if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1688273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1689273df556SFrank Mayhar return -EIO; 1690273df556SFrank Mayhar } 1691a86c6181SAlex Tomas depth = ext_depth(inode); 1692a86c6181SAlex Tomas ex = path[depth].p_ext; 1693273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1694273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1695273df556SFrank Mayhar return -EIO; 1696273df556SFrank Mayhar } 1697a86c6181SAlex Tomas 1698a86c6181SAlex Tomas /* try to insert block into found extent and return */ 1699744692dcSJiaying Zhang if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO) 17000031462bSMingming Cao && ext4_can_extents_be_merged(inode, ex, newext)) { 1701553f9008SMingming ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", 1702553f9008SMingming ext4_ext_is_uninitialized(newext), 1703a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 1704a86c6181SAlex Tomas le32_to_cpu(ex->ee_block), 1705553f9008SMingming ext4_ext_is_uninitialized(ex), 1706bf89d16fSTheodore Ts'o ext4_ext_get_actual_len(ex), 1707bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 17087e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 17097e028976SAvantika Mathur if (err) 1710a86c6181SAlex Tomas return err; 1711a2df2a63SAmit Arora 1712a2df2a63SAmit Arora /* 1713a2df2a63SAmit Arora * ext4_can_extents_be_merged should have checked that either 1714a2df2a63SAmit Arora * both extents are uninitialized, or both aren't. Thus we 1715a2df2a63SAmit Arora * need to check only one of them here. 1716a2df2a63SAmit Arora */ 1717a2df2a63SAmit Arora if (ext4_ext_is_uninitialized(ex)) 1718a2df2a63SAmit Arora uninitialized = 1; 1719a2df2a63SAmit Arora ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex) 1720a2df2a63SAmit Arora + ext4_ext_get_actual_len(newext)); 1721a2df2a63SAmit Arora if (uninitialized) 1722a2df2a63SAmit Arora ext4_ext_mark_uninitialized(ex); 1723a86c6181SAlex Tomas eh = path[depth].p_hdr; 1724a86c6181SAlex Tomas nearex = ex; 1725a86c6181SAlex Tomas goto merge; 1726a86c6181SAlex Tomas } 1727a86c6181SAlex Tomas 1728a86c6181SAlex Tomas depth = ext_depth(inode); 1729a86c6181SAlex Tomas eh = path[depth].p_hdr; 1730a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) 1731a86c6181SAlex Tomas goto has_space; 1732a86c6181SAlex Tomas 1733a86c6181SAlex Tomas /* probably next leaf has space for us? 
*/ 1734a86c6181SAlex Tomas fex = EXT_LAST_EXTENT(eh); 1735598dbdf2SRobin Dong next = EXT_MAX_BLOCKS; 1736598dbdf2SRobin Dong if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)) 17375718789dSRobin Dong next = ext4_ext_next_leaf_block(path); 1738598dbdf2SRobin Dong if (next != EXT_MAX_BLOCKS) { 1739a86c6181SAlex Tomas ext_debug("next leaf block - %d\n", next); 1740a86c6181SAlex Tomas BUG_ON(npath != NULL); 1741a86c6181SAlex Tomas npath = ext4_ext_find_extent(inode, next, NULL); 1742a86c6181SAlex Tomas if (IS_ERR(npath)) 1743a86c6181SAlex Tomas return PTR_ERR(npath); 1744a86c6181SAlex Tomas BUG_ON(npath->p_depth != path->p_depth); 1745a86c6181SAlex Tomas eh = npath[depth].p_hdr; 1746a86c6181SAlex Tomas if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) { 174725985edcSLucas De Marchi ext_debug("next leaf isn't full(%d)\n", 1748a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries)); 1749a86c6181SAlex Tomas path = npath; 1750ffb505ffSRobin Dong goto has_space; 1751a86c6181SAlex Tomas } 1752a86c6181SAlex Tomas ext_debug("next leaf has no free space(%d,%d)\n", 1753a86c6181SAlex Tomas le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 1754a86c6181SAlex Tomas } 1755a86c6181SAlex Tomas 1756a86c6181SAlex Tomas /* 1757d0d856e8SRandy Dunlap * There is no free space in the found leaf. 1758d0d856e8SRandy Dunlap * We're gonna add a new leaf in the tree. 1759a86c6181SAlex Tomas */ 176055f020dbSAllison Henderson if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) 176155f020dbSAllison Henderson flags = EXT4_MB_USE_ROOT_BLOCKS; 176255f020dbSAllison Henderson err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext); 1763a86c6181SAlex Tomas if (err) 1764a86c6181SAlex Tomas goto cleanup; 1765a86c6181SAlex Tomas depth = ext_depth(inode); 1766a86c6181SAlex Tomas eh = path[depth].p_hdr; 1767a86c6181SAlex Tomas 1768a86c6181SAlex Tomas has_space: 1769a86c6181SAlex Tomas nearex = path[depth].p_ext; 1770a86c6181SAlex Tomas 17717e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path + depth); 17727e028976SAvantika Mathur if (err) 1773a86c6181SAlex Tomas goto cleanup; 1774a86c6181SAlex Tomas 1775a86c6181SAlex Tomas if (!nearex) { 1776a86c6181SAlex Tomas /* there is no extent in this leaf, create first one */ 1777553f9008SMingming ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n", 1778a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 1779bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 1780553f9008SMingming ext4_ext_is_uninitialized(newext), 1781a2df2a63SAmit Arora ext4_ext_get_actual_len(newext)); 1782a86c6181SAlex Tomas path[depth].p_ext = EXT_FIRST_EXTENT(eh); 1783a86c6181SAlex Tomas } else if (le32_to_cpu(newext->ee_block) 1784a86c6181SAlex Tomas > le32_to_cpu(nearex->ee_block)) { 1785a86c6181SAlex Tomas /* BUG_ON(newext->ee_block == nearex->ee_block); */ 1786a86c6181SAlex Tomas if (nearex != EXT_LAST_EXTENT(eh)) { 1787a86c6181SAlex Tomas len = EXT_MAX_EXTENT(eh) - nearex; 1788a86c6181SAlex Tomas len = (len - 1) * sizeof(struct ext4_extent); 1789a86c6181SAlex Tomas len = len < 0 ? 
0 : len; 1790553f9008SMingming ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, " 1791a86c6181SAlex Tomas "move %d from 0x%p to 0x%p\n", 1792a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 1793bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 1794553f9008SMingming ext4_ext_is_uninitialized(newext), 1795a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 1796a86c6181SAlex Tomas nearex, len, nearex + 1, nearex + 2); 1797a86c6181SAlex Tomas memmove(nearex + 2, nearex + 1, len); 1798a86c6181SAlex Tomas } 1799a86c6181SAlex Tomas path[depth].p_ext = nearex + 1; 1800a86c6181SAlex Tomas } else { 1801a86c6181SAlex Tomas BUG_ON(newext->ee_block == nearex->ee_block); 1802a86c6181SAlex Tomas len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent); 1803a86c6181SAlex Tomas len = len < 0 ? 0 : len; 1804553f9008SMingming ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, " 1805a86c6181SAlex Tomas "move %d from 0x%p to 0x%p\n", 1806a86c6181SAlex Tomas le32_to_cpu(newext->ee_block), 1807bf89d16fSTheodore Ts'o ext4_ext_pblock(newext), 1808553f9008SMingming ext4_ext_is_uninitialized(newext), 1809a2df2a63SAmit Arora ext4_ext_get_actual_len(newext), 18100737964bSRobin Dong nearex, len, nearex, nearex + 1); 1811a86c6181SAlex Tomas memmove(nearex + 1, nearex, len); 1812a86c6181SAlex Tomas path[depth].p_ext = nearex; 1813a86c6181SAlex Tomas } 1814a86c6181SAlex Tomas 1815e8546d06SMarcin Slusarz le16_add_cpu(&eh->eh_entries, 1); 1816a86c6181SAlex Tomas nearex = path[depth].p_ext; 1817a86c6181SAlex Tomas nearex->ee_block = newext->ee_block; 1818bf89d16fSTheodore Ts'o ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext)); 1819a86c6181SAlex Tomas nearex->ee_len = newext->ee_len; 1820a86c6181SAlex Tomas 1821a86c6181SAlex Tomas merge: 1822a86c6181SAlex Tomas /* try to merge extents to the right */ 1823744692dcSJiaying Zhang if (!(flag & EXT4_GET_BLOCKS_PRE_IO)) 182456055d3aSAmit Arora ext4_ext_try_to_merge(inode, path, nearex); 1825a86c6181SAlex Tomas 1826a86c6181SAlex Tomas /* try to merge extents to the left */ 1827a86c6181SAlex Tomas 1828a86c6181SAlex Tomas /* time to correct all indexes above */ 1829a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 1830a86c6181SAlex Tomas if (err) 1831a86c6181SAlex Tomas goto cleanup; 1832a86c6181SAlex Tomas 1833a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path + depth); 1834a86c6181SAlex Tomas 1835a86c6181SAlex Tomas cleanup: 1836a86c6181SAlex Tomas if (npath) { 1837a86c6181SAlex Tomas ext4_ext_drop_refs(npath); 1838a86c6181SAlex Tomas kfree(npath); 1839a86c6181SAlex Tomas } 1840a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 1841a86c6181SAlex Tomas return err; 1842a86c6181SAlex Tomas } 1843a86c6181SAlex Tomas 18441f109d5aSTheodore Ts'o static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, 18456873fa0dSEric Sandeen ext4_lblk_t num, ext_prepare_callback func, 18466873fa0dSEric Sandeen void *cbdata) 18476873fa0dSEric Sandeen { 18486873fa0dSEric Sandeen struct ext4_ext_path *path = NULL; 18496873fa0dSEric Sandeen struct ext4_ext_cache cbex; 18506873fa0dSEric Sandeen struct ext4_extent *ex; 18516873fa0dSEric Sandeen ext4_lblk_t next, start = 0, end = 0; 18526873fa0dSEric Sandeen ext4_lblk_t last = block + num; 18536873fa0dSEric Sandeen int depth, exists, err = 0; 18546873fa0dSEric Sandeen 18556873fa0dSEric Sandeen BUG_ON(func == NULL); 18566873fa0dSEric Sandeen BUG_ON(inode == NULL); 18576873fa0dSEric Sandeen 1858f17722f9SLukas Czerner while (block < last && block != EXT_MAX_BLOCKS) { 18596873fa0dSEric Sandeen 
num = last - block; 18606873fa0dSEric Sandeen /* find extent for this block */ 1861fab3a549STheodore Ts'o down_read(&EXT4_I(inode)->i_data_sem); 18626873fa0dSEric Sandeen path = ext4_ext_find_extent(inode, block, path); 1863fab3a549STheodore Ts'o up_read(&EXT4_I(inode)->i_data_sem); 18646873fa0dSEric Sandeen if (IS_ERR(path)) { 18656873fa0dSEric Sandeen err = PTR_ERR(path); 18666873fa0dSEric Sandeen path = NULL; 18676873fa0dSEric Sandeen break; 18686873fa0dSEric Sandeen } 18696873fa0dSEric Sandeen 18706873fa0dSEric Sandeen depth = ext_depth(inode); 1871273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 1872273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 1873273df556SFrank Mayhar err = -EIO; 1874273df556SFrank Mayhar break; 1875273df556SFrank Mayhar } 18766873fa0dSEric Sandeen ex = path[depth].p_ext; 18776873fa0dSEric Sandeen next = ext4_ext_next_allocated_block(path); 18786873fa0dSEric Sandeen 18796873fa0dSEric Sandeen exists = 0; 18806873fa0dSEric Sandeen if (!ex) { 18816873fa0dSEric Sandeen /* there is no extent yet, so try to allocate 18826873fa0dSEric Sandeen * all requested space */ 18836873fa0dSEric Sandeen start = block; 18846873fa0dSEric Sandeen end = block + num; 18856873fa0dSEric Sandeen } else if (le32_to_cpu(ex->ee_block) > block) { 18866873fa0dSEric Sandeen /* need to allocate space before found extent */ 18876873fa0dSEric Sandeen start = block; 18886873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block); 18896873fa0dSEric Sandeen if (block + num < end) 18906873fa0dSEric Sandeen end = block + num; 18916873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block) 18926873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex)) { 18936873fa0dSEric Sandeen /* need to allocate space after found extent */ 18946873fa0dSEric Sandeen start = block; 18956873fa0dSEric Sandeen end = block + num; 18966873fa0dSEric Sandeen if (end >= next) 18976873fa0dSEric Sandeen end = next; 18986873fa0dSEric Sandeen } else if (block >= le32_to_cpu(ex->ee_block)) { 18996873fa0dSEric Sandeen /* 19006873fa0dSEric Sandeen * some part of requested space is covered 19016873fa0dSEric Sandeen * by found extent 19026873fa0dSEric Sandeen */ 19036873fa0dSEric Sandeen start = block; 19046873fa0dSEric Sandeen end = le32_to_cpu(ex->ee_block) 19056873fa0dSEric Sandeen + ext4_ext_get_actual_len(ex); 19066873fa0dSEric Sandeen if (block + num < end) 19076873fa0dSEric Sandeen end = block + num; 19086873fa0dSEric Sandeen exists = 1; 19096873fa0dSEric Sandeen } else { 19106873fa0dSEric Sandeen BUG(); 19116873fa0dSEric Sandeen } 19126873fa0dSEric Sandeen BUG_ON(end <= start); 19136873fa0dSEric Sandeen 19146873fa0dSEric Sandeen if (!exists) { 19156873fa0dSEric Sandeen cbex.ec_block = start; 19166873fa0dSEric Sandeen cbex.ec_len = end - start; 19176873fa0dSEric Sandeen cbex.ec_start = 0; 19186873fa0dSEric Sandeen } else { 19196873fa0dSEric Sandeen cbex.ec_block = le32_to_cpu(ex->ee_block); 19206873fa0dSEric Sandeen cbex.ec_len = ext4_ext_get_actual_len(ex); 1921bf89d16fSTheodore Ts'o cbex.ec_start = ext4_ext_pblock(ex); 19226873fa0dSEric Sandeen } 19236873fa0dSEric Sandeen 1924273df556SFrank Mayhar if (unlikely(cbex.ec_len == 0)) { 1925273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "cbex.ec_len == 0"); 1926273df556SFrank Mayhar err = -EIO; 1927273df556SFrank Mayhar break; 1928273df556SFrank Mayhar } 1929c03f8aa9SLukas Czerner err = func(inode, next, &cbex, ex, cbdata); 19306873fa0dSEric Sandeen ext4_ext_drop_refs(path); 19316873fa0dSEric Sandeen 19326873fa0dSEric Sandeen if (err < 0) 
19336873fa0dSEric Sandeen break; 19346873fa0dSEric Sandeen 19356873fa0dSEric Sandeen if (err == EXT_REPEAT) 19366873fa0dSEric Sandeen continue; 19376873fa0dSEric Sandeen else if (err == EXT_BREAK) { 19386873fa0dSEric Sandeen err = 0; 19396873fa0dSEric Sandeen break; 19406873fa0dSEric Sandeen } 19416873fa0dSEric Sandeen 19426873fa0dSEric Sandeen if (ext_depth(inode) != depth) { 19436873fa0dSEric Sandeen /* depth was changed. we have to realloc path */ 19446873fa0dSEric Sandeen kfree(path); 19456873fa0dSEric Sandeen path = NULL; 19466873fa0dSEric Sandeen } 19476873fa0dSEric Sandeen 19486873fa0dSEric Sandeen block = cbex.ec_block + cbex.ec_len; 19496873fa0dSEric Sandeen } 19506873fa0dSEric Sandeen 19516873fa0dSEric Sandeen if (path) { 19526873fa0dSEric Sandeen ext4_ext_drop_refs(path); 19536873fa0dSEric Sandeen kfree(path); 19546873fa0dSEric Sandeen } 19556873fa0dSEric Sandeen 19566873fa0dSEric Sandeen return err; 19576873fa0dSEric Sandeen } 19586873fa0dSEric Sandeen 195909b88252SAvantika Mathur static void 1960725d26d3SAneesh Kumar K.V ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, 1961b05e6ae5STheodore Ts'o __u32 len, ext4_fsblk_t start) 1962a86c6181SAlex Tomas { 1963a86c6181SAlex Tomas struct ext4_ext_cache *cex; 1964a86c6181SAlex Tomas BUG_ON(len == 0); 19652ec0ae3aSTheodore Ts'o spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1966d8990240SAditya Kali trace_ext4_ext_put_in_cache(inode, block, len, start); 1967a86c6181SAlex Tomas cex = &EXT4_I(inode)->i_cached_extent; 1968a86c6181SAlex Tomas cex->ec_block = block; 1969a86c6181SAlex Tomas cex->ec_len = len; 1970a86c6181SAlex Tomas cex->ec_start = start; 19712ec0ae3aSTheodore Ts'o spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1972a86c6181SAlex Tomas } 1973a86c6181SAlex Tomas 1974a86c6181SAlex Tomas /* 1975d0d856e8SRandy Dunlap * ext4_ext_put_gap_in_cache: 1976d0d856e8SRandy Dunlap * calculate boundaries of the gap that the requested block fits into 1977a86c6181SAlex Tomas * and cache this gap 1978a86c6181SAlex Tomas */ 197909b88252SAvantika Mathur static void 1980a86c6181SAlex Tomas ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, 1981725d26d3SAneesh Kumar K.V ext4_lblk_t block) 1982a86c6181SAlex Tomas { 1983a86c6181SAlex Tomas int depth = ext_depth(inode); 1984725d26d3SAneesh Kumar K.V unsigned long len; 1985725d26d3SAneesh Kumar K.V ext4_lblk_t lblock; 1986a86c6181SAlex Tomas struct ext4_extent *ex; 1987a86c6181SAlex Tomas 1988a86c6181SAlex Tomas ex = path[depth].p_ext; 1989a86c6181SAlex Tomas if (ex == NULL) { 1990a86c6181SAlex Tomas /* there is no extent yet, so gap is [0;-] */ 1991a86c6181SAlex Tomas lblock = 0; 1992f17722f9SLukas Czerner len = EXT_MAX_BLOCKS; 1993a86c6181SAlex Tomas ext_debug("cache gap(whole file):"); 1994a86c6181SAlex Tomas } else if (block < le32_to_cpu(ex->ee_block)) { 1995a86c6181SAlex Tomas lblock = block; 1996a86c6181SAlex Tomas len = le32_to_cpu(ex->ee_block) - block; 1997bba90743SEric Sandeen ext_debug("cache gap(before): %u [%u:%u]", 1998bba90743SEric Sandeen block, 1999bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2000bba90743SEric Sandeen ext4_ext_get_actual_len(ex)); 2001a86c6181SAlex Tomas } else if (block >= le32_to_cpu(ex->ee_block) 2002a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex)) { 2003725d26d3SAneesh Kumar K.V ext4_lblk_t next; 2004a86c6181SAlex Tomas lblock = le32_to_cpu(ex->ee_block) 2005a2df2a63SAmit Arora + ext4_ext_get_actual_len(ex); 2006725d26d3SAneesh Kumar K.V 2007725d26d3SAneesh Kumar K.V next = 
ext4_ext_next_allocated_block(path); 2008bba90743SEric Sandeen ext_debug("cache gap(after): [%u:%u] %u", 2009bba90743SEric Sandeen le32_to_cpu(ex->ee_block), 2010bba90743SEric Sandeen ext4_ext_get_actual_len(ex), 2011bba90743SEric Sandeen block); 2012725d26d3SAneesh Kumar K.V BUG_ON(next == lblock); 2013725d26d3SAneesh Kumar K.V len = next - lblock; 2014a86c6181SAlex Tomas } else { 2015a86c6181SAlex Tomas lblock = len = 0; 2016a86c6181SAlex Tomas BUG(); 2017a86c6181SAlex Tomas } 2018a86c6181SAlex Tomas 2019bba90743SEric Sandeen ext_debug(" -> %u:%lu\n", lblock, len); 2020b05e6ae5STheodore Ts'o ext4_ext_put_in_cache(inode, lblock, len, 0); 2021a86c6181SAlex Tomas } 2022a86c6181SAlex Tomas 2023b05e6ae5STheodore Ts'o /* 2024b7ca1e8eSRobin Dong * ext4_ext_check_cache() 2025a4bb6b64SAllison Henderson * Checks to see if the given block is in the cache. 2026a4bb6b64SAllison Henderson * If it is, the cached extent is stored in the given 2027a4bb6b64SAllison Henderson * cache extent pointer. If the cached extent is a hole, 2028a4bb6b64SAllison Henderson * this routine should be used instead of 2029a4bb6b64SAllison Henderson * ext4_ext_in_cache if the calling function needs to 2030a4bb6b64SAllison Henderson * know the size of the hole. 2031a4bb6b64SAllison Henderson * 2032a4bb6b64SAllison Henderson * @inode: The file's inode 2033a4bb6b64SAllison Henderson * @block: The block to look for in the cache 2034a4bb6b64SAllison Henderson * @ex: Pointer where the cached extent will be stored 2035a4bb6b64SAllison Henderson * if it contains block 2036a4bb6b64SAllison Henderson * 2037b05e6ae5STheodore Ts'o * Return 0 if cache is invalid; 1 if the cache is valid 2038b05e6ae5STheodore Ts'o */ 2039a4bb6b64SAllison Henderson static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block, 2040a4bb6b64SAllison Henderson struct ext4_ext_cache *ex) { 2041a86c6181SAlex Tomas struct ext4_ext_cache *cex; 204277f4135fSVivek Haldar struct ext4_sb_info *sbi; 2043b05e6ae5STheodore Ts'o int ret = 0; 2044a86c6181SAlex Tomas 20452ec0ae3aSTheodore Ts'o /* 20462ec0ae3aSTheodore Ts'o * We borrow i_block_reservation_lock to protect i_cached_extent 20472ec0ae3aSTheodore Ts'o */ 20482ec0ae3aSTheodore Ts'o spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 2049a86c6181SAlex Tomas cex = &EXT4_I(inode)->i_cached_extent; 205077f4135fSVivek Haldar sbi = EXT4_SB(inode->i_sb); 2051a86c6181SAlex Tomas 2052a86c6181SAlex Tomas /* does the cache hold valid data?
*/ 2053b05e6ae5STheodore Ts'o if (cex->ec_len == 0) 20542ec0ae3aSTheodore Ts'o goto errout; 2055a86c6181SAlex Tomas 2056731eb1a0SAkinobu Mita if (in_range(block, cex->ec_block, cex->ec_len)) { 2057a4bb6b64SAllison Henderson memcpy(ex, cex, sizeof(struct ext4_ext_cache)); 2058bba90743SEric Sandeen ext_debug("%u cached by %u:%u:%llu\n", 2059bba90743SEric Sandeen block, 2060bba90743SEric Sandeen cex->ec_block, cex->ec_len, cex->ec_start); 2061b05e6ae5STheodore Ts'o ret = 1; 2062a86c6181SAlex Tomas } 20632ec0ae3aSTheodore Ts'o errout: 206477f4135fSVivek Haldar if (!ret) 206577f4135fSVivek Haldar sbi->extent_cache_misses++; 206677f4135fSVivek Haldar else 206777f4135fSVivek Haldar sbi->extent_cache_hits++; 2068d8990240SAditya Kali trace_ext4_ext_in_cache(inode, block, ret); 20692ec0ae3aSTheodore Ts'o spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 20702ec0ae3aSTheodore Ts'o return ret; 2071a86c6181SAlex Tomas } 2072a86c6181SAlex Tomas 2073a86c6181SAlex Tomas /* 2074a4bb6b64SAllison Henderson * ext4_ext_in_cache() 2075a4bb6b64SAllison Henderson * Checks to see if the given block is in the cache. 2076a4bb6b64SAllison Henderson * If it is, the cached extent is stored in the given 2077a4bb6b64SAllison Henderson * extent pointer. 2078a4bb6b64SAllison Henderson * 2079a4bb6b64SAllison Henderson * @inode: The file's inode 2080a4bb6b64SAllison Henderson * @block: The block to look for in the cache 2081a4bb6b64SAllison Henderson * @ex: Pointer where the cached extent will be stored 2082a4bb6b64SAllison Henderson * if it contains block 2083a4bb6b64SAllison Henderson * 2084a4bb6b64SAllison Henderson * Return 0 if cache is invalid; 1 if the cache is valid 2085a4bb6b64SAllison Henderson */ 2086a4bb6b64SAllison Henderson static int 2087a4bb6b64SAllison Henderson ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, 2088a4bb6b64SAllison Henderson struct ext4_extent *ex) 2089a4bb6b64SAllison Henderson { 2090a4bb6b64SAllison Henderson struct ext4_ext_cache cex; 2091a4bb6b64SAllison Henderson int ret = 0; 2092a4bb6b64SAllison Henderson 2093a4bb6b64SAllison Henderson if (ext4_ext_check_cache(inode, block, &cex)) { 2094a4bb6b64SAllison Henderson ex->ee_block = cpu_to_le32(cex.ec_block); 2095a4bb6b64SAllison Henderson ext4_ext_store_pblock(ex, cex.ec_start); 2096a4bb6b64SAllison Henderson ex->ee_len = cpu_to_le16(cex.ec_len); 2097a4bb6b64SAllison Henderson ret = 1; 2098a4bb6b64SAllison Henderson } 2099a4bb6b64SAllison Henderson 2100a4bb6b64SAllison Henderson return ret; 2101a4bb6b64SAllison Henderson } 2102a4bb6b64SAllison Henderson 2103a4bb6b64SAllison Henderson 2104a4bb6b64SAllison Henderson /* 2105d0d856e8SRandy Dunlap * ext4_ext_rm_idx: 2106d0d856e8SRandy Dunlap * removes the index from the index block.
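 *
 * For example (made-up layout): if an index block holds the entries
 * {A, B, C} and B is being removed, C is memmove()d over B,
 * eh_entries drops from 3 to 2, and the block that B pointed to is
 * released through ext4_free_blocks().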
2107a86c6181SAlex Tomas */ 21081d03ec98SAneesh Kumar K.V static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode, 2109a86c6181SAlex Tomas struct ext4_ext_path *path) 2110a86c6181SAlex Tomas { 2111a86c6181SAlex Tomas int err; 2112f65e6fbaSAlex Tomas ext4_fsblk_t leaf; 2113a86c6181SAlex Tomas 2114a86c6181SAlex Tomas /* free index block */ 2115a86c6181SAlex Tomas path--; 2116bf89d16fSTheodore Ts'o leaf = ext4_idx_pblock(path->p_idx); 2117273df556SFrank Mayhar if (unlikely(path->p_hdr->eh_entries == 0)) { 2118273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0"); 2119273df556SFrank Mayhar return -EIO; 2120273df556SFrank Mayhar } 21217e028976SAvantika Mathur err = ext4_ext_get_access(handle, inode, path); 21227e028976SAvantika Mathur if (err) 2123a86c6181SAlex Tomas return err; 21240e1147b0SRobin Dong 21250e1147b0SRobin Dong if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) { 21260e1147b0SRobin Dong int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx; 21270e1147b0SRobin Dong len *= sizeof(struct ext4_extent_idx); 21280e1147b0SRobin Dong memmove(path->p_idx, path->p_idx + 1, len); 21290e1147b0SRobin Dong } 21300e1147b0SRobin Dong 2131e8546d06SMarcin Slusarz le16_add_cpu(&path->p_hdr->eh_entries, -1); 21327e028976SAvantika Mathur err = ext4_ext_dirty(handle, inode, path); 21337e028976SAvantika Mathur if (err) 2134a86c6181SAlex Tomas return err; 21352ae02107SMingming Cao ext_debug("index is empty, remove it, free block %llu\n", leaf); 2136d8990240SAditya Kali trace_ext4_ext_rm_idx(inode, leaf); 2137d8990240SAditya Kali 21387dc57615SPeter Huewe ext4_free_blocks(handle, inode, NULL, leaf, 1, 2139e6362609STheodore Ts'o EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2140a86c6181SAlex Tomas return err; 2141a86c6181SAlex Tomas } 2142a86c6181SAlex Tomas 2143a86c6181SAlex Tomas /* 2144ee12b630SMingming Cao * ext4_ext_calc_credits_for_single_extent: 2145ee12b630SMingming Cao * This routine returns the maximum number of credits needed to insert an extent 2146ee12b630SMingming Cao * into the extent tree. 2147ee12b630SMingming Cao * When passing the actual path, the caller should calculate the credits 2148ee12b630SMingming Cao * under i_data_sem. 2149a86c6181SAlex Tomas */ 2150525f4ed8SMingming Cao int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks, 2151a86c6181SAlex Tomas struct ext4_ext_path *path) 2152a86c6181SAlex Tomas { 2153a86c6181SAlex Tomas if (path) { 2154ee12b630SMingming Cao int depth = ext_depth(inode); 2155f3bd1f3fSMingming Cao int ret = 0; 2156ee12b630SMingming Cao 2157a86c6181SAlex Tomas /* probably there is space in the leaf? */ 2158a86c6181SAlex Tomas if (le16_to_cpu(path[depth].p_hdr->eh_entries) 2159ee12b630SMingming Cao < le16_to_cpu(path[depth].p_hdr->eh_max)) { 2160ee12b630SMingming Cao 2161ee12b630SMingming Cao /* 2162ee12b630SMingming Cao * There is some space in the leaf, so no 2163ee12b630SMingming Cao * need to account for the leaf block credit 2164ee12b630SMingming Cao * 2165ee12b630SMingming Cao * bitmaps and block group descriptor blocks 2166df3ab170STao Ma * and other metadata blocks still need to be 2167ee12b630SMingming Cao * accounted for.
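 *
 * In that case the estimate below reduces to
 * 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb): one bitmap plus one block
 * group descriptor plus the usual metadata overhead. The full
 * tree-split cost is only charged via ext4_chunk_trans_blocks().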
2168ee12b630SMingming Cao */ 2169525f4ed8SMingming Cao /* 1 bitmap, 1 block group descriptor */ 2170ee12b630SMingming Cao ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb); 21715887e98bSAneesh Kumar K.V return ret; 2172ee12b630SMingming Cao } 2173ee12b630SMingming Cao } 2174ee12b630SMingming Cao 2175525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, nrblocks); 2176a86c6181SAlex Tomas } 2177a86c6181SAlex Tomas 2178a86c6181SAlex Tomas /* 2179ee12b630SMingming Cao * How many index/leaf blocks need to be changed/allocated to modify nrblocks? 2180ee12b630SMingming Cao * 2181ee12b630SMingming Cao * if nrblocks fit in a single extent (chunk flag is 1), then 2182ee12b630SMingming Cao * in the worst case, each tree level's index/leaf needs to be changed; 2183ee12b630SMingming Cao * if the tree splits due to inserting a new extent, then the old tree's 2184ee12b630SMingming Cao * index/leaf need to be updated too 2185ee12b630SMingming Cao * 2186ee12b630SMingming Cao * If the nrblocks are discontiguous, they could cause 2187ee12b630SMingming Cao * the whole tree to split more than once, but this is really rare. 2188a86c6181SAlex Tomas */ 2189525f4ed8SMingming Cao int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 2190ee12b630SMingming Cao { 2191ee12b630SMingming Cao int index; 2192ee12b630SMingming Cao int depth = ext_depth(inode); 2193a86c6181SAlex Tomas 2194ee12b630SMingming Cao if (chunk) 2195ee12b630SMingming Cao index = depth * 2; 2196ee12b630SMingming Cao else 2197ee12b630SMingming Cao index = depth * 3; 2198a86c6181SAlex Tomas 2199ee12b630SMingming Cao return index; 2200a86c6181SAlex Tomas } 2201a86c6181SAlex Tomas 2202a86c6181SAlex Tomas static int ext4_remove_blocks(handle_t *handle, struct inode *inode, 2203a86c6181SAlex Tomas struct ext4_extent *ex, 22040aa06000STheodore Ts'o ext4_fsblk_t *partial_cluster, 2205725d26d3SAneesh Kumar K.V ext4_lblk_t from, ext4_lblk_t to) 2206a86c6181SAlex Tomas { 22070aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2208a2df2a63SAmit Arora unsigned short ee_len = ext4_ext_get_actual_len(ex); 22090aa06000STheodore Ts'o ext4_fsblk_t pblk; 2210e6362609STheodore Ts'o int flags = EXT4_FREE_BLOCKS_FORGET; 2211a86c6181SAlex Tomas 2212c9de560dSAlex Tomas if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 2213e6362609STheodore Ts'o flags |= EXT4_FREE_BLOCKS_METADATA; 22140aa06000STheodore Ts'o /* 22150aa06000STheodore Ts'o * For bigalloc file systems, we never free a partial cluster 22160aa06000STheodore Ts'o * at the beginning of the extent. Instead, we make a note 22170aa06000STheodore Ts'o * that we tried freeing the cluster, and check to see if we 22180aa06000STheodore Ts'o * need to free it on a subsequent call to ext4_remove_blocks, 22190aa06000STheodore Ts'o * or at the end of the ext4_truncate() operation. 22200aa06000STheodore Ts'o */ 22210aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 22220aa06000STheodore Ts'o 2223d8990240SAditya Kali trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster); 22240aa06000STheodore Ts'o /* 22250aa06000STheodore Ts'o * If we have a partial cluster, and it's different from the 22260aa06000STheodore Ts'o * cluster of the last block, we need to explicitly free the 22270aa06000STheodore Ts'o * partial cluster here.
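 *
 * For example (illustrative, s_cluster_ratio == 4): if a previous
 * call left *partial_cluster == 10 and the last block of the extent
 * being removed maps to cluster 12, cluster 10 can no longer be
 * completed by this extent, so it is freed below and
 * *partial_cluster is reset to 0.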
22280aa06000STheodore Ts'o */ 22290aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - 1; 22300aa06000STheodore Ts'o if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) { 22310aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 22320aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 22330aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 22340aa06000STheodore Ts'o *partial_cluster = 0; 22350aa06000STheodore Ts'o } 22360aa06000STheodore Ts'o 2237a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2238a86c6181SAlex Tomas { 2239a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2240a86c6181SAlex Tomas spin_lock(&sbi->s_ext_stats_lock); 2241a86c6181SAlex Tomas sbi->s_ext_blocks += ee_len; 2242a86c6181SAlex Tomas sbi->s_ext_extents++; 2243a86c6181SAlex Tomas if (ee_len < sbi->s_ext_min) 2244a86c6181SAlex Tomas sbi->s_ext_min = ee_len; 2245a86c6181SAlex Tomas if (ee_len > sbi->s_ext_max) 2246a86c6181SAlex Tomas sbi->s_ext_max = ee_len; 2247a86c6181SAlex Tomas if (ext_depth(inode) > sbi->s_depth_max) 2248a86c6181SAlex Tomas sbi->s_depth_max = ext_depth(inode); 2249a86c6181SAlex Tomas spin_unlock(&sbi->s_ext_stats_lock); 2250a86c6181SAlex Tomas } 2251a86c6181SAlex Tomas #endif 2252a86c6181SAlex Tomas if (from >= le32_to_cpu(ex->ee_block) 2253a2df2a63SAmit Arora && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2254a86c6181SAlex Tomas /* tail removal */ 2255725d26d3SAneesh Kumar K.V ext4_lblk_t num; 2256725d26d3SAneesh Kumar K.V 2257a2df2a63SAmit Arora num = le32_to_cpu(ex->ee_block) + ee_len - from; 22580aa06000STheodore Ts'o pblk = ext4_ext_pblock(ex) + ee_len - num; 22590aa06000STheodore Ts'o ext_debug("free last %u blocks starting %llu\n", num, pblk); 22600aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, pblk, num, flags); 22610aa06000STheodore Ts'o /* 22620aa06000STheodore Ts'o * If the block range to be freed didn't start at the 22630aa06000STheodore Ts'o * beginning of a cluster, and we removed the entire 22640aa06000STheodore Ts'o * extent, save the partial cluster here, since we 22650aa06000STheodore Ts'o * might need to delete if we determine that the 22660aa06000STheodore Ts'o * truncate operation has removed all of the blocks in 22670aa06000STheodore Ts'o * the cluster. 
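 *
 * For example (illustrative, s_cluster_ratio == 4): removing an
 * entire extent whose first block sits in the middle of cluster 7
 * leaves the head of cluster 7 possibly still in use, so 7 is
 * remembered in *partial_cluster instead of being freed right away.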
22680aa06000STheodore Ts'o */ 22690aa06000STheodore Ts'o if (pblk & (sbi->s_cluster_ratio - 1) && 22700aa06000STheodore Ts'o (ee_len == num)) 22710aa06000STheodore Ts'o *partial_cluster = EXT4_B2C(sbi, pblk); 22720aa06000STheodore Ts'o else 22730aa06000STheodore Ts'o *partial_cluster = 0; 2274a86c6181SAlex Tomas } else if (from == le32_to_cpu(ex->ee_block) 2275a2df2a63SAmit Arora && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2276d583fb87SAllison Henderson /* head removal */ 2277d583fb87SAllison Henderson ext4_lblk_t num; 2278d583fb87SAllison Henderson ext4_fsblk_t start; 2279d583fb87SAllison Henderson 2280d583fb87SAllison Henderson num = to - from; 2281d583fb87SAllison Henderson start = ext4_ext_pblock(ex); 2282d583fb87SAllison Henderson 2283d583fb87SAllison Henderson ext_debug("free first %u blocks starting %llu\n", num, start); 2284ee90d57eSH Hartley Sweeten ext4_free_blocks(handle, inode, NULL, start, num, flags); 2285d583fb87SAllison Henderson 2286a86c6181SAlex Tomas } else { 2287725d26d3SAneesh Kumar K.V printk(KERN_INFO "strange request: removal(2) " 2288725d26d3SAneesh Kumar K.V "%u-%u from %u:%u\n", 2289a2df2a63SAmit Arora from, to, le32_to_cpu(ex->ee_block), ee_len); 2290a86c6181SAlex Tomas } 2291a86c6181SAlex Tomas return 0; 2292a86c6181SAlex Tomas } 2293a86c6181SAlex Tomas 2294d583fb87SAllison Henderson 2295d583fb87SAllison Henderson /* 2296d583fb87SAllison Henderson * ext4_ext_rm_leaf() Removes the extents associated with the 2297d583fb87SAllison Henderson * blocks appearing between "start" and "end", and splits the extents 2298d583fb87SAllison Henderson * if "start" and "end" appear in the same extent 2299d583fb87SAllison Henderson * 2300d583fb87SAllison Henderson * @handle: The journal handle 2301d583fb87SAllison Henderson * @inode: The file's inode 2302d583fb87SAllison Henderson * @path: The path to the leaf 2303d583fb87SAllison Henderson * @start: The first block to remove 2304d583fb87SAllison Henderson * @end: The last block to remove 2305d583fb87SAllison Henderson */ 2306a86c6181SAlex Tomas static int 2307a86c6181SAlex Tomas ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, 23080aa06000STheodore Ts'o struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster, 23090aa06000STheodore Ts'o ext4_lblk_t start, ext4_lblk_t end) 2310a86c6181SAlex Tomas { 23110aa06000STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2312a86c6181SAlex Tomas int err = 0, correct_index = 0; 2313a86c6181SAlex Tomas int depth = ext_depth(inode), credits; 2314a86c6181SAlex Tomas struct ext4_extent_header *eh; 2315750c9c47SDmitry Monakhov ext4_lblk_t a, b; 2316725d26d3SAneesh Kumar K.V unsigned num; 2317725d26d3SAneesh Kumar K.V ext4_lblk_t ex_ee_block; 2318a86c6181SAlex Tomas unsigned short ex_ee_len; 2319a2df2a63SAmit Arora unsigned uninitialized = 0; 2320a86c6181SAlex Tomas struct ext4_extent *ex; 2321a86c6181SAlex Tomas 2322c29c0ae7SAlex Tomas /* the header must already have been checked in ext4_ext_remove_space() */ 2323725d26d3SAneesh Kumar K.V ext_debug("truncate since %u in leaf\n", start); 2324a86c6181SAlex Tomas if (!path[depth].p_hdr) 2325a86c6181SAlex Tomas path[depth].p_hdr = ext_block_hdr(path[depth].p_bh); 2326a86c6181SAlex Tomas eh = path[depth].p_hdr; 2327273df556SFrank Mayhar if (unlikely(path[depth].p_hdr == NULL)) { 2328273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth); 2329273df556SFrank Mayhar return -EIO; 2330273df556SFrank Mayhar } 2331a86c6181SAlex Tomas /* find where to start removing */ 2332a86c6181SAlex Tomas ex =
EXT_LAST_EXTENT(eh); 2333a86c6181SAlex Tomas 2334a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2335a2df2a63SAmit Arora ex_ee_len = ext4_ext_get_actual_len(ex); 2336a86c6181SAlex Tomas 2337d8990240SAditya Kali trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); 2338d8990240SAditya Kali 2339a86c6181SAlex Tomas while (ex >= EXT_FIRST_EXTENT(eh) && 2340a86c6181SAlex Tomas ex_ee_block + ex_ee_len > start) { 2341a41f2071SAneesh Kumar K.V 2342a41f2071SAneesh Kumar K.V if (ext4_ext_is_uninitialized(ex)) 2343a41f2071SAneesh Kumar K.V uninitialized = 1; 2344a41f2071SAneesh Kumar K.V else 2345a41f2071SAneesh Kumar K.V uninitialized = 0; 2346a41f2071SAneesh Kumar K.V 2347553f9008SMingming ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, 2348553f9008SMingming uninitialized, ex_ee_len); 2349a86c6181SAlex Tomas path[depth].p_ext = ex; 2350a86c6181SAlex Tomas 2351a86c6181SAlex Tomas a = ex_ee_block > start ? ex_ee_block : start; 2352d583fb87SAllison Henderson b = ex_ee_block+ex_ee_len - 1 < end ? 2353d583fb87SAllison Henderson ex_ee_block+ex_ee_len - 1 : end; 2354a86c6181SAlex Tomas 2355a86c6181SAlex Tomas ext_debug(" border %u:%u\n", a, b); 2356a86c6181SAlex Tomas 2357d583fb87SAllison Henderson /* If this extent is beyond the end of the hole, skip it */ 2358d583fb87SAllison Henderson if (end <= ex_ee_block) { 2359d583fb87SAllison Henderson ex--; 2360d583fb87SAllison Henderson ex_ee_block = le32_to_cpu(ex->ee_block); 2361d583fb87SAllison Henderson ex_ee_len = ext4_ext_get_actual_len(ex); 2362d583fb87SAllison Henderson continue; 2363750c9c47SDmitry Monakhov } else if (b != ex_ee_block + ex_ee_len - 1) { 2364750c9c47SDmitry Monakhov EXT4_ERROR_INODE(inode," bad truncate %u:%u\n", 2365d583fb87SAllison Henderson start, end); 2366d583fb87SAllison Henderson err = -EIO; 2367d583fb87SAllison Henderson goto out; 2368a86c6181SAlex Tomas } else if (a != ex_ee_block) { 2369a86c6181SAlex Tomas /* remove tail of the extent */ 2370750c9c47SDmitry Monakhov num = a - ex_ee_block; 2371a86c6181SAlex Tomas } else { 2372a86c6181SAlex Tomas /* remove whole extent: excellent! 
*/ 2373a86c6181SAlex Tomas num = 0; 2374d583fb87SAllison Henderson } 237534071da7STheodore Ts'o /* 237634071da7STheodore Ts'o * 3 for leaf, sb, and inode plus 2 (bmap and group 237734071da7STheodore Ts'o * descriptor) for each block group; assume two block 237834071da7STheodore Ts'o * groups plus ex_ee_len/blocks_per_block_group for 237934071da7STheodore Ts'o * the worst case 238034071da7STheodore Ts'o */ 238134071da7STheodore Ts'o credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb)); 2382a86c6181SAlex Tomas if (ex == EXT_FIRST_EXTENT(eh)) { 2383a86c6181SAlex Tomas correct_index = 1; 2384a86c6181SAlex Tomas credits += (ext_depth(inode)) + 1; 2385a86c6181SAlex Tomas } 23865aca07ebSDmitry Monakhov credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); 2387a86c6181SAlex Tomas 2388487caeefSJan Kara err = ext4_ext_truncate_extend_restart(handle, inode, credits); 23899102e4faSShen Feng if (err) 2390a86c6181SAlex Tomas goto out; 2391a86c6181SAlex Tomas 2392a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path + depth); 2393a86c6181SAlex Tomas if (err) 2394a86c6181SAlex Tomas goto out; 2395a86c6181SAlex Tomas 23960aa06000STheodore Ts'o err = ext4_remove_blocks(handle, inode, ex, partial_cluster, 23970aa06000STheodore Ts'o a, b); 2398a86c6181SAlex Tomas if (err) 2399a86c6181SAlex Tomas goto out; 2400a86c6181SAlex Tomas 2401750c9c47SDmitry Monakhov if (num == 0) 2402d0d856e8SRandy Dunlap /* this extent is removed; mark slot entirely unused */ 2403f65e6fbaSAlex Tomas ext4_ext_store_pblock(ex, 0); 2404a86c6181SAlex Tomas 2405a86c6181SAlex Tomas ex->ee_len = cpu_to_le16(num); 2406749269faSAmit Arora /* 2407749269faSAmit Arora * Do not mark uninitialized if all the blocks in the 2408749269faSAmit Arora * extent have been removed. 2409749269faSAmit Arora */ 2410749269faSAmit Arora if (uninitialized && num) 2411a2df2a63SAmit Arora ext4_ext_mark_uninitialized(ex); 2412d583fb87SAllison Henderson /* 2413d583fb87SAllison Henderson * If the extent was completely released, 2414d583fb87SAllison Henderson * we need to remove it from the leaf 2415d583fb87SAllison Henderson */ 2416d583fb87SAllison Henderson if (num == 0) { 2417f17722f9SLukas Czerner if (end != EXT_MAX_BLOCKS - 1) { 2418d583fb87SAllison Henderson /* 2419d583fb87SAllison Henderson * For hole punching, we need to scoot all the 2420d583fb87SAllison Henderson * extents up when an extent is removed so that 2421d583fb87SAllison Henderson * we don't have blank extents in the middle 2422d583fb87SAllison Henderson */ 2423d583fb87SAllison Henderson memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * 2424d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2425d583fb87SAllison Henderson 2426d583fb87SAllison Henderson /* Now get rid of the one at the end */ 2427d583fb87SAllison Henderson memset(EXT_LAST_EXTENT(eh), 0, 2428d583fb87SAllison Henderson sizeof(struct ext4_extent)); 2429d583fb87SAllison Henderson } 2430d583fb87SAllison Henderson le16_add_cpu(&eh->eh_entries, -1); 24310aa06000STheodore Ts'o } else 24320aa06000STheodore Ts'o *partial_cluster = 0; 2433d583fb87SAllison Henderson 2434750c9c47SDmitry Monakhov err = ext4_ext_dirty(handle, inode, path + depth); 2435750c9c47SDmitry Monakhov if (err) 2436750c9c47SDmitry Monakhov goto out; 2437750c9c47SDmitry Monakhov 24382ae02107SMingming Cao ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num, 2439bf89d16fSTheodore Ts'o ext4_ext_pblock(ex)); 2440a86c6181SAlex Tomas ex--; 2441a86c6181SAlex Tomas ex_ee_block = le32_to_cpu(ex->ee_block); 2442a2df2a63SAmit Arora ex_ee_len =
ext4_ext_get_actual_len(ex); 2443a86c6181SAlex Tomas } 2444a86c6181SAlex Tomas 2445a86c6181SAlex Tomas if (correct_index && eh->eh_entries) 2446a86c6181SAlex Tomas err = ext4_ext_correct_indexes(handle, inode, path); 2447a86c6181SAlex Tomas 24480aa06000STheodore Ts'o /* 24490aa06000STheodore Ts'o * If there is still an entry in the leaf node, check to see if 24500aa06000STheodore Ts'o * it references the partial cluster. This is the only place 24510aa06000STheodore Ts'o * where it could; if it doesn't, we can free the cluster. 24520aa06000STheodore Ts'o */ 24530aa06000STheodore Ts'o if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) && 24540aa06000STheodore Ts'o (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != 24550aa06000STheodore Ts'o *partial_cluster)) { 24560aa06000STheodore Ts'o int flags = EXT4_FREE_BLOCKS_FORGET; 24570aa06000STheodore Ts'o 24580aa06000STheodore Ts'o if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 24590aa06000STheodore Ts'o flags |= EXT4_FREE_BLOCKS_METADATA; 24600aa06000STheodore Ts'o 24610aa06000STheodore Ts'o ext4_free_blocks(handle, inode, NULL, 24620aa06000STheodore Ts'o EXT4_C2B(sbi, *partial_cluster), 24630aa06000STheodore Ts'o sbi->s_cluster_ratio, flags); 24640aa06000STheodore Ts'o *partial_cluster = 0; 24650aa06000STheodore Ts'o } 24660aa06000STheodore Ts'o 2467a86c6181SAlex Tomas /* if this leaf is free, then we should 2468a86c6181SAlex Tomas * remove it from index block above */ 2469a86c6181SAlex Tomas if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL) 2470a86c6181SAlex Tomas err = ext4_ext_rm_idx(handle, inode, path + depth); 2471a86c6181SAlex Tomas 2472a86c6181SAlex Tomas out: 2473a86c6181SAlex Tomas return err; 2474a86c6181SAlex Tomas } 2475a86c6181SAlex Tomas 2476a86c6181SAlex Tomas /* 2477d0d856e8SRandy Dunlap * ext4_ext_more_to_rm: 2478d0d856e8SRandy Dunlap * returns 1 if the current index has to be freed (even partial) 2479a86c6181SAlex Tomas */ 248009b88252SAvantika Mathur static int 2481a86c6181SAlex Tomas ext4_ext_more_to_rm(struct ext4_ext_path *path) 2482a86c6181SAlex Tomas { 2483a86c6181SAlex Tomas BUG_ON(path->p_idx == NULL); 2484a86c6181SAlex Tomas 2485a86c6181SAlex Tomas if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr)) 2486a86c6181SAlex Tomas return 0; 2487a86c6181SAlex Tomas 2488a86c6181SAlex Tomas /* 2489d0d856e8SRandy Dunlap * if a truncate on a deeper level happened, it wasn't partial, 2490a86c6181SAlex Tomas * so we have to consider the current index for truncation 2491a86c6181SAlex Tomas */ 2492a86c6181SAlex Tomas if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block) 2493a86c6181SAlex Tomas return 0; 2494a86c6181SAlex Tomas return 1; 2495a86c6181SAlex Tomas } 2496a86c6181SAlex Tomas 2497c6a0371cSAllison Henderson static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) 2498a86c6181SAlex Tomas { 2499a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 2500a86c6181SAlex Tomas int depth = ext_depth(inode); 2501a86c6181SAlex Tomas struct ext4_ext_path *path; 25020aa06000STheodore Ts'o ext4_fsblk_t partial_cluster = 0; 2503a86c6181SAlex Tomas handle_t *handle; 25040617b83fSDmitry Monakhov int i, err; 2505a86c6181SAlex Tomas 2506725d26d3SAneesh Kumar K.V ext_debug("truncate since %u\n", start); 2507a86c6181SAlex Tomas 2508a86c6181SAlex Tomas /* probably the first extent we're going to free will be the last in the block */ 2509a86c6181SAlex Tomas handle = ext4_journal_start(inode, depth + 1); 2510a86c6181SAlex Tomas if (IS_ERR(handle)) 2511a86c6181SAlex Tomas return PTR_ERR(handle); 2512a86c6181SAlex Tomas
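	/*
	 * Illustrative credit arithmetic (assumed reasoning, spelled out
	 * for clarity): freeing the rightmost extent can empty its leaf,
	 * which in turn may delete one index entry at each of the 'depth'
	 * levels above it, so a tree of depth 2 asks for 2 + 1 = 3
	 * credits up front here, before any restart logic kicks in.
	 */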
25130617b83fSDmitry Monakhov again: 2514a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 2515a86c6181SAlex Tomas 2516d8990240SAditya Kali trace_ext4_ext_remove_space(inode, start, depth); 2517d8990240SAditya Kali 2518a86c6181SAlex Tomas /* 2519d0d856e8SRandy Dunlap * We start scanning from the right side, freeing all the blocks 2520d0d856e8SRandy Dunlap * after i_size and walking into the tree depth-wise. 2521a86c6181SAlex Tomas */ 25220617b83fSDmitry Monakhov depth = ext_depth(inode); 2523216553c4SJosef Bacik path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS); 2524a86c6181SAlex Tomas if (path == NULL) { 2525a86c6181SAlex Tomas ext4_journal_stop(handle); 2526a86c6181SAlex Tomas return -ENOMEM; 2527a86c6181SAlex Tomas } 25280617b83fSDmitry Monakhov path[0].p_depth = depth; 2529a86c6181SAlex Tomas path[0].p_hdr = ext_inode_hdr(inode); 253056b19868SAneesh Kumar K.V if (ext4_ext_check(inode, path[0].p_hdr, depth)) { 2531a86c6181SAlex Tomas err = -EIO; 2532a86c6181SAlex Tomas goto out; 2533a86c6181SAlex Tomas } 25340617b83fSDmitry Monakhov i = err = 0; 2535a86c6181SAlex Tomas 2536a86c6181SAlex Tomas while (i >= 0 && err == 0) { 2537a86c6181SAlex Tomas if (i == depth) { 2538a86c6181SAlex Tomas /* this is a leaf block */ 2539d583fb87SAllison Henderson err = ext4_ext_rm_leaf(handle, inode, path, 25400aa06000STheodore Ts'o &partial_cluster, start, 25410aa06000STheodore Ts'o EXT_MAX_BLOCKS - 1); 2542d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2543a86c6181SAlex Tomas brelse(path[i].p_bh); 2544a86c6181SAlex Tomas path[i].p_bh = NULL; 2545a86c6181SAlex Tomas i--; 2546a86c6181SAlex Tomas continue; 2547a86c6181SAlex Tomas } 2548a86c6181SAlex Tomas 2549a86c6181SAlex Tomas /* this is an index block */ 2550a86c6181SAlex Tomas if (!path[i].p_hdr) { 2551a86c6181SAlex Tomas ext_debug("initialize header\n"); 2552a86c6181SAlex Tomas path[i].p_hdr = ext_block_hdr(path[i].p_bh); 2553a86c6181SAlex Tomas } 2554a86c6181SAlex Tomas 2555a86c6181SAlex Tomas if (!path[i].p_idx) { 2556d0d856e8SRandy Dunlap /* this level hasn't been touched yet */ 2557a86c6181SAlex Tomas path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr); 2558a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1; 2559a86c6181SAlex Tomas ext_debug("init index ptr: hdr 0x%p, num %d\n", 2560a86c6181SAlex Tomas path[i].p_hdr, 2561a86c6181SAlex Tomas le16_to_cpu(path[i].p_hdr->eh_entries)); 2562a86c6181SAlex Tomas } else { 2563d0d856e8SRandy Dunlap /* we were already here, look at the next index */ 2564a86c6181SAlex Tomas path[i].p_idx--; 2565a86c6181SAlex Tomas } 2566a86c6181SAlex Tomas 2567a86c6181SAlex Tomas ext_debug("level %d - index, first 0x%p, cur 0x%p\n", 2568a86c6181SAlex Tomas i, EXT_FIRST_INDEX(path[i].p_hdr), 2569a86c6181SAlex Tomas path[i].p_idx); 2570a86c6181SAlex Tomas if (ext4_ext_more_to_rm(path + i)) { 2571c29c0ae7SAlex Tomas struct buffer_head *bh; 2572a86c6181SAlex Tomas /* go to the next level */ 25732ae02107SMingming Cao ext_debug("move to level %d (block %llu)\n", 2574bf89d16fSTheodore Ts'o i + 1, ext4_idx_pblock(path[i].p_idx)); 2575a86c6181SAlex Tomas memset(path + i + 1, 0, sizeof(*path)); 2576bf89d16fSTheodore Ts'o bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx)); 2577c29c0ae7SAlex Tomas if (!bh) { 2578a86c6181SAlex Tomas /* should we reset i_size?
*/ 2579a86c6181SAlex Tomas err = -EIO; 2580a86c6181SAlex Tomas break; 2581a86c6181SAlex Tomas } 2582c29c0ae7SAlex Tomas if (WARN_ON(i + 1 > depth)) { 2583c29c0ae7SAlex Tomas err = -EIO; 2584c29c0ae7SAlex Tomas break; 2585c29c0ae7SAlex Tomas } 258656b19868SAneesh Kumar K.V if (ext4_ext_check(inode, ext_block_hdr(bh), 2587c29c0ae7SAlex Tomas depth - i - 1)) { 2588c29c0ae7SAlex Tomas err = -EIO; 2589c29c0ae7SAlex Tomas break; 2590c29c0ae7SAlex Tomas } 2591c29c0ae7SAlex Tomas path[i + 1].p_bh = bh; 2592a86c6181SAlex Tomas 2593d0d856e8SRandy Dunlap /* save actual number of indexes since this 2594d0d856e8SRandy Dunlap * number is changed at the next iteration */ 2595a86c6181SAlex Tomas path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries); 2596a86c6181SAlex Tomas i++; 2597a86c6181SAlex Tomas } else { 2598d0d856e8SRandy Dunlap /* we finished processing this index, go up */ 2599a86c6181SAlex Tomas if (path[i].p_hdr->eh_entries == 0 && i > 0) { 2600d0d856e8SRandy Dunlap /* index is empty, remove it; 2601a86c6181SAlex Tomas * the handle must already be prepared by 2602a86c6181SAlex Tomas * ext4_ext_rm_leaf() */ 2603a86c6181SAlex Tomas err = ext4_ext_rm_idx(handle, inode, path + i); 2604a86c6181SAlex Tomas } 2605d0d856e8SRandy Dunlap /* root level has p_bh == NULL, brelse() eats this */ 2606a86c6181SAlex Tomas brelse(path[i].p_bh); 2607a86c6181SAlex Tomas path[i].p_bh = NULL; 2608a86c6181SAlex Tomas i--; 2609a86c6181SAlex Tomas ext_debug("return to level %d\n", i); 2610a86c6181SAlex Tomas } 2611a86c6181SAlex Tomas } 2612a86c6181SAlex Tomas 2613d8990240SAditya Kali trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster, 2614d8990240SAditya Kali path->p_hdr->eh_entries); 2615d8990240SAditya Kali 26167b415bf6SAditya Kali /* If we still have something in the partial cluster and we have removed 26177b415bf6SAditya Kali * even the first extent, then we should free the blocks in the partial 26187b415bf6SAditya Kali * cluster as well.
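	 * For example (assuming a bigalloc cluster ratio of 16): if every
	 * extent was removed but partial_cluster is still 5, the blocks
	 * EXT4_C2B(sbi, 5) = 80 through 95 still hold that orphaned
	 * cluster, and the ext4_free_blocks() call below releases them.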
*/ 26197b415bf6SAditya Kali if (partial_cluster && path->p_hdr->eh_entries == 0) { 26207b415bf6SAditya Kali int flags = EXT4_FREE_BLOCKS_FORGET; 26217b415bf6SAditya Kali 26227b415bf6SAditya Kali if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) 26237b415bf6SAditya Kali flags |= EXT4_FREE_BLOCKS_METADATA; 26247b415bf6SAditya Kali 26257b415bf6SAditya Kali ext4_free_blocks(handle, inode, NULL, 26267b415bf6SAditya Kali EXT4_C2B(EXT4_SB(sb), partial_cluster), 26277b415bf6SAditya Kali EXT4_SB(sb)->s_cluster_ratio, flags); 26287b415bf6SAditya Kali partial_cluster = 0; 26297b415bf6SAditya Kali } 26307b415bf6SAditya Kali 2631a86c6181SAlex Tomas /* TODO: flexible tree reduction should be here */ 2632a86c6181SAlex Tomas if (path->p_hdr->eh_entries == 0) { 2633a86c6181SAlex Tomas /* 2634d0d856e8SRandy Dunlap * truncate to zero freed all the tree, 2635d0d856e8SRandy Dunlap * so we need to correct eh_depth 2636a86c6181SAlex Tomas */ 2637a86c6181SAlex Tomas err = ext4_ext_get_access(handle, inode, path); 2638a86c6181SAlex Tomas if (err == 0) { 2639a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_depth = 0; 2640a86c6181SAlex Tomas ext_inode_hdr(inode)->eh_max = 264155ad63bfSTheodore Ts'o cpu_to_le16(ext4_ext_space_root(inode, 0)); 2642a86c6181SAlex Tomas err = ext4_ext_dirty(handle, inode, path); 2643a86c6181SAlex Tomas } 2644a86c6181SAlex Tomas } 2645a86c6181SAlex Tomas out: 2646a86c6181SAlex Tomas ext4_ext_drop_refs(path); 2647a86c6181SAlex Tomas kfree(path); 26480617b83fSDmitry Monakhov if (err == -EAGAIN) 26490617b83fSDmitry Monakhov goto again; 2650a86c6181SAlex Tomas ext4_journal_stop(handle); 2651a86c6181SAlex Tomas 2652a86c6181SAlex Tomas return err; 2653a86c6181SAlex Tomas } 2654a86c6181SAlex Tomas 2655a86c6181SAlex Tomas /* 2656a86c6181SAlex Tomas * called at mount time 2657a86c6181SAlex Tomas */ 2658a86c6181SAlex Tomas void ext4_ext_init(struct super_block *sb) 2659a86c6181SAlex Tomas { 2660a86c6181SAlex Tomas /* 2661a86c6181SAlex Tomas * possible initialization would be here 2662a86c6181SAlex Tomas */ 2663a86c6181SAlex Tomas 266483982b6fSTheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) { 266590576c0bSTheodore Ts'o #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS) 26664776004fSTheodore Ts'o printk(KERN_INFO "EXT4-fs: file extents enabled"); 2667bbf2f9fbSRobert P. J. Day #ifdef AGGRESSIVE_TEST 2668bbf2f9fbSRobert P. J. 
Day printk(", aggressive tests"); 2669a86c6181SAlex Tomas #endif 2670a86c6181SAlex Tomas #ifdef CHECK_BINSEARCH 2671a86c6181SAlex Tomas printk(", check binsearch"); 2672a86c6181SAlex Tomas #endif 2673a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2674a86c6181SAlex Tomas printk(", stats"); 2675a86c6181SAlex Tomas #endif 2676a86c6181SAlex Tomas printk("\n"); 267790576c0bSTheodore Ts'o #endif 2678a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2679a86c6181SAlex Tomas spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock); 2680a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_min = 1 << 30; 2681a86c6181SAlex Tomas EXT4_SB(sb)->s_ext_max = 0; 2682a86c6181SAlex Tomas #endif 2683a86c6181SAlex Tomas } 2684a86c6181SAlex Tomas } 2685a86c6181SAlex Tomas 2686a86c6181SAlex Tomas /* 2687a86c6181SAlex Tomas * called at umount time 2688a86c6181SAlex Tomas */ 2689a86c6181SAlex Tomas void ext4_ext_release(struct super_block *sb) 2690a86c6181SAlex Tomas { 269183982b6fSTheodore Ts'o if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) 2692a86c6181SAlex Tomas return; 2693a86c6181SAlex Tomas 2694a86c6181SAlex Tomas #ifdef EXTENTS_STATS 2695a86c6181SAlex Tomas if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) { 2696a86c6181SAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 2697a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n", 2698a86c6181SAlex Tomas sbi->s_ext_blocks, sbi->s_ext_extents, 2699a86c6181SAlex Tomas sbi->s_ext_blocks / sbi->s_ext_extents); 2700a86c6181SAlex Tomas printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n", 2701a86c6181SAlex Tomas sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max); 2702a86c6181SAlex Tomas } 2703a86c6181SAlex Tomas #endif 2704a86c6181SAlex Tomas } 2705a86c6181SAlex Tomas 2706093a088bSAneesh Kumar K.V /* FIXME!! we need to try to merge to left or right after zero-out */ 2707093a088bSAneesh Kumar K.V static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) 2708093a088bSAneesh Kumar K.V { 27092407518dSLukas Czerner ext4_fsblk_t ee_pblock; 27102407518dSLukas Czerner unsigned int ee_len; 2711b720303dSJing Zhang int ret; 2712093a088bSAneesh Kumar K.V 2713093a088bSAneesh Kumar K.V ee_len = ext4_ext_get_actual_len(ex); 2714bf89d16fSTheodore Ts'o ee_pblock = ext4_ext_pblock(ex); 2715093a088bSAneesh Kumar K.V 2716a107e5a3STheodore Ts'o ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS); 27172407518dSLukas Czerner if (ret > 0) 27182407518dSLukas Czerner ret = 0; 2719093a088bSAneesh Kumar K.V 27202407518dSLukas Czerner return ret; 2721093a088bSAneesh Kumar K.V } 2722093a088bSAneesh Kumar K.V 272347ea3bb5SYongqiang Yang /* 272447ea3bb5SYongqiang Yang * used by extent splitting. 272547ea3bb5SYongqiang Yang */ 272647ea3bb5SYongqiang Yang #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ 272747ea3bb5SYongqiang Yang due to ENOSPC */ 272847ea3bb5SYongqiang Yang #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ 272947ea3bb5SYongqiang Yang #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ 273047ea3bb5SYongqiang Yang 273147ea3bb5SYongqiang Yang /* 273247ea3bb5SYongqiang Yang * ext4_split_extent_at() splits an extent at given block. 273347ea3bb5SYongqiang Yang * 273447ea3bb5SYongqiang Yang * @handle: the journal handle 273547ea3bb5SYongqiang Yang * @inode: the file inode 273647ea3bb5SYongqiang Yang * @path: the path to the extent 273747ea3bb5SYongqiang Yang * @split: the logical block where the extent is splitted. 
273847ea3bb5SYongqiang Yang * @split_flag: indicates if the extent could be zeroed out if the split fails, and 273947ea3bb5SYongqiang Yang * the states (init or uninit) of the new extents. 274047ea3bb5SYongqiang Yang * @flags: flags used to insert the new extent into the extent tree. 274147ea3bb5SYongqiang Yang * 274247ea3bb5SYongqiang Yang * 274347ea3bb5SYongqiang Yang * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose 274447ea3bb5SYongqiang Yang * states are determined by split_flag. 274547ea3bb5SYongqiang Yang * 274647ea3bb5SYongqiang Yang * There are two cases: 274747ea3bb5SYongqiang Yang * a> the extent is split into two extents. 274847ea3bb5SYongqiang Yang * b> no split is needed, and we just mark the extent. 274947ea3bb5SYongqiang Yang * 275047ea3bb5SYongqiang Yang * return 0 on success. 275147ea3bb5SYongqiang Yang */ 275247ea3bb5SYongqiang Yang static int ext4_split_extent_at(handle_t *handle, 275347ea3bb5SYongqiang Yang struct inode *inode, 275447ea3bb5SYongqiang Yang struct ext4_ext_path *path, 275547ea3bb5SYongqiang Yang ext4_lblk_t split, 275647ea3bb5SYongqiang Yang int split_flag, 275747ea3bb5SYongqiang Yang int flags) 275847ea3bb5SYongqiang Yang { 275947ea3bb5SYongqiang Yang ext4_fsblk_t newblock; 276047ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 276147ea3bb5SYongqiang Yang struct ext4_extent *ex, newex, orig_ex; 276247ea3bb5SYongqiang Yang struct ext4_extent *ex2 = NULL; 276347ea3bb5SYongqiang Yang unsigned int ee_len, depth; 276447ea3bb5SYongqiang Yang int err = 0; 276547ea3bb5SYongqiang Yang 276647ea3bb5SYongqiang Yang ext_debug("ext4_split_extent_at: inode %lu, logical" 276747ea3bb5SYongqiang Yang " block %llu\n", inode->i_ino, (unsigned long long)split); 276847ea3bb5SYongqiang Yang 276947ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 277047ea3bb5SYongqiang Yang 277147ea3bb5SYongqiang Yang depth = ext_depth(inode); 277247ea3bb5SYongqiang Yang ex = path[depth].p_ext; 277347ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 277447ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 277547ea3bb5SYongqiang Yang newblock = split - ee_block + ext4_ext_pblock(ex); 277647ea3bb5SYongqiang Yang 277747ea3bb5SYongqiang Yang BUG_ON(split < ee_block || split >= (ee_block + ee_len)); 277847ea3bb5SYongqiang Yang 277947ea3bb5SYongqiang Yang err = ext4_ext_get_access(handle, inode, path + depth); 278047ea3bb5SYongqiang Yang if (err) 278147ea3bb5SYongqiang Yang goto out; 278247ea3bb5SYongqiang Yang 278347ea3bb5SYongqiang Yang if (split == ee_block) { 278447ea3bb5SYongqiang Yang /* 278547ea3bb5SYongqiang Yang * case b: block @split is the block that the extent begins with, 278647ea3bb5SYongqiang Yang * so we just change the state of the extent, and splitting 278747ea3bb5SYongqiang Yang * is not needed.
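		 *
		 * E.g. (hypothetical numbers): for an extent covering
		 * logical blocks [100, 120) with split == 100 and
		 * EXT4_EXT_MARK_UNINIT2 set, no new extent is created;
		 * the existing one is simply marked uninitialized below.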
278847ea3bb5SYongqiang Yang */ 278947ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 279047ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex); 279147ea3bb5SYongqiang Yang else 279247ea3bb5SYongqiang Yang ext4_ext_mark_initialized(ex); 279347ea3bb5SYongqiang Yang 279447ea3bb5SYongqiang Yang if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) 279547ea3bb5SYongqiang Yang ext4_ext_try_to_merge(inode, path, ex); 279647ea3bb5SYongqiang Yang 279747ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 279847ea3bb5SYongqiang Yang goto out; 279947ea3bb5SYongqiang Yang } 280047ea3bb5SYongqiang Yang 280147ea3bb5SYongqiang Yang /* case a */ 280247ea3bb5SYongqiang Yang memcpy(&orig_ex, ex, sizeof(orig_ex)); 280347ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(split - ee_block); 280447ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT1) 280547ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex); 280647ea3bb5SYongqiang Yang 280747ea3bb5SYongqiang Yang /* 280847ea3bb5SYongqiang Yang * the path may lead to a new leaf, not to the original leaf any more 280947ea3bb5SYongqiang Yang * after ext4_ext_insert_extent() returns, 281047ea3bb5SYongqiang Yang */ 281147ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 281247ea3bb5SYongqiang Yang if (err) 281347ea3bb5SYongqiang Yang goto fix_extent_len; 281447ea3bb5SYongqiang Yang 281547ea3bb5SYongqiang Yang ex2 = &newex; 281647ea3bb5SYongqiang Yang ex2->ee_block = cpu_to_le32(split); 281747ea3bb5SYongqiang Yang ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); 281847ea3bb5SYongqiang Yang ext4_ext_store_pblock(ex2, newblock); 281947ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 282047ea3bb5SYongqiang Yang ext4_ext_mark_uninitialized(ex2); 282147ea3bb5SYongqiang Yang 282247ea3bb5SYongqiang Yang err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 282347ea3bb5SYongqiang Yang if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 282447ea3bb5SYongqiang Yang err = ext4_ext_zeroout(inode, &orig_ex); 282547ea3bb5SYongqiang Yang if (err) 282647ea3bb5SYongqiang Yang goto fix_extent_len; 282747ea3bb5SYongqiang Yang /* update the extent length and mark as initialized */ 282847ea3bb5SYongqiang Yang ex->ee_len = cpu_to_le16(ee_len); 282947ea3bb5SYongqiang Yang ext4_ext_try_to_merge(inode, path, ex); 283047ea3bb5SYongqiang Yang err = ext4_ext_dirty(handle, inode, path + depth); 283147ea3bb5SYongqiang Yang goto out; 283247ea3bb5SYongqiang Yang } else if (err) 283347ea3bb5SYongqiang Yang goto fix_extent_len; 283447ea3bb5SYongqiang Yang 283547ea3bb5SYongqiang Yang out: 283647ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 283747ea3bb5SYongqiang Yang return err; 283847ea3bb5SYongqiang Yang 283947ea3bb5SYongqiang Yang fix_extent_len: 284047ea3bb5SYongqiang Yang ex->ee_len = orig_ex.ee_len; 284147ea3bb5SYongqiang Yang ext4_ext_dirty(handle, inode, path + depth); 284247ea3bb5SYongqiang Yang return err; 284347ea3bb5SYongqiang Yang } 284447ea3bb5SYongqiang Yang 284547ea3bb5SYongqiang Yang /* 284647ea3bb5SYongqiang Yang * ext4_split_extent() splits an extent and marks the extent which is covered 284747ea3bb5SYongqiang Yang * by @map as split_flag indicates 284847ea3bb5SYongqiang Yang * 284947ea3bb5SYongqiang Yang * It may result in splitting the extent into multiple extents (up to three) 285047ea3bb5SYongqiang Yang * There are three possibilities: 285147ea3bb5SYongqiang Yang * a> There is no split required 285247ea3bb5SYongqiang Yang * b> Splits in two extents: Split is happening at either end of the
extent 285347ea3bb5SYongqiang Yang * c> Splits in three extents: Someone is splitting in the middle of the extent 285447ea3bb5SYongqiang Yang * 285547ea3bb5SYongqiang Yang */ 285647ea3bb5SYongqiang Yang static int ext4_split_extent(handle_t *handle, 285747ea3bb5SYongqiang Yang struct inode *inode, 285847ea3bb5SYongqiang Yang struct ext4_ext_path *path, 285947ea3bb5SYongqiang Yang struct ext4_map_blocks *map, 286047ea3bb5SYongqiang Yang int split_flag, 286147ea3bb5SYongqiang Yang int flags) 286247ea3bb5SYongqiang Yang { 286347ea3bb5SYongqiang Yang ext4_lblk_t ee_block; 286447ea3bb5SYongqiang Yang struct ext4_extent *ex; 286547ea3bb5SYongqiang Yang unsigned int ee_len, depth; 286647ea3bb5SYongqiang Yang int err = 0; 286747ea3bb5SYongqiang Yang int uninitialized; 286847ea3bb5SYongqiang Yang int split_flag1, flags1; 286947ea3bb5SYongqiang Yang 287047ea3bb5SYongqiang Yang depth = ext_depth(inode); 287147ea3bb5SYongqiang Yang ex = path[depth].p_ext; 287247ea3bb5SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 287347ea3bb5SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 287447ea3bb5SYongqiang Yang uninitialized = ext4_ext_is_uninitialized(ex); 287547ea3bb5SYongqiang Yang 287647ea3bb5SYongqiang Yang if (map->m_lblk + map->m_len < ee_block + ee_len) { 287747ea3bb5SYongqiang Yang split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? 287847ea3bb5SYongqiang Yang EXT4_EXT_MAY_ZEROOUT : 0; 287947ea3bb5SYongqiang Yang flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; 288047ea3bb5SYongqiang Yang if (uninitialized) 288147ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1 | 288247ea3bb5SYongqiang Yang EXT4_EXT_MARK_UNINIT2; 288347ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 288447ea3bb5SYongqiang Yang map->m_lblk + map->m_len, split_flag1, flags1); 288593917411SYongqiang Yang if (err) 288693917411SYongqiang Yang goto out; 288747ea3bb5SYongqiang Yang } 288847ea3bb5SYongqiang Yang 288947ea3bb5SYongqiang Yang ext4_ext_drop_refs(path); 289047ea3bb5SYongqiang Yang path = ext4_ext_find_extent(inode, map->m_lblk, path); 289147ea3bb5SYongqiang Yang if (IS_ERR(path)) 289247ea3bb5SYongqiang Yang return PTR_ERR(path); 289347ea3bb5SYongqiang Yang 289447ea3bb5SYongqiang Yang if (map->m_lblk >= ee_block) { 289547ea3bb5SYongqiang Yang split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? 289647ea3bb5SYongqiang Yang EXT4_EXT_MAY_ZEROOUT : 0; 289747ea3bb5SYongqiang Yang if (uninitialized) 289847ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT1; 289947ea3bb5SYongqiang Yang if (split_flag & EXT4_EXT_MARK_UNINIT2) 290047ea3bb5SYongqiang Yang split_flag1 |= EXT4_EXT_MARK_UNINIT2; 290147ea3bb5SYongqiang Yang err = ext4_split_extent_at(handle, inode, path, 290247ea3bb5SYongqiang Yang map->m_lblk, split_flag1, flags); 290347ea3bb5SYongqiang Yang if (err) 290447ea3bb5SYongqiang Yang goto out; 290547ea3bb5SYongqiang Yang } 290647ea3bb5SYongqiang Yang 290747ea3bb5SYongqiang Yang ext4_ext_show_leaf(inode, path); 290847ea3bb5SYongqiang Yang out: 290947ea3bb5SYongqiang Yang return err ? err : map->m_len; 291047ea3bb5SYongqiang Yang } 291147ea3bb5SYongqiang Yang 29123977c965SAneesh Kumar K.V #define EXT4_EXT_ZERO_LEN 7 291356055d3aSAmit Arora /* 2914e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() if someone tries to write 291556055d3aSAmit Arora * to an uninitialized extent. It may result in splitting the uninitialized 291656055d3aSAmit Arora * extent into multiple extents (up to three - one initialized and two 291756055d3aSAmit Arora * uninitialized).
291856055d3aSAmit Arora * There are three possibilities: 291956055d3aSAmit Arora * a> There is no split required: Entire extent should be initialized 292056055d3aSAmit Arora * b> Splits in two extents: Write is happening at either end of the extent 292156055d3aSAmit Arora * c> Splits in three extents: Someone is writing in the middle of the extent 2922*6f91bc5fSEric Gouriou * 2923*6f91bc5fSEric Gouriou * Pre-conditions: 2924*6f91bc5fSEric Gouriou * - The extent pointed to by 'path' is uninitialized. 2925*6f91bc5fSEric Gouriou * - The extent pointed to by 'path' contains a superset 2926*6f91bc5fSEric Gouriou * of the logical span [map->m_lblk, map->m_lblk + map->m_len). 2927*6f91bc5fSEric Gouriou * 2928*6f91bc5fSEric Gouriou * Post-conditions on success: 2929*6f91bc5fSEric Gouriou * - the returned value is the number of blocks beyond map->m_lblk 2930*6f91bc5fSEric Gouriou * that are allocated and initialized. 2931*6f91bc5fSEric Gouriou * It is guaranteed to be >= map->m_len. 293256055d3aSAmit Arora */ 2933725d26d3SAneesh Kumar K.V static int ext4_ext_convert_to_initialized(handle_t *handle, 2934725d26d3SAneesh Kumar K.V struct inode *inode, 2935e35fd660STheodore Ts'o struct ext4_map_blocks *map, 2936e35fd660STheodore Ts'o struct ext4_ext_path *path) 293756055d3aSAmit Arora { 2938*6f91bc5fSEric Gouriou struct ext4_extent_header *eh; 2939667eff35SYongqiang Yang struct ext4_map_blocks split_map; 2940667eff35SYongqiang Yang struct ext4_extent zero_ex; 2941667eff35SYongqiang Yang struct ext4_extent *ex; 294221ca087aSDmitry Monakhov ext4_lblk_t ee_block, eof_block; 2943f85b287aSDan Carpenter unsigned int ee_len, depth; 2944f85b287aSDan Carpenter int allocated; 294556055d3aSAmit Arora int err = 0; 2946667eff35SYongqiang Yang int split_flag = 0; 294721ca087aSDmitry Monakhov 294821ca087aSDmitry Monakhov ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 294921ca087aSDmitry Monakhov " block %llu, max_blocks %u\n", inode->i_ino, 2950e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len); 295121ca087aSDmitry Monakhov 295221ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 295321ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 2954e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len) 2955e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len; 295656055d3aSAmit Arora 295756055d3aSAmit Arora depth = ext_depth(inode); 2958*6f91bc5fSEric Gouriou eh = path[depth].p_hdr; 295956055d3aSAmit Arora ex = path[depth].p_ext; 296056055d3aSAmit Arora ee_block = le32_to_cpu(ex->ee_block); 296156055d3aSAmit Arora ee_len = ext4_ext_get_actual_len(ex); 2962e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 296321ca087aSDmitry Monakhov 2964*6f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 2965*6f91bc5fSEric Gouriou 2966*6f91bc5fSEric Gouriou /* Pre-conditions */ 2967*6f91bc5fSEric Gouriou BUG_ON(!ext4_ext_is_uninitialized(ex)); 2968*6f91bc5fSEric Gouriou BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); 2969*6f91bc5fSEric Gouriou BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len); 2970*6f91bc5fSEric Gouriou 2971*6f91bc5fSEric Gouriou /* 2972*6f91bc5fSEric Gouriou * Attempt to transfer newly initialized blocks from the currently 2973*6f91bc5fSEric Gouriou * uninitialized extent to its left neighbor. This is much cheaper 2974*6f91bc5fSEric Gouriou * than an insertion followed by a merge as those involve costly 2975*6f91bc5fSEric Gouriou * memmove() calls.
This is the common case in steady state for 2976*6f91bc5fSEric Gouriou * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append 2977*6f91bc5fSEric Gouriou * writes. 2978*6f91bc5fSEric Gouriou * 2979*6f91bc5fSEric Gouriou * Limitations of the current logic: 2980*6f91bc5fSEric Gouriou * - L1: we only deal with writes at the start of the extent. 2981*6f91bc5fSEric Gouriou * The approach could be extended to writes at the end 2982*6f91bc5fSEric Gouriou * of the extent but this scenario was deemed less common. 2983*6f91bc5fSEric Gouriou * - L2: we do not deal with writes covering the whole extent. 2984*6f91bc5fSEric Gouriou * This would require removing the extent if the transfer 2985*6f91bc5fSEric Gouriou * is possible. 2986*6f91bc5fSEric Gouriou * - L3: we only attempt to merge with an extent stored in the 2987*6f91bc5fSEric Gouriou * same extent tree node. 2988*6f91bc5fSEric Gouriou */ 2989*6f91bc5fSEric Gouriou if ((map->m_lblk == ee_block) && /*L1*/ 2990*6f91bc5fSEric Gouriou (map->m_len < ee_len) && /*L2*/ 2991*6f91bc5fSEric Gouriou (ex > EXT_FIRST_EXTENT(eh))) { /*L3*/ 2992*6f91bc5fSEric Gouriou struct ext4_extent *prev_ex; 2993*6f91bc5fSEric Gouriou ext4_lblk_t prev_lblk; 2994*6f91bc5fSEric Gouriou ext4_fsblk_t prev_pblk, ee_pblk; 2995*6f91bc5fSEric Gouriou unsigned int prev_len, write_len; 2996*6f91bc5fSEric Gouriou 2997*6f91bc5fSEric Gouriou prev_ex = ex - 1; 2998*6f91bc5fSEric Gouriou prev_lblk = le32_to_cpu(prev_ex->ee_block); 2999*6f91bc5fSEric Gouriou prev_len = ext4_ext_get_actual_len(prev_ex); 3000*6f91bc5fSEric Gouriou prev_pblk = ext4_ext_pblock(prev_ex); 3001*6f91bc5fSEric Gouriou ee_pblk = ext4_ext_pblock(ex); 3002*6f91bc5fSEric Gouriou write_len = map->m_len; 3003*6f91bc5fSEric Gouriou 3004*6f91bc5fSEric Gouriou /* 3005*6f91bc5fSEric Gouriou * A transfer of blocks from 'ex' to 'prev_ex' is allowed 3006*6f91bc5fSEric Gouriou * under these conditions: 3007*6f91bc5fSEric Gouriou * - C1: prev_ex is initialized, 3008*6f91bc5fSEric Gouriou * - C2: prev_ex is logically abutting ex, 3009*6f91bc5fSEric Gouriou * - C3: prev_ex is physically abutting ex, 3010*6f91bc5fSEric Gouriou * - C4: prev_ex can receive the additional blocks without 3011*6f91bc5fSEric Gouriou * overflowing the (initialized) length limit.
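		 *
		 * Hypothetical illustration of the C1-C4 transfer: prev_ex
		 * is initialized and maps [100, 108) at pblk 500, ex is
		 * uninitialized and maps [108, 124) at pblk 508, and the
		 * write covers [108, 112).  C1-C4 all hold, so prev_ex
		 * grows to [100, 112) while ex shrinks to [112, 124) at
		 * pblk 512, still uninitialized.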
3012*6f91bc5fSEric Gouriou */ 3013*6f91bc5fSEric Gouriou if ((!ext4_ext_is_uninitialized(prev_ex)) && /*C1*/ 3014*6f91bc5fSEric Gouriou ((prev_lblk + prev_len) == ee_block) && /*C2*/ 3015*6f91bc5fSEric Gouriou ((prev_pblk + prev_len) == ee_pblk) && /*C3*/ 3016*6f91bc5fSEric Gouriou (prev_len < (EXT_INIT_MAX_LEN - write_len))) { /*C4*/ 3017*6f91bc5fSEric Gouriou err = ext4_ext_get_access(handle, inode, path + depth); 3018*6f91bc5fSEric Gouriou if (err) 3019*6f91bc5fSEric Gouriou goto out; 3020*6f91bc5fSEric Gouriou 3021*6f91bc5fSEric Gouriou trace_ext4_ext_convert_to_initialized_fastpath(inode, 3022*6f91bc5fSEric Gouriou map, ex, prev_ex); 3023*6f91bc5fSEric Gouriou 3024*6f91bc5fSEric Gouriou /* Shift the start of ex by 'write_len' blocks */ 3025*6f91bc5fSEric Gouriou ex->ee_block = cpu_to_le32(ee_block + write_len); 3026*6f91bc5fSEric Gouriou ext4_ext_store_pblock(ex, ee_pblk + write_len); 3027*6f91bc5fSEric Gouriou ex->ee_len = cpu_to_le16(ee_len - write_len); 3028*6f91bc5fSEric Gouriou ext4_ext_mark_uninitialized(ex); /* Restore the flag */ 3029*6f91bc5fSEric Gouriou 3030*6f91bc5fSEric Gouriou /* Extend prev_ex by 'write_len' blocks */ 3031*6f91bc5fSEric Gouriou prev_ex->ee_len = cpu_to_le16(prev_len + write_len); 3032*6f91bc5fSEric Gouriou 3033*6f91bc5fSEric Gouriou /* Mark the block containing both extents as dirty */ 3034*6f91bc5fSEric Gouriou ext4_ext_dirty(handle, inode, path + depth); 3035*6f91bc5fSEric Gouriou 3036*6f91bc5fSEric Gouriou /* Update path to point to the right extent */ 3037*6f91bc5fSEric Gouriou path[depth].p_ext = prev_ex; 3038*6f91bc5fSEric Gouriou 3039*6f91bc5fSEric Gouriou /* Result: number of initialized blocks past m_lblk */ 3040*6f91bc5fSEric Gouriou allocated = write_len; 3041*6f91bc5fSEric Gouriou goto out; 3042*6f91bc5fSEric Gouriou } 3043*6f91bc5fSEric Gouriou } 3044*6f91bc5fSEric Gouriou 3045667eff35SYongqiang Yang WARN_ON(map->m_lblk < ee_block); 304621ca087aSDmitry Monakhov /* 304721ca087aSDmitry Monakhov * It is safe to convert the extent to initialized via explicit 304821ca087aSDmitry Monakhov * zeroout only if the extent is fully inside i_size or new_size. 304921ca087aSDmitry Monakhov */ 3050667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; 305121ca087aSDmitry Monakhov 30523977c965SAneesh Kumar K.V /* If the extent has less than 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */ 3053667eff35SYongqiang Yang if (ee_len <= 2*EXT4_EXT_ZERO_LEN && 3054667eff35SYongqiang Yang (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3055667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, ex); 30563977c965SAneesh Kumar K.V if (err) 305756055d3aSAmit Arora goto out; 30589df5643aSAneesh Kumar K.V 30599df5643aSAneesh Kumar K.V err = ext4_ext_get_access(handle, inode, path + depth); 30609df5643aSAneesh Kumar K.V if (err) 30619df5643aSAneesh Kumar K.V goto out; 3062667eff35SYongqiang Yang ext4_ext_mark_initialized(ex); 3063667eff35SYongqiang Yang ext4_ext_try_to_merge(inode, path, ex); 306456055d3aSAmit Arora err = ext4_ext_dirty(handle, inode, path + depth); 306556055d3aSAmit Arora goto out; 3066667eff35SYongqiang Yang } 3067093a088bSAneesh Kumar K.V 3068667eff35SYongqiang Yang /* 3069667eff35SYongqiang Yang * four cases: 3070667eff35SYongqiang Yang * 1. split the extent into three extents. 3071667eff35SYongqiang Yang * 2. split the extent into two extents, zeroout the first half. 3072667eff35SYongqiang Yang * 3. split the extent into two extents, zeroout the second half. 3073667eff35SYongqiang Yang * 4. split the extent into two extents without zeroout.
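	 *
	 * Worked example (assumed values, for illustration): an
	 * uninitialized extent of 20 blocks at ee_block 100, with a
	 * write of m_lblk = 104, m_len = 4.  Then
	 * allocated = 20 - (104 - 100) = 16, which exceeds both
	 * EXT4_EXT_ZERO_LEN and the case-2 bound, so neither zeroout
	 * shortcut applies and ext4_split_extent() below performs the
	 * full three-way split (case 1).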
3074667eff35SYongqiang Yang */ 3075667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3076667eff35SYongqiang Yang split_map.m_len = map->m_len; 3077667eff35SYongqiang Yang 3078667eff35SYongqiang Yang if (allocated > map->m_len) { 3079667eff35SYongqiang Yang if (allocated <= EXT4_EXT_ZERO_LEN && 3080667eff35SYongqiang Yang (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3081667eff35SYongqiang Yang /* case 3 */ 3082667eff35SYongqiang Yang zero_ex.ee_block = 30839b940f8eSAllison Henderson cpu_to_le32(map->m_lblk); 30849b940f8eSAllison Henderson zero_ex.ee_len = cpu_to_le16(allocated); 3085667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3086667eff35SYongqiang Yang ext4_ext_pblock(ex) + map->m_lblk - ee_block); 3087667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3088667eff35SYongqiang Yang if (err) 3089667eff35SYongqiang Yang goto out; 3090667eff35SYongqiang Yang split_map.m_lblk = map->m_lblk; 3091667eff35SYongqiang Yang split_map.m_len = allocated; 3092667eff35SYongqiang Yang } else if ((map->m_lblk - ee_block + map->m_len < 3093667eff35SYongqiang Yang EXT4_EXT_ZERO_LEN) && 3094667eff35SYongqiang Yang (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3095667eff35SYongqiang Yang /* case 2 */ 3096667eff35SYongqiang Yang if (map->m_lblk != ee_block) { 3097667eff35SYongqiang Yang zero_ex.ee_block = ex->ee_block; 3098667eff35SYongqiang Yang zero_ex.ee_len = cpu_to_le16(map->m_lblk - 3099667eff35SYongqiang Yang ee_block); 3100667eff35SYongqiang Yang ext4_ext_store_pblock(&zero_ex, 3101667eff35SYongqiang Yang ext4_ext_pblock(ex)); 3102667eff35SYongqiang Yang err = ext4_ext_zeroout(inode, &zero_ex); 3103667eff35SYongqiang Yang if (err) 3104667eff35SYongqiang Yang goto out; 3105667eff35SYongqiang Yang } 3106667eff35SYongqiang Yang 3107667eff35SYongqiang Yang split_map.m_lblk = ee_block; 31089b940f8eSAllison Henderson split_map.m_len = map->m_lblk - ee_block + map->m_len; 31099b940f8eSAllison Henderson allocated = map->m_len; 3110667eff35SYongqiang Yang } 3111667eff35SYongqiang Yang } 3112667eff35SYongqiang Yang 3113667eff35SYongqiang Yang allocated = ext4_split_extent(handle, inode, path, 3114667eff35SYongqiang Yang &split_map, split_flag, 0); 3115667eff35SYongqiang Yang if (allocated < 0) 3116667eff35SYongqiang Yang err = allocated; 3117667eff35SYongqiang Yang 3118667eff35SYongqiang Yang out: 3119667eff35SYongqiang Yang return err ? err : allocated; 312056055d3aSAmit Arora } 312156055d3aSAmit Arora 3122c278bfecSAneesh Kumar K.V /* 3123e35fd660STheodore Ts'o * This function is called by ext4_ext_map_blocks() from 31240031462bSMingming Cao * ext4_get_blocks_dio_write() when DIO is used to write 31250031462bSMingming Cao * to an uninitialized extent. 31260031462bSMingming Cao * 3127fd018fe8SPaul Bolle * Writing to an uninitialized extent may result in splitting the uninitialized 3128b595076aSUwe Kleine-König * extent into multiple initialized/uninitialized extents (up to three) 31290031462bSMingming Cao * There are three possibilities: 31300031462bSMingming Cao * a> There is no split required: Entire extent should be uninitialized 31310031462bSMingming Cao * b> Splits in two extents: Write is happening at either end of the extent 31320031462bSMingming Cao * c> Splits in three extents: Someone is writing in the middle of the extent 31330031462bSMingming Cao * 31340031462bSMingming Cao * One or more index blocks may be needed if the extent tree grows after
To prevent ENOSPC occur at the IO 31360031462bSMingming Cao * complete, we need to split the uninitialized extent before DIO submit 3137421f91d2SUwe Kleine-König * the IO. The uninitialized extent called at this time will be split 31380031462bSMingming Cao * into three uninitialized extent(at most). After IO complete, the part 31390031462bSMingming Cao * being filled will be convert to initialized by the end_io callback function 31400031462bSMingming Cao * via ext4_convert_unwritten_extents(). 3141ba230c3fSMingming * 3142ba230c3fSMingming * Returns the size of uninitialized extent to be written on success. 31430031462bSMingming Cao */ 31440031462bSMingming Cao static int ext4_split_unwritten_extents(handle_t *handle, 31450031462bSMingming Cao struct inode *inode, 3146e35fd660STheodore Ts'o struct ext4_map_blocks *map, 31470031462bSMingming Cao struct ext4_ext_path *path, 31480031462bSMingming Cao int flags) 31490031462bSMingming Cao { 3150667eff35SYongqiang Yang ext4_lblk_t eof_block; 3151667eff35SYongqiang Yang ext4_lblk_t ee_block; 3152667eff35SYongqiang Yang struct ext4_extent *ex; 3153667eff35SYongqiang Yang unsigned int ee_len; 3154667eff35SYongqiang Yang int split_flag = 0, depth; 31550031462bSMingming Cao 315621ca087aSDmitry Monakhov ext_debug("ext4_split_unwritten_extents: inode %lu, logical" 315721ca087aSDmitry Monakhov "block %llu, max_blocks %u\n", inode->i_ino, 3158e35fd660STheodore Ts'o (unsigned long long)map->m_lblk, map->m_len); 315921ca087aSDmitry Monakhov 316021ca087aSDmitry Monakhov eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> 316121ca087aSDmitry Monakhov inode->i_sb->s_blocksize_bits; 3162e35fd660STheodore Ts'o if (eof_block < map->m_lblk + map->m_len) 3163e35fd660STheodore Ts'o eof_block = map->m_lblk + map->m_len; 31640031462bSMingming Cao /* 316521ca087aSDmitry Monakhov * It is safe to convert extent to initialized via explicit 316621ca087aSDmitry Monakhov * zeroout only if extent is fully insde i_size or new_size. 316721ca087aSDmitry Monakhov */ 3168667eff35SYongqiang Yang depth = ext_depth(inode); 31690031462bSMingming Cao ex = path[depth].p_ext; 3170667eff35SYongqiang Yang ee_block = le32_to_cpu(ex->ee_block); 3171667eff35SYongqiang Yang ee_len = ext4_ext_get_actual_len(ex); 31720031462bSMingming Cao 3173667eff35SYongqiang Yang split_flag |= ee_block + ee_len <= eof_block ? 
EXT4_EXT_MAY_ZEROOUT : 0; 3174667eff35SYongqiang Yang split_flag |= EXT4_EXT_MARK_UNINIT2; 31750031462bSMingming Cao 3176667eff35SYongqiang Yang flags |= EXT4_GET_BLOCKS_PRE_IO; 3177667eff35SYongqiang Yang return ext4_split_extent(handle, inode, path, map, split_flag, flags); 31780031462bSMingming Cao } 3179197217a5SYongqiang Yang 3180c7064ef1SJiaying Zhang static int ext4_convert_unwritten_extents_endio(handle_t *handle, 31810031462bSMingming Cao struct inode *inode, 31820031462bSMingming Cao struct ext4_ext_path *path) 31830031462bSMingming Cao { 31840031462bSMingming Cao struct ext4_extent *ex; 31850031462bSMingming Cao int depth; 31860031462bSMingming Cao int err = 0; 31870031462bSMingming Cao 31880031462bSMingming Cao depth = ext_depth(inode); 31890031462bSMingming Cao ex = path[depth].p_ext; 31900031462bSMingming Cao 3191197217a5SYongqiang Yang ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" 3192197217a5SYongqiang Yang " block %llu, max_blocks %u\n", inode->i_ino, 3193197217a5SYongqiang Yang (unsigned long long)le32_to_cpu(ex->ee_block), 3194197217a5SYongqiang Yang ext4_ext_get_actual_len(ex)); 3195197217a5SYongqiang Yang 31960031462bSMingming Cao err = ext4_ext_get_access(handle, inode, path + depth); 31970031462bSMingming Cao if (err) 31980031462bSMingming Cao goto out; 31990031462bSMingming Cao /* first mark the extent as initialized */ 32000031462bSMingming Cao ext4_ext_mark_initialized(ex); 32010031462bSMingming Cao 3202197217a5SYongqiang Yang /* note: ext4_ext_correct_indexes() isn't needed here because 3203197217a5SYongqiang Yang * borders are not changed 32040031462bSMingming Cao */ 3205197217a5SYongqiang Yang ext4_ext_try_to_merge(inode, path, ex); 3206197217a5SYongqiang Yang 32070031462bSMingming Cao /* Mark modified extent as dirty */ 32080031462bSMingming Cao err = ext4_ext_dirty(handle, inode, path + depth); 32090031462bSMingming Cao out: 32100031462bSMingming Cao ext4_ext_show_leaf(inode, path); 32110031462bSMingming Cao return err; 32120031462bSMingming Cao } 32130031462bSMingming Cao 3214515f41c3SAneesh Kumar K.V static void unmap_underlying_metadata_blocks(struct block_device *bdev, 3215515f41c3SAneesh Kumar K.V sector_t block, int count) 3216515f41c3SAneesh Kumar K.V { 3217515f41c3SAneesh Kumar K.V int i; 3218515f41c3SAneesh Kumar K.V for (i = 0; i < count; i++) 3219515f41c3SAneesh Kumar K.V unmap_underlying_metadata(bdev, block + i); 3220515f41c3SAneesh Kumar K.V } 3221515f41c3SAneesh Kumar K.V 322258590b06STheodore Ts'o /* 322358590b06STheodore Ts'o * Handle EOFBLOCKS_FL flag, clearing it if necessary 322458590b06STheodore Ts'o */ 322558590b06STheodore Ts'o static int check_eofblocks_fl(handle_t *handle, struct inode *inode, 3226d002ebf1SEric Sandeen ext4_lblk_t lblk, 322758590b06STheodore Ts'o struct ext4_ext_path *path, 322858590b06STheodore Ts'o unsigned int len) 322958590b06STheodore Ts'o { 323058590b06STheodore Ts'o int i, depth; 323158590b06STheodore Ts'o struct ext4_extent_header *eh; 323265922cb5SSergey Senozhatsky struct ext4_extent *last_ex; 323358590b06STheodore Ts'o 323458590b06STheodore Ts'o if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 323558590b06STheodore Ts'o return 0; 323658590b06STheodore Ts'o 323758590b06STheodore Ts'o depth = ext_depth(inode); 323858590b06STheodore Ts'o eh = path[depth].p_hdr; 323958590b06STheodore Ts'o 324058590b06STheodore Ts'o if (unlikely(!eh->eh_entries)) { 324158590b06STheodore Ts'o EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and " 324258590b06STheodore Ts'o "EOFBLOCKS_FL set");
324358590b06STheodore Ts'o return -EIO; 324458590b06STheodore Ts'o } 324558590b06STheodore Ts'o last_ex = EXT_LAST_EXTENT(eh); 324658590b06STheodore Ts'o /* 324758590b06STheodore Ts'o * We should clear the EOFBLOCKS_FL flag if we are writing the 324858590b06STheodore Ts'o * last block in the last extent in the file. We test this by 324958590b06STheodore Ts'o * first checking to see if the caller to 325058590b06STheodore Ts'o * ext4_ext_get_blocks() was interested in the last block (or 325158590b06STheodore Ts'o * a block beyond the last block) in the current extent. If 325258590b06STheodore Ts'o * this turns out to be false, we can bail out from this 325358590b06STheodore Ts'o * function immediately. 325458590b06STheodore Ts'o */ 3255d002ebf1SEric Sandeen if (lblk + len < le32_to_cpu(last_ex->ee_block) + 325658590b06STheodore Ts'o ext4_ext_get_actual_len(last_ex)) 325758590b06STheodore Ts'o return 0; 325858590b06STheodore Ts'o /* 325958590b06STheodore Ts'o * If the caller does appear to be planning to write at or 326058590b06STheodore Ts'o * beyond the end of the current extent, we then test to see 326158590b06STheodore Ts'o * if the current extent is the last extent in the file, by 326258590b06STheodore Ts'o * checking to make sure it was reached via the rightmost node 326358590b06STheodore Ts'o * at each level of the tree. 326458590b06STheodore Ts'o */ 326558590b06STheodore Ts'o for (i = depth-1; i >= 0; i--) 326658590b06STheodore Ts'o if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr)) 326758590b06STheodore Ts'o return 0; 326858590b06STheodore Ts'o ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 326958590b06STheodore Ts'o return ext4_mark_inode_dirty(handle, inode); 327058590b06STheodore Ts'o } 327158590b06STheodore Ts'o 32727b415bf6SAditya Kali /** 32737b415bf6SAditya Kali * ext4_find_delalloc_range: find delayed allocated block in the given range. 32747b415bf6SAditya Kali * 32757b415bf6SAditya Kali * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns 32767b415bf6SAditya Kali * whether there are any buffers marked for delayed allocation. It returns '1' 32777b415bf6SAditya Kali * on the first delalloc'ed buffer head found. If no buffer head in the given 32787b415bf6SAditya Kali * range is marked for delalloc, it returns 0. 32797b415bf6SAditya Kali * lblk_start should always be <= lblk_end. 32807b415bf6SAditya Kali * search_hint_reverse indicates that searching in reverse from lblk_end to 32817b415bf6SAditya Kali * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed 32827b415bf6SAditya Kali * block sooner). This is useful when blocks are truncated sequentially from 32837b415bf6SAditya Kali * lblk_start towards lblk_end.
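 *
 * Usage sketch (hypothetical caller, for illustration only):
 * ext4_find_delalloc_range(inode, 0, 31, 0) returns 1 as soon as any
 * of logical blocks 0..31 is backed by a delayed-allocation buffer
 * that has not been mapped yet, and 0 otherwise.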
32847b415bf6SAditya Kali */ 32857b415bf6SAditya Kali static int ext4_find_delalloc_range(struct inode *inode, 32867b415bf6SAditya Kali ext4_lblk_t lblk_start, 32877b415bf6SAditya Kali ext4_lblk_t lblk_end, 32887b415bf6SAditya Kali int search_hint_reverse) 32897b415bf6SAditya Kali { 32907b415bf6SAditya Kali struct address_space *mapping = inode->i_mapping; 32917b415bf6SAditya Kali struct buffer_head *head, *bh = NULL; 32927b415bf6SAditya Kali struct page *page; 32937b415bf6SAditya Kali ext4_lblk_t i, pg_lblk; 32947b415bf6SAditya Kali pgoff_t index; 32957b415bf6SAditya Kali 32967b415bf6SAditya Kali /* reverse search won't work if fs block size is less than page size */ 32977b415bf6SAditya Kali if (inode->i_blkbits < PAGE_CACHE_SHIFT) 32987b415bf6SAditya Kali search_hint_reverse = 0; 32997b415bf6SAditya Kali 33007b415bf6SAditya Kali if (search_hint_reverse) 33017b415bf6SAditya Kali i = lblk_end; 33027b415bf6SAditya Kali else 33037b415bf6SAditya Kali i = lblk_start; 33047b415bf6SAditya Kali 33057b415bf6SAditya Kali index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 33067b415bf6SAditya Kali 33077b415bf6SAditya Kali while ((i >= lblk_start) && (i <= lblk_end)) { 33087b415bf6SAditya Kali page = find_get_page(mapping, index); 33095356f261SAditya Kali if (!page) 33107b415bf6SAditya Kali goto nextpage; 33117b415bf6SAditya Kali 33127b415bf6SAditya Kali if (!page_has_buffers(page)) 33137b415bf6SAditya Kali goto nextpage; 33147b415bf6SAditya Kali 33157b415bf6SAditya Kali head = page_buffers(page); 33167b415bf6SAditya Kali if (!head) 33177b415bf6SAditya Kali goto nextpage; 33187b415bf6SAditya Kali 33197b415bf6SAditya Kali bh = head; 33207b415bf6SAditya Kali pg_lblk = index << (PAGE_CACHE_SHIFT - 33217b415bf6SAditya Kali inode->i_blkbits); 33227b415bf6SAditya Kali do { 33237b415bf6SAditya Kali if (unlikely(pg_lblk < lblk_start)) { 33247b415bf6SAditya Kali /* 33257b415bf6SAditya Kali * This is possible when fs block size is less 33267b415bf6SAditya Kali * than page size and our cluster starts/ends in 33277b415bf6SAditya Kali * the middle of the page. So we need to skip the 33287b415bf6SAditya Kali * initial few blocks until we reach 'lblk_start' 33297b415bf6SAditya Kali */ 33307b415bf6SAditya Kali pg_lblk++; 33317b415bf6SAditya Kali continue; 33327b415bf6SAditya Kali } 33337b415bf6SAditya Kali 33345356f261SAditya Kali /* Check if the buffer is delayed-allocated and that it 33355356f261SAditya Kali * is not yet mapped. (when da-buffers are mapped during 33365356f261SAditya Kali * their writeout, their da_mapped bit is set.) 33375356f261SAditya Kali */ 33385356f261SAditya Kali if (buffer_delay(bh) && !buffer_da_mapped(bh)) { 33397b415bf6SAditya Kali page_cache_release(page); 3340d8990240SAditya Kali trace_ext4_find_delalloc_range(inode, 3341d8990240SAditya Kali lblk_start, lblk_end, 3342d8990240SAditya Kali search_hint_reverse, 3343d8990240SAditya Kali 1, i); 33447b415bf6SAditya Kali return 1; 33457b415bf6SAditya Kali } 33467b415bf6SAditya Kali if (search_hint_reverse) 33477b415bf6SAditya Kali i--; 33487b415bf6SAditya Kali else 33497b415bf6SAditya Kali i++; 33507b415bf6SAditya Kali } while ((i >= lblk_start) && (i <= lblk_end) && 33517b415bf6SAditya Kali ((bh = bh->b_this_page) != head)); 33527b415bf6SAditya Kali nextpage: 33537b415bf6SAditya Kali if (page) 33547b415bf6SAditya Kali page_cache_release(page); 33557b415bf6SAditya Kali /* 33567b415bf6SAditya Kali * Move to next page. 'i' will be the first lblk in the next 33577b415bf6SAditya Kali * page.
33587b415bf6SAditya Kali */ 33597b415bf6SAditya Kali if (search_hint_reverse) 33607b415bf6SAditya Kali index--; 33617b415bf6SAditya Kali else 33627b415bf6SAditya Kali index++; 33637b415bf6SAditya Kali i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 33647b415bf6SAditya Kali } 33657b415bf6SAditya Kali 3366d8990240SAditya Kali trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end, 3367d8990240SAditya Kali search_hint_reverse, 0, 0); 33687b415bf6SAditya Kali return 0; 33697b415bf6SAditya Kali } 33707b415bf6SAditya Kali 33717b415bf6SAditya Kali int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk, 33727b415bf6SAditya Kali int search_hint_reverse) 33737b415bf6SAditya Kali { 33747b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 33757b415bf6SAditya Kali ext4_lblk_t lblk_start, lblk_end; 33767b415bf6SAditya Kali lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); 33777b415bf6SAditya Kali lblk_end = lblk_start + sbi->s_cluster_ratio - 1; 33787b415bf6SAditya Kali 33797b415bf6SAditya Kali return ext4_find_delalloc_range(inode, lblk_start, lblk_end, 33807b415bf6SAditya Kali search_hint_reverse); 33817b415bf6SAditya Kali } 33827b415bf6SAditya Kali 33837b415bf6SAditya Kali /** 33847b415bf6SAditya Kali * Determines how many complete clusters (out of those specified by the 'map') 33857b415bf6SAditya Kali * are under delalloc and for which quota was reserved. 33867b415bf6SAditya Kali * This function is called when we are writing out the blocks that were 33877b415bf6SAditya Kali * originally written with their allocation delayed, but then the space was 33887b415bf6SAditya Kali * allocated using fallocate() before the delayed allocation could be resolved. 33897b415bf6SAditya Kali * The cases to look for are: 33907b415bf6SAditya Kali * ('=' indicates delayed allocated blocks 33917b415bf6SAditya Kali * '-' indicates non-delayed allocated blocks) 33927b415bf6SAditya Kali * (a) partial clusters towards beginning and/or end outside of allocated range 33937b415bf6SAditya Kali * are not delalloc'ed. 33947b415bf6SAditya Kali * Ex: 33957b415bf6SAditya Kali * |----c---=|====c====|====c====|===-c----| 33967b415bf6SAditya Kali * |++++++ allocated ++++++| 33977b415bf6SAditya Kali * ==> 4 complete clusters in the above example 33987b415bf6SAditya Kali * 33997b415bf6SAditya Kali * (b) partial cluster (outside of allocated range) towards either end is 34007b415bf6SAditya Kali * marked for delayed allocation. In this case, we will exclude that 34017b415bf6SAditya Kali * cluster. 34027b415bf6SAditya Kali * Ex: 34037b415bf6SAditya Kali * |----====c========|========c========| 34047b415bf6SAditya Kali * |++++++ allocated ++++++| 34057b415bf6SAditya Kali * ==> 1 complete cluster in the above example 34067b415bf6SAditya Kali * 34077b415bf6SAditya Kali * Ex: 34087b415bf6SAditya Kali * |================c================| 34097b415bf6SAditya Kali * |++++++ allocated ++++++| 34107b415bf6SAditya Kali * ==> 0 complete clusters in the above example 34117b415bf6SAditya Kali * 34127b415bf6SAditya Kali * ext4_da_update_reserve_space() will be called only if we 34137b415bf6SAditya Kali * determine here that there were some "entire" clusters that span 34147b415bf6SAditya Kali * this 'allocated' range. 34157b415bf6SAditya Kali * In the non-bigalloc case, this function will just end up returning num_blks 34167b415bf6SAditya Kali * without ever calling ext4_find_delalloc_range.
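 *
 * Worked example (assumed numbers, for illustration): with
 * s_cluster_ratio = 4, lblk_start = 5 and num_blks = 13 give
 * alloc_cluster_start = EXT4_B2C(sbi, 5) = 1 and
 * alloc_cluster_end = EXT4_B2C(sbi, 17) = 4, so at most 4 clusters.
 * The left partial cluster is then checked over blocks 4..4 and the
 * right one over blocks 18..19; each edge that turns out to be
 * delalloc'ed decrements the count by one.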
34177b415bf6SAditya Kali  */
34187b415bf6SAditya Kali static unsigned int
34197b415bf6SAditya Kali get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
34207b415bf6SAditya Kali 			   unsigned int num_blks)
34217b415bf6SAditya Kali {
34227b415bf6SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
34237b415bf6SAditya Kali 	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
34247b415bf6SAditya Kali 	ext4_lblk_t lblk_from, lblk_to, c_offset;
34257b415bf6SAditya Kali 	unsigned int allocated_clusters = 0;
34267b415bf6SAditya Kali 
34277b415bf6SAditya Kali 	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
34287b415bf6SAditya Kali 	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
34297b415bf6SAditya Kali 
34307b415bf6SAditya Kali 	/* max possible clusters for this allocation */
34317b415bf6SAditya Kali 	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
34327b415bf6SAditya Kali 
3433d8990240SAditya Kali 	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3434d8990240SAditya Kali 
34357b415bf6SAditya Kali 	/* Check towards left side */
34367b415bf6SAditya Kali 	c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
34377b415bf6SAditya Kali 	if (c_offset) {
34387b415bf6SAditya Kali 		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
34397b415bf6SAditya Kali 		lblk_to = lblk_from + c_offset - 1;
34407b415bf6SAditya Kali 
34417b415bf6SAditya Kali 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
34427b415bf6SAditya Kali 			allocated_clusters--;
34437b415bf6SAditya Kali 	}
34447b415bf6SAditya Kali 
34457b415bf6SAditya Kali 	/* Now check towards right. */
34467b415bf6SAditya Kali 	c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
34477b415bf6SAditya Kali 	if (allocated_clusters && c_offset) {
34487b415bf6SAditya Kali 		lblk_from = lblk_start + num_blks;
34497b415bf6SAditya Kali 		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
34507b415bf6SAditya Kali 
34517b415bf6SAditya Kali 		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
34527b415bf6SAditya Kali 			allocated_clusters--;
34537b415bf6SAditya Kali 	}
34547b415bf6SAditya Kali 
34557b415bf6SAditya Kali 	return allocated_clusters;
34567b415bf6SAditya Kali }
34577b415bf6SAditya Kali 
34580031462bSMingming Cao static int
34590031462bSMingming Cao ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3460e35fd660STheodore Ts'o 			struct ext4_map_blocks *map,
34610031462bSMingming Cao 			struct ext4_ext_path *path, int flags,
3462e35fd660STheodore Ts'o 			unsigned int allocated, ext4_fsblk_t newblock)
34630031462bSMingming Cao {
34640031462bSMingming Cao 	int ret = 0;
34650031462bSMingming Cao 	int err = 0;
34668d5d02e6SMingming Cao 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
34670031462bSMingming Cao 
34680031462bSMingming Cao 	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
34690031462bSMingming Cao 		  "block %llu, max_blocks %u, flags %d, allocated %u",
3470e35fd660STheodore Ts'o 		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
34710031462bSMingming Cao 		  flags, allocated);
34720031462bSMingming Cao 	ext4_ext_show_leaf(inode, path);
34730031462bSMingming Cao 
3474d8990240SAditya Kali 	trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
3475d8990240SAditya Kali 						    newblock);
3476d8990240SAditya Kali 
3477c7064ef1SJiaying Zhang 	/* get_block() before submitting the IO, split the extent */
3478744692dcSJiaying Zhang 	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3479e35fd660STheodore Ts'o 		ret = ext4_split_unwritten_extents(handle, inode, map,
3480e35fd660STheodore Ts'o 						   path, flags);
34815f524950SMingming 		/*
34825f524950SMingming 		 * Flag the inode (non-aio case) or end_io struct (aio case)
348325985edcSLucas De Marchi 		 * that this IO needs conversion to written when the IO is
34845f524950SMingming 		 * completed
34855f524950SMingming 		 */
3486b3ff0569STao Ma 		if (io) {
3487b3ff0569STao Ma 			if (!(io->flag & EXT4_IO_END_UNWRITTEN)) {
3488bd2d0210STheodore Ts'o 				io->flag = EXT4_IO_END_UNWRITTEN;
3489e9e3bcecSEric Sandeen 				atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
3490b3ff0569STao Ma 			}
3491e9e3bcecSEric Sandeen 		} else
349219f5fb7aSTheodore Ts'o 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3493744692dcSJiaying Zhang 		if (ext4_should_dioread_nolock(inode))
3494e35fd660STheodore Ts'o 			map->m_flags |= EXT4_MAP_UNINIT;
34950031462bSMingming Cao 		goto out;
34960031462bSMingming Cao 	}
3497c7064ef1SJiaying Zhang 	/* IO end_io complete, convert the filled extent to written */
3498744692dcSJiaying Zhang 	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3499c7064ef1SJiaying Zhang 		ret = ext4_convert_unwritten_extents_endio(handle, inode,
35000031462bSMingming Cao 							   path);
350158590b06STheodore Ts'o 		if (ret >= 0) {
3502b436b9beSJan Kara 			ext4_update_inode_fsync_trans(handle, inode, 1);
3503d002ebf1SEric Sandeen 			err = check_eofblocks_fl(handle, inode, map->m_lblk,
3504d002ebf1SEric Sandeen 						 path, map->m_len);
350558590b06STheodore Ts'o 		} else
350658590b06STheodore Ts'o 			err = ret;
35070031462bSMingming Cao 		goto out2;
35080031462bSMingming Cao 	}
35090031462bSMingming Cao 	/* buffered IO case */
35100031462bSMingming Cao 	/*
35110031462bSMingming Cao 	 * repeat fallocate creation request
35120031462bSMingming Cao 	 * we already have an unwritten extent
35130031462bSMingming Cao 	 */
35140031462bSMingming Cao 	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
35150031462bSMingming Cao 		goto map_out;
35160031462bSMingming Cao 
35170031462bSMingming Cao 	/* buffered READ or buffered write_begin() lookup */
35180031462bSMingming Cao 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
35190031462bSMingming Cao 		/*
35200031462bSMingming Cao 		 * We have blocks reserved already. We
35210031462bSMingming Cao 		 * return allocated blocks so that delalloc
35220031462bSMingming Cao 		 * won't do block reservation for us. But
35230031462bSMingming Cao 		 * the buffer head will be unmapped so that
35240031462bSMingming Cao 		 * a read from the block returns 0s.
35250031462bSMingming Cao 		 */
3526e35fd660STheodore Ts'o 		map->m_flags |= EXT4_MAP_UNWRITTEN;
35270031462bSMingming Cao 		goto out1;
35280031462bSMingming Cao 	}
35290031462bSMingming Cao 
35300031462bSMingming Cao 	/* buffered write, writepage time, convert */
3531e35fd660STheodore Ts'o 	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3532a4e5d88bSDmitry Monakhov 	if (ret >= 0)
3533b436b9beSJan Kara 		ext4_update_inode_fsync_trans(handle, inode, 1);
35340031462bSMingming Cao out:
35350031462bSMingming Cao 	if (ret <= 0) {
35360031462bSMingming Cao 		err = ret;
35370031462bSMingming Cao 		goto out2;
35380031462bSMingming Cao 	} else
35390031462bSMingming Cao 		allocated = ret;
3540e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_NEW;
3541515f41c3SAneesh Kumar K.V 	/*
3542515f41c3SAneesh Kumar K.V 	 * if we allocated more blocks than requested
3543515f41c3SAneesh Kumar K.V 	 * we need to make sure we unmap the extra block
3544515f41c3SAneesh Kumar K.V 	 * allocated. The actual needed block will get
3545515f41c3SAneesh Kumar K.V 	 * unmapped later when we find the buffer_head marked
3546515f41c3SAneesh Kumar K.V 	 * new.
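	 *
	 * For instance (illustrative numbers): if map->m_len == 2 but the
	 * conversion left allocated == 3, the metadata for the one extra
	 * block at newblock + 2 is unmapped below and allocated is clamped
	 * back to 2.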
3547515f41c3SAneesh Kumar K.V 	 */
3548e35fd660STheodore Ts'o 	if (allocated > map->m_len) {
3549515f41c3SAneesh Kumar K.V 		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3550e35fd660STheodore Ts'o 					newblock + map->m_len,
3551e35fd660STheodore Ts'o 					allocated - map->m_len);
3552e35fd660STheodore Ts'o 		allocated = map->m_len;
3553515f41c3SAneesh Kumar K.V 	}
35545f634d06SAneesh Kumar K.V 
35555f634d06SAneesh Kumar K.V 	/*
35565f634d06SAneesh Kumar K.V 	 * If we have done fallocate with an offset that is already
35575f634d06SAneesh Kumar K.V 	 * delayed allocated, we would have block reservation
35585f634d06SAneesh Kumar K.V 	 * and quota reservation done in the delayed write path.
35595f634d06SAneesh Kumar K.V 	 * But fallocate would have already updated quota and block
35605f634d06SAneesh Kumar K.V 	 * count for this offset. So cancel these reservations.
35615f634d06SAneesh Kumar K.V 	 */
35627b415bf6SAditya Kali 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
35637b415bf6SAditya Kali 		unsigned int reserved_clusters;
35647b415bf6SAditya Kali 		reserved_clusters = get_reserved_cluster_alloc(inode,
35657b415bf6SAditya Kali 				map->m_lblk, map->m_len);
35667b415bf6SAditya Kali 		if (reserved_clusters)
35677b415bf6SAditya Kali 			ext4_da_update_reserve_space(inode,
35687b415bf6SAditya Kali 						     reserved_clusters,
35697b415bf6SAditya Kali 						     0);
35707b415bf6SAditya Kali 	}
35715f634d06SAneesh Kumar K.V 
35720031462bSMingming Cao map_out:
3573e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_MAPPED;
3574a4e5d88bSDmitry Monakhov 	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3575a4e5d88bSDmitry Monakhov 		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3576a4e5d88bSDmitry Monakhov 					 map->m_len);
3577a4e5d88bSDmitry Monakhov 		if (err < 0)
3578a4e5d88bSDmitry Monakhov 			goto out2;
3579a4e5d88bSDmitry Monakhov 	}
35800031462bSMingming Cao out1:
3581e35fd660STheodore Ts'o 	if (allocated > map->m_len)
3582e35fd660STheodore Ts'o 		allocated = map->m_len;
35830031462bSMingming Cao 	ext4_ext_show_leaf(inode, path);
3584e35fd660STheodore Ts'o 	map->m_pblk = newblock;
3585e35fd660STheodore Ts'o 	map->m_len = allocated;
35860031462bSMingming Cao out2:
35870031462bSMingming Cao 	if (path) {
35880031462bSMingming Cao 		ext4_ext_drop_refs(path);
35890031462bSMingming Cao 		kfree(path);
35900031462bSMingming Cao 	}
35910031462bSMingming Cao 	return err ? err : allocated;
35920031462bSMingming Cao }
359358590b06STheodore Ts'o 
35940031462bSMingming Cao /*
35954d33b1efSTheodore Ts'o  * get_implied_cluster_alloc - check to see if the requested
35964d33b1efSTheodore Ts'o  * allocation (in the map structure) overlaps with a cluster already
35974d33b1efSTheodore Ts'o  * allocated in an extent.
3598d8990240SAditya Kali  *	@sb	The filesystem superblock structure
35994d33b1efSTheodore Ts'o  *	@map	The requested lblk->pblk mapping
36004d33b1efSTheodore Ts'o  *	@ex	The extent structure which might contain an implied
36014d33b1efSTheodore Ts'o  *		cluster allocation
36024d33b1efSTheodore Ts'o  *
36034d33b1efSTheodore Ts'o  * This function is called by ext4_ext_map_blocks() after we failed to
36044d33b1efSTheodore Ts'o  * find blocks that were already in the inode's extent tree.  Hence,
36054d33b1efSTheodore Ts'o  * we know that the beginning of the requested region cannot overlap
36064d33b1efSTheodore Ts'o  * the extent from the inode's extent tree.  There are three cases we
36074d33b1efSTheodore Ts'o  * want to catch.  The first is this case:
36084d33b1efSTheodore Ts'o  *
36094d33b1efSTheodore Ts'o  *		 |--- cluster # N--|
36104d33b1efSTheodore Ts'o  *    |--- extent ---|	|---- requested region ---|
36114d33b1efSTheodore Ts'o  *			|==========|
36124d33b1efSTheodore Ts'o  *
36134d33b1efSTheodore Ts'o  * The second case that we need to test for is this one:
36144d33b1efSTheodore Ts'o  *
36154d33b1efSTheodore Ts'o  *   |--------- cluster # N ----------------|
36164d33b1efSTheodore Ts'o  *	   |--- requested region --|   |------- extent ----|
36174d33b1efSTheodore Ts'o  *	   |=======================|
36184d33b1efSTheodore Ts'o  *
36194d33b1efSTheodore Ts'o  * The third case is when the requested region lies between two extents
36204d33b1efSTheodore Ts'o  * within the same cluster:
36214d33b1efSTheodore Ts'o  *          |------------- cluster # N-------------|
36224d33b1efSTheodore Ts'o  * |----- ex -----|                  |---- ex_right ----|
36234d33b1efSTheodore Ts'o  *  |------ requested region ------|
36244d33b1efSTheodore Ts'o  *              |================|
36254d33b1efSTheodore Ts'o  *
36264d33b1efSTheodore Ts'o  * In each of the above cases, we need to set the map->m_pblk and
36274d33b1efSTheodore Ts'o  * map->m_len so they correspond to the extent labelled as
36284d33b1efSTheodore Ts'o  * "|====|" from cluster #N, since it is already in use for data in
36294d33b1efSTheodore Ts'o  * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
36304d33b1efSTheodore Ts'o  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
36314d33b1efSTheodore Ts'o  * as a new "allocated" block region.  Otherwise, we will return 0 and
36324d33b1efSTheodore Ts'o  * ext4_ext_map_blocks() will then allocate one or more new clusters
36334d33b1efSTheodore Ts'o  * by calling ext4_mb_new_blocks().
36344d33b1efSTheodore Ts'o  */
3635d8990240SAditya Kali static int get_implied_cluster_alloc(struct super_block *sb,
36364d33b1efSTheodore Ts'o 				     struct ext4_map_blocks *map,
36374d33b1efSTheodore Ts'o 				     struct ext4_extent *ex,
36384d33b1efSTheodore Ts'o 				     struct ext4_ext_path *path)
36394d33b1efSTheodore Ts'o {
3640d8990240SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(sb);
36414d33b1efSTheodore Ts'o 	ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
36424d33b1efSTheodore Ts'o 	ext4_lblk_t ex_cluster_start, ex_cluster_end;
36434d33b1efSTheodore Ts'o 	ext4_lblk_t rr_cluster_start, rr_cluster_end;
36444d33b1efSTheodore Ts'o 	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
36454d33b1efSTheodore Ts'o 	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
36464d33b1efSTheodore Ts'o 	unsigned short ee_len = ext4_ext_get_actual_len(ex);
36474d33b1efSTheodore Ts'o 
36484d33b1efSTheodore Ts'o 	/* The extent passed in that we are trying to match */
36494d33b1efSTheodore Ts'o 	ex_cluster_start = EXT4_B2C(sbi, ee_block);
36504d33b1efSTheodore Ts'o 	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
36514d33b1efSTheodore Ts'o 
36524d33b1efSTheodore Ts'o 	/* The requested region passed into ext4_map_blocks() */
36534d33b1efSTheodore Ts'o 	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
36544d33b1efSTheodore Ts'o 	rr_cluster_end = EXT4_B2C(sbi, map->m_lblk + map->m_len - 1);
36554d33b1efSTheodore Ts'o 
36564d33b1efSTheodore Ts'o 	if ((rr_cluster_start == ex_cluster_end) ||
36574d33b1efSTheodore Ts'o 	    (rr_cluster_start == ex_cluster_start)) {
36584d33b1efSTheodore Ts'o 		if (rr_cluster_start == ex_cluster_end)
36594d33b1efSTheodore Ts'o 			ee_start += ee_len - 1;
36604d33b1efSTheodore Ts'o 		map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
36614d33b1efSTheodore Ts'o 			c_offset;
36624d33b1efSTheodore Ts'o 		map->m_len = min(map->m_len,
36634d33b1efSTheodore Ts'o 			(unsigned) sbi->s_cluster_ratio - c_offset);
36644d33b1efSTheodore Ts'o 		/*
36654d33b1efSTheodore Ts'o 		 * Check for and handle this case:
36664d33b1efSTheodore Ts'o 		 *
36674d33b1efSTheodore Ts'o 		 *   |--------- cluster # N-------------|
36684d33b1efSTheodore Ts'o 		 *		       |------- extent ----|
36694d33b1efSTheodore Ts'o 		 *	   |--- requested region ---|
36704d33b1efSTheodore Ts'o 		 *	   |===========|
36714d33b1efSTheodore Ts'o 		 */
36724d33b1efSTheodore Ts'o 
36734d33b1efSTheodore Ts'o 		if (map->m_lblk < ee_block)
36744d33b1efSTheodore Ts'o 			map->m_len = min(map->m_len, ee_block - map->m_lblk);
36754d33b1efSTheodore Ts'o 
36764d33b1efSTheodore Ts'o 		/*
36774d33b1efSTheodore Ts'o 		 * Check for the case where there is already another allocated
36784d33b1efSTheodore Ts'o 		 * block to the right of 'ex' but before the end of the cluster.
36794d33b1efSTheodore Ts'o 		 *
36804d33b1efSTheodore Ts'o 		 *	    |------------- cluster # N-------------|
36814d33b1efSTheodore Ts'o 		 * |----- ex -----|	             |---- ex_right ----|
36824d33b1efSTheodore Ts'o 		 *  |------ requested region ------|
36834d33b1efSTheodore Ts'o 		 *		    |================|
36844d33b1efSTheodore Ts'o 		 */
36854d33b1efSTheodore Ts'o 		if (map->m_lblk > ee_block) {
36864d33b1efSTheodore Ts'o 			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
36874d33b1efSTheodore Ts'o 			map->m_len = min(map->m_len, next - map->m_lblk);
36884d33b1efSTheodore Ts'o 		}
3689d8990240SAditya Kali 
3690d8990240SAditya Kali 		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
36914d33b1efSTheodore Ts'o 		return 1;
36924d33b1efSTheodore Ts'o 	}
3693d8990240SAditya Kali 
3694d8990240SAditya Kali 	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
36954d33b1efSTheodore Ts'o 	return 0;
36964d33b1efSTheodore Ts'o }
36974d33b1efSTheodore Ts'o 
36984d33b1efSTheodore Ts'o 
36994d33b1efSTheodore Ts'o /*
3700f5ab0d1fSMingming Cao  * Block allocation/map/preallocation routine for extent-based files
3701f5ab0d1fSMingming Cao  *
3702f5ab0d1fSMingming Cao  *
3703c278bfecSAneesh Kumar K.V  * Need to be called with
37040e855ac8SAneesh Kumar K.V  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
37050e855ac8SAneesh Kumar K.V  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3706f5ab0d1fSMingming Cao  *
3707f5ab0d1fSMingming Cao  * return > 0, number of blocks already mapped/allocated
3708f5ab0d1fSMingming Cao  *		if create == 0 and these are pre-allocated blocks
3709f5ab0d1fSMingming Cao  *		buffer head is unmapped
3710f5ab0d1fSMingming Cao  *		otherwise blocks are mapped
3711f5ab0d1fSMingming Cao  *
3712f5ab0d1fSMingming Cao  * return = 0, if plain look up failed (blocks have not been allocated)
3713f5ab0d1fSMingming Cao  *		buffer head is unmapped
3714f5ab0d1fSMingming Cao  *
3715f5ab0d1fSMingming Cao  * return < 0, error case.
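 *
 * A minimal caller sketch (illustrative only; locking and error handling
 * elided, and 'lblk'/'len' are hypothetical variables):
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
 *	int ret = ext4_ext_map_blocks(handle, inode, &map, 0);
 *	if (ret > 0)
 *		map.m_pblk holds the physical block backing 'lblk',
 *		and the mapping covers 'ret' blocks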
3716c278bfecSAneesh Kumar K.V */ 3717e35fd660STheodore Ts'o int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 3718e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 3719a86c6181SAlex Tomas { 3720a86c6181SAlex Tomas struct ext4_ext_path *path = NULL; 37214d33b1efSTheodore Ts'o struct ext4_extent newex, *ex, *ex2; 37224d33b1efSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 37230562e0baSJiaying Zhang ext4_fsblk_t newblock = 0; 37244d33b1efSTheodore Ts'o int free_on_err = 0, err = 0, depth, ret; 37254d33b1efSTheodore Ts'o unsigned int allocated = 0, offset = 0; 37267b415bf6SAditya Kali unsigned int allocated_clusters = 0, reserved_clusters = 0; 3727e861304bSAllison Henderson unsigned int punched_out = 0; 3728e861304bSAllison Henderson unsigned int result = 0; 3729c9de560dSAlex Tomas struct ext4_allocation_request ar; 37308d5d02e6SMingming Cao ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 37314d33b1efSTheodore Ts'o ext4_lblk_t cluster_offset; 3732e861304bSAllison Henderson struct ext4_map_blocks punch_map; 3733a86c6181SAlex Tomas 373484fe3befSMingming ext_debug("blocks %u/%u requested for inode %lu\n", 3735e35fd660STheodore Ts'o map->m_lblk, map->m_len, inode->i_ino); 37360562e0baSJiaying Zhang trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 3737a86c6181SAlex Tomas 3738a86c6181SAlex Tomas /* check in cache */ 3739015861baSRobin Dong if (!(flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) && 3740015861baSRobin Dong ext4_ext_in_cache(inode, map->m_lblk, &newex)) { 3741b05e6ae5STheodore Ts'o if (!newex.ee_start_lo && !newex.ee_start_hi) { 37427b415bf6SAditya Kali if ((sbi->s_cluster_ratio > 1) && 37437b415bf6SAditya Kali ext4_find_delalloc_cluster(inode, map->m_lblk, 0)) 37447b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 37457b415bf6SAditya Kali 3746c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 374756055d3aSAmit Arora /* 374856055d3aSAmit Arora * block isn't allocated yet and 374956055d3aSAmit Arora * user doesn't want to allocate it 375056055d3aSAmit Arora */ 3751a86c6181SAlex Tomas goto out2; 3752a86c6181SAlex Tomas } 3753a86c6181SAlex Tomas /* we should allocate requested block */ 3754b05e6ae5STheodore Ts'o } else { 3755a86c6181SAlex Tomas /* block is already allocated */ 37567b415bf6SAditya Kali if (sbi->s_cluster_ratio > 1) 37577b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 3758e35fd660STheodore Ts'o newblock = map->m_lblk 3759a86c6181SAlex Tomas - le32_to_cpu(newex.ee_block) 3760bf89d16fSTheodore Ts'o + ext4_ext_pblock(&newex); 3761d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 3762b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex) - 3763e35fd660STheodore Ts'o (map->m_lblk - le32_to_cpu(newex.ee_block)); 3764a86c6181SAlex Tomas goto out; 3765a86c6181SAlex Tomas } 3766a86c6181SAlex Tomas } 3767a86c6181SAlex Tomas 3768a86c6181SAlex Tomas /* find extent for this block */ 3769e35fd660STheodore Ts'o path = ext4_ext_find_extent(inode, map->m_lblk, NULL); 3770a86c6181SAlex Tomas if (IS_ERR(path)) { 3771a86c6181SAlex Tomas err = PTR_ERR(path); 3772a86c6181SAlex Tomas path = NULL; 3773a86c6181SAlex Tomas goto out2; 3774a86c6181SAlex Tomas } 3775a86c6181SAlex Tomas 3776a86c6181SAlex Tomas depth = ext_depth(inode); 3777a86c6181SAlex Tomas 3778a86c6181SAlex Tomas /* 3779d0d856e8SRandy Dunlap * consistent leaf must not be empty; 3780d0d856e8SRandy Dunlap * this situation is possible, though, _during_ tree modification; 3781a86c6181SAlex Tomas * this is why 
assert can't be put in ext4_ext_find_extent() 3782a86c6181SAlex Tomas */ 3783273df556SFrank Mayhar if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 3784273df556SFrank Mayhar EXT4_ERROR_INODE(inode, "bad extent address " 3785f70f362bSTheodore Ts'o "lblock: %lu, depth: %d pblock %lld", 3786f70f362bSTheodore Ts'o (unsigned long) map->m_lblk, depth, 3787f70f362bSTheodore Ts'o path[depth].p_block); 3788034fb4c9SSurbhi Palande err = -EIO; 3789034fb4c9SSurbhi Palande goto out2; 3790034fb4c9SSurbhi Palande } 3791a86c6181SAlex Tomas 37927e028976SAvantika Mathur ex = path[depth].p_ext; 37937e028976SAvantika Mathur if (ex) { 3794725d26d3SAneesh Kumar K.V ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); 3795bf89d16fSTheodore Ts'o ext4_fsblk_t ee_start = ext4_ext_pblock(ex); 3796a2df2a63SAmit Arora unsigned short ee_len; 3797471d4011SSuparna Bhattacharya 3798471d4011SSuparna Bhattacharya /* 3799471d4011SSuparna Bhattacharya * Uninitialized extents are treated as holes, except that 380056055d3aSAmit Arora * we split out initialized portions during a write. 3801471d4011SSuparna Bhattacharya */ 3802a2df2a63SAmit Arora ee_len = ext4_ext_get_actual_len(ex); 3803d8990240SAditya Kali 3804d8990240SAditya Kali trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len); 3805d8990240SAditya Kali 3806d0d856e8SRandy Dunlap /* if found extent covers block, simply return it */ 3807e35fd660STheodore Ts'o if (in_range(map->m_lblk, ee_block, ee_len)) { 38080aa06000STheodore Ts'o ext4_fsblk_t partial_cluster = 0; 38090aa06000STheodore Ts'o 3810e35fd660STheodore Ts'o newblock = map->m_lblk - ee_block + ee_start; 3811d0d856e8SRandy Dunlap /* number of remaining blocks in the extent */ 3812e35fd660STheodore Ts'o allocated = ee_len - (map->m_lblk - ee_block); 3813e35fd660STheodore Ts'o ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, 3814a86c6181SAlex Tomas ee_block, ee_len, newblock); 381556055d3aSAmit Arora 3816e861304bSAllison Henderson if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) { 3817e861304bSAllison Henderson /* 3818e861304bSAllison Henderson * Do not put uninitialized extent 3819e861304bSAllison Henderson * in the cache 3820e861304bSAllison Henderson */ 382156055d3aSAmit Arora if (!ext4_ext_is_uninitialized(ex)) { 3822a2df2a63SAmit Arora ext4_ext_put_in_cache(inode, ee_block, 3823b05e6ae5STheodore Ts'o ee_len, ee_start); 3824a86c6181SAlex Tomas goto out; 3825a86c6181SAlex Tomas } 3826e861304bSAllison Henderson ret = ext4_ext_handle_uninitialized_extents( 3827e861304bSAllison Henderson handle, inode, map, path, flags, 3828e861304bSAllison Henderson allocated, newblock); 38290031462bSMingming Cao return ret; 383056055d3aSAmit Arora } 3831e861304bSAllison Henderson 3832e861304bSAllison Henderson /* 3833e861304bSAllison Henderson * Punch out the map length, but only to the 3834e861304bSAllison Henderson * end of the extent 3835e861304bSAllison Henderson */ 3836e861304bSAllison Henderson punched_out = allocated < map->m_len ? 
3837e861304bSAllison Henderson 					allocated : map->m_len;
3838e861304bSAllison Henderson 
3839e861304bSAllison Henderson 			/*
3840e861304bSAllison Henderson 			 * Since extents need to be converted to
3841e861304bSAllison Henderson 			 * uninitialized, they must fit in an
3842e861304bSAllison Henderson 			 * uninitialized extent
3843e861304bSAllison Henderson 			 */
3844e861304bSAllison Henderson 			if (punched_out > EXT_UNINIT_MAX_LEN)
3845e861304bSAllison Henderson 				punched_out = EXT_UNINIT_MAX_LEN;
3846e861304bSAllison Henderson 
3847e861304bSAllison Henderson 			punch_map.m_lblk = map->m_lblk;
3848e861304bSAllison Henderson 			punch_map.m_pblk = newblock;
3849e861304bSAllison Henderson 			punch_map.m_len = punched_out;
3850e861304bSAllison Henderson 			punch_map.m_flags = 0;
3851e861304bSAllison Henderson 
3852e861304bSAllison Henderson 			/* Check to see if the extent needs to be split */
3853e861304bSAllison Henderson 			if (punch_map.m_len != ee_len ||
3854e861304bSAllison Henderson 			    punch_map.m_lblk != ee_block) {
3855e861304bSAllison Henderson 
3856e861304bSAllison Henderson 				ret = ext4_split_extent(handle, inode,
3857e861304bSAllison Henderson 						path, &punch_map, 0,
3858e861304bSAllison Henderson 						EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
3859e861304bSAllison Henderson 						EXT4_GET_BLOCKS_PRE_IO);
3860e861304bSAllison Henderson 
3861e861304bSAllison Henderson 				if (ret < 0) {
3862e861304bSAllison Henderson 					err = ret;
3863e861304bSAllison Henderson 					goto out2;
3864e861304bSAllison Henderson 				}
3865e861304bSAllison Henderson 				/*
3866e861304bSAllison Henderson 				 * find extent for the block at
3867e861304bSAllison Henderson 				 * the start of the hole
3868e861304bSAllison Henderson 				 */
3869e861304bSAllison Henderson 				ext4_ext_drop_refs(path);
3870e861304bSAllison Henderson 				kfree(path);
3871e861304bSAllison Henderson 
3872e861304bSAllison Henderson 				path = ext4_ext_find_extent(inode,
3873e861304bSAllison Henderson 						map->m_lblk, NULL);
3874e861304bSAllison Henderson 				if (IS_ERR(path)) {
3875e861304bSAllison Henderson 					err = PTR_ERR(path);
3876e861304bSAllison Henderson 					path = NULL;
3877e861304bSAllison Henderson 					goto out2;
3878e861304bSAllison Henderson 				}
3879e861304bSAllison Henderson 
3880e861304bSAllison Henderson 				depth = ext_depth(inode);
3881e861304bSAllison Henderson 				ex = path[depth].p_ext;
3882e861304bSAllison Henderson 				ee_len = ext4_ext_get_actual_len(ex);
3883e861304bSAllison Henderson 				ee_block = le32_to_cpu(ex->ee_block);
3884e861304bSAllison Henderson 				ee_start = ext4_ext_pblock(ex);
3885e861304bSAllison Henderson 
3886e861304bSAllison Henderson 			}
3887e861304bSAllison Henderson 
3888e861304bSAllison Henderson 			ext4_ext_mark_uninitialized(ex);
3889e861304bSAllison Henderson 
3890f7d0d379SAllison Henderson 			ext4_ext_invalidate_cache(inode);
3891f7d0d379SAllison Henderson 
3892f7d0d379SAllison Henderson 			err = ext4_ext_rm_leaf(handle, inode, path,
38930aa06000STheodore Ts'o 					       &partial_cluster, map->m_lblk,
38940aa06000STheodore Ts'o 					       map->m_lblk + punched_out);
3895f7d0d379SAllison Henderson 
3896f7d0d379SAllison Henderson 			if (!err && path->p_hdr->eh_entries == 0) {
3897f7d0d379SAllison Henderson 				/*
3898f7d0d379SAllison Henderson 				 * Punch hole freed all of this subtree,
3899f7d0d379SAllison Henderson 				 * so we need to correct eh_depth
3900f7d0d379SAllison Henderson 				 */
3901f7d0d379SAllison Henderson 				err = ext4_ext_get_access(handle, inode, path);
3902f7d0d379SAllison Henderson 				if (err == 0) {
3903f7d0d379SAllison Henderson 					ext_inode_hdr(inode)->eh_depth = 0;
3904f7d0d379SAllison Henderson 					ext_inode_hdr(inode)->eh_max =
3905f7d0d379SAllison Henderson
cpu_to_le16(ext4_ext_space_root( 3906f7d0d379SAllison Henderson inode, 0)); 3907f7d0d379SAllison Henderson 3908f7d0d379SAllison Henderson err = ext4_ext_dirty( 3909f7d0d379SAllison Henderson handle, inode, path); 3910f7d0d379SAllison Henderson } 3911f7d0d379SAllison Henderson } 3912e861304bSAllison Henderson 3913e861304bSAllison Henderson goto out2; 3914e861304bSAllison Henderson } 3915a86c6181SAlex Tomas } 3916a86c6181SAlex Tomas 39177b415bf6SAditya Kali if ((sbi->s_cluster_ratio > 1) && 39187b415bf6SAditya Kali ext4_find_delalloc_cluster(inode, map->m_lblk, 0)) 39197b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 39207b415bf6SAditya Kali 3921a86c6181SAlex Tomas /* 3922d0d856e8SRandy Dunlap * requested block isn't allocated yet; 3923a86c6181SAlex Tomas * we couldn't try to create block if create flag is zero 3924a86c6181SAlex Tomas */ 3925c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 392656055d3aSAmit Arora /* 392756055d3aSAmit Arora * put just found gap into cache to speed up 392856055d3aSAmit Arora * subsequent requests 392956055d3aSAmit Arora */ 3930e35fd660STheodore Ts'o ext4_ext_put_gap_in_cache(inode, path, map->m_lblk); 3931a86c6181SAlex Tomas goto out2; 3932a86c6181SAlex Tomas } 39334d33b1efSTheodore Ts'o 3934a86c6181SAlex Tomas /* 3935c2ea3fdeSTheodore Ts'o * Okay, we need to do block allocation. 3936a86c6181SAlex Tomas */ 39377b415bf6SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 39384d33b1efSTheodore Ts'o newex.ee_block = cpu_to_le32(map->m_lblk); 39394d33b1efSTheodore Ts'o cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); 39404d33b1efSTheodore Ts'o 39414d33b1efSTheodore Ts'o /* 39424d33b1efSTheodore Ts'o * If we are doing bigalloc, check to see if the extent returned 39434d33b1efSTheodore Ts'o * by ext4_ext_find_extent() implies a cluster we can use. 39444d33b1efSTheodore Ts'o */ 39454d33b1efSTheodore Ts'o if (cluster_offset && ex && 3946d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 39474d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 39484d33b1efSTheodore Ts'o newblock = map->m_pblk; 39497b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 39504d33b1efSTheodore Ts'o goto got_allocated_blocks; 39514d33b1efSTheodore Ts'o } 3952a86c6181SAlex Tomas 3953c9de560dSAlex Tomas /* find neighbour allocated blocks */ 3954e35fd660STheodore Ts'o ar.lleft = map->m_lblk; 3955c9de560dSAlex Tomas err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft); 3956c9de560dSAlex Tomas if (err) 3957c9de560dSAlex Tomas goto out2; 3958e35fd660STheodore Ts'o ar.lright = map->m_lblk; 39594d33b1efSTheodore Ts'o ex2 = NULL; 39604d33b1efSTheodore Ts'o err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); 3961c9de560dSAlex Tomas if (err) 3962c9de560dSAlex Tomas goto out2; 396325d14f98SAmit Arora 39644d33b1efSTheodore Ts'o /* Check if the extent after searching to the right implies a 39654d33b1efSTheodore Ts'o * cluster we can use. 
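	 * (This is the "ex_right" case pictured in the comment above
	 * get_implied_cluster_alloc(); here it is tested against the
	 * extent returned by ext4_ext_search_right().)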
*/ 39664d33b1efSTheodore Ts'o if ((sbi->s_cluster_ratio > 1) && ex2 && 3967d8990240SAditya Kali get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 39684d33b1efSTheodore Ts'o ar.len = allocated = map->m_len; 39694d33b1efSTheodore Ts'o newblock = map->m_pblk; 39707b415bf6SAditya Kali map->m_flags |= EXT4_MAP_FROM_CLUSTER; 39714d33b1efSTheodore Ts'o goto got_allocated_blocks; 39724d33b1efSTheodore Ts'o } 39734d33b1efSTheodore Ts'o 3974749269faSAmit Arora /* 3975749269faSAmit Arora * See if request is beyond maximum number of blocks we can have in 3976749269faSAmit Arora * a single extent. For an initialized extent this limit is 3977749269faSAmit Arora * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is 3978749269faSAmit Arora * EXT_UNINIT_MAX_LEN. 3979749269faSAmit Arora */ 3980e35fd660STheodore Ts'o if (map->m_len > EXT_INIT_MAX_LEN && 3981c2177057STheodore Ts'o !(flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3982e35fd660STheodore Ts'o map->m_len = EXT_INIT_MAX_LEN; 3983e35fd660STheodore Ts'o else if (map->m_len > EXT_UNINIT_MAX_LEN && 3984c2177057STheodore Ts'o (flags & EXT4_GET_BLOCKS_UNINIT_EXT)) 3985e35fd660STheodore Ts'o map->m_len = EXT_UNINIT_MAX_LEN; 3986749269faSAmit Arora 3987e35fd660STheodore Ts'o /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */ 3988e35fd660STheodore Ts'o newex.ee_len = cpu_to_le16(map->m_len); 39894d33b1efSTheodore Ts'o err = ext4_ext_check_overlap(sbi, inode, &newex, path); 399025d14f98SAmit Arora if (err) 3991b939e376SAneesh Kumar K.V allocated = ext4_ext_get_actual_len(&newex); 399225d14f98SAmit Arora else 3993e35fd660STheodore Ts'o allocated = map->m_len; 3994c9de560dSAlex Tomas 3995c9de560dSAlex Tomas /* allocate new block */ 3996c9de560dSAlex Tomas ar.inode = inode; 3997e35fd660STheodore Ts'o ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); 3998e35fd660STheodore Ts'o ar.logical = map->m_lblk; 39994d33b1efSTheodore Ts'o /* 40004d33b1efSTheodore Ts'o * We calculate the offset from the beginning of the cluster 40014d33b1efSTheodore Ts'o * for the logical block number, since when we allocate a 40024d33b1efSTheodore Ts'o * physical cluster, the physical block should start at the 40034d33b1efSTheodore Ts'o * same offset from the beginning of the cluster. This is 40044d33b1efSTheodore Ts'o * needed so that future calls to get_implied_cluster_alloc() 40054d33b1efSTheodore Ts'o * work correctly. 
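	 *
	 * Worked example with assumed numbers: if s_cluster_ratio == 16 and
	 * map->m_lblk == 100, then offset == 100 & 15 == 4; ar.goal and
	 * ar.logical are pulled back by 4, and ar.len is rounded up to whole
	 * clusters below via EXT4_NUM_B2C(sbi, offset + allocated).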
40064d33b1efSTheodore Ts'o 	 */
40074d33b1efSTheodore Ts'o 	offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
40084d33b1efSTheodore Ts'o 	ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
40094d33b1efSTheodore Ts'o 	ar.goal -= offset;
40104d33b1efSTheodore Ts'o 	ar.logical -= offset;
4011c9de560dSAlex Tomas 	if (S_ISREG(inode->i_mode))
4012c9de560dSAlex Tomas 		ar.flags = EXT4_MB_HINT_DATA;
4013c9de560dSAlex Tomas 	else
4014c9de560dSAlex Tomas 		/* disable in-core preallocation for non-regular files */
4015c9de560dSAlex Tomas 		ar.flags = 0;
4016556b27abSVivek Haldar 	if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4017556b27abSVivek Haldar 		ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4018c9de560dSAlex Tomas 	newblock = ext4_mb_new_blocks(handle, &ar, &err);
4019a86c6181SAlex Tomas 	if (!newblock)
4020a86c6181SAlex Tomas 		goto out2;
402184fe3befSMingming 	ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4022498e5f24STheodore Ts'o 		  ar.goal, newblock, allocated);
40234d33b1efSTheodore Ts'o 	free_on_err = 1;
40247b415bf6SAditya Kali 	allocated_clusters = ar.len;
40254d33b1efSTheodore Ts'o 	ar.len = EXT4_C2B(sbi, ar.len) - offset;
40264d33b1efSTheodore Ts'o 	if (ar.len > allocated)
40274d33b1efSTheodore Ts'o 		ar.len = allocated;
4028a86c6181SAlex Tomas 
40294d33b1efSTheodore Ts'o got_allocated_blocks:
4030a86c6181SAlex Tomas 	/* try to insert new extent into found leaf and return */
40314d33b1efSTheodore Ts'o 	ext4_ext_store_pblock(&newex, newblock + offset);
4032c9de560dSAlex Tomas 	newex.ee_len = cpu_to_le16(ar.len);
40338d5d02e6SMingming Cao 	/* Mark uninitialized */
40348d5d02e6SMingming Cao 	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4035a2df2a63SAmit Arora 		ext4_ext_mark_uninitialized(&newex);
40368d5d02e6SMingming Cao 		/*
4037744692dcSJiaying Zhang 		 * io_end structure was created for every IO write to an
403825985edcSLucas De Marchi 		 * uninitialized extent. To avoid unnecessary conversion,
4039744692dcSJiaying Zhang 		 * here we flag the IO that really needs the conversion.
40405f524950SMingming 		 * For the non-async direct IO case, flag the inode state
404125985edcSLucas De Marchi 		 * that we need to perform conversion when the IO is done.
40428d5d02e6SMingming Cao 		 */
4043744692dcSJiaying Zhang 		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
4044b3ff0569STao Ma 			if (io) {
4045b3ff0569STao Ma 				if (!(io->flag & EXT4_IO_END_UNWRITTEN)) {
4046bd2d0210STheodore Ts'o 					io->flag = EXT4_IO_END_UNWRITTEN;
4047e9e3bcecSEric Sandeen 					atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
4048b3ff0569STao Ma 				}
4049e9e3bcecSEric Sandeen 			} else
405019f5fb7aSTheodore Ts'o 				ext4_set_inode_state(inode,
405119f5fb7aSTheodore Ts'o 						     EXT4_STATE_DIO_UNWRITTEN);
40525f524950SMingming 		}
4053744692dcSJiaying Zhang 		if (ext4_should_dioread_nolock(inode))
4054e35fd660STheodore Ts'o 			map->m_flags |= EXT4_MAP_UNINIT;
40558d5d02e6SMingming Cao 	}
4056c8d46e41SJiaying Zhang 
4057a4e5d88bSDmitry Monakhov 	err = 0;
4058a4e5d88bSDmitry Monakhov 	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4059a4e5d88bSDmitry Monakhov 		err = check_eofblocks_fl(handle, inode, map->m_lblk,
4060a4e5d88bSDmitry Monakhov 					 path, ar.len);
4061575a1d4bSJiaying Zhang 	if (!err)
4062575a1d4bSJiaying Zhang 		err = ext4_ext_insert_extent(handle, inode, path,
4063575a1d4bSJiaying Zhang 					     &newex, flags);
40644d33b1efSTheodore Ts'o 	if (err && free_on_err) {
40657132de74SMaxim Patlasov 		int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
40667132de74SMaxim Patlasov 			EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4067315054f0SAlex Tomas 		/* free data blocks we just allocated */
4068c9de560dSAlex Tomas 		/* not a good idea to call discard here directly,
4069c9de560dSAlex Tomas 		 * but otherwise we'd need to call it every free() */
4070c2ea3fdeSTheodore Ts'o 		ext4_discard_preallocations(inode);
40717dc57615SPeter Huewe 		ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
40727132de74SMaxim Patlasov 				 ext4_ext_get_actual_len(&newex), fb_flags);
4073a86c6181SAlex Tomas 		goto out2;
4074315054f0SAlex Tomas 	}
4075a86c6181SAlex Tomas 
4076a86c6181SAlex Tomas 	/* previous routine could use block we allocated */
4077bf89d16fSTheodore Ts'o 	newblock = ext4_ext_pblock(&newex);
4078b939e376SAneesh Kumar K.V 	allocated = ext4_ext_get_actual_len(&newex);
4079e35fd660STheodore Ts'o 	if (allocated > map->m_len)
4080e35fd660STheodore Ts'o 		allocated = map->m_len;
4081e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_NEW;
4082a86c6181SAlex Tomas 
4083b436b9beSJan Kara 	/*
40845f634d06SAneesh Kumar K.V 	 * Update reserved blocks/metadata blocks after successful
40855f634d06SAneesh Kumar K.V 	 * block allocation which had been deferred till now.
40865f634d06SAneesh Kumar K.V 	 */
40877b415bf6SAditya Kali 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
40887b415bf6SAditya Kali 		/*
40897b415bf6SAditya Kali 		 * Check how many clusters we had reserved for this allocated range.
40907b415bf6SAditya Kali 		 */
40917b415bf6SAditya Kali 		reserved_clusters = get_reserved_cluster_alloc(inode,
40927b415bf6SAditya Kali 						map->m_lblk, allocated);
40937b415bf6SAditya Kali 		if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
40947b415bf6SAditya Kali 			if (reserved_clusters) {
40957b415bf6SAditya Kali 				/*
40967b415bf6SAditya Kali 				 * We have clusters reserved for this range.
40977b415bf6SAditya Kali 				 * But since we are not doing actual allocation
40987b415bf6SAditya Kali 				 * and are simply using blocks from previously
40997b415bf6SAditya Kali 				 * allocated cluster, we should release the
41007b415bf6SAditya Kali 				 * reservation and not claim quota.
41017b415bf6SAditya Kali 				 */
41027b415bf6SAditya Kali 				ext4_da_update_reserve_space(inode,
41037b415bf6SAditya Kali 						reserved_clusters, 0);
41047b415bf6SAditya Kali 			}
41057b415bf6SAditya Kali 		} else {
41067b415bf6SAditya Kali 			BUG_ON(allocated_clusters < reserved_clusters);
41077b415bf6SAditya Kali 			/* We will claim quota for all newly allocated blocks. */
41087b415bf6SAditya Kali 			ext4_da_update_reserve_space(inode, allocated_clusters,
41097b415bf6SAditya Kali 						     1);
41107b415bf6SAditya Kali 			if (reserved_clusters < allocated_clusters) {
41115356f261SAditya Kali 				struct ext4_inode_info *ei = EXT4_I(inode);
41127b415bf6SAditya Kali 				int reservation = allocated_clusters -
41137b415bf6SAditya Kali 						  reserved_clusters;
41147b415bf6SAditya Kali 				/*
41157b415bf6SAditya Kali 				 * It seems we claimed a few clusters outside of
41167b415bf6SAditya Kali 				 * the range of this allocation. We should give
41177b415bf6SAditya Kali 				 * them back to the reservation pool. This can
41187b415bf6SAditya Kali 				 * happen in the following case:
41197b415bf6SAditya Kali 				 *
41207b415bf6SAditya Kali 				 * * Suppose s_cluster_ratio is 4 (i.e., each
41217b415bf6SAditya Kali 				 *   cluster has 4 blocks). Thus, the clusters
41227b415bf6SAditya Kali 				 *   are [0-3],[4-7],[8-11]...
41237b415bf6SAditya Kali 				 * * First comes a delayed allocation write for
41247b415bf6SAditya Kali 				 *   logical blocks 10 & 11.  Since there were no
41257b415bf6SAditya Kali 				 *   previous delayed allocated blocks in the
41267b415bf6SAditya Kali 				 *   range [8-11], we would reserve 1 cluster
41277b415bf6SAditya Kali 				 *   for this write.
41287b415bf6SAditya Kali 				 * * Next comes a write for logical blocks 3 to 8.
41297b415bf6SAditya Kali 				 *   In this case, we will reserve 2 clusters
41307b415bf6SAditya Kali 				 *   (for [0-3] and [4-7], and not for [8-11], as
41317b415bf6SAditya Kali 				 *   that range has delayed allocated blocks).
41327b415bf6SAditya Kali 				 *   Thus total reserved clusters now becomes 3.
41337b415bf6SAditya Kali 				 * * Now, during the delayed allocation writeout
41347b415bf6SAditya Kali 				 *   time, we will first write blocks [3-8] and
41357b415bf6SAditya Kali 				 *   allocate 3 clusters for writing these
41367b415bf6SAditya Kali 				 *   blocks. Also, we would claim all these
41377b415bf6SAditya Kali 				 *   three clusters above.
41387b415bf6SAditya Kali 				 * * Now when we come here to writeout the
41397b415bf6SAditya Kali 				 *   blocks [10-11], we would expect to claim
41407b415bf6SAditya Kali 				 *   the reservation of 1 cluster we had made
41417b415bf6SAditya Kali 				 *   (and we would claim it since there are no
41427b415bf6SAditya Kali 				 *   more delayed allocated blocks in the range
41437b415bf6SAditya Kali 				 *   [8-11]). But our reserved cluster count had
41447b415bf6SAditya Kali 				 *   already gone to 0.
41457b415bf6SAditya Kali 				 *
41467b415bf6SAditya Kali 				 * Thus, at step 4 above, when we determine
41477b415bf6SAditya Kali 				 * that there are still some unwritten delayed
41487b415bf6SAditya Kali 				 * allocated blocks outside of our current
41497b415bf6SAditya Kali 				 * block range, we should increment the
41507b415bf6SAditya Kali 				 * reserved clusters count so that when the
41517b415bf6SAditya Kali 				 * remaining blocks finally get written, we
41527b415bf6SAditya Kali 				 * could claim them.
41537b415bf6SAditya Kali 				 */
41545356f261SAditya Kali 				dquot_reserve_block(inode,
41555356f261SAditya Kali 						EXT4_C2B(sbi, reservation));
41565356f261SAditya Kali 				spin_lock(&ei->i_block_reservation_lock);
41575356f261SAditya Kali 				ei->i_reserved_data_blocks += reservation;
41585356f261SAditya Kali 				spin_unlock(&ei->i_block_reservation_lock);
41597b415bf6SAditya Kali 			}
41607b415bf6SAditya Kali 		}
41617b415bf6SAditya Kali 	}
41625f634d06SAneesh Kumar K.V 
41635f634d06SAneesh Kumar K.V 	/*
4164b436b9beSJan Kara 	 * Cache the extent and update transaction to commit on fdatasync only
4165b436b9beSJan Kara 	 * when it is _not_ an uninitialized extent.
4166b436b9beSJan Kara 	 */
4167b436b9beSJan Kara 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4168b05e6ae5STheodore Ts'o 		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4169b436b9beSJan Kara 		ext4_update_inode_fsync_trans(handle, inode, 1);
4170b436b9beSJan Kara 	} else
4171b436b9beSJan Kara 		ext4_update_inode_fsync_trans(handle, inode, 0);
4172a86c6181SAlex Tomas out:
4173e35fd660STheodore Ts'o 	if (allocated > map->m_len)
4174e35fd660STheodore Ts'o 		allocated = map->m_len;
4175a86c6181SAlex Tomas 	ext4_ext_show_leaf(inode, path);
4176e35fd660STheodore Ts'o 	map->m_flags |= EXT4_MAP_MAPPED;
4177e35fd660STheodore Ts'o 	map->m_pblk = newblock;
4178e35fd660STheodore Ts'o 	map->m_len = allocated;
4179a86c6181SAlex Tomas out2:
4180a86c6181SAlex Tomas 	if (path) {
4181a86c6181SAlex Tomas 		ext4_ext_drop_refs(path);
4182a86c6181SAlex Tomas 		kfree(path);
4183a86c6181SAlex Tomas 	}
41840562e0baSJiaying Zhang 	trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
41850562e0baSJiaying Zhang 				       newblock, map->m_len, err ?
err : allocated); 4186e861304bSAllison Henderson 4187e861304bSAllison Henderson result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ? 4188e861304bSAllison Henderson punched_out : allocated; 4189e861304bSAllison Henderson 4190e861304bSAllison Henderson return err ? err : result; 4191a86c6181SAlex Tomas } 4192a86c6181SAlex Tomas 4193cf108bcaSJan Kara void ext4_ext_truncate(struct inode *inode) 4194a86c6181SAlex Tomas { 4195a86c6181SAlex Tomas struct address_space *mapping = inode->i_mapping; 4196a86c6181SAlex Tomas struct super_block *sb = inode->i_sb; 4197725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 4198a86c6181SAlex Tomas handle_t *handle; 4199189e868fSAllison Henderson loff_t page_len; 4200a86c6181SAlex Tomas int err = 0; 4201a86c6181SAlex Tomas 4202a86c6181SAlex Tomas /* 42033889fd57SJiaying Zhang * finish any pending end_io work so we won't run the risk of 42043889fd57SJiaying Zhang * converting any truncated blocks to initialized later 42053889fd57SJiaying Zhang */ 42063889fd57SJiaying Zhang ext4_flush_completed_IO(inode); 42073889fd57SJiaying Zhang 42083889fd57SJiaying Zhang /* 4209a86c6181SAlex Tomas * probably first extent we're gonna free will be last in block 4210a86c6181SAlex Tomas */ 4211f3bd1f3fSMingming Cao err = ext4_writepage_trans_blocks(inode); 4212a86c6181SAlex Tomas handle = ext4_journal_start(inode, err); 4213cf108bcaSJan Kara if (IS_ERR(handle)) 4214a86c6181SAlex Tomas return; 4215a86c6181SAlex Tomas 4216189e868fSAllison Henderson if (inode->i_size % PAGE_CACHE_SIZE != 0) { 4217189e868fSAllison Henderson page_len = PAGE_CACHE_SIZE - 4218189e868fSAllison Henderson (inode->i_size & (PAGE_CACHE_SIZE - 1)); 4219189e868fSAllison Henderson 4220189e868fSAllison Henderson err = ext4_discard_partial_page_buffers(handle, 4221189e868fSAllison Henderson mapping, inode->i_size, page_len, 0); 4222189e868fSAllison Henderson 4223189e868fSAllison Henderson if (err) 4224189e868fSAllison Henderson goto out_stop; 4225189e868fSAllison Henderson } 4226a86c6181SAlex Tomas 42279ddfc3dcSJan Kara if (ext4_orphan_add(handle, inode)) 42289ddfc3dcSJan Kara goto out_stop; 42299ddfc3dcSJan Kara 42300e855ac8SAneesh Kumar K.V down_write(&EXT4_I(inode)->i_data_sem); 4231a86c6181SAlex Tomas ext4_ext_invalidate_cache(inode); 4232a86c6181SAlex Tomas 4233c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 4234c9de560dSAlex Tomas 4235a86c6181SAlex Tomas /* 4236d0d856e8SRandy Dunlap * TODO: optimization is possible here. 4237d0d856e8SRandy Dunlap * Probably we need not scan at all, 4238d0d856e8SRandy Dunlap * because page truncation is enough. 4239a86c6181SAlex Tomas */ 4240a86c6181SAlex Tomas 4241a86c6181SAlex Tomas /* we have to know where to truncate from in crash case */ 4242a86c6181SAlex Tomas EXT4_I(inode)->i_disksize = inode->i_size; 4243a86c6181SAlex Tomas ext4_mark_inode_dirty(handle, inode); 4244a86c6181SAlex Tomas 4245a86c6181SAlex Tomas last_block = (inode->i_size + sb->s_blocksize - 1) 4246a86c6181SAlex Tomas >> EXT4_BLOCK_SIZE_BITS(sb); 4247c6a0371cSAllison Henderson err = ext4_ext_remove_space(inode, last_block); 4248a86c6181SAlex Tomas 4249a86c6181SAlex Tomas /* In a multi-transaction truncate, we only make the final 425056055d3aSAmit Arora * transaction synchronous. 
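	 *
	 * (If we crash between those transactions, the orphan record added
	 * above lets recovery finish the truncate.)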
425156055d3aSAmit Arora */ 4252a86c6181SAlex Tomas if (IS_SYNC(inode)) 42530390131bSFrank Mayhar ext4_handle_sync(handle); 4254a86c6181SAlex Tomas 42559ddfc3dcSJan Kara up_write(&EXT4_I(inode)->i_data_sem); 4256f6d2f6b3SEric Gouriou 4257f6d2f6b3SEric Gouriou out_stop: 4258a86c6181SAlex Tomas /* 4259d0d856e8SRandy Dunlap * If this was a simple ftruncate() and the file will remain alive, 4260a86c6181SAlex Tomas * then we need to clear up the orphan record which we created above. 4261a86c6181SAlex Tomas * However, if this was a real unlink then we were called by 4262a86c6181SAlex Tomas * ext4_delete_inode(), and we allow that function to clean up the 4263a86c6181SAlex Tomas * orphan info for us. 4264a86c6181SAlex Tomas */ 4265a86c6181SAlex Tomas if (inode->i_nlink) 4266a86c6181SAlex Tomas ext4_orphan_del(handle, inode); 4267a86c6181SAlex Tomas 4268ef737728SSolofo Ramangalahy inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 4269ef737728SSolofo Ramangalahy ext4_mark_inode_dirty(handle, inode); 4270a86c6181SAlex Tomas ext4_journal_stop(handle); 4271a86c6181SAlex Tomas } 4272a86c6181SAlex Tomas 4273fd28784aSAneesh Kumar K.V static void ext4_falloc_update_inode(struct inode *inode, 4274fd28784aSAneesh Kumar K.V int mode, loff_t new_size, int update_ctime) 4275fd28784aSAneesh Kumar K.V { 4276fd28784aSAneesh Kumar K.V struct timespec now; 4277fd28784aSAneesh Kumar K.V 4278fd28784aSAneesh Kumar K.V if (update_ctime) { 4279fd28784aSAneesh Kumar K.V now = current_fs_time(inode->i_sb); 4280fd28784aSAneesh Kumar K.V if (!timespec_equal(&inode->i_ctime, &now)) 4281fd28784aSAneesh Kumar K.V inode->i_ctime = now; 4282fd28784aSAneesh Kumar K.V } 4283fd28784aSAneesh Kumar K.V /* 4284fd28784aSAneesh Kumar K.V * Update only when preallocation was requested beyond 4285fd28784aSAneesh Kumar K.V * the file size. 4286fd28784aSAneesh Kumar K.V */ 4287cf17fea6SAneesh Kumar K.V if (!(mode & FALLOC_FL_KEEP_SIZE)) { 4288cf17fea6SAneesh Kumar K.V if (new_size > i_size_read(inode)) 4289fd28784aSAneesh Kumar K.V i_size_write(inode, new_size); 4290cf17fea6SAneesh Kumar K.V if (new_size > EXT4_I(inode)->i_disksize) 4291cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_size); 4292c8d46e41SJiaying Zhang } else { 4293c8d46e41SJiaying Zhang /* 4294c8d46e41SJiaying Zhang * Mark that we allocate beyond EOF so the subsequent truncate 4295c8d46e41SJiaying Zhang * can proceed even if the new size is the same as i_size. 4296c8d46e41SJiaying Zhang */ 4297c8d46e41SJiaying Zhang if (new_size > i_size_read(inode)) 429812e9b892SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 4299fd28784aSAneesh Kumar K.V } 4300fd28784aSAneesh Kumar K.V 4301fd28784aSAneesh Kumar K.V } 4302fd28784aSAneesh Kumar K.V 4303a2df2a63SAmit Arora /* 43042fe17c10SChristoph Hellwig * preallocate space for a file. This implements ext4's fallocate file 4305a2df2a63SAmit Arora * operation, which gets called from sys_fallocate system call. 4306a2df2a63SAmit Arora * For block-mapped files, posix_fallocate should fall back to the method 4307a2df2a63SAmit Arora * of writing zeroes to the required new blocks (the same behavior which is 4308a2df2a63SAmit Arora * expected for file systems which do not support fallocate() system call). 
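 *
 * For example (numbers reused from the block-math comment inside the
 * function below): with a 4096-byte block size, fallocate(fd, 0, 3072, 2048)
 * touches logical blocks 0 and 1, so max_blocks is computed as 2 even
 * though len >> blkbits == 0.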
4309a2df2a63SAmit Arora  */
43102fe17c10SChristoph Hellwig long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4311a2df2a63SAmit Arora {
43122fe17c10SChristoph Hellwig 	struct inode *inode = file->f_path.dentry->d_inode;
4313a2df2a63SAmit Arora 	handle_t *handle;
4314fd28784aSAneesh Kumar K.V 	loff_t new_size;
4315498e5f24STheodore Ts'o 	unsigned int max_blocks;
4316a2df2a63SAmit Arora 	int ret = 0;
4317a2df2a63SAmit Arora 	int ret2 = 0;
4318a2df2a63SAmit Arora 	int retries = 0;
4319a4e5d88bSDmitry Monakhov 	int flags;
43202ed88685STheodore Ts'o 	struct ext4_map_blocks map;
4321a2df2a63SAmit Arora 	unsigned int credits, blkbits = inode->i_blkbits;
4322a2df2a63SAmit Arora 
4323a2df2a63SAmit Arora 	/*
4324a2df2a63SAmit Arora 	 * currently supporting (pre)allocate mode for extent-based
4325a2df2a63SAmit Arora 	 * files _only_
4326a2df2a63SAmit Arora 	 */
432712e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4328a2df2a63SAmit Arora 		return -EOPNOTSUPP;
4329a2df2a63SAmit Arora 
4330a4bb6b64SAllison Henderson 	/* Return error if mode is not supported */
4331a4bb6b64SAllison Henderson 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4332a4bb6b64SAllison Henderson 		return -EOPNOTSUPP;
4333a4bb6b64SAllison Henderson 
4334a4bb6b64SAllison Henderson 	if (mode & FALLOC_FL_PUNCH_HOLE)
4335a4bb6b64SAllison Henderson 		return ext4_punch_hole(file, offset, len);
4336a4bb6b64SAllison Henderson 
43370562e0baSJiaying Zhang 	trace_ext4_fallocate_enter(inode, offset, len, mode);
43382ed88685STheodore Ts'o 	map.m_lblk = offset >> blkbits;
4339fd28784aSAneesh Kumar K.V 	/*
4340fd28784aSAneesh Kumar K.V 	 * We can't just convert len to max_blocks, because with
4341fd28784aSAneesh Kumar K.V 	 * blocksize = 4096, offset = 3072 and len = 2048 the range spans two blocks
4342fd28784aSAneesh Kumar K.V 	 */
4343a2df2a63SAmit Arora 	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
43442ed88685STheodore Ts'o 		- map.m_lblk;
4345a2df2a63SAmit Arora 	/*
4346f3bd1f3fSMingming Cao 	 * credits to insert 1 extent into extent tree
4347a2df2a63SAmit Arora 	 */
4348f3bd1f3fSMingming Cao 	credits = ext4_chunk_trans_blocks(inode, max_blocks);
434955bd725aSAneesh Kumar K.V 	mutex_lock(&inode->i_mutex);
43506d19c42bSNikanth Karthikesan 	ret = inode_newsize_ok(inode, (len + offset));
43516d19c42bSNikanth Karthikesan 	if (ret) {
43526d19c42bSNikanth Karthikesan 		mutex_unlock(&inode->i_mutex);
43530562e0baSJiaying Zhang 		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
43546d19c42bSNikanth Karthikesan 		return ret;
43556d19c42bSNikanth Karthikesan 	}
4356a4e5d88bSDmitry Monakhov 	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT |
4357a4e5d88bSDmitry Monakhov 		EXT4_GET_BLOCKS_NO_NORMALIZE;
4358a4e5d88bSDmitry Monakhov 	if (mode & FALLOC_FL_KEEP_SIZE)
4359a4e5d88bSDmitry Monakhov 		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4360a2df2a63SAmit Arora retry:
4361a2df2a63SAmit Arora 	while (ret >= 0 && ret < max_blocks) {
43622ed88685STheodore Ts'o 		map.m_lblk = map.m_lblk + ret;
43632ed88685STheodore Ts'o 		map.m_len = max_blocks = max_blocks - ret;
4364a2df2a63SAmit Arora 		handle = ext4_journal_start(inode, credits);
4365a2df2a63SAmit Arora 		if (IS_ERR(handle)) {
4366a2df2a63SAmit Arora 			ret = PTR_ERR(handle);
4367a2df2a63SAmit Arora 			break;
4368a2df2a63SAmit Arora 		}
4369a4e5d88bSDmitry Monakhov 		ret = ext4_map_blocks(handle, inode, &map, flags);
4370221879c9SAneesh Kumar K.V 		if (ret <= 0) {
43712c98615dSAneesh Kumar K.V #ifdef EXT4FS_DEBUG
43722c98615dSAneesh Kumar K.V 			WARN_ON(ret <= 0);
4373e35fd660STheodore Ts'o 			printk(KERN_ERR "%s: ext4_ext_map_blocks "
43742c98615dSAneesh Kumar K.V 			       "returned error inode#%lu, block=%u, "
43759fd9784cSThadeu Lima de Souza Cascardo 			       "max_blocks=%u", __func__,
4376a6371b63SKazuya Mio 			       inode->i_ino, map.m_lblk, max_blocks);
43772c98615dSAneesh Kumar K.V #endif
4378a2df2a63SAmit Arora 			ext4_mark_inode_dirty(handle, inode);
4379a2df2a63SAmit Arora 			ret2 = ext4_journal_stop(handle);
4380a2df2a63SAmit Arora 			break;
4381a2df2a63SAmit Arora 		}
43822ed88685STheodore Ts'o 		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4383fd28784aSAneesh Kumar K.V 						blkbits) >> blkbits))
4384fd28784aSAneesh Kumar K.V 			new_size = offset + len;
4385fd28784aSAneesh Kumar K.V 		else
438629ae07b7SUtako Kusaka 			new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4387a2df2a63SAmit Arora 
4388fd28784aSAneesh Kumar K.V 		ext4_falloc_update_inode(inode, mode, new_size,
43892ed88685STheodore Ts'o 					 (map.m_flags & EXT4_MAP_NEW));
4390a2df2a63SAmit Arora 		ext4_mark_inode_dirty(handle, inode);
4391a2df2a63SAmit Arora 		ret2 = ext4_journal_stop(handle);
4392a2df2a63SAmit Arora 		if (ret2)
4393a2df2a63SAmit Arora 			break;
4394a2df2a63SAmit Arora 	}
4395fd28784aSAneesh Kumar K.V 	if (ret == -ENOSPC &&
4396fd28784aSAneesh Kumar K.V 			ext4_should_retry_alloc(inode->i_sb, &retries)) {
4397fd28784aSAneesh Kumar K.V 		ret = 0;
4398a2df2a63SAmit Arora 		goto retry;
4399a2df2a63SAmit Arora 	}
440055bd725aSAneesh Kumar K.V 	mutex_unlock(&inode->i_mutex);
44010562e0baSJiaying Zhang 	trace_ext4_fallocate_exit(inode, offset, max_blocks,
44020562e0baSJiaying Zhang 				  ret > 0 ? ret2 : ret);
4403a2df2a63SAmit Arora 	return ret > 0 ? ret2 : ret;
4404a2df2a63SAmit Arora }
44056873fa0dSEric Sandeen 
44066873fa0dSEric Sandeen /*
44070031462bSMingming Cao  * This function converts a range of blocks to written extents.
44080031462bSMingming Cao  * The caller of this function will pass the start offset and the size.
44090031462bSMingming Cao  * All unwritten extents within this range will be converted to
44100031462bSMingming Cao  * written extents.
44110031462bSMingming Cao  *
44120031462bSMingming Cao  * This function is called from the direct IO end_io callback
44130031462bSMingming Cao  * function, to convert the fallocated extents after the IO is completed.
4414109f5565SMingming  * Returns 0 on success.
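 *
 * The conversion proceeds in chunks: each pass of the loop below runs
 * under a fresh handle, and ext4_map_blocks() may convert fewer blocks
 * than requested (e.g. ret == 3 with max_blocks == 5 leaves 2 blocks,
 * starting at m_lblk + 3, for the next pass).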
44150031462bSMingming Cao  */
44160031462bSMingming Cao int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4417a1de02dcSEric Sandeen 				   ssize_t len)
44180031462bSMingming Cao {
44190031462bSMingming Cao 	handle_t *handle;
44200031462bSMingming Cao 	unsigned int max_blocks;
44210031462bSMingming Cao 	int ret = 0;
44220031462bSMingming Cao 	int ret2 = 0;
44232ed88685STheodore Ts'o 	struct ext4_map_blocks map;
44240031462bSMingming Cao 	unsigned int credits, blkbits = inode->i_blkbits;
44250031462bSMingming Cao 
44262ed88685STheodore Ts'o 	map.m_lblk = offset >> blkbits;
44270031462bSMingming Cao 	/*
44280031462bSMingming Cao 	 * We can't just convert len to max_blocks, because with
44290031462bSMingming Cao 	 * blocksize = 4096, offset = 3072 and len = 2048 the range spans two blocks
44300031462bSMingming Cao 	 */
44312ed88685STheodore Ts'o 	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
44322ed88685STheodore Ts'o 		      map.m_lblk);
44330031462bSMingming Cao 	/*
44340031462bSMingming Cao 	 * credits to insert 1 extent into extent tree
44350031462bSMingming Cao 	 */
44360031462bSMingming Cao 	credits = ext4_chunk_trans_blocks(inode, max_blocks);
44370031462bSMingming Cao 	while (ret >= 0 && ret < max_blocks) {
44382ed88685STheodore Ts'o 		map.m_lblk += ret;
44392ed88685STheodore Ts'o 		map.m_len = (max_blocks -= ret);
44400031462bSMingming Cao 		handle = ext4_journal_start(inode, credits);
44410031462bSMingming Cao 		if (IS_ERR(handle)) {
44420031462bSMingming Cao 			ret = PTR_ERR(handle);
44430031462bSMingming Cao 			break;
44440031462bSMingming Cao 		}
44452ed88685STheodore Ts'o 		ret = ext4_map_blocks(handle, inode, &map,
4446c7064ef1SJiaying Zhang 				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
44470031462bSMingming Cao 		if (ret <= 0) {
44480031462bSMingming Cao 			WARN_ON(ret <= 0);
4449e35fd660STheodore Ts'o 			printk(KERN_ERR "%s: ext4_ext_map_blocks "
44500031462bSMingming Cao 			       "returned error inode#%lu, block=%u, "
44510031462bSMingming Cao 			       "max_blocks=%u", __func__,
44522ed88685STheodore Ts'o 			       inode->i_ino, map.m_lblk, map.m_len);
44530031462bSMingming Cao 		}
44540031462bSMingming Cao 		ext4_mark_inode_dirty(handle, inode);
44550031462bSMingming Cao 		ret2 = ext4_journal_stop(handle);
44560031462bSMingming Cao 		if (ret <= 0 || ret2)
44570031462bSMingming Cao 			break;
44580031462bSMingming Cao 	}
44590031462bSMingming Cao 	return ret > 0 ? ret2 : ret;
44600031462bSMingming Cao }
44616d9c85ebSYongqiang Yang 
44620031462bSMingming Cao /*
44636873fa0dSEric Sandeen  * Callback function called for each extent to gather FIEMAP information.
/*
 * Callback function called for each extent to gather FIEMAP information.
 */
static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
			      struct ext4_ext_cache *newex,
			      struct ext4_extent *ex, void *data)
{
	__u64	logical;
	__u64	physical;
	__u64	length;
	__u32	flags = 0;
	int	ret = 0;
	struct fiemap_extent_info *fieinfo = data;
	unsigned char blksize_bits;

	blksize_bits = inode->i_sb->s_blocksize_bits;
	logical = (__u64)newex->ec_block << blksize_bits;

	if (newex->ec_start == 0) {
		/*
		 * No extent in the extent tree contains the block; it may
		 * lie in 1) a hole or 2) a delayed extent.
		 *
		 * Holes and delayed extents are processed as follows:
		 * 1. Look up dirty pages with the specified range in the
		 *    pagecache. If no page is found, there is no delayed
		 *    extent; return EXT_CONTINUE.
		 * 2. Find the first mapped buffer.
		 * 3. Check whether the mapped buffer is both in the request
		 *    range and delayed. If not, there is no delayed extent;
		 *    return.
		 * 4. A delayed extent is found; collect it.
		 */
		ext4_lblk_t	end = 0;
		pgoff_t		last_offset;
		pgoff_t		offset;
		pgoff_t		index;
		pgoff_t		start_index = 0;
		struct page	**pages = NULL;
		struct buffer_head *bh = NULL;
		struct buffer_head *head = NULL;
		unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);

		pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (pages == NULL)
			return -ENOMEM;

		offset = logical >> PAGE_SHIFT;
repeat:
		last_offset = offset;
		head = NULL;
		ret = find_get_pages_tag(inode->i_mapping, &offset,
					PAGECACHE_TAG_DIRTY, nr_pages, pages);

		if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
			/* First time, try to find a mapped buffer. */
			if (ret == 0) {
out:
				for (index = 0; index < ret; index++)
					page_cache_release(pages[index]);
				/* just a hole */
				kfree(pages);
				return EXT_CONTINUE;
			}
			index = 0;

next_page:
			/* Try to find the first mapped buffer. */
			end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
				blksize_bits;
			if (!page_has_buffers(pages[index]))
				goto out;
			head = page_buffers(pages[index]);
			if (!head)
				goto out;

			index++;
			bh = head;
			do {
				if (end >= newex->ec_block +
				    newex->ec_len)
					/* The buffer is out of
					 * the request range.
					 */
					goto out;

				if (buffer_mapped(bh) &&
				    end >= newex->ec_block) {
					start_index = index - 1;
					/* get the first mapped buffer. */
					goto found_mapped_buffer;
				}

				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			/* No mapped buffer in the range was found in this
			 * page; we need to look up the next page.
			 */
			if (index >= ret) {
				/* There is no page left, but we need to limit
				 * newex->ec_len.
				 */
				newex->ec_len = end - newex->ec_block;
				goto out;
			}
			goto next_page;
		} else {
			/* Find contiguous delayed buffers. */
			if (ret > 0 && pages[0]->index == last_offset)
				head = page_buffers(pages[0]);
			bh = head;
			index = 1;
			start_index = 0;
		}

found_mapped_buffer:
		if (bh != NULL && buffer_delay(bh)) {
			/* First or contiguous delayed buffer found. */
			if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
				/*
				 * First delayed buffer found; record
				 * the start of the extent.
				 */
				flags |= FIEMAP_EXTENT_DELALLOC;
				newex->ec_block = end;
				logical = (__u64)end << blksize_bits;
			}
			/* Find contiguous delayed buffers. */
			do {
				if (!buffer_delay(bh))
					goto found_delayed_extent;
				bh = bh->b_this_page;
				end++;
			} while (bh != head);

			for (; index < ret; index++) {
				if (!page_has_buffers(pages[index])) {
					bh = NULL;
					break;
				}
				head = page_buffers(pages[index]);
				if (!head) {
					bh = NULL;
					break;
				}

				if (pages[index]->index !=
				    pages[start_index]->index + index
				    - start_index) {
					/* Blocks are not contiguous. */
					bh = NULL;
					break;
				}
				bh = head;
				do {
					if (!buffer_delay(bh))
						/* Delayed extent ends. */
						goto found_delayed_extent;
					bh = bh->b_this_page;
					end++;
				} while (bh != head);
			}
		} else if (!(flags & FIEMAP_EXTENT_DELALLOC))
			/* A hole was found. */
			goto out;

found_delayed_extent:
		newex->ec_len = min(end - newex->ec_block,
				    (ext4_lblk_t)EXT_INIT_MAX_LEN);
		if (ret == nr_pages && bh != NULL &&
		    newex->ec_len < EXT_INIT_MAX_LEN &&
		    buffer_delay(bh)) {
			/* Haven't collected a full extent yet; continue. */
			for (index = 0; index < ret; index++)
				page_cache_release(pages[index]);
			goto repeat;
		}

		for (index = 0; index < ret; index++)
			page_cache_release(pages[index]);
		kfree(pages);
	}

	physical = (__u64)newex->ec_start << blksize_bits;
	length = (__u64)newex->ec_len << blksize_bits;

	if (ex && ext4_ext_is_uninitialized(ex))
		flags |= FIEMAP_EXTENT_UNWRITTEN;

	if (next == EXT_MAX_BLOCKS)
		flags |= FIEMAP_EXTENT_LAST;

	ret = fiemap_fill_next_extent(fieinfo, logical, physical,
				      length, flags);
	if (ret < 0)
		return ret;
	if (ret == 1)
		return EXT_BREAK;
	return EXT_CONTINUE;
}

/* fiemap flags we can handle are specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)

static int ext4_xattr_fiemap(struct inode *inode,
			     struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}
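/*
 * Hedged sketch (not kernel code): for in-inode xattrs, the physical byte
 * position reported above is the byte address of the inode's block plus the
 * offset of the xattr area inside the on-disk inode. The sample values below
 * are hypothetical, not read from a real filesystem; 128 matches
 * EXT4_GOOD_OLD_INODE_SIZE.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long inode_blocknr = 1234;   /* block holding the inode */
	unsigned int blockbits = 12;               /* 4096-byte blocks */
	unsigned int good_old_inode_size = 128;    /* EXT4_GOOD_OLD_INODE_SIZE */
	unsigned int extra_isize = 32;             /* i_extra_isize (sample) */
	unsigned int inode_size = 256;             /* s_inode_size (sample) */

	unsigned long long physical = (inode_blocknr << blockbits) +
				      good_old_inode_size + extra_isize;
	unsigned int length = inode_size - (good_old_inode_size + extra_isize);

	printf("xattr area at byte %llu, %u bytes long\n", physical, length);
	return 0;
}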

/*
 * ext4_ext_punch_hole
 *
 * Punches a hole of "length" bytes in a file starting
 * at byte "offset"
 *
 * @file:   The file to punch a hole in
 * @offset: The starting byte offset of the hole
 * @length: The length of the hole
 *
 * Returns 0 on success or a negative value on error
 */
int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ext4_ext_cache cache_ex;
	ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
	struct address_space *mapping = inode->i_mapping;
	struct ext4_map_blocks map;
	handle_t *handle;
	loff_t first_page, last_page, page_len;
	loff_t first_page_offset, last_page_offset;
	int ret, credits, blocks_released, err = 0;

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		return 0;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
		   offset;
	}

	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	last_page = (offset + length) >> PAGE_CACHE_SHIFT;

	first_page_offset = first_page << PAGE_CACHE_SHIFT;
	last_page_offset = last_page << PAGE_CACHE_SHIFT;

	/*
	 * Write out all dirty pages to avoid race conditions,
	 * then release them.
	 */
	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		err = filemap_write_and_wait_range(mapping,
			offset, offset + length - 1);

		if (err)
			return err;
	}

	/* Now release the pages */
	if (last_page_offset > first_page_offset) {
		truncate_inode_pages_range(mapping, first_page_offset,
					   last_page_offset - 1);
	}

	/* finish any pending end_io work */
	ext4_flush_completed_IO(inode);

	credits = ext4_writepage_trans_blocks(inode);
	handle = ext4_journal_start(inode, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_orphan_add(handle, inode);
	if (err)
		goto out;

	/*
	 * Now we need to zero out the non-page-aligned data in the
	 * pages at the start and tail of the hole, and unmap the buffer
	 * heads for the block-aligned regions of the page that were
	 * completely zeroed.
	 */
	if (first_page > last_page) {
		/*
		 * If the file space being truncated is contained within a
		 * page, just zero out and unmap the middle of that page.
		 */
		err = ext4_discard_partial_page_buffers(handle,
			mapping, offset, length, 0);

		if (err)
			goto out;
	} else {
		/*
		 * Zero out and unmap the partial page that contains
		 * the start of the hole.
		 */
		page_len = first_page_offset - offset;
		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle, mapping,
						   offset, page_len, 0);
			if (err)
				goto out;
		}

		/*
		 * Zero out and unmap the partial page that contains
		 * the end of the hole.
		 */
		page_len = offset + length - last_page_offset;
		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle, mapping,
					last_page_offset, page_len, 0);
			if (err)
				goto out;
		}
	}


	/*
	 * If i_size is contained in the last page, we need to
	 * unmap and zero the partial page after i_size.
	 */
	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
	   inode->i_size % PAGE_CACHE_SIZE != 0) {

		page_len = PAGE_CACHE_SIZE -
			(inode->i_size & (PAGE_CACHE_SIZE - 1));

		if (page_len > 0) {
			err = ext4_discard_partial_page_buffers(handle,
			  mapping, inode->i_size, page_len, 0);

			if (err)
				goto out;
		}
	}

	/* If there are no blocks to remove, return now */
	if (first_block >= last_block)
		goto out;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);
	ext4_discard_preallocations(inode);

	/*
	 * Loop over all the blocks and identify blocks
	 * that need to be punched out.
	 */
	iblock = first_block;
	blocks_released = 0;
	while (iblock < last_block) {
		max_blocks = last_block - iblock;
		num_blocks = 1;
		memset(&map, 0, sizeof(map));
		map.m_lblk = iblock;
		map.m_len = max_blocks;
		ret = ext4_ext_map_blocks(handle, inode, &map,
					  EXT4_GET_BLOCKS_PUNCH_OUT_EXT);

		if (ret > 0) {
			blocks_released += ret;
			num_blocks = ret;
		} else if (ret == 0) {
			/*
			 * If map blocks could not find the block,
			 * then it is in a hole. If the hole was
			 * not already cached, then map blocks should
			 * put it in the cache, so we can read the hole
			 * back out of the cache.
			 */
			memset(&cache_ex, 0, sizeof(cache_ex));
			if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
				!cache_ex.ec_start) {

				/* The hole is cached */
				num_blocks = cache_ex.ec_block +
					cache_ex.ec_len - iblock;

			} else {
				/* The block could not be identified */
				err = -EIO;
				break;
			}
		} else {
			/* Map blocks error */
			err = ret;
			break;
		}

		if (num_blocks == 0) {
			/* This condition should never happen */
			ext_debug("Block lookup failed");
			err = -EIO;
			break;
		}

		iblock += num_blocks;
	}

	if (blocks_released > 0) {
		ext4_ext_invalidate_cache(inode);
		ext4_discard_preallocations(inode);
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	up_write(&EXT4_I(inode)->i_data_sem);

out:
	ext4_orphan_del(handle, inode);
	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
	return err;
}
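/*
 * Hedged userspace sketch (not part of this file): punching a hole via
 * fallocate(2) with FALLOC_FL_PUNCH_HOLE is the call path that reaches
 * ext4_ext_punch_hole() on an extent-mapped file. Punching requires
 * FALLOC_FL_KEEP_SIZE, so i_size is preserved. "testfile" and the offsets
 * are hypothetical.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Deallocate 64 KiB starting at 1 MiB; the file size is unchanged. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      1024 * 1024, 64 * 1024) < 0)
		perror("fallocate(PUNCH_HOLE)");
	close(fd);
	return 0;
}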
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	/* fall back to the generic implementation if not in extents format */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
			ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCKS)
			last_blk = EXT_MAX_BLOCKS - 1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;

		/*
		 * Walk the extent tree gathering extent information.
		 * ext4_ext_fiemap_cb will push extents back to user.
		 */
		error = ext4_ext_walk_space(inode, start_blk, len_blks,
					    ext4_ext_fiemap_cb, fieinfo);
	}

	return error;
}
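/*
 * Hedged userspace sketch (not part of this file): querying the extent map
 * through the FIEMAP ioctl, which is serviced by ext4_fiemap() above on
 * extent-mapped ext4 files. "testfile" and the extent count are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(void)
{
	unsigned int i, count = 32;
	struct fiemap *fm;
	int fd = open("testfile", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Room for the header plus `count` returned extents. */
	fm = calloc(1, sizeof(*fm) + count * sizeof(struct fiemap_extent));
	if (!fm) {
		close(fd);
		return 1;
	}
	fm->fm_start = 0;
	fm->fm_length = ~0ULL;		/* map the whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = count;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
	} else {
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("logical %llu physical %llu len %llu flags 0x%x\n",
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_physical,
			       (unsigned long long)fm->fm_extents[i].fe_length,
			       fm->fm_extents[i].fe_flags);
	}
	free(fm);
	close(fd);
	return 0;
}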