/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(
					EXT4_SB(inode->i_sb)->s_journal,
					&EXT4_I(inode)->jinode,
					new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 *
 * If the handle isn't valid we're not journaling so there's nothing to do.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (!ext4_handle_valid(handle))
		return 0;
	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext4_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of file's data ext4 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one.  If @block is out of range
 * (negative or too large) warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
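 *
 * For example (figures are illustrative only, assuming 4KB blocks, i.e.
 * 1024 block pointers per indirect block and 12 direct pointers in the
 * inode): i_block 5 yields the path {5}, i_block 12 yields
 * {EXT4_IND_BLOCK, 0}, and i_block 12 + 1024 yields
 * {EXT4_DIND_BLOCK, 0, 0}.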
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
			     "block %lu > max in inode %lu",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks, inode->i_ino);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all way to the data (returns %NULL, *err == 0).
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
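
/*
 * Illustrative example of the colouring above (numbers are not from any
 * particular filesystem): with the common 32768 blocks per group, a caller
 * whose pid % 16 == 5 gets colour = 5 * (32768 / 16) = 10240, i.e. its
 * in-inode allocations start 10240 blocks into the inode's block group.
 */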

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	return ext4_find_near(inode, partial);
}

/**
 * ext4_blks_to_allocate: Look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
				 int blocks_to_boundary)
{
	unsigned int count = 0;

	/*
	 * Simple case: if the [t,d]indirect block(s) has not been allocated
	 * yet, then it's clear that the blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}

/**
 * ext4_alloc_blocks: multiple allocate blocks needed for a branch
 * @indirect_blks: the number of blocks that need to be allocated for
 *		indirect blocks
 *
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 * @blks: on return it will store the total number of allocated
 *	direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	struct ext4_allocation_request ar;
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
						     goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	memset(&ar, 0, sizeof(ar));
	ar.inode = inode;
	ar.goal = goal;
	ar.len = target;
	ar.logical = iblock;
	if (S_ISREG(inode->i_mode))
		/* enable in-core preallocation only for regular files */
		ar.flags = EXT4_MB_HINT_DATA;

	current_block = ext4_mb_new_blocks(handle, &ar, err);

	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += ar.len;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
	return ret;
}

/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext4_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
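 *
 * For example (purely illustrative): mapping a block in the double-indirect
 * range of a sparse file where neither the indirect nor the double-indirect
 * block exists yet gives indirect_blks == 2, so two metadata blocks plus at
 * least one data block are allocated and chained together here.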
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n; i++) {
		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

	return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size. But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle, inode,
				 le32_to_cpu(where[i-1].key), 1, 0);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 */
static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
				  ext4_lblk_t iblock, unsigned int maxblocks,
				  struct buffer_head *bh_result,
				  int create, int extend_disksize)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int count = 0;
	ext4_fsblk_t first_block = 0;
	loff_t disksize;


	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || create == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	/*
	 * i_disksize growing is protected by i_data_sem.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext4_get_block() -bzzz
	 */
	if (!err && extend_disksize) {
		disksize = ((loff_t) iblock + count) << inode->i_blkbits;
		if (disksize > i_size_read(inode))
			disksize = i_size_read(inode);
		if (disksize > ei->i_disksize)
			ei->i_disksize = disksize;
	}
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate @blocks data blocks for a non-extent-based file.
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ind_blks, dind_blks, tind_blks;

	/* number of new indirect blocks needed */
	ind_blks = (blocks + icap - 1) / icap;

	dind_blks = (ind_blks + icap - 1) / icap;

	tind_blks = 1;

	return ind_blks + dind_blks + tind_blks;
}
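
/*
 * Worked example (illustrative only): with 4KB blocks, icap is 1024, so
 * reserving for 1000 not-yet-allocated blocks gives ind_blks = 1,
 * dind_blks = 1 and tind_blks = 1, i.e. 3 metadata blocks - an estimate
 * used for delayed-allocation reservation rather than an exact count.
 */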

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate the given number of blocks.
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
	if (!blocks)
		return 0;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_calc_metadata_amount(inode, blocks);

	return ext4_indirect_calc_metadata_amount(inode, blocks);
}

static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int total, mdb, mdb_free;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	/* recalculate the number of metablocks that still need to be reserved */
	total = EXT4_I(inode)->i_reserved_data_blocks - used;
	mdb = ext4_calc_metadata_amount(inode, total);

	/* figure out how many metablocks to release */
	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

	if (mdb_free) {
		/* Account for allocated meta_blocks */
		mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

		/* update fs dirty blocks counter */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
		EXT4_I(inode)->i_allocated_meta_blocks = 0;
		EXT4_I(inode)->i_reserved_meta_blocks = mdb;
	}

	/* update per-inode reservations */
	BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
	EXT4_I(inode)->i_reserved_data_blocks -= used;

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * The ext4_get_blocks_wrap() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
1050f5ab0d1fSMingming Cao *
1051f5ab0d1fSMingming Cao * If the file type is extents based, it will call ext4_ext_get_blocks(),
1052f5ab0d1fSMingming Cao * Otherwise, it calls ext4_get_blocks_handle() to handle indirect mapping
1053f5ab0d1fSMingming Cao * based files
1054f5ab0d1fSMingming Cao *
1055f5ab0d1fSMingming Cao * On success, it returns the number of blocks being mapped or allocated.
1056f5ab0d1fSMingming Cao * If create == 0 and the blocks are pre-allocated and uninitialized,
1057f5ab0d1fSMingming Cao * the result buffer head is unmapped. If create == 1, it will make sure
1058f5ab0d1fSMingming Cao * the buffer head is mapped.
1059f5ab0d1fSMingming Cao *
1060f5ab0d1fSMingming Cao * It returns 0 if plain look up failed (blocks have not been allocated); in
1061f5ab0d1fSMingming Cao * that case, the buffer head is unmapped.
1062f5ab0d1fSMingming Cao *
1063f5ab0d1fSMingming Cao * It returns the error in case of allocation failure.
1064f5ab0d1fSMingming Cao */
10650e855ac8SAneesh Kumar K.V int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1066498e5f24STheodore Ts'o unsigned int max_blocks, struct buffer_head *bh,
1067d2a17637SMingming Cao int create, int extend_disksize, int flag)
10680e855ac8SAneesh Kumar K.V {
10690e855ac8SAneesh Kumar K.V int retval;
1070f5ab0d1fSMingming Cao
1071f5ab0d1fSMingming Cao clear_buffer_mapped(bh);
1072f5ab0d1fSMingming Cao
10734df3d265SAneesh Kumar K.V /*
10744df3d265SAneesh Kumar K.V * Try to see if we can get the block without requesting
10754df3d265SAneesh Kumar K.V * a new file system block.
10764df3d265SAneesh Kumar K.V */
10770e855ac8SAneesh Kumar K.V down_read((&EXT4_I(inode)->i_data_sem));
10784df3d265SAneesh Kumar K.V if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
10794df3d265SAneesh Kumar K.V retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
10804df3d265SAneesh Kumar K.V bh, 0, 0);
10814df3d265SAneesh Kumar K.V } else {
10824df3d265SAneesh Kumar K.V retval = ext4_get_blocks_handle(handle,
10834df3d265SAneesh Kumar K.V inode, block, max_blocks, bh, 0, 0);
10840e855ac8SAneesh Kumar K.V }
10854df3d265SAneesh Kumar K.V up_read((&EXT4_I(inode)->i_data_sem));
1086f5ab0d1fSMingming Cao
1087f5ab0d1fSMingming Cao /* If it is only a block(s) look up */
1088f5ab0d1fSMingming Cao if (!create)
10894df3d265SAneesh Kumar K.V return retval;
10904df3d265SAneesh Kumar K.V
10914df3d265SAneesh Kumar K.V /*
1092f5ab0d1fSMingming Cao * Return if the blocks have already been allocated.
1093f5ab0d1fSMingming Cao *
1094f5ab0d1fSMingming Cao * Note that if blocks have been preallocated
1095f5ab0d1fSMingming Cao * ext4_ext_get_block() returns with create = 0
1096f5ab0d1fSMingming Cao * with buffer head unmapped.
1097f5ab0d1fSMingming Cao */
1098f5ab0d1fSMingming Cao if (retval > 0 && buffer_mapped(bh))
1099f5ab0d1fSMingming Cao return retval;
1100f5ab0d1fSMingming Cao
1101f5ab0d1fSMingming Cao /*
1102f5ab0d1fSMingming Cao * New block allocation and/or writing to an uninitialized extent
1103f5ab0d1fSMingming Cao * will possibly result in updating i_data, so we take
1104f5ab0d1fSMingming Cao * the write lock of i_data_sem, and call get_blocks()
1105f5ab0d1fSMingming Cao * with create == 1 flag.
11064df3d265SAneesh Kumar K.V */ 11074df3d265SAneesh Kumar K.V down_write((&EXT4_I(inode)->i_data_sem)); 1108d2a17637SMingming Cao 1109d2a17637SMingming Cao /* 1110d2a17637SMingming Cao * if the caller is from delayed allocation writeout path 1111d2a17637SMingming Cao * we have already reserved fs blocks for allocation 1112d2a17637SMingming Cao * let the underlying get_block() function know to 1113d2a17637SMingming Cao * avoid double accounting 1114d2a17637SMingming Cao */ 1115d2a17637SMingming Cao if (flag) 1116d2a17637SMingming Cao EXT4_I(inode)->i_delalloc_reserved_flag = 1; 11174df3d265SAneesh Kumar K.V /* 11184df3d265SAneesh Kumar K.V * We need to check for EXT4 here because migrate 11194df3d265SAneesh Kumar K.V * could have changed the inode type in between 11204df3d265SAneesh Kumar K.V */ 11210e855ac8SAneesh Kumar K.V if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 11220e855ac8SAneesh Kumar K.V retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, 11230e855ac8SAneesh Kumar K.V bh, create, extend_disksize); 11240e855ac8SAneesh Kumar K.V } else { 11250e855ac8SAneesh Kumar K.V retval = ext4_get_blocks_handle(handle, inode, block, 11260e855ac8SAneesh Kumar K.V max_blocks, bh, create, extend_disksize); 1127267e4db9SAneesh Kumar K.V 1128267e4db9SAneesh Kumar K.V if (retval > 0 && buffer_new(bh)) { 1129267e4db9SAneesh Kumar K.V /* 1130267e4db9SAneesh Kumar K.V * We allocated new blocks which will result in 1131267e4db9SAneesh Kumar K.V * i_data's format changing. Force the migrate 1132267e4db9SAneesh Kumar K.V * to fail by clearing migrate flags 1133267e4db9SAneesh Kumar K.V */ 1134267e4db9SAneesh Kumar K.V EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags & 1135267e4db9SAneesh Kumar K.V ~EXT4_EXT_MIGRATE; 1136267e4db9SAneesh Kumar K.V } 11370e855ac8SAneesh Kumar K.V } 1138d2a17637SMingming Cao 1139d2a17637SMingming Cao if (flag) { 1140d2a17637SMingming Cao EXT4_I(inode)->i_delalloc_reserved_flag = 0; 1141d2a17637SMingming Cao /* 1142d2a17637SMingming Cao * Update reserved blocks/metadata blocks 1143d2a17637SMingming Cao * after successful block allocation 1144d2a17637SMingming Cao * which were deferred till now 1145d2a17637SMingming Cao */ 1146d2a17637SMingming Cao if ((retval > 0) && buffer_delay(bh)) 114712219aeaSAneesh Kumar K.V ext4_da_update_reserve_space(inode, retval); 1148d2a17637SMingming Cao } 1149d2a17637SMingming Cao 11500e855ac8SAneesh Kumar K.V up_write((&EXT4_I(inode)->i_data_sem)); 11510e855ac8SAneesh Kumar K.V return retval; 11520e855ac8SAneesh Kumar K.V } 11530e855ac8SAneesh Kumar K.V 1154f3bd1f3fSMingming Cao /* Maximum number of blocks we map for direct IO at once. */ 1155f3bd1f3fSMingming Cao #define DIO_MAX_BLOCKS 4096 1156f3bd1f3fSMingming Cao 11576873fa0dSEric Sandeen int ext4_get_block(struct inode *inode, sector_t iblock, 1158ac27a0ecSDave Kleikamp struct buffer_head *bh_result, int create) 1159ac27a0ecSDave Kleikamp { 11603e4fdaf8SDmitriy Monakhov handle_t *handle = ext4_journal_current_handle(); 11617fb5409dSJan Kara int ret = 0, started = 0; 1162ac27a0ecSDave Kleikamp unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 1163f3bd1f3fSMingming Cao int dio_credits; 1164ac27a0ecSDave Kleikamp 11657fb5409dSJan Kara if (create && !handle) { 11667fb5409dSJan Kara /* Direct IO write... 
*/ 11677fb5409dSJan Kara if (max_blocks > DIO_MAX_BLOCKS) 11687fb5409dSJan Kara max_blocks = DIO_MAX_BLOCKS; 1169f3bd1f3fSMingming Cao dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); 1170f3bd1f3fSMingming Cao handle = ext4_journal_start(inode, dio_credits); 11717fb5409dSJan Kara if (IS_ERR(handle)) { 1172ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 11737fb5409dSJan Kara goto out; 11747fb5409dSJan Kara } 11757fb5409dSJan Kara started = 1; 1176ac27a0ecSDave Kleikamp } 1177ac27a0ecSDave Kleikamp 1178a86c6181SAlex Tomas ret = ext4_get_blocks_wrap(handle, inode, iblock, 1179d2a17637SMingming Cao max_blocks, bh_result, create, 0, 0); 1180ac27a0ecSDave Kleikamp if (ret > 0) { 1181ac27a0ecSDave Kleikamp bh_result->b_size = (ret << inode->i_blkbits); 1182ac27a0ecSDave Kleikamp ret = 0; 1183ac27a0ecSDave Kleikamp } 11847fb5409dSJan Kara if (started) 11857fb5409dSJan Kara ext4_journal_stop(handle); 11867fb5409dSJan Kara out: 1187ac27a0ecSDave Kleikamp return ret; 1188ac27a0ecSDave Kleikamp } 1189ac27a0ecSDave Kleikamp 1190ac27a0ecSDave Kleikamp /* 1191ac27a0ecSDave Kleikamp * `handle' can be NULL if create is zero 1192ac27a0ecSDave Kleikamp */ 1193617ba13bSMingming Cao struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, 1194725d26d3SAneesh Kumar K.V ext4_lblk_t block, int create, int *errp) 1195ac27a0ecSDave Kleikamp { 1196ac27a0ecSDave Kleikamp struct buffer_head dummy; 1197ac27a0ecSDave Kleikamp int fatal = 0, err; 1198ac27a0ecSDave Kleikamp 1199ac27a0ecSDave Kleikamp J_ASSERT(handle != NULL || create == 0); 1200ac27a0ecSDave Kleikamp 1201ac27a0ecSDave Kleikamp dummy.b_state = 0; 1202ac27a0ecSDave Kleikamp dummy.b_blocknr = -1000; 1203ac27a0ecSDave Kleikamp buffer_trace_init(&dummy.b_history); 1204a86c6181SAlex Tomas err = ext4_get_blocks_wrap(handle, inode, block, 1, 1205d2a17637SMingming Cao &dummy, create, 1, 0); 1206ac27a0ecSDave Kleikamp /* 1207617ba13bSMingming Cao * ext4_get_blocks_handle() returns number of blocks 1208ac27a0ecSDave Kleikamp * mapped. 0 in case of a HOLE. 1209ac27a0ecSDave Kleikamp */ 1210ac27a0ecSDave Kleikamp if (err > 0) { 1211ac27a0ecSDave Kleikamp if (err > 1) 1212ac27a0ecSDave Kleikamp WARN_ON(1); 1213ac27a0ecSDave Kleikamp err = 0; 1214ac27a0ecSDave Kleikamp } 1215ac27a0ecSDave Kleikamp *errp = err; 1216ac27a0ecSDave Kleikamp if (!err && buffer_mapped(&dummy)) { 1217ac27a0ecSDave Kleikamp struct buffer_head *bh; 1218ac27a0ecSDave Kleikamp bh = sb_getblk(inode->i_sb, dummy.b_blocknr); 1219ac27a0ecSDave Kleikamp if (!bh) { 1220ac27a0ecSDave Kleikamp *errp = -EIO; 1221ac27a0ecSDave Kleikamp goto err; 1222ac27a0ecSDave Kleikamp } 1223ac27a0ecSDave Kleikamp if (buffer_new(&dummy)) { 1224ac27a0ecSDave Kleikamp J_ASSERT(create != 0); 1225ac39849dSAneesh Kumar K.V J_ASSERT(handle != NULL); 1226ac27a0ecSDave Kleikamp 1227ac27a0ecSDave Kleikamp /* 1228ac27a0ecSDave Kleikamp * Now that we do not always journal data, we should 1229ac27a0ecSDave Kleikamp * keep in mind whether this should always journal the 1230ac27a0ecSDave Kleikamp * new buffer as metadata. For now, regular file 1231617ba13bSMingming Cao * writes use ext4_get_block instead, so it's not a 1232ac27a0ecSDave Kleikamp * problem. 
1233ac27a0ecSDave Kleikamp */ 1234ac27a0ecSDave Kleikamp lock_buffer(bh); 1235ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "call get_create_access"); 1236617ba13bSMingming Cao fatal = ext4_journal_get_create_access(handle, bh); 1237ac27a0ecSDave Kleikamp if (!fatal && !buffer_uptodate(bh)) { 1238ac27a0ecSDave Kleikamp memset(bh->b_data, 0, inode->i_sb->s_blocksize); 1239ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 1240ac27a0ecSDave Kleikamp } 1241ac27a0ecSDave Kleikamp unlock_buffer(bh); 12420390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 12430390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 1244ac27a0ecSDave Kleikamp if (!fatal) 1245ac27a0ecSDave Kleikamp fatal = err; 1246ac27a0ecSDave Kleikamp } else { 1247ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "not a new buffer"); 1248ac27a0ecSDave Kleikamp } 1249ac27a0ecSDave Kleikamp if (fatal) { 1250ac27a0ecSDave Kleikamp *errp = fatal; 1251ac27a0ecSDave Kleikamp brelse(bh); 1252ac27a0ecSDave Kleikamp bh = NULL; 1253ac27a0ecSDave Kleikamp } 1254ac27a0ecSDave Kleikamp return bh; 1255ac27a0ecSDave Kleikamp } 1256ac27a0ecSDave Kleikamp err: 1257ac27a0ecSDave Kleikamp return NULL; 1258ac27a0ecSDave Kleikamp } 1259ac27a0ecSDave Kleikamp 1260617ba13bSMingming Cao struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, 1261725d26d3SAneesh Kumar K.V ext4_lblk_t block, int create, int *err) 1262ac27a0ecSDave Kleikamp { 1263ac27a0ecSDave Kleikamp struct buffer_head *bh; 1264ac27a0ecSDave Kleikamp 1265617ba13bSMingming Cao bh = ext4_getblk(handle, inode, block, create, err); 1266ac27a0ecSDave Kleikamp if (!bh) 1267ac27a0ecSDave Kleikamp return bh; 1268ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) 1269ac27a0ecSDave Kleikamp return bh; 1270ac27a0ecSDave Kleikamp ll_rw_block(READ_META, 1, &bh); 1271ac27a0ecSDave Kleikamp wait_on_buffer(bh); 1272ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) 1273ac27a0ecSDave Kleikamp return bh; 1274ac27a0ecSDave Kleikamp put_bh(bh); 1275ac27a0ecSDave Kleikamp *err = -EIO; 1276ac27a0ecSDave Kleikamp return NULL; 1277ac27a0ecSDave Kleikamp } 1278ac27a0ecSDave Kleikamp 1279ac27a0ecSDave Kleikamp static int walk_page_buffers(handle_t *handle, 1280ac27a0ecSDave Kleikamp struct buffer_head *head, 1281ac27a0ecSDave Kleikamp unsigned from, 1282ac27a0ecSDave Kleikamp unsigned to, 1283ac27a0ecSDave Kleikamp int *partial, 1284ac27a0ecSDave Kleikamp int (*fn)(handle_t *handle, 1285ac27a0ecSDave Kleikamp struct buffer_head *bh)) 1286ac27a0ecSDave Kleikamp { 1287ac27a0ecSDave Kleikamp struct buffer_head *bh; 1288ac27a0ecSDave Kleikamp unsigned block_start, block_end; 1289ac27a0ecSDave Kleikamp unsigned blocksize = head->b_size; 1290ac27a0ecSDave Kleikamp int err, ret = 0; 1291ac27a0ecSDave Kleikamp struct buffer_head *next; 1292ac27a0ecSDave Kleikamp 1293ac27a0ecSDave Kleikamp for (bh = head, block_start = 0; 1294ac27a0ecSDave Kleikamp ret == 0 && (bh != head || !block_start); 1295ac27a0ecSDave Kleikamp block_start = block_end, bh = next) 1296ac27a0ecSDave Kleikamp { 1297ac27a0ecSDave Kleikamp next = bh->b_this_page; 1298ac27a0ecSDave Kleikamp block_end = block_start + blocksize; 1299ac27a0ecSDave Kleikamp if (block_end <= from || block_start >= to) { 1300ac27a0ecSDave Kleikamp if (partial && !buffer_uptodate(bh)) 1301ac27a0ecSDave Kleikamp *partial = 1; 1302ac27a0ecSDave Kleikamp continue; 1303ac27a0ecSDave Kleikamp } 1304ac27a0ecSDave Kleikamp err = (*fn)(handle, bh); 1305ac27a0ecSDave Kleikamp if (!ret) 1306ac27a0ecSDave Kleikamp ret = err; 1307ac27a0ecSDave Kleikamp } 
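/*
 * At this point ret is 0 if @fn succeeded on every buffer overlapping
 * [from, to); otherwise it holds the first error returned by @fn (the
 * walk stops at the first failure). When @partial is non-NULL, *partial
 * has been set if a buffer outside that range was found not uptodate.
 */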
1308ac27a0ecSDave Kleikamp return ret; 1309ac27a0ecSDave Kleikamp } 1310ac27a0ecSDave Kleikamp 1311ac27a0ecSDave Kleikamp /* 1312ac27a0ecSDave Kleikamp * To preserve ordering, it is essential that the hole instantiation and 1313ac27a0ecSDave Kleikamp * the data write be encapsulated in a single transaction. We cannot 1314617ba13bSMingming Cao * close off a transaction and start a new one between the ext4_get_block() 1315dab291afSMingming Cao * and the commit_write(). So doing the jbd2_journal_start at the start of 1316ac27a0ecSDave Kleikamp * prepare_write() is the right place. 1317ac27a0ecSDave Kleikamp * 1318617ba13bSMingming Cao * Also, this function can nest inside ext4_writepage() -> 1319617ba13bSMingming Cao * block_write_full_page(). In that case, we *know* that ext4_writepage() 1320ac27a0ecSDave Kleikamp * has generated enough buffer credits to do the whole page. So we won't 1321ac27a0ecSDave Kleikamp * block on the journal in that case, which is good, because the caller may 1322ac27a0ecSDave Kleikamp * be PF_MEMALLOC. 1323ac27a0ecSDave Kleikamp * 1324617ba13bSMingming Cao * By accident, ext4 can be reentered when a transaction is open via 1325ac27a0ecSDave Kleikamp * quota file writes. If we were to commit the transaction while thus 1326ac27a0ecSDave Kleikamp * reentered, there can be a deadlock - we would be holding a quota 1327ac27a0ecSDave Kleikamp * lock, and the commit would never complete if another thread had a 1328ac27a0ecSDave Kleikamp * transaction open and was blocking on the quota lock - a ranking 1329ac27a0ecSDave Kleikamp * violation. 1330ac27a0ecSDave Kleikamp * 1331dab291afSMingming Cao * So what we do is to rely on the fact that jbd2_journal_stop/journal_start 1332ac27a0ecSDave Kleikamp * will _not_ run commit under these circumstances because handle->h_ref 1333ac27a0ecSDave Kleikamp * is elevated. We'll still have enough credits for the tiny quotafile 1334ac27a0ecSDave Kleikamp * write. 
1335ac27a0ecSDave Kleikamp */ 1336ac27a0ecSDave Kleikamp static int do_journal_get_write_access(handle_t *handle, 1337ac27a0ecSDave Kleikamp struct buffer_head *bh) 1338ac27a0ecSDave Kleikamp { 1339ac27a0ecSDave Kleikamp if (!buffer_mapped(bh) || buffer_freed(bh)) 1340ac27a0ecSDave Kleikamp return 0; 1341617ba13bSMingming Cao return ext4_journal_get_write_access(handle, bh); 1342ac27a0ecSDave Kleikamp } 1343ac27a0ecSDave Kleikamp 1344bfc1af65SNick Piggin static int ext4_write_begin(struct file *file, struct address_space *mapping, 1345bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned flags, 1346bfc1af65SNick Piggin struct page **pagep, void **fsdata) 1347ac27a0ecSDave Kleikamp { 1348bfc1af65SNick Piggin struct inode *inode = mapping->host; 13497479d2b9SAndrew Morton int ret, needed_blocks = ext4_writepage_trans_blocks(inode); 1350ac27a0ecSDave Kleikamp handle_t *handle; 1351ac27a0ecSDave Kleikamp int retries = 0; 1352bfc1af65SNick Piggin struct page *page; 1353bfc1af65SNick Piggin pgoff_t index; 1354bfc1af65SNick Piggin unsigned from, to; 1355bfc1af65SNick Piggin 1356ba80b101STheodore Ts'o trace_mark(ext4_write_begin, 1357ba80b101STheodore Ts'o "dev %s ino %lu pos %llu len %u flags %u", 1358ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, 1359ba80b101STheodore Ts'o (unsigned long long) pos, len, flags); 1360bfc1af65SNick Piggin index = pos >> PAGE_CACHE_SHIFT; 1361bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1362bfc1af65SNick Piggin to = from + len; 1363ac27a0ecSDave Kleikamp 1364ac27a0ecSDave Kleikamp retry: 1365617ba13bSMingming Cao handle = ext4_journal_start(inode, needed_blocks); 13667479d2b9SAndrew Morton if (IS_ERR(handle)) { 13677479d2b9SAndrew Morton ret = PTR_ERR(handle); 13687479d2b9SAndrew Morton goto out; 13697479d2b9SAndrew Morton } 1370ac27a0ecSDave Kleikamp 1371*ebd3610bSJan Kara /* We cannot recurse into the filesystem as the transaction is already 1372*ebd3610bSJan Kara * started */ 1373*ebd3610bSJan Kara flags |= AOP_FLAG_NOFS; 1374*ebd3610bSJan Kara 137554566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 1376cf108bcaSJan Kara if (!page) { 1377cf108bcaSJan Kara ext4_journal_stop(handle); 1378cf108bcaSJan Kara ret = -ENOMEM; 1379cf108bcaSJan Kara goto out; 1380cf108bcaSJan Kara } 1381cf108bcaSJan Kara *pagep = page; 1382cf108bcaSJan Kara 1383bfc1af65SNick Piggin ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 1384bfc1af65SNick Piggin ext4_get_block); 1385bfc1af65SNick Piggin 1386bfc1af65SNick Piggin if (!ret && ext4_should_journal_data(inode)) { 1387ac27a0ecSDave Kleikamp ret = walk_page_buffers(handle, page_buffers(page), 1388ac27a0ecSDave Kleikamp from, to, NULL, do_journal_get_write_access); 1389b46be050SAndrey Savochkin } 1390bfc1af65SNick Piggin 1391bfc1af65SNick Piggin if (ret) { 1392bfc1af65SNick Piggin unlock_page(page); 1393cf108bcaSJan Kara ext4_journal_stop(handle); 1394bfc1af65SNick Piggin page_cache_release(page); 1395ae4d5372SAneesh Kumar K.V /* 1396ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks 1397ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need 1398ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex. 
1399ae4d5372SAneesh Kumar K.V */ 1400ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size) 1401ae4d5372SAneesh Kumar K.V vmtruncate(inode, inode->i_size); 1402bfc1af65SNick Piggin } 1403bfc1af65SNick Piggin 1404617ba13bSMingming Cao if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 1405ac27a0ecSDave Kleikamp goto retry; 14067479d2b9SAndrew Morton out: 1407ac27a0ecSDave Kleikamp return ret; 1408ac27a0ecSDave Kleikamp } 1409ac27a0ecSDave Kleikamp 1410bfc1af65SNick Piggin /* For write_end() in data=journal mode */ 1411bfc1af65SNick Piggin static int write_end_fn(handle_t *handle, struct buffer_head *bh) 1412ac27a0ecSDave Kleikamp { 1413ac27a0ecSDave Kleikamp if (!buffer_mapped(bh) || buffer_freed(bh)) 1414ac27a0ecSDave Kleikamp return 0; 1415ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 14160390131bSFrank Mayhar return ext4_handle_dirty_metadata(handle, NULL, bh); 1417ac27a0ecSDave Kleikamp } 1418ac27a0ecSDave Kleikamp 1419ac27a0ecSDave Kleikamp /* 1420ac27a0ecSDave Kleikamp * We need to pick up the new inode size which generic_commit_write gave us 1421ac27a0ecSDave Kleikamp * `file' can be NULL - eg, when called from page_symlink(). 1422ac27a0ecSDave Kleikamp * 1423617ba13bSMingming Cao * ext4 never places buffers on inode->i_mapping->private_list. metadata 1424ac27a0ecSDave Kleikamp * buffers are managed internally. 1425ac27a0ecSDave Kleikamp */ 1426bfc1af65SNick Piggin static int ext4_ordered_write_end(struct file *file, 1427bfc1af65SNick Piggin struct address_space *mapping, 1428bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1429bfc1af65SNick Piggin struct page *page, void *fsdata) 1430ac27a0ecSDave Kleikamp { 1431617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1432cf108bcaSJan Kara struct inode *inode = mapping->host; 1433ac27a0ecSDave Kleikamp int ret = 0, ret2; 1434ac27a0ecSDave Kleikamp 1435ba80b101STheodore Ts'o trace_mark(ext4_ordered_write_end, 1436ba80b101STheodore Ts'o "dev %s ino %lu pos %llu len %u copied %u", 1437ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, 1438ba80b101STheodore Ts'o (unsigned long long) pos, len, copied); 1439678aaf48SJan Kara ret = ext4_jbd2_file_inode(handle, inode); 1440ac27a0ecSDave Kleikamp 1441ac27a0ecSDave Kleikamp if (ret == 0) { 1442ac27a0ecSDave Kleikamp loff_t new_i_size; 1443ac27a0ecSDave Kleikamp 1444bfc1af65SNick Piggin new_i_size = pos + copied; 1445cf17fea6SAneesh Kumar K.V if (new_i_size > EXT4_I(inode)->i_disksize) { 1446cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_i_size); 1447cf17fea6SAneesh Kumar K.V /* We need to mark inode dirty even if 1448cf17fea6SAneesh Kumar K.V * new_i_size is less that inode->i_size 1449cf17fea6SAneesh Kumar K.V * bu greater than i_disksize.(hint delalloc) 1450cf17fea6SAneesh Kumar K.V */ 1451cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 1452cf17fea6SAneesh Kumar K.V } 1453cf17fea6SAneesh Kumar K.V 1454cf108bcaSJan Kara ret2 = generic_write_end(file, mapping, pos, len, copied, 1455bfc1af65SNick Piggin page, fsdata); 1456f8a87d89SRoel Kluin copied = ret2; 1457f8a87d89SRoel Kluin if (ret2 < 0) 1458f8a87d89SRoel Kluin ret = ret2; 1459ac27a0ecSDave Kleikamp } 1460617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1461ac27a0ecSDave Kleikamp if (!ret) 1462ac27a0ecSDave Kleikamp ret = ret2; 1463bfc1af65SNick Piggin 1464bfc1af65SNick Piggin return ret ? 
ret : copied; 1465ac27a0ecSDave Kleikamp } 1466ac27a0ecSDave Kleikamp 1467bfc1af65SNick Piggin static int ext4_writeback_write_end(struct file *file, 1468bfc1af65SNick Piggin struct address_space *mapping, 1469bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1470bfc1af65SNick Piggin struct page *page, void *fsdata) 1471ac27a0ecSDave Kleikamp { 1472617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1473cf108bcaSJan Kara struct inode *inode = mapping->host; 1474ac27a0ecSDave Kleikamp int ret = 0, ret2; 1475ac27a0ecSDave Kleikamp loff_t new_i_size; 1476ac27a0ecSDave Kleikamp 1477ba80b101STheodore Ts'o trace_mark(ext4_writeback_write_end, 1478ba80b101STheodore Ts'o "dev %s ino %lu pos %llu len %u copied %u", 1479ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, 1480ba80b101STheodore Ts'o (unsigned long long) pos, len, copied); 1481bfc1af65SNick Piggin new_i_size = pos + copied; 1482cf17fea6SAneesh Kumar K.V if (new_i_size > EXT4_I(inode)->i_disksize) { 1483cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_i_size); 1484cf17fea6SAneesh Kumar K.V /* We need to mark inode dirty even if 1485cf17fea6SAneesh Kumar K.V * new_i_size is less that inode->i_size 1486cf17fea6SAneesh Kumar K.V * bu greater than i_disksize.(hint delalloc) 1487cf17fea6SAneesh Kumar K.V */ 1488cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 1489cf17fea6SAneesh Kumar K.V } 1490ac27a0ecSDave Kleikamp 1491cf108bcaSJan Kara ret2 = generic_write_end(file, mapping, pos, len, copied, 1492bfc1af65SNick Piggin page, fsdata); 1493f8a87d89SRoel Kluin copied = ret2; 1494f8a87d89SRoel Kluin if (ret2 < 0) 1495f8a87d89SRoel Kluin ret = ret2; 1496ac27a0ecSDave Kleikamp 1497617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1498ac27a0ecSDave Kleikamp if (!ret) 1499ac27a0ecSDave Kleikamp ret = ret2; 1500bfc1af65SNick Piggin 1501bfc1af65SNick Piggin return ret ? 
ret : copied; 1502ac27a0ecSDave Kleikamp } 1503ac27a0ecSDave Kleikamp 1504bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file, 1505bfc1af65SNick Piggin struct address_space *mapping, 1506bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1507bfc1af65SNick Piggin struct page *page, void *fsdata) 1508ac27a0ecSDave Kleikamp { 1509617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1510bfc1af65SNick Piggin struct inode *inode = mapping->host; 1511ac27a0ecSDave Kleikamp int ret = 0, ret2; 1512ac27a0ecSDave Kleikamp int partial = 0; 1513bfc1af65SNick Piggin unsigned from, to; 1514cf17fea6SAneesh Kumar K.V loff_t new_i_size; 1515ac27a0ecSDave Kleikamp 1516ba80b101STheodore Ts'o trace_mark(ext4_journalled_write_end, 1517ba80b101STheodore Ts'o "dev %s ino %lu pos %llu len %u copied %u", 1518ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, 1519ba80b101STheodore Ts'o (unsigned long long) pos, len, copied); 1520bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1521bfc1af65SNick Piggin to = from + len; 1522bfc1af65SNick Piggin 1523bfc1af65SNick Piggin if (copied < len) { 1524bfc1af65SNick Piggin if (!PageUptodate(page)) 1525bfc1af65SNick Piggin copied = 0; 1526bfc1af65SNick Piggin page_zero_new_buffers(page, from+copied, to); 1527bfc1af65SNick Piggin } 1528ac27a0ecSDave Kleikamp 1529ac27a0ecSDave Kleikamp ret = walk_page_buffers(handle, page_buffers(page), from, 1530bfc1af65SNick Piggin to, &partial, write_end_fn); 1531ac27a0ecSDave Kleikamp if (!partial) 1532ac27a0ecSDave Kleikamp SetPageUptodate(page); 1533cf17fea6SAneesh Kumar K.V new_i_size = pos + copied; 1534cf17fea6SAneesh Kumar K.V if (new_i_size > inode->i_size) 1535bfc1af65SNick Piggin i_size_write(inode, pos+copied); 1536617ba13bSMingming Cao EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 1537cf17fea6SAneesh Kumar K.V if (new_i_size > EXT4_I(inode)->i_disksize) { 1538cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_i_size); 1539617ba13bSMingming Cao ret2 = ext4_mark_inode_dirty(handle, inode); 1540ac27a0ecSDave Kleikamp if (!ret) 1541ac27a0ecSDave Kleikamp ret = ret2; 1542ac27a0ecSDave Kleikamp } 1543bfc1af65SNick Piggin 1544cf108bcaSJan Kara unlock_page(page); 1545617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1546ac27a0ecSDave Kleikamp if (!ret) 1547ac27a0ecSDave Kleikamp ret = ret2; 1548bfc1af65SNick Piggin page_cache_release(page); 1549bfc1af65SNick Piggin 1550bfc1af65SNick Piggin return ret ? 
ret : copied; 1551ac27a0ecSDave Kleikamp } 1552d2a17637SMingming Cao 1553d2a17637SMingming Cao static int ext4_da_reserve_space(struct inode *inode, int nrblocks) 1554d2a17637SMingming Cao { 1555030ba6bcSAneesh Kumar K.V int retries = 0; 1556d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1557d2a17637SMingming Cao unsigned long md_needed, mdblocks, total = 0; 1558d2a17637SMingming Cao 1559d2a17637SMingming Cao /* 1560d2a17637SMingming Cao * recalculate the amount of metadata blocks to reserve 1561d2a17637SMingming Cao * in order to allocate nrblocks 1562d2a17637SMingming Cao * worse case is one extent per block 1563d2a17637SMingming Cao */ 1564030ba6bcSAneesh Kumar K.V repeat: 1565d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1566d2a17637SMingming Cao total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks; 1567d2a17637SMingming Cao mdblocks = ext4_calc_metadata_amount(inode, total); 1568d2a17637SMingming Cao BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks); 1569d2a17637SMingming Cao 1570d2a17637SMingming Cao md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; 1571d2a17637SMingming Cao total = md_needed + nrblocks; 1572d2a17637SMingming Cao 1573a30d542aSAneesh Kumar K.V if (ext4_claim_free_blocks(sbi, total)) { 1574d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1575030ba6bcSAneesh Kumar K.V if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1576030ba6bcSAneesh Kumar K.V yield(); 1577030ba6bcSAneesh Kumar K.V goto repeat; 1578030ba6bcSAneesh Kumar K.V } 1579d2a17637SMingming Cao return -ENOSPC; 1580d2a17637SMingming Cao } 1581d2a17637SMingming Cao EXT4_I(inode)->i_reserved_data_blocks += nrblocks; 1582d2a17637SMingming Cao EXT4_I(inode)->i_reserved_meta_blocks = mdblocks; 1583d2a17637SMingming Cao 1584d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1585d2a17637SMingming Cao return 0; /* success */ 1586d2a17637SMingming Cao } 1587d2a17637SMingming Cao 158812219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1589d2a17637SMingming Cao { 1590d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1591d2a17637SMingming Cao int total, mdb, mdb_free, release; 1592d2a17637SMingming Cao 1593cd213226SMingming Cao if (!to_free) 1594cd213226SMingming Cao return; /* Nothing to release, exit */ 1595cd213226SMingming Cao 1596d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1597cd213226SMingming Cao 1598cd213226SMingming Cao if (!EXT4_I(inode)->i_reserved_data_blocks) { 1599cd213226SMingming Cao /* 1600cd213226SMingming Cao * if there is no reserved blocks, but we try to free some 1601cd213226SMingming Cao * then the counter is messed up somewhere. 
1602cd213226SMingming Cao * but since this function is called from invalidate 1603cd213226SMingming Cao * page, it's harmless to return without any action 1604cd213226SMingming Cao */ 1605cd213226SMingming Cao printk(KERN_INFO "ext4 delalloc try to release %d reserved " 1606cd213226SMingming Cao "blocks for inode %lu, but there is no reserved " 1607cd213226SMingming Cao "data blocks\n", to_free, inode->i_ino); 1608cd213226SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1609cd213226SMingming Cao return; 1610cd213226SMingming Cao } 1611cd213226SMingming Cao 1612d2a17637SMingming Cao /* recalculate the number of metablocks still need to be reserved */ 161312219aeaSAneesh Kumar K.V total = EXT4_I(inode)->i_reserved_data_blocks - to_free; 1614d2a17637SMingming Cao mdb = ext4_calc_metadata_amount(inode, total); 1615d2a17637SMingming Cao 1616d2a17637SMingming Cao /* figure out how many metablocks to release */ 1617d2a17637SMingming Cao BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1618d2a17637SMingming Cao mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb; 1619d2a17637SMingming Cao 1620d2a17637SMingming Cao release = to_free + mdb_free; 1621d2a17637SMingming Cao 16226bc6e63fSAneesh Kumar K.V /* update fs dirty blocks counter for truncate case */ 16236bc6e63fSAneesh Kumar K.V percpu_counter_sub(&sbi->s_dirtyblocks_counter, release); 1624d2a17637SMingming Cao 1625d2a17637SMingming Cao /* update per-inode reservations */ 162612219aeaSAneesh Kumar K.V BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks); 162712219aeaSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks -= to_free; 1628d2a17637SMingming Cao 1629d2a17637SMingming Cao BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1630d2a17637SMingming Cao EXT4_I(inode)->i_reserved_meta_blocks = mdb; 1631d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1632d2a17637SMingming Cao } 1633d2a17637SMingming Cao 1634d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1635d2a17637SMingming Cao unsigned long offset) 1636d2a17637SMingming Cao { 1637d2a17637SMingming Cao int to_release = 0; 1638d2a17637SMingming Cao struct buffer_head *head, *bh; 1639d2a17637SMingming Cao unsigned int curr_off = 0; 1640d2a17637SMingming Cao 1641d2a17637SMingming Cao head = page_buffers(page); 1642d2a17637SMingming Cao bh = head; 1643d2a17637SMingming Cao do { 1644d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1645d2a17637SMingming Cao 1646d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1647d2a17637SMingming Cao to_release++; 1648d2a17637SMingming Cao clear_buffer_delay(bh); 1649d2a17637SMingming Cao } 1650d2a17637SMingming Cao curr_off = next_off; 1651d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 165212219aeaSAneesh Kumar K.V ext4_da_release_space(page->mapping->host, to_release); 1653d2a17637SMingming Cao } 1654ac27a0ecSDave Kleikamp 1655ac27a0ecSDave Kleikamp /* 165664769240SAlex Tomas * Delayed allocation stuff 165764769240SAlex Tomas */ 165864769240SAlex Tomas 165964769240SAlex Tomas struct mpage_da_data { 166064769240SAlex Tomas struct inode *inode; 166164769240SAlex Tomas struct buffer_head lbh; /* extent of blocks */ 166264769240SAlex Tomas unsigned long first_page, next_page; /* extent of pages */ 166364769240SAlex Tomas get_block_t *get_block; 166464769240SAlex Tomas struct writeback_control *wbc; 1665a1d6cc56SAneesh Kumar K.V int io_done; 1666498e5f24STheodore Ts'o int pages_written; 1667df22291fSAneesh 
Kumar K.V int retval; 166864769240SAlex Tomas }; 166964769240SAlex Tomas 167064769240SAlex Tomas /* 167164769240SAlex Tomas * mpage_da_submit_io - walks through extent of pages and try to write 1672a1d6cc56SAneesh Kumar K.V * them with writepage() call back 167364769240SAlex Tomas * 167464769240SAlex Tomas * @mpd->inode: inode 167564769240SAlex Tomas * @mpd->first_page: first page of the extent 167664769240SAlex Tomas * @mpd->next_page: page after the last page of the extent 167764769240SAlex Tomas * @mpd->get_block: the filesystem's block mapper function 167864769240SAlex Tomas * 167964769240SAlex Tomas * By the time mpage_da_submit_io() is called we expect all blocks 168064769240SAlex Tomas * to be allocated. this may be wrong if allocation failed. 168164769240SAlex Tomas * 168264769240SAlex Tomas * As pages are already locked by write_cache_pages(), we can't use it 168364769240SAlex Tomas */ 168464769240SAlex Tomas static int mpage_da_submit_io(struct mpage_da_data *mpd) 168564769240SAlex Tomas { 168622208dedSAneesh Kumar K.V long pages_skipped; 1687791b7f08SAneesh Kumar K.V struct pagevec pvec; 1688791b7f08SAneesh Kumar K.V unsigned long index, end; 1689791b7f08SAneesh Kumar K.V int ret = 0, err, nr_pages, i; 1690791b7f08SAneesh Kumar K.V struct inode *inode = mpd->inode; 1691791b7f08SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 169264769240SAlex Tomas 169364769240SAlex Tomas BUG_ON(mpd->next_page <= mpd->first_page); 1694791b7f08SAneesh Kumar K.V /* 1695791b7f08SAneesh Kumar K.V * We need to start from the first_page to the next_page - 1 1696791b7f08SAneesh Kumar K.V * to make sure we also write the mapped dirty buffer_heads. 1697791b7f08SAneesh Kumar K.V * If we look at mpd->lbh.b_blocknr we would only be looking 1698791b7f08SAneesh Kumar K.V * at the currently mapped buffer_heads. 1699791b7f08SAneesh Kumar K.V */ 170064769240SAlex Tomas index = mpd->first_page; 170164769240SAlex Tomas end = mpd->next_page - 1; 170264769240SAlex Tomas 1703791b7f08SAneesh Kumar K.V pagevec_init(&pvec, 0); 170464769240SAlex Tomas while (index <= end) { 1705791b7f08SAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 170664769240SAlex Tomas if (nr_pages == 0) 170764769240SAlex Tomas break; 170864769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 170964769240SAlex Tomas struct page *page = pvec.pages[i]; 171064769240SAlex Tomas 1711791b7f08SAneesh Kumar K.V index = page->index; 1712791b7f08SAneesh Kumar K.V if (index > end) 1713791b7f08SAneesh Kumar K.V break; 1714791b7f08SAneesh Kumar K.V index++; 1715791b7f08SAneesh Kumar K.V 1716791b7f08SAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1717791b7f08SAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1718791b7f08SAneesh Kumar K.V 171922208dedSAneesh Kumar K.V pages_skipped = mpd->wbc->pages_skipped; 1720a1d6cc56SAneesh Kumar K.V err = mapping->a_ops->writepage(page, mpd->wbc); 172122208dedSAneesh Kumar K.V if (!err && (pages_skipped == mpd->wbc->pages_skipped)) 172222208dedSAneesh Kumar K.V /* 172322208dedSAneesh Kumar K.V * have successfully written the page 172422208dedSAneesh Kumar K.V * without skipping the same 172522208dedSAneesh Kumar K.V */ 1726a1d6cc56SAneesh Kumar K.V mpd->pages_written++; 172764769240SAlex Tomas /* 172864769240SAlex Tomas * In error case, we have to continue because 172964769240SAlex Tomas * remaining pages are still locked 173064769240SAlex Tomas * XXX: unlock and re-dirty them? 
173164769240SAlex Tomas */ 173264769240SAlex Tomas if (ret == 0) 173364769240SAlex Tomas ret = err; 173464769240SAlex Tomas } 173564769240SAlex Tomas pagevec_release(&pvec); 173664769240SAlex Tomas } 173764769240SAlex Tomas return ret; 173864769240SAlex Tomas } 173964769240SAlex Tomas 174064769240SAlex Tomas /* 174164769240SAlex Tomas * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers 174264769240SAlex Tomas * 174364769240SAlex Tomas * @mpd->inode - inode to walk through 174464769240SAlex Tomas * @exbh->b_blocknr - first block on a disk 174564769240SAlex Tomas * @exbh->b_size - amount of space in bytes 174664769240SAlex Tomas * @logical - first logical block to start assignment with 174764769240SAlex Tomas * 174864769240SAlex Tomas * the function goes through all passed space and put actual disk 174964769240SAlex Tomas * block numbers into buffer heads, dropping BH_Delay 175064769240SAlex Tomas */ 175164769240SAlex Tomas static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, 175264769240SAlex Tomas struct buffer_head *exbh) 175364769240SAlex Tomas { 175464769240SAlex Tomas struct inode *inode = mpd->inode; 175564769240SAlex Tomas struct address_space *mapping = inode->i_mapping; 175664769240SAlex Tomas int blocks = exbh->b_size >> inode->i_blkbits; 175764769240SAlex Tomas sector_t pblock = exbh->b_blocknr, cur_logical; 175864769240SAlex Tomas struct buffer_head *head, *bh; 1759a1d6cc56SAneesh Kumar K.V pgoff_t index, end; 176064769240SAlex Tomas struct pagevec pvec; 176164769240SAlex Tomas int nr_pages, i; 176264769240SAlex Tomas 176364769240SAlex Tomas index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 176464769240SAlex Tomas end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 176564769240SAlex Tomas cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 176664769240SAlex Tomas 176764769240SAlex Tomas pagevec_init(&pvec, 0); 176864769240SAlex Tomas 176964769240SAlex Tomas while (index <= end) { 177064769240SAlex Tomas /* XXX: optimize tail */ 177164769240SAlex Tomas nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 177264769240SAlex Tomas if (nr_pages == 0) 177364769240SAlex Tomas break; 177464769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 177564769240SAlex Tomas struct page *page = pvec.pages[i]; 177664769240SAlex Tomas 177764769240SAlex Tomas index = page->index; 177864769240SAlex Tomas if (index > end) 177964769240SAlex Tomas break; 178064769240SAlex Tomas index++; 178164769240SAlex Tomas 178264769240SAlex Tomas BUG_ON(!PageLocked(page)); 178364769240SAlex Tomas BUG_ON(PageWriteback(page)); 178464769240SAlex Tomas BUG_ON(!page_has_buffers(page)); 178564769240SAlex Tomas 178664769240SAlex Tomas bh = page_buffers(page); 178764769240SAlex Tomas head = bh; 178864769240SAlex Tomas 178964769240SAlex Tomas /* skip blocks out of the range */ 179064769240SAlex Tomas do { 179164769240SAlex Tomas if (cur_logical >= logical) 179264769240SAlex Tomas break; 179364769240SAlex Tomas cur_logical++; 179464769240SAlex Tomas } while ((bh = bh->b_this_page) != head); 179564769240SAlex Tomas 179664769240SAlex Tomas do { 179764769240SAlex Tomas if (cur_logical >= logical + blocks) 179864769240SAlex Tomas break; 179964769240SAlex Tomas if (buffer_delay(bh)) { 180064769240SAlex Tomas bh->b_blocknr = pblock; 180164769240SAlex Tomas clear_buffer_delay(bh); 1802bf068ee2SAneesh Kumar K.V bh->b_bdev = inode->i_sb->s_bdev; 1803bf068ee2SAneesh Kumar K.V } else if (buffer_unwritten(bh)) { 1804bf068ee2SAneesh Kumar K.V 
bh->b_blocknr = pblock; 1805bf068ee2SAneesh Kumar K.V clear_buffer_unwritten(bh); 1806bf068ee2SAneesh Kumar K.V set_buffer_mapped(bh); 1807bf068ee2SAneesh Kumar K.V set_buffer_new(bh); 1808bf068ee2SAneesh Kumar K.V bh->b_bdev = inode->i_sb->s_bdev; 180961628a3fSMingming Cao } else if (buffer_mapped(bh)) 181064769240SAlex Tomas BUG_ON(bh->b_blocknr != pblock); 181164769240SAlex Tomas 181264769240SAlex Tomas cur_logical++; 181364769240SAlex Tomas pblock++; 181464769240SAlex Tomas } while ((bh = bh->b_this_page) != head); 181564769240SAlex Tomas } 181664769240SAlex Tomas pagevec_release(&pvec); 181764769240SAlex Tomas } 181864769240SAlex Tomas } 181964769240SAlex Tomas 182064769240SAlex Tomas 182164769240SAlex Tomas /* 182264769240SAlex Tomas * __unmap_underlying_blocks - just a helper function to unmap 182364769240SAlex Tomas * set of blocks described by @bh 182464769240SAlex Tomas */ 182564769240SAlex Tomas static inline void __unmap_underlying_blocks(struct inode *inode, 182664769240SAlex Tomas struct buffer_head *bh) 182764769240SAlex Tomas { 182864769240SAlex Tomas struct block_device *bdev = inode->i_sb->s_bdev; 182964769240SAlex Tomas int blocks, i; 183064769240SAlex Tomas 183164769240SAlex Tomas blocks = bh->b_size >> inode->i_blkbits; 183264769240SAlex Tomas for (i = 0; i < blocks; i++) 183364769240SAlex Tomas unmap_underlying_metadata(bdev, bh->b_blocknr + i); 183464769240SAlex Tomas } 183564769240SAlex Tomas 1836c4a0c46eSAneesh Kumar K.V static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, 1837c4a0c46eSAneesh Kumar K.V sector_t logical, long blk_cnt) 1838c4a0c46eSAneesh Kumar K.V { 1839c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1840c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1841c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1842c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1843c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 1844c4a0c46eSAneesh Kumar K.V 1845c4a0c46eSAneesh Kumar K.V index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 1846c4a0c46eSAneesh Kumar K.V end = (logical + blk_cnt - 1) >> 1847c4a0c46eSAneesh Kumar K.V (PAGE_CACHE_SHIFT - inode->i_blkbits); 1848c4a0c46eSAneesh Kumar K.V while (index <= end) { 1849c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1850c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1851c4a0c46eSAneesh Kumar K.V break; 1852c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1853c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 1854c4a0c46eSAneesh Kumar K.V index = page->index; 1855c4a0c46eSAneesh Kumar K.V if (index > end) 1856c4a0c46eSAneesh Kumar K.V break; 1857c4a0c46eSAneesh Kumar K.V index++; 1858c4a0c46eSAneesh Kumar K.V 1859c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1860c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1861c4a0c46eSAneesh Kumar K.V block_invalidatepage(page, 0); 1862c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 1863c4a0c46eSAneesh Kumar K.V unlock_page(page); 1864c4a0c46eSAneesh Kumar K.V } 1865c4a0c46eSAneesh Kumar K.V } 1866c4a0c46eSAneesh Kumar K.V return; 1867c4a0c46eSAneesh Kumar K.V } 1868c4a0c46eSAneesh Kumar K.V 1869df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1870df22291fSAneesh Kumar K.V { 1871df22291fSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1872df22291fSAneesh Kumar K.V printk(KERN_EMERG "Total free blocks count %lld\n", 1873df22291fSAneesh Kumar K.V ext4_count_free_blocks(inode->i_sb)); 1874df22291fSAneesh 
Kumar K.V printk(KERN_EMERG "Free/Dirty block details\n"); 1875df22291fSAneesh Kumar K.V printk(KERN_EMERG "free_blocks=%lld\n", 18768f72fbdfSAlexander Beregalov (long long)percpu_counter_sum(&sbi->s_freeblocks_counter)); 1877df22291fSAneesh Kumar K.V printk(KERN_EMERG "dirty_blocks=%lld\n", 18788f72fbdfSAlexander Beregalov (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 1879df22291fSAneesh Kumar K.V printk(KERN_EMERG "Block reservation details\n"); 1880498e5f24STheodore Ts'o printk(KERN_EMERG "i_reserved_data_blocks=%u\n", 1881df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks); 1882498e5f24STheodore Ts'o printk(KERN_EMERG "i_reserved_meta_blocks=%u\n", 1883df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_meta_blocks); 1884df22291fSAneesh Kumar K.V return; 1885df22291fSAneesh Kumar K.V } 1886df22291fSAneesh Kumar K.V 188764769240SAlex Tomas /* 188864769240SAlex Tomas * mpage_da_map_blocks - go through given space 188964769240SAlex Tomas * 189064769240SAlex Tomas * @mpd->lbh - bh describing space 189164769240SAlex Tomas * @mpd->get_block - the filesystem's block mapper function 189264769240SAlex Tomas * 189364769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 189464769240SAlex Tomas * 189564769240SAlex Tomas */ 1896c4a0c46eSAneesh Kumar K.V static int mpage_da_map_blocks(struct mpage_da_data *mpd) 189764769240SAlex Tomas { 1898a1d6cc56SAneesh Kumar K.V int err = 0; 1899030ba6bcSAneesh Kumar K.V struct buffer_head new; 190064769240SAlex Tomas struct buffer_head *lbh = &mpd->lbh; 1901df22291fSAneesh Kumar K.V sector_t next; 190264769240SAlex Tomas 190364769240SAlex Tomas /* 190464769240SAlex Tomas * We consider only non-mapped and non-allocated blocks 190564769240SAlex Tomas */ 190664769240SAlex Tomas if (buffer_mapped(lbh) && !buffer_delay(lbh)) 1907c4a0c46eSAneesh Kumar K.V return 0; 190864769240SAlex Tomas new.b_state = lbh->b_state; 190964769240SAlex Tomas new.b_blocknr = 0; 1910a1d6cc56SAneesh Kumar K.V new.b_size = lbh->b_size; 1911df22291fSAneesh Kumar K.V next = lbh->b_blocknr; 191264769240SAlex Tomas /* 1913a1d6cc56SAneesh Kumar K.V * If we didn't accumulate anything 1914a1d6cc56SAneesh Kumar K.V * to write simply return 191564769240SAlex Tomas */ 1916a1d6cc56SAneesh Kumar K.V if (!new.b_size) 1917c4a0c46eSAneesh Kumar K.V return 0; 1918a1d6cc56SAneesh Kumar K.V err = mpd->get_block(mpd->inode, next, &new, 1); 1919c4a0c46eSAneesh Kumar K.V if (err) { 1920c4a0c46eSAneesh Kumar K.V 1921c4a0c46eSAneesh Kumar K.V /* If get block returns with error 1922c4a0c46eSAneesh Kumar K.V * we simply return. Later writepage 1923c4a0c46eSAneesh Kumar K.V * will redirty the page and writepages 1924c4a0c46eSAneesh Kumar K.V * will find the dirty page again 1925c4a0c46eSAneesh Kumar K.V */ 1926c4a0c46eSAneesh Kumar K.V if (err == -EAGAIN) 1927c4a0c46eSAneesh Kumar K.V return 0; 1928df22291fSAneesh Kumar K.V 1929df22291fSAneesh Kumar K.V if (err == -ENOSPC && 1930df22291fSAneesh Kumar K.V ext4_count_free_blocks(mpd->inode->i_sb)) { 1931df22291fSAneesh Kumar K.V mpd->retval = err; 1932df22291fSAneesh Kumar K.V return 0; 1933df22291fSAneesh Kumar K.V } 1934df22291fSAneesh Kumar K.V 1935c4a0c46eSAneesh Kumar K.V /* 1936c4a0c46eSAneesh Kumar K.V * get block failure will cause us 1937c4a0c46eSAneesh Kumar K.V * to loop in writepages. Because 1938c4a0c46eSAneesh Kumar K.V * a_ops->writepage won't be able to 1939c4a0c46eSAneesh Kumar K.V * make progress. 
The page will be redirtied
1940c4a0c46eSAneesh Kumar K.V * by writepage and writepages will again
1941c4a0c46eSAneesh Kumar K.V * try to write the same.
1942c4a0c46eSAneesh Kumar K.V */
1943c4a0c46eSAneesh Kumar K.V printk(KERN_EMERG "%s block allocation failed for inode %lu "
1944c4a0c46eSAneesh Kumar K.V "at logical offset %llu with max blocks "
1945c4a0c46eSAneesh Kumar K.V "%zd with error %d\n",
1946c4a0c46eSAneesh Kumar K.V __func__, mpd->inode->i_ino,
1947c4a0c46eSAneesh Kumar K.V (unsigned long long)next,
1948c4a0c46eSAneesh Kumar K.V lbh->b_size >> mpd->inode->i_blkbits, err);
1949c4a0c46eSAneesh Kumar K.V printk(KERN_EMERG "This should not happen!! "
1950c4a0c46eSAneesh Kumar K.V "Data will be lost\n");
1951030ba6bcSAneesh Kumar K.V if (err == -ENOSPC) {
1952df22291fSAneesh Kumar K.V ext4_print_free_blocks(mpd->inode);
1953030ba6bcSAneesh Kumar K.V }
1954c4a0c46eSAneesh Kumar K.V /* invalidate all the pages */
1955c4a0c46eSAneesh Kumar K.V ext4_da_block_invalidatepages(mpd, next,
1956c4a0c46eSAneesh Kumar K.V lbh->b_size >> mpd->inode->i_blkbits);
1957c4a0c46eSAneesh Kumar K.V return err;
1958c4a0c46eSAneesh Kumar K.V }
195964769240SAlex Tomas BUG_ON(new.b_size == 0);
196064769240SAlex Tomas
196164769240SAlex Tomas if (buffer_new(&new))
196264769240SAlex Tomas __unmap_underlying_blocks(mpd->inode, &new);
196364769240SAlex Tomas
196464769240SAlex Tomas /*
196564769240SAlex Tomas * If the blocks are marked delayed, we need to
196664769240SAlex Tomas * put the actual blocknr and drop the delayed bit
196764769240SAlex Tomas */
1968bf068ee2SAneesh Kumar K.V if (buffer_delay(lbh) || buffer_unwritten(lbh))
196964769240SAlex Tomas mpage_put_bnr_to_bhs(mpd, next, &new);
197064769240SAlex Tomas
1971c4a0c46eSAneesh Kumar K.V return 0;
197264769240SAlex Tomas }
197364769240SAlex Tomas
1974bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
1975bf068ee2SAneesh Kumar K.V (1 << BH_Delay) | (1 << BH_Unwritten))
197664769240SAlex Tomas
197764769240SAlex Tomas /*
197864769240SAlex Tomas * mpage_add_bh_to_extent - try to add one more block to extent of blocks
197964769240SAlex Tomas *
198064769240SAlex Tomas * @mpd->lbh - extent of blocks
198164769240SAlex Tomas * @logical - logical number of the block in the file
198264769240SAlex Tomas * @bh - bh of the block (used to access block's state)
198364769240SAlex Tomas *
198464769240SAlex Tomas * the function is used to collect contiguous blocks in the same state
198564769240SAlex Tomas */
198664769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
198764769240SAlex Tomas sector_t logical, struct buffer_head *bh)
198864769240SAlex Tomas {
198964769240SAlex Tomas sector_t next;
1990525f4ed8SMingming Cao size_t b_size = bh->b_size;
1991525f4ed8SMingming Cao struct buffer_head *lbh = &mpd->lbh;
1992525f4ed8SMingming Cao int nrblocks = lbh->b_size >> mpd->inode->i_blkbits;
199364769240SAlex Tomas
1994525f4ed8SMingming Cao /* check if the reserved journal credits might overflow */
1995525f4ed8SMingming Cao if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
1996525f4ed8SMingming Cao if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1997525f4ed8SMingming Cao /*
1998525f4ed8SMingming Cao * With non-extent format we are limited by the journal
1999525f4ed8SMingming Cao * credits available. The total credit needed to insert
2000525f4ed8SMingming Cao * nrblocks contiguous blocks depends on
2001525f4ed8SMingming Cao * nrblocks. So limit nrblocks.
2002525f4ed8SMingming Cao */
2003525f4ed8SMingming Cao goto flush_it;
2004525f4ed8SMingming Cao } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
2005525f4ed8SMingming Cao EXT4_MAX_TRANS_DATA) {
2006525f4ed8SMingming Cao /*
2007525f4ed8SMingming Cao * Adding the new buffer_head would make it cross the
2008525f4ed8SMingming Cao * allowed limit for which we have journal credits
2009525f4ed8SMingming Cao * reserved. So limit the new bh->b_size
2010525f4ed8SMingming Cao */
2011525f4ed8SMingming Cao b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
2012525f4ed8SMingming Cao mpd->inode->i_blkbits;
2013525f4ed8SMingming Cao /* we will do mpage_da_submit_io in the next loop */
2014525f4ed8SMingming Cao }
2015525f4ed8SMingming Cao }
201664769240SAlex Tomas /*
201764769240SAlex Tomas * First block in the extent
201864769240SAlex Tomas */
201964769240SAlex Tomas if (lbh->b_size == 0) {
202064769240SAlex Tomas lbh->b_blocknr = logical;
2021525f4ed8SMingming Cao lbh->b_size = b_size;
202264769240SAlex Tomas lbh->b_state = bh->b_state & BH_FLAGS;
202364769240SAlex Tomas return;
202464769240SAlex Tomas }
202564769240SAlex Tomas
2026525f4ed8SMingming Cao next = lbh->b_blocknr + nrblocks;
202764769240SAlex Tomas /*
202864769240SAlex Tomas * Can we merge the block to our big extent?
202964769240SAlex Tomas */
203064769240SAlex Tomas if (logical == next && (bh->b_state & BH_FLAGS) == lbh->b_state) {
2031525f4ed8SMingming Cao lbh->b_size += b_size;
203264769240SAlex Tomas return;
203364769240SAlex Tomas }
203464769240SAlex Tomas
2035525f4ed8SMingming Cao flush_it:
203664769240SAlex Tomas /*
203764769240SAlex Tomas * We couldn't merge the block to our extent, so we
203864769240SAlex Tomas * need to flush the current extent and start a new one
203964769240SAlex Tomas */
2040c4a0c46eSAneesh Kumar K.V if (mpage_da_map_blocks(mpd) == 0)
2041a1d6cc56SAneesh Kumar K.V mpage_da_submit_io(mpd);
2042a1d6cc56SAneesh Kumar K.V mpd->io_done = 1;
2043a1d6cc56SAneesh Kumar K.V return;
204464769240SAlex Tomas }
204564769240SAlex Tomas
204664769240SAlex Tomas /*
204764769240SAlex Tomas * __mpage_da_writepage - finds extent of pages and blocks
204864769240SAlex Tomas *
204964769240SAlex Tomas * @page: page to consider
205064769240SAlex Tomas * @wbc: not used, we just follow rules
205164769240SAlex Tomas * @data: context
205264769240SAlex Tomas *
205364769240SAlex Tomas * The function finds extents of pages and scans them for all blocks.
205464769240SAlex Tomas */
205564769240SAlex Tomas static int __mpage_da_writepage(struct page *page,
205664769240SAlex Tomas struct writeback_control *wbc, void *data)
205764769240SAlex Tomas {
205864769240SAlex Tomas struct mpage_da_data *mpd = data;
205964769240SAlex Tomas struct inode *inode = mpd->inode;
206064769240SAlex Tomas struct buffer_head *bh, *head, fake;
206164769240SAlex Tomas sector_t logical;
206264769240SAlex Tomas
2063a1d6cc56SAneesh Kumar K.V if (mpd->io_done) {
2064a1d6cc56SAneesh Kumar K.V /*
2065a1d6cc56SAneesh Kumar K.V * Redirty the rest of the pages in the page_vec
2066a1d6cc56SAneesh Kumar K.V * and skip them. We will
2067a1d6cc56SAneesh Kumar K.V * try to write them again after
2068a1d6cc56SAneesh Kumar K.V * starting a new transaction
2069a1d6cc56SAneesh Kumar K.V */
2070a1d6cc56SAneesh Kumar K.V redirty_page_for_writepage(wbc, page);
2071a1d6cc56SAneesh Kumar K.V unlock_page(page);
2072a1d6cc56SAneesh Kumar K.V return MPAGE_DA_EXTENT_TAIL;
2073a1d6cc56SAneesh Kumar K.V }
207464769240SAlex Tomas /*
207564769240SAlex Tomas * Can we merge this page to the current extent?
207664769240SAlex Tomas */ 207764769240SAlex Tomas if (mpd->next_page != page->index) { 207864769240SAlex Tomas /* 207964769240SAlex Tomas * Nope, we can't. So, we map non-allocated blocks 2080a1d6cc56SAneesh Kumar K.V * and start IO on them using writepage() 208164769240SAlex Tomas */ 208264769240SAlex Tomas if (mpd->next_page != mpd->first_page) { 2083c4a0c46eSAneesh Kumar K.V if (mpage_da_map_blocks(mpd) == 0) 208464769240SAlex Tomas mpage_da_submit_io(mpd); 2085a1d6cc56SAneesh Kumar K.V /* 2086a1d6cc56SAneesh Kumar K.V * skip rest of the page in the page_vec 2087a1d6cc56SAneesh Kumar K.V */ 2088a1d6cc56SAneesh Kumar K.V mpd->io_done = 1; 2089a1d6cc56SAneesh Kumar K.V redirty_page_for_writepage(wbc, page); 2090a1d6cc56SAneesh Kumar K.V unlock_page(page); 2091a1d6cc56SAneesh Kumar K.V return MPAGE_DA_EXTENT_TAIL; 209264769240SAlex Tomas } 209364769240SAlex Tomas 209464769240SAlex Tomas /* 209564769240SAlex Tomas * Start next extent of pages ... 209664769240SAlex Tomas */ 209764769240SAlex Tomas mpd->first_page = page->index; 209864769240SAlex Tomas 209964769240SAlex Tomas /* 210064769240SAlex Tomas * ... and blocks 210164769240SAlex Tomas */ 210264769240SAlex Tomas mpd->lbh.b_size = 0; 210364769240SAlex Tomas mpd->lbh.b_state = 0; 210464769240SAlex Tomas mpd->lbh.b_blocknr = 0; 210564769240SAlex Tomas } 210664769240SAlex Tomas 210764769240SAlex Tomas mpd->next_page = page->index + 1; 210864769240SAlex Tomas logical = (sector_t) page->index << 210964769240SAlex Tomas (PAGE_CACHE_SHIFT - inode->i_blkbits); 211064769240SAlex Tomas 211164769240SAlex Tomas if (!page_has_buffers(page)) { 211264769240SAlex Tomas /* 211364769240SAlex Tomas * There is no attached buffer heads yet (mmap?) 211464769240SAlex Tomas * we treat the page asfull of dirty blocks 211564769240SAlex Tomas */ 211664769240SAlex Tomas bh = &fake; 211764769240SAlex Tomas bh->b_size = PAGE_CACHE_SIZE; 211864769240SAlex Tomas bh->b_state = 0; 211964769240SAlex Tomas set_buffer_dirty(bh); 212064769240SAlex Tomas set_buffer_uptodate(bh); 212164769240SAlex Tomas mpage_add_bh_to_extent(mpd, logical, bh); 2122a1d6cc56SAneesh Kumar K.V if (mpd->io_done) 2123a1d6cc56SAneesh Kumar K.V return MPAGE_DA_EXTENT_TAIL; 212464769240SAlex Tomas } else { 212564769240SAlex Tomas /* 212664769240SAlex Tomas * Page with regular buffer heads, just add all dirty ones 212764769240SAlex Tomas */ 212864769240SAlex Tomas head = page_buffers(page); 212964769240SAlex Tomas bh = head; 213064769240SAlex Tomas do { 213164769240SAlex Tomas BUG_ON(buffer_locked(bh)); 2132791b7f08SAneesh Kumar K.V /* 2133791b7f08SAneesh Kumar K.V * We need to try to allocate 2134791b7f08SAneesh Kumar K.V * unmapped blocks in the same page. 2135791b7f08SAneesh Kumar K.V * Otherwise we won't make progress 2136791b7f08SAneesh Kumar K.V * with the page in ext4_da_writepage 2137791b7f08SAneesh Kumar K.V */ 2138a1d6cc56SAneesh Kumar K.V if (buffer_dirty(bh) && 2139a1d6cc56SAneesh Kumar K.V (!buffer_mapped(bh) || buffer_delay(bh))) { 214064769240SAlex Tomas mpage_add_bh_to_extent(mpd, logical, bh); 2141a1d6cc56SAneesh Kumar K.V if (mpd->io_done) 2142a1d6cc56SAneesh Kumar K.V return MPAGE_DA_EXTENT_TAIL; 2143791b7f08SAneesh Kumar K.V } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 2144791b7f08SAneesh Kumar K.V /* 2145791b7f08SAneesh Kumar K.V * mapped dirty buffer. We need to update 2146791b7f08SAneesh Kumar K.V * the b_state because we look at 2147791b7f08SAneesh Kumar K.V * b_state in mpage_da_map_blocks. 
We don't 2148791b7f08SAneesh Kumar K.V * update b_size because if we find an 2149791b7f08SAneesh Kumar K.V * unmapped buffer_head later we need to 2150791b7f08SAneesh Kumar K.V * use the b_state flag of that buffer_head. 2151791b7f08SAneesh Kumar K.V */ 2152791b7f08SAneesh Kumar K.V if (mpd->lbh.b_size == 0) 2153791b7f08SAneesh Kumar K.V mpd->lbh.b_state = 2154791b7f08SAneesh Kumar K.V bh->b_state & BH_FLAGS; 2155a1d6cc56SAneesh Kumar K.V } 215664769240SAlex Tomas logical++; 215764769240SAlex Tomas } while ((bh = bh->b_this_page) != head); 215864769240SAlex Tomas } 215964769240SAlex Tomas 216064769240SAlex Tomas return 0; 216164769240SAlex Tomas } 216264769240SAlex Tomas 216364769240SAlex Tomas /* 216464769240SAlex Tomas * mpage_da_writepages - walks the list of dirty pages of the given 216564769240SAlex Tomas * address space, allocates non-allocated blocks, maps newly-allocated 216664769240SAlex Tomas * blocks to existing bhs and issues IO on them 216764769240SAlex Tomas * 216864769240SAlex Tomas * @mapping: address space structure to write 216964769240SAlex Tomas * @wbc: subtract the number of written pages from *@wbc->nr_to_write 217064769240SAlex Tomas * @get_block: the filesystem's block mapper function. 217164769240SAlex Tomas * 217264769240SAlex Tomas * This is a library function, which implements the writepages() 217364769240SAlex Tomas * address_space_operation. 217464769240SAlex Tomas */ 217564769240SAlex Tomas static int mpage_da_writepages(struct address_space *mapping, 217664769240SAlex Tomas struct writeback_control *wbc, 2177df22291fSAneesh Kumar K.V struct mpage_da_data *mpd) 217864769240SAlex Tomas { 217964769240SAlex Tomas int ret; 218064769240SAlex Tomas 2181df22291fSAneesh Kumar K.V if (!mpd->get_block) 218264769240SAlex Tomas return generic_writepages(mapping, wbc); 218364769240SAlex Tomas 2184df22291fSAneesh Kumar K.V mpd->lbh.b_size = 0; 2185df22291fSAneesh Kumar K.V mpd->lbh.b_state = 0; 2186df22291fSAneesh Kumar K.V mpd->lbh.b_blocknr = 0; 2187df22291fSAneesh Kumar K.V mpd->first_page = 0; 2188df22291fSAneesh Kumar K.V mpd->next_page = 0; 2189df22291fSAneesh Kumar K.V mpd->io_done = 0; 2190df22291fSAneesh Kumar K.V mpd->pages_written = 0; 2191df22291fSAneesh Kumar K.V mpd->retval = 0; 2192a1d6cc56SAneesh Kumar K.V 2193df22291fSAneesh Kumar K.V ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, mpd); 219464769240SAlex Tomas /* 219564769240SAlex Tomas * Handle last extent of pages 219664769240SAlex Tomas */ 2197df22291fSAneesh Kumar K.V if (!mpd->io_done && mpd->next_page != mpd->first_page) { 2198df22291fSAneesh Kumar K.V if (mpage_da_map_blocks(mpd) == 0) 2199df22291fSAneesh Kumar K.V mpage_da_submit_io(mpd); 220064769240SAlex Tomas 220122208dedSAneesh Kumar K.V mpd->io_done = 1; 220222208dedSAneesh Kumar K.V ret = MPAGE_DA_EXTENT_TAIL; 220322208dedSAneesh Kumar K.V } 220422208dedSAneesh Kumar K.V wbc->nr_to_write -= mpd->pages_written; 220564769240SAlex Tomas return ret; 220664769240SAlex Tomas } 220764769240SAlex Tomas 220864769240SAlex Tomas /* 220964769240SAlex Tomas * This is a special callback for ->write_begin() only; 221064769240SAlex Tomas * its intention is to return a mapped block or reserve space 221164769240SAlex Tomas */ 221264769240SAlex Tomas static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 221364769240SAlex Tomas struct buffer_head *bh_result, int create) 221464769240SAlex Tomas { 221564769240SAlex Tomas int ret = 0; 221664769240SAlex Tomas 221764769240SAlex Tomas BUG_ON(create == 0); 221864769240SAlex Tomas
BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 221964769240SAlex Tomas 222064769240SAlex Tomas /* 222164769240SAlex Tomas * First, we need to know whether the block is already allocated; 222264769240SAlex Tomas * preallocated blocks are unmapped but should be treated 222364769240SAlex Tomas * the same as allocated blocks. 222464769240SAlex Tomas */ 2225d2a17637SMingming Cao ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1, bh_result, 0, 0, 0); 2226d2a17637SMingming Cao if ((ret == 0) && !buffer_delay(bh_result)) { 2227d2a17637SMingming Cao /* the block isn't (pre)allocated yet, let's reserve space */ 222864769240SAlex Tomas /* 222964769240SAlex Tomas * XXX: __block_prepare_write() unmaps passed block, 223064769240SAlex Tomas * is it OK? 223164769240SAlex Tomas */ 2232d2a17637SMingming Cao ret = ext4_da_reserve_space(inode, 1); 2233d2a17637SMingming Cao if (ret) 2234d2a17637SMingming Cao /* not enough space to reserve */ 2235d2a17637SMingming Cao return ret; 2236d2a17637SMingming Cao 223764769240SAlex Tomas map_bh(bh_result, inode->i_sb, 0); 223864769240SAlex Tomas set_buffer_new(bh_result); 223964769240SAlex Tomas set_buffer_delay(bh_result); 224064769240SAlex Tomas } else if (ret > 0) { 224164769240SAlex Tomas bh_result->b_size = (ret << inode->i_blkbits); 224264769240SAlex Tomas ret = 0; 224364769240SAlex Tomas } 224464769240SAlex Tomas 224564769240SAlex Tomas return ret; 224664769240SAlex Tomas } 2247d2a17637SMingming Cao #define EXT4_DELALLOC_RSVED 1 224864769240SAlex Tomas static int ext4_da_get_block_write(struct inode *inode, sector_t iblock, 224964769240SAlex Tomas struct buffer_head *bh_result, int create) 225064769240SAlex Tomas { 225161628a3fSMingming Cao int ret; 225264769240SAlex Tomas unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 225364769240SAlex Tomas loff_t disksize = EXT4_I(inode)->i_disksize; 225464769240SAlex Tomas handle_t *handle = NULL; 225564769240SAlex Tomas 225661628a3fSMingming Cao handle = ext4_journal_current_handle(); 2257166348ddSAneesh Kumar K.V BUG_ON(!handle); 225864769240SAlex Tomas ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks, 2259d2a17637SMingming Cao bh_result, create, 0, EXT4_DELALLOC_RSVED); 226064769240SAlex Tomas if (ret > 0) { 2261166348ddSAneesh Kumar K.V 226264769240SAlex Tomas bh_result->b_size = (ret << inode->i_blkbits); 226364769240SAlex Tomas 2264166348ddSAneesh Kumar K.V if (ext4_should_order_data(inode)) { 2265166348ddSAneesh Kumar K.V int retval; 2266166348ddSAneesh Kumar K.V retval = ext4_jbd2_file_inode(handle, inode); 2267166348ddSAneesh Kumar K.V if (retval) 2268166348ddSAneesh Kumar K.V /* 2269166348ddSAneesh Kumar K.V * Failed to add inode for ordered 2270166348ddSAneesh Kumar K.V * mode.
Don't update file size 2271166348ddSAneesh Kumar K.V */ 2272166348ddSAneesh Kumar K.V return retval; 2273166348ddSAneesh Kumar K.V } 2274166348ddSAneesh Kumar K.V 227564769240SAlex Tomas /* 227664769240SAlex Tomas * Update on-disk size along with block allocation; 227764769240SAlex Tomas * we don't use 'extend_disksize' as size may change 227864769240SAlex Tomas * within an already allocated block -bzzz 227964769240SAlex Tomas */ 228064769240SAlex Tomas disksize = ((loff_t) iblock + ret) << inode->i_blkbits; 228164769240SAlex Tomas if (disksize > i_size_read(inode)) 228264769240SAlex Tomas disksize = i_size_read(inode); 228364769240SAlex Tomas if (disksize > EXT4_I(inode)->i_disksize) { 2284cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, disksize); 228561628a3fSMingming Cao ret = ext4_mark_inode_dirty(handle, inode); 228664769240SAlex Tomas return ret; 228764769240SAlex Tomas } 228861628a3fSMingming Cao ret = 0; 228961628a3fSMingming Cao } 229061628a3fSMingming Cao return ret; 229161628a3fSMingming Cao } 229261628a3fSMingming Cao 229361628a3fSMingming Cao static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh) 229461628a3fSMingming Cao { 2295f0e6c985SAneesh Kumar K.V /* 2296f0e6c985SAneesh Kumar K.V * unmapped buffer is possible for holes. 2297f0e6c985SAneesh Kumar K.V * delay buffer is possible with delayed allocation 2298f0e6c985SAneesh Kumar K.V */ 2299f0e6c985SAneesh Kumar K.V return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh)); 2300f0e6c985SAneesh Kumar K.V } 2301f0e6c985SAneesh Kumar K.V 2302f0e6c985SAneesh Kumar K.V static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock, 2303f0e6c985SAneesh Kumar K.V struct buffer_head *bh_result, int create) 2304f0e6c985SAneesh Kumar K.V { 2305f0e6c985SAneesh Kumar K.V int ret = 0; 2306f0e6c985SAneesh Kumar K.V unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 2307f0e6c985SAneesh Kumar K.V 2308f0e6c985SAneesh Kumar K.V /* 2309f0e6c985SAneesh Kumar K.V * we don't want to do block allocation in writepage 2310f0e6c985SAneesh Kumar K.V * so call get_block_wrap with create = 0 2311f0e6c985SAneesh Kumar K.V */ 2312f0e6c985SAneesh Kumar K.V ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks, 2313f0e6c985SAneesh Kumar K.V bh_result, 0, 0, 0); 2314f0e6c985SAneesh Kumar K.V if (ret > 0) { 2315f0e6c985SAneesh Kumar K.V bh_result->b_size = (ret << inode->i_blkbits); 2316f0e6c985SAneesh Kumar K.V ret = 0; 2317f0e6c985SAneesh Kumar K.V } 2318f0e6c985SAneesh Kumar K.V return ret; 231961628a3fSMingming Cao } 232061628a3fSMingming Cao 232161628a3fSMingming Cao /* 2322f0e6c985SAneesh Kumar K.V * get called via ext4_da_writepages after taking page lock (have journal handle) 2323f0e6c985SAneesh Kumar K.V * get called via journal_submit_inode_data_buffers (no journal handle) 2324f0e6c985SAneesh Kumar K.V * get called via shrink_page_list via pdflush (no journal handle) 2325f0e6c985SAneesh Kumar K.V * or grab_page_cache when doing write_begin (have journal handle) 232661628a3fSMingming Cao */ 232764769240SAlex Tomas static int ext4_da_writepage(struct page *page, 232864769240SAlex Tomas struct writeback_control *wbc) 232964769240SAlex Tomas { 233064769240SAlex Tomas int ret = 0; 233161628a3fSMingming Cao loff_t size; 2332498e5f24STheodore Ts'o unsigned int len; 233361628a3fSMingming Cao struct buffer_head *page_bufs; 233461628a3fSMingming Cao struct inode *inode = page->mapping->host; 233564769240SAlex Tomas 2336ba80b101STheodore Ts'o trace_mark(ext4_da_writepage, 2337ba80b101STheodore
Ts'o "dev %s ino %lu page_index %lu", 2338ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, page->index); 233961628a3fSMingming Cao size = i_size_read(inode); 234061628a3fSMingming Cao if (page->index == size >> PAGE_CACHE_SHIFT) 234161628a3fSMingming Cao len = size & ~PAGE_CACHE_MASK; 234261628a3fSMingming Cao else 234361628a3fSMingming Cao len = PAGE_CACHE_SIZE; 234461628a3fSMingming Cao 2345f0e6c985SAneesh Kumar K.V if (page_has_buffers(page)) { 2346f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page); 2347f0e6c985SAneesh Kumar K.V if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2348f0e6c985SAneesh Kumar K.V ext4_bh_unmapped_or_delay)) { 234961628a3fSMingming Cao /* 2350f0e6c985SAneesh Kumar K.V * We don't want to do block allocation 2351f0e6c985SAneesh Kumar K.V * So redirty the page and return 2352cd1aac32SAneesh Kumar K.V * We may reach here when we do a journal commit 2353cd1aac32SAneesh Kumar K.V * via journal_submit_inode_data_buffers. 2354cd1aac32SAneesh Kumar K.V * If we don't have mapping block we just ignore 2355f0e6c985SAneesh Kumar K.V * them. We can also reach here via shrink_page_list 2356f0e6c985SAneesh Kumar K.V */ 2357f0e6c985SAneesh Kumar K.V redirty_page_for_writepage(wbc, page); 2358f0e6c985SAneesh Kumar K.V unlock_page(page); 2359f0e6c985SAneesh Kumar K.V return 0; 2360f0e6c985SAneesh Kumar K.V } 2361f0e6c985SAneesh Kumar K.V } else { 2362f0e6c985SAneesh Kumar K.V /* 2363f0e6c985SAneesh Kumar K.V * The test for page_has_buffers() is subtle: 2364f0e6c985SAneesh Kumar K.V * We know the page is dirty but it lost buffers. That means 2365f0e6c985SAneesh Kumar K.V * that at some moment in time after write_begin()/write_end() 2366f0e6c985SAneesh Kumar K.V * has been called all buffers have been clean and thus they 2367f0e6c985SAneesh Kumar K.V * must have been written at least once. So they are all 2368f0e6c985SAneesh Kumar K.V * mapped and we can happily proceed with mapping them 2369f0e6c985SAneesh Kumar K.V * and writing the page. 2370f0e6c985SAneesh Kumar K.V * 2371f0e6c985SAneesh Kumar K.V * Try to initialize the buffer_heads and check whether 2372f0e6c985SAneesh Kumar K.V * all are mapped and non delay. We don't want to 2373f0e6c985SAneesh Kumar K.V * do block allocation here. 
2374f0e6c985SAneesh Kumar K.V */ 2375f0e6c985SAneesh Kumar K.V ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, 2376f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write); 2377f0e6c985SAneesh Kumar K.V if (!ret) { 2378f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page); 2379f0e6c985SAneesh Kumar K.V /* check whether all are mapped and non delay */ 2380f0e6c985SAneesh Kumar K.V if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2381f0e6c985SAneesh Kumar K.V ext4_bh_unmapped_or_delay)) { 2382f0e6c985SAneesh Kumar K.V redirty_page_for_writepage(wbc, page); 2383f0e6c985SAneesh Kumar K.V unlock_page(page); 2384f0e6c985SAneesh Kumar K.V return 0; 2385f0e6c985SAneesh Kumar K.V } 2386f0e6c985SAneesh Kumar K.V } else { 2387f0e6c985SAneesh Kumar K.V /* 2388f0e6c985SAneesh Kumar K.V * We can't do block allocation here 2389f0e6c985SAneesh Kumar K.V * so just redirty the page and unlock 2390f0e6c985SAneesh Kumar K.V * and return 239161628a3fSMingming Cao */ 239261628a3fSMingming Cao redirty_page_for_writepage(wbc, page); 239361628a3fSMingming Cao unlock_page(page); 239461628a3fSMingming Cao return 0; 239561628a3fSMingming Cao } 2396ed9b3e33SAneesh Kumar K.V /* now mark the buffer_heads as dirty and uptodate */ 2397ed9b3e33SAneesh Kumar K.V block_commit_write(page, 0, PAGE_CACHE_SIZE); 239864769240SAlex Tomas } 239964769240SAlex Tomas 240064769240SAlex Tomas if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode)) 2401f0e6c985SAneesh Kumar K.V ret = nobh_writepage(page, ext4_normal_get_block_write, wbc); 240264769240SAlex Tomas else 2403f0e6c985SAneesh Kumar K.V ret = block_write_full_page(page, 2404f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write, 2405f0e6c985SAneesh Kumar K.V wbc); 240664769240SAlex Tomas 240764769240SAlex Tomas return ret; 240864769240SAlex Tomas } 240964769240SAlex Tomas 241061628a3fSMingming Cao /* 2411525f4ed8SMingming Cao * This is called via ext4_da_writepages() to 2412525f4ed8SMingming Cao * calculate the total number of credits to reserve to fit 2413525f4ed8SMingming Cao * a single extent allocation into a single transaction; 2414525f4ed8SMingming Cao * ext4_da_writepages() will loop calling this before 2415525f4ed8SMingming Cao * the block allocation. 241661628a3fSMingming Cao */ 2417525f4ed8SMingming Cao 2418525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode) 2419525f4ed8SMingming Cao { 2420525f4ed8SMingming Cao int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 2421525f4ed8SMingming Cao 2422525f4ed8SMingming Cao /* 2423525f4ed8SMingming Cao * With non-extent format the journal credit needed to 2424525f4ed8SMingming Cao * insert nrblocks contiguous block is dependent on 2425525f4ed8SMingming Cao * number of contiguous block.
So we will limit 2426525f4ed8SMingming Cao * number of contiguous block to a sane value 2427525f4ed8SMingming Cao */ 2428525f4ed8SMingming Cao if (!(inode->i_flags & EXT4_EXTENTS_FL) && 2429525f4ed8SMingming Cao (max_blocks > EXT4_MAX_TRANS_DATA)) 2430525f4ed8SMingming Cao max_blocks = EXT4_MAX_TRANS_DATA; 2431525f4ed8SMingming Cao 2432525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, max_blocks); 2433525f4ed8SMingming Cao } 243461628a3fSMingming Cao 243564769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping, 243664769240SAlex Tomas struct writeback_control *wbc) 243764769240SAlex Tomas { 243822208dedSAneesh Kumar K.V pgoff_t index; 243922208dedSAneesh Kumar K.V int range_whole = 0; 244061628a3fSMingming Cao handle_t *handle = NULL; 2441df22291fSAneesh Kumar K.V struct mpage_da_data mpd; 24425e745b04SAneesh Kumar K.V struct inode *inode = mapping->host; 244322208dedSAneesh Kumar K.V int no_nrwrite_index_update; 2444498e5f24STheodore Ts'o int pages_written = 0; 2445498e5f24STheodore Ts'o long pages_skipped; 24462acf2c26SAneesh Kumar K.V int range_cyclic, cycled = 1, io_done = 0; 24475e745b04SAneesh Kumar K.V int needed_blocks, ret = 0, nr_to_writebump = 0; 24485e745b04SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 244961628a3fSMingming Cao 2450ba80b101STheodore Ts'o trace_mark(ext4_da_writepages, 2451ba80b101STheodore Ts'o "dev %s ino %lu nr_t_write %ld " 2452ba80b101STheodore Ts'o "pages_skipped %ld range_start %llu " 2453ba80b101STheodore Ts'o "range_end %llu nonblocking %d " 2454ba80b101STheodore Ts'o "for_kupdate %d for_reclaim %d " 2455ba80b101STheodore Ts'o "for_writepages %d range_cyclic %d", 2456ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, 2457ba80b101STheodore Ts'o wbc->nr_to_write, wbc->pages_skipped, 2458ba80b101STheodore Ts'o (unsigned long long) wbc->range_start, 2459ba80b101STheodore Ts'o (unsigned long long) wbc->range_end, 2460ba80b101STheodore Ts'o wbc->nonblocking, wbc->for_kupdate, 2461ba80b101STheodore Ts'o wbc->for_reclaim, wbc->for_writepages, 2462ba80b101STheodore Ts'o wbc->range_cyclic); 2463ba80b101STheodore Ts'o 246461628a3fSMingming Cao /* 246561628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting 246661628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput() 246761628a3fSMingming Cao * because that could violate lock ordering on umount 246861628a3fSMingming Cao */ 2469a1d6cc56SAneesh Kumar K.V if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 247061628a3fSMingming Cao return 0; 24712a21e37eSTheodore Ts'o 24722a21e37eSTheodore Ts'o /* 24732a21e37eSTheodore Ts'o * If the filesystem has aborted, it is read-only, so return 24742a21e37eSTheodore Ts'o * right away instead of dumping stack traces later on that 24752a21e37eSTheodore Ts'o * will obscure the real source of the problem. We test 24762a21e37eSTheodore Ts'o * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because 24772a21e37eSTheodore Ts'o * the latter could be true if the filesystem is mounted 24782a21e37eSTheodore Ts'o * read-only, and in that case, ext4_da_writepages should 24792a21e37eSTheodore Ts'o * *never* be called, so if that ever happens, we would want 24802a21e37eSTheodore Ts'o * the stack trace. 
24812a21e37eSTheodore Ts'o */ 24822a21e37eSTheodore Ts'o if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT)) 24832a21e37eSTheodore Ts'o return -EROFS; 24842a21e37eSTheodore Ts'o 24855e745b04SAneesh Kumar K.V /* 24865e745b04SAneesh Kumar K.V * Make sure nr_to_write is >= sbi->s_mb_stream_request. 24875e745b04SAneesh Kumar K.V * This makes sure small file blocks are allocated in 24885e745b04SAneesh Kumar K.V * a single attempt. This ensures that small files 24895e745b04SAneesh Kumar K.V * get less fragmented. 24905e745b04SAneesh Kumar K.V */ 24915e745b04SAneesh Kumar K.V if (wbc->nr_to_write < sbi->s_mb_stream_request) { 24925e745b04SAneesh Kumar K.V nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write; 24935e745b04SAneesh Kumar K.V wbc->nr_to_write = sbi->s_mb_stream_request; 24945e745b04SAneesh Kumar K.V } 249522208dedSAneesh Kumar K.V if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 249622208dedSAneesh Kumar K.V range_whole = 1; 249761628a3fSMingming Cao 24982acf2c26SAneesh Kumar K.V range_cyclic = wbc->range_cyclic; 24992acf2c26SAneesh Kumar K.V if (wbc->range_cyclic) { 250022208dedSAneesh Kumar K.V index = mapping->writeback_index; 25012acf2c26SAneesh Kumar K.V if (index) 25022acf2c26SAneesh Kumar K.V cycled = 0; 25032acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 25042acf2c26SAneesh Kumar K.V wbc->range_end = LLONG_MAX; 25052acf2c26SAneesh Kumar K.V wbc->range_cyclic = 0; 25062acf2c26SAneesh Kumar K.V } else 250722208dedSAneesh Kumar K.V index = wbc->range_start >> PAGE_CACHE_SHIFT; 2508a1d6cc56SAneesh Kumar K.V 2509df22291fSAneesh Kumar K.V mpd.wbc = wbc; 2510df22291fSAneesh Kumar K.V mpd.inode = mapping->host; 2511df22291fSAneesh Kumar K.V 251222208dedSAneesh Kumar K.V /* 251322208dedSAneesh Kumar K.V * we don't want write_cache_pages to update 251422208dedSAneesh Kumar K.V * nr_to_write and writeback_index 251522208dedSAneesh Kumar K.V */ 251622208dedSAneesh Kumar K.V no_nrwrite_index_update = wbc->no_nrwrite_index_update; 251722208dedSAneesh Kumar K.V wbc->no_nrwrite_index_update = 1; 251822208dedSAneesh Kumar K.V pages_skipped = wbc->pages_skipped; 251922208dedSAneesh Kumar K.V 25202acf2c26SAneesh Kumar K.V retry: 252122208dedSAneesh Kumar K.V while (!ret && wbc->nr_to_write > 0) { 2522a1d6cc56SAneesh Kumar K.V 2523a1d6cc56SAneesh Kumar K.V /* 2524a1d6cc56SAneesh Kumar K.V * We insert one extent at a time. So we need 2525a1d6cc56SAneesh Kumar K.V * the credits needed for a single extent allocation.
2526a1d6cc56SAneesh Kumar K.V * Journalled mode is currently not supported 2527a1d6cc56SAneesh Kumar K.V * by delalloc. 2528a1d6cc56SAneesh Kumar K.V */ 2529a1d6cc56SAneesh Kumar K.V BUG_ON(ext4_should_journal_data(inode)); 2530525f4ed8SMingming Cao needed_blocks = ext4_da_writepages_trans_blocks(inode); 2531a1d6cc56SAneesh Kumar K.V 253261628a3fSMingming Cao /* start a new transaction */ 253361628a3fSMingming Cao handle = ext4_journal_start(inode, needed_blocks); 253461628a3fSMingming Cao if (IS_ERR(handle)) { 253561628a3fSMingming Cao ret = PTR_ERR(handle); 25362a21e37eSTheodore Ts'o printk(KERN_CRIT "%s: jbd2_start: " 2537a1d6cc56SAneesh Kumar K.V "%ld pages, ino %lu; err %d\n", __func__, 2538a1d6cc56SAneesh Kumar K.V wbc->nr_to_write, inode->i_ino, ret); 2539a1d6cc56SAneesh Kumar K.V dump_stack(); 254061628a3fSMingming Cao goto out_writepages; 254161628a3fSMingming Cao } 2542df22291fSAneesh Kumar K.V mpd.get_block = ext4_da_get_block_write; 2543df22291fSAneesh Kumar K.V ret = mpage_da_writepages(mapping, wbc, &mpd); 2544df22291fSAneesh Kumar K.V 254561628a3fSMingming Cao ext4_journal_stop(handle); 2546df22291fSAneesh Kumar K.V 254722208dedSAneesh Kumar K.V if (mpd.retval == -ENOSPC) { 254822208dedSAneesh Kumar K.V /* commit the transaction which would 254922208dedSAneesh Kumar K.V * free blocks released in the transaction 255022208dedSAneesh Kumar K.V * and try again 255122208dedSAneesh Kumar K.V */ 2552df22291fSAneesh Kumar K.V jbd2_journal_force_commit_nested(sbi->s_journal); 255322208dedSAneesh Kumar K.V wbc->pages_skipped = pages_skipped; 255422208dedSAneesh Kumar K.V ret = 0; 255522208dedSAneesh Kumar K.V } else if (ret == MPAGE_DA_EXTENT_TAIL) { 2556a1d6cc56SAneesh Kumar K.V /* 2557a1d6cc56SAneesh Kumar K.V * got one extent; now try with 2558a1d6cc56SAneesh Kumar K.V * the rest of the pages 2559a1d6cc56SAneesh Kumar K.V */ 256022208dedSAneesh Kumar K.V pages_written += mpd.pages_written; 256122208dedSAneesh Kumar K.V wbc->pages_skipped = pages_skipped; 2562a1d6cc56SAneesh Kumar K.V ret = 0; 25632acf2c26SAneesh Kumar K.V io_done = 1; 256422208dedSAneesh Kumar K.V } else if (wbc->nr_to_write) 256561628a3fSMingming Cao /* 256661628a3fSMingming Cao * There is no more writeout needed, 256761628a3fSMingming Cao * or we requested a nonblocking writeout 256861628a3fSMingming Cao * and found the device congested 256961628a3fSMingming Cao */ 257061628a3fSMingming Cao break; 257161628a3fSMingming Cao } 25722acf2c26SAneesh Kumar K.V if (!io_done && !cycled) { 25732acf2c26SAneesh Kumar K.V cycled = 1; 25742acf2c26SAneesh Kumar K.V index = 0; 25752acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 25762acf2c26SAneesh Kumar K.V wbc->range_end = mapping->writeback_index - 1; 25772acf2c26SAneesh Kumar K.V goto retry; 25782acf2c26SAneesh Kumar K.V } 257922208dedSAneesh Kumar K.V if (pages_skipped != wbc->pages_skipped) 258022208dedSAneesh Kumar K.V printk(KERN_EMERG "This should not happen leaving %s " 258122208dedSAneesh Kumar K.V "with nr_to_write = %ld ret = %d\n", 258222208dedSAneesh Kumar K.V __func__, wbc->nr_to_write, ret); 258361628a3fSMingming Cao 258422208dedSAneesh Kumar K.V /* Update index */ 258522208dedSAneesh Kumar K.V index += pages_written; 25862acf2c26SAneesh Kumar K.V wbc->range_cyclic = range_cyclic; 258722208dedSAneesh Kumar K.V if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 258822208dedSAneesh Kumar K.V /* 258922208dedSAneesh Kumar K.V * set the writeback_index so that range_cyclic 259022208dedSAneesh Kumar K.V * mode will write it back
later 259122208dedSAneesh Kumar K.V */ 259222208dedSAneesh Kumar K.V mapping->writeback_index = index; 2593a1d6cc56SAneesh Kumar K.V 259461628a3fSMingming Cao out_writepages: 259522208dedSAneesh Kumar K.V if (!no_nrwrite_index_update) 259622208dedSAneesh Kumar K.V wbc->no_nrwrite_index_update = 0; 259722208dedSAneesh Kumar K.V wbc->nr_to_write -= nr_to_writebump; 2598ba80b101STheodore Ts'o trace_mark(ext4_da_writepage_result, 2599ba80b101STheodore Ts'o "dev %s ino %lu ret %d pages_written %d " 2600ba80b101STheodore Ts'o "pages_skipped %ld congestion %d " 2601ba80b101STheodore Ts'o "more_io %d no_nrwrite_index_update %d", 2602ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, ret, 2603ba80b101STheodore Ts'o pages_written, wbc->pages_skipped, 2604ba80b101STheodore Ts'o wbc->encountered_congestion, wbc->more_io, 2605ba80b101STheodore Ts'o wbc->no_nrwrite_index_update); 260661628a3fSMingming Cao return ret; 260764769240SAlex Tomas } 260864769240SAlex Tomas 260979f0be8dSAneesh Kumar K.V #define FALL_BACK_TO_NONDELALLOC 1 261079f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb) 261179f0be8dSAneesh Kumar K.V { 261279f0be8dSAneesh Kumar K.V s64 free_blocks, dirty_blocks; 261379f0be8dSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 261479f0be8dSAneesh Kumar K.V 261579f0be8dSAneesh Kumar K.V /* 261679f0be8dSAneesh Kumar K.V * switch to non delalloc mode if we are running low 261779f0be8dSAneesh Kumar K.V * on free blocks. The free block accounting via percpu 2618179f7ebfSEric Dumazet * counters can get slightly wrong with percpu_counter_batch getting 261979f0be8dSAneesh Kumar K.V * accumulated on each CPU without updating global counters. 262079f0be8dSAneesh Kumar K.V * Delalloc needs accurate free block accounting. So switch 262179f0be8dSAneesh Kumar K.V * to non delalloc when we are near the error range.
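 * For example (illustrative numbers): with free_blocks = 120 and
 * dirty_blocks = 100, the check below sees 2 * 120 < 3 * 100, i.e. free
 * blocks are under 150% of dirty blocks, so we fall back to nondelalloc.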
262279f0be8dSAneesh Kumar K.V */ 262379f0be8dSAneesh Kumar K.V free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); 262479f0be8dSAneesh Kumar K.V dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter); 262579f0be8dSAneesh Kumar K.V if (2 * free_blocks < 3 * dirty_blocks || 262679f0be8dSAneesh Kumar K.V free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) { 262779f0be8dSAneesh Kumar K.V /* 262879f0be8dSAneesh Kumar K.V * free block count is less than 150% of dirty blocks, 262979f0be8dSAneesh Kumar K.V * or free blocks are less than the watermark 263079f0be8dSAneesh Kumar K.V */ 263179f0be8dSAneesh Kumar K.V return 1; 263279f0be8dSAneesh Kumar K.V } 263379f0be8dSAneesh Kumar K.V return 0; 263479f0be8dSAneesh Kumar K.V } 263579f0be8dSAneesh Kumar K.V 263664769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 263764769240SAlex Tomas loff_t pos, unsigned len, unsigned flags, 263864769240SAlex Tomas struct page **pagep, void **fsdata) 263964769240SAlex Tomas { 2640d2a17637SMingming Cao int ret, retries = 0; 264164769240SAlex Tomas struct page *page; 264264769240SAlex Tomas pgoff_t index; 264364769240SAlex Tomas unsigned from, to; 264464769240SAlex Tomas struct inode *inode = mapping->host; 264564769240SAlex Tomas handle_t *handle; 264664769240SAlex Tomas 264764769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT; 264864769240SAlex Tomas from = pos & (PAGE_CACHE_SIZE - 1); 264964769240SAlex Tomas to = from + len; 265079f0be8dSAneesh Kumar K.V 265179f0be8dSAneesh Kumar K.V if (ext4_nonda_switch(inode->i_sb)) { 265279f0be8dSAneesh Kumar K.V *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 265379f0be8dSAneesh Kumar K.V return ext4_write_begin(file, mapping, pos, 265479f0be8dSAneesh Kumar K.V len, flags, pagep, fsdata); 265579f0be8dSAneesh Kumar K.V } 265679f0be8dSAneesh Kumar K.V *fsdata = (void *)0; 2657ba80b101STheodore Ts'o 2658ba80b101STheodore Ts'o trace_mark(ext4_da_write_begin, 2659ba80b101STheodore Ts'o "dev %s ino %lu pos %llu len %u flags %u", 2660ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, 2661ba80b101STheodore Ts'o (unsigned long long) pos, len, flags); 2662d2a17637SMingming Cao retry: 266364769240SAlex Tomas /* 266464769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update 266564769240SAlex Tomas * if there is delayed block allocation. But we still need 266664769240SAlex Tomas * to journal the i_disksize update if we write to the end 266764769240SAlex Tomas * of a file which has an already mapped buffer.
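 * That is why the transaction started below only needs a single credit.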
266864769240SAlex Tomas */ 266964769240SAlex Tomas handle = ext4_journal_start(inode, 1); 267064769240SAlex Tomas if (IS_ERR(handle)) { 267164769240SAlex Tomas ret = PTR_ERR(handle); 267264769240SAlex Tomas goto out; 267364769240SAlex Tomas } 2674*ebd3610bSJan Kara /* We cannot recurse into the filesystem as the transaction is already 2675*ebd3610bSJan Kara * started */ 2676*ebd3610bSJan Kara flags |= AOP_FLAG_NOFS; 267764769240SAlex Tomas 267854566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 2679d5a0d4f7SEric Sandeen if (!page) { 2680d5a0d4f7SEric Sandeen ext4_journal_stop(handle); 2681d5a0d4f7SEric Sandeen ret = -ENOMEM; 2682d5a0d4f7SEric Sandeen goto out; 2683d5a0d4f7SEric Sandeen } 268464769240SAlex Tomas *pagep = page; 268564769240SAlex Tomas 268664769240SAlex Tomas ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 268764769240SAlex Tomas ext4_da_get_block_prep); 268864769240SAlex Tomas if (ret < 0) { 268964769240SAlex Tomas unlock_page(page); 269064769240SAlex Tomas ext4_journal_stop(handle); 269164769240SAlex Tomas page_cache_release(page); 2692ae4d5372SAneesh Kumar K.V /* 2693ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks 2694ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need 2695ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex. 2696ae4d5372SAneesh Kumar K.V */ 2697ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size) 2698ae4d5372SAneesh Kumar K.V vmtruncate(inode, inode->i_size); 269964769240SAlex Tomas } 270064769240SAlex Tomas 2701d2a17637SMingming Cao if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 2702d2a17637SMingming Cao goto retry; 270364769240SAlex Tomas out: 270464769240SAlex Tomas return ret; 270564769240SAlex Tomas } 270664769240SAlex Tomas 2707632eaeabSMingming Cao /* 2708632eaeabSMingming Cao * Check if we should update i_disksize 2709632eaeabSMingming Cao * when write to the end of file but not require block allocation 2710632eaeabSMingming Cao */ 2711632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page, 2712632eaeabSMingming Cao unsigned long offset) 2713632eaeabSMingming Cao { 2714632eaeabSMingming Cao struct buffer_head *bh; 2715632eaeabSMingming Cao struct inode *inode = page->mapping->host; 2716632eaeabSMingming Cao unsigned int idx; 2717632eaeabSMingming Cao int i; 2718632eaeabSMingming Cao 2719632eaeabSMingming Cao bh = page_buffers(page); 2720632eaeabSMingming Cao idx = offset >> inode->i_blkbits; 2721632eaeabSMingming Cao 2722632eaeabSMingming Cao for (i = 0; i < idx; i++) 2723632eaeabSMingming Cao bh = bh->b_this_page; 2724632eaeabSMingming Cao 2725632eaeabSMingming Cao if (!buffer_mapped(bh) || (buffer_delay(bh))) 2726632eaeabSMingming Cao return 0; 2727632eaeabSMingming Cao return 1; 2728632eaeabSMingming Cao } 2729632eaeabSMingming Cao 273064769240SAlex Tomas static int ext4_da_write_end(struct file *file, 273164769240SAlex Tomas struct address_space *mapping, 273264769240SAlex Tomas loff_t pos, unsigned len, unsigned copied, 273364769240SAlex Tomas struct page *page, void *fsdata) 273464769240SAlex Tomas { 273564769240SAlex Tomas struct inode *inode = mapping->host; 273664769240SAlex Tomas int ret = 0, ret2; 273764769240SAlex Tomas handle_t *handle = ext4_journal_current_handle(); 273864769240SAlex Tomas loff_t new_i_size; 2739632eaeabSMingming Cao unsigned long start, end; 274079f0be8dSAneesh Kumar K.V int write_mode = (int)(unsigned long)fsdata; 274179f0be8dSAneesh Kumar 
K.V 274279f0be8dSAneesh Kumar K.V if (write_mode == FALL_BACK_TO_NONDELALLOC) { 274379f0be8dSAneesh Kumar K.V if (ext4_should_order_data(inode)) { 274479f0be8dSAneesh Kumar K.V return ext4_ordered_write_end(file, mapping, pos, 274579f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 274679f0be8dSAneesh Kumar K.V } else if (ext4_should_writeback_data(inode)) { 274779f0be8dSAneesh Kumar K.V return ext4_writeback_write_end(file, mapping, pos, 274879f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 274979f0be8dSAneesh Kumar K.V } else { 275079f0be8dSAneesh Kumar K.V BUG(); 275179f0be8dSAneesh Kumar K.V } 275279f0be8dSAneesh Kumar K.V } 2753632eaeabSMingming Cao 2754ba80b101STheodore Ts'o trace_mark(ext4_da_write_end, 2755ba80b101STheodore Ts'o "dev %s ino %lu pos %llu len %u copied %u", 2756ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, 2757ba80b101STheodore Ts'o (unsigned long long) pos, len, copied); 2758632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1); 2759632eaeabSMingming Cao end = start + copied - 1; 276064769240SAlex Tomas 276164769240SAlex Tomas /* 276264769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size 276364769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty 276464769240SAlex Tomas * into that. 276564769240SAlex Tomas */ 276664769240SAlex Tomas 276764769240SAlex Tomas new_i_size = pos + copied; 2768632eaeabSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) { 2769632eaeabSMingming Cao if (ext4_da_should_update_i_disksize(page, end)) { 2770632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem); 2771632eaeabSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) { 277264769240SAlex Tomas /* 2773632eaeabSMingming Cao * Updating i_disksize when extending the file 2774632eaeabSMingming Cao * without needing block allocation 277564769240SAlex Tomas */ 277664769240SAlex Tomas if (ext4_should_order_data(inode)) 2777632eaeabSMingming Cao ret = ext4_jbd2_file_inode(handle, 2778632eaeabSMingming Cao inode); 277964769240SAlex Tomas 278064769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size; 278164769240SAlex Tomas } 2782632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem); 2783cf17fea6SAneesh Kumar K.V /* We need to mark the inode dirty even if 2784cf17fea6SAneesh Kumar K.V * new_i_size is less than inode->i_size 2785cf17fea6SAneesh Kumar K.V * but greater than i_disksize. (hint: delalloc) 2786cf17fea6SAneesh Kumar K.V */ 2787cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 2788632eaeabSMingming Cao } 2789632eaeabSMingming Cao } 279064769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied, 279164769240SAlex Tomas page, fsdata); 279264769240SAlex Tomas copied = ret2; 279364769240SAlex Tomas if (ret2 < 0) 279464769240SAlex Tomas ret = ret2; 279564769240SAlex Tomas ret2 = ext4_journal_stop(handle); 279664769240SAlex Tomas if (!ret) 279764769240SAlex Tomas ret = ret2; 279864769240SAlex Tomas 279964769240SAlex Tomas return ret ?
ret : copied; 280064769240SAlex Tomas } 280164769240SAlex Tomas 280264769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 280364769240SAlex Tomas { 280464769240SAlex Tomas /* 280564769240SAlex Tomas * Drop reserved blocks 280664769240SAlex Tomas */ 280764769240SAlex Tomas BUG_ON(!PageLocked(page)); 280864769240SAlex Tomas if (!page_has_buffers(page)) 280964769240SAlex Tomas goto out; 281064769240SAlex Tomas 2811d2a17637SMingming Cao ext4_da_page_release_reservation(page, offset); 281264769240SAlex Tomas 281364769240SAlex Tomas out: 281464769240SAlex Tomas ext4_invalidatepage(page, offset); 281564769240SAlex Tomas 281664769240SAlex Tomas return; 281764769240SAlex Tomas } 281864769240SAlex Tomas 281964769240SAlex Tomas 282064769240SAlex Tomas /* 2821ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by 2822ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data. 2823ac27a0ecSDave Kleikamp * 2824ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the 2825617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling 2826ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the 2827ac27a0ecSDave Kleikamp * data getting swapped to that swapfile suddenly gets overwritten by 2828ac27a0ecSDave Kleikamp * the original zeros written out previously to the journal and 2829ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache. 2830ac27a0ecSDave Kleikamp * 2831ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file, 2832ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache. 2833ac27a0ecSDave Kleikamp */ 2834617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2835ac27a0ecSDave Kleikamp { 2836ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 2837ac27a0ecSDave Kleikamp journal_t *journal; 2838ac27a0ecSDave Kleikamp int err; 2839ac27a0ecSDave Kleikamp 284064769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 284164769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) { 284264769240SAlex Tomas /* 284364769240SAlex Tomas * With delalloc we want to sync the file 284464769240SAlex Tomas * so that we can make sure we allocate 284564769240SAlex Tomas * blocks for the file 284664769240SAlex Tomas */ 284764769240SAlex Tomas filemap_write_and_wait(mapping); 284864769240SAlex Tomas } 284964769240SAlex Tomas 28500390131bSFrank Mayhar if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { 2851ac27a0ecSDave Kleikamp /* 2852ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of 2853ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare: 2854ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file 2855ac27a0ecSDave Kleikamp * do we expect this to happen. 2856ac27a0ecSDave Kleikamp * 2857ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not 2858ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be 2859ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at 2860ac27a0ecSDave Kleikamp * will.) 2861ac27a0ecSDave Kleikamp * 2862617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than 2863ac27a0ecSDave Kleikamp * regular files.
If somebody wants to bmap a directory 2864ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer 2865ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve 2866ac27a0ecSDave Kleikamp * everything they get. 2867ac27a0ecSDave Kleikamp */ 2868ac27a0ecSDave Kleikamp 2869617ba13bSMingming Cao EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; 2870617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2871dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2872dab291afSMingming Cao err = jbd2_journal_flush(journal); 2873dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2874ac27a0ecSDave Kleikamp 2875ac27a0ecSDave Kleikamp if (err) 2876ac27a0ecSDave Kleikamp return 0; 2877ac27a0ecSDave Kleikamp } 2878ac27a0ecSDave Kleikamp 2879617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block); 2880ac27a0ecSDave Kleikamp } 2881ac27a0ecSDave Kleikamp 2882ac27a0ecSDave Kleikamp static int bget_one(handle_t *handle, struct buffer_head *bh) 2883ac27a0ecSDave Kleikamp { 2884ac27a0ecSDave Kleikamp get_bh(bh); 2885ac27a0ecSDave Kleikamp return 0; 2886ac27a0ecSDave Kleikamp } 2887ac27a0ecSDave Kleikamp 2888ac27a0ecSDave Kleikamp static int bput_one(handle_t *handle, struct buffer_head *bh) 2889ac27a0ecSDave Kleikamp { 2890ac27a0ecSDave Kleikamp put_bh(bh); 2891ac27a0ecSDave Kleikamp return 0; 2892ac27a0ecSDave Kleikamp } 2893ac27a0ecSDave Kleikamp 2894ac27a0ecSDave Kleikamp /* 2895678aaf48SJan Kara * Note that we don't need to start a transaction unless we're journaling data 2896678aaf48SJan Kara * because we should have holes filled from ext4_page_mkwrite(). We don't even 2897678aaf48SJan Kara * need to file the inode to the transaction's list in ordered mode because if 2898678aaf48SJan Kara * we are writing back data added by write(), the inode is already there and if 2899678aaf48SJan Kara * we are writing back data modified via mmap(), no one guarantees in which 2900678aaf48SJan Kara * transaction the data will hit the disk. In case we are journaling data, we 2901678aaf48SJan Kara * cannot start a transaction directly because transaction start ranks above page 2902678aaf48SJan Kara * lock so we have to do some magic. 2903ac27a0ecSDave Kleikamp * 2904678aaf48SJan Kara * In all journaling modes block_write_full_page() will start the I/O. 2905ac27a0ecSDave Kleikamp * 2906ac27a0ecSDave Kleikamp * Problem: 2907ac27a0ecSDave Kleikamp * 2908617ba13bSMingming Cao * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 2909617ba13bSMingming Cao * ext4_writepage() 2910ac27a0ecSDave Kleikamp * 2911ac27a0ecSDave Kleikamp * Similar for: 2912ac27a0ecSDave Kleikamp * 2913617ba13bSMingming Cao * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ... 2914ac27a0ecSDave Kleikamp * 2915617ba13bSMingming Cao * Same applies to ext4_get_block(). We will deadlock on various things like 29160e855ac8SAneesh Kumar K.V * lock_journal and i_data_sem 2917ac27a0ecSDave Kleikamp * 2918ac27a0ecSDave Kleikamp * Setting PF_MEMALLOC here doesn't work - too many internal memory 2919ac27a0ecSDave Kleikamp * allocations fail. 2920ac27a0ecSDave Kleikamp * 2921ac27a0ecSDave Kleikamp * 16May01: If we're reentered then journal_current_handle() will be 2922ac27a0ecSDave Kleikamp * non-zero. We simply *return*. 2923ac27a0ecSDave Kleikamp * 2924ac27a0ecSDave Kleikamp * 1 July 2001: @@@ FIXME: 2925ac27a0ecSDave Kleikamp * In journalled data mode, a data buffer may be metadata against the 2926ac27a0ecSDave Kleikamp * current transaction.
But the same file is part of a shared mapping 2927ac27a0ecSDave Kleikamp * and someone does a writepage() on it. 2928ac27a0ecSDave Kleikamp * 2929ac27a0ecSDave Kleikamp * We will move the buffer onto the async_data list, but *after* it has 2930ac27a0ecSDave Kleikamp * been dirtied. So there's a small window where we have dirty data on 2931ac27a0ecSDave Kleikamp * BJ_Metadata. 2932ac27a0ecSDave Kleikamp * 2933ac27a0ecSDave Kleikamp * Note that this only applies to the last partial page in the file. The 2934ac27a0ecSDave Kleikamp * bit which block_write_full_page() uses prepare/commit for. (That's 2935ac27a0ecSDave Kleikamp * broken code anyway: it's wrong for msync()). 2936ac27a0ecSDave Kleikamp * 2937ac27a0ecSDave Kleikamp * It's a rare case: affects the final partial page, for journalled data 2938ac27a0ecSDave Kleikamp * where the file is subject to both write() and writepage() in the same 2939ac27a0ecSDave Kleikamp * transaction. To fix it we'll need a custom block_write_full_page(). 2940ac27a0ecSDave Kleikamp * We'll probably need that anyway for journalling writepage() output. 2941ac27a0ecSDave Kleikamp * 2942ac27a0ecSDave Kleikamp * We don't honour synchronous mounts for writepage(). That would be 2943ac27a0ecSDave Kleikamp * disastrous. Any write() or metadata operation will sync the fs for 2944ac27a0ecSDave Kleikamp * us. 2945ac27a0ecSDave Kleikamp * 2946ac27a0ecSDave Kleikamp */ 2947678aaf48SJan Kara static int __ext4_normal_writepage(struct page *page, 2948cf108bcaSJan Kara struct writeback_control *wbc) 2949cf108bcaSJan Kara { 2950cf108bcaSJan Kara struct inode *inode = page->mapping->host; 2951cf108bcaSJan Kara 2952cf108bcaSJan Kara if (test_opt(inode->i_sb, NOBH)) 2953f0e6c985SAneesh Kumar K.V return nobh_writepage(page, 2954f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write, wbc); 2955cf108bcaSJan Kara else 2956f0e6c985SAneesh Kumar K.V return block_write_full_page(page, 2957f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write, 2958f0e6c985SAneesh Kumar K.V wbc); 2959cf108bcaSJan Kara } 2960cf108bcaSJan Kara 2961678aaf48SJan Kara static int ext4_normal_writepage(struct page *page, 2962ac27a0ecSDave Kleikamp struct writeback_control *wbc) 2963ac27a0ecSDave Kleikamp { 2964ac27a0ecSDave Kleikamp struct inode *inode = page->mapping->host; 2965cf108bcaSJan Kara loff_t size = i_size_read(inode); 2966cf108bcaSJan Kara loff_t len; 2967cf108bcaSJan Kara 2968ba80b101STheodore Ts'o trace_mark(ext4_normal_writepage, 2969ba80b101STheodore Ts'o "dev %s ino %lu page_index %lu", 2970ba80b101STheodore Ts'o inode->i_sb->s_id, inode->i_ino, page->index); 2971cf108bcaSJan Kara J_ASSERT(PageLocked(page)); 2972cf108bcaSJan Kara if (page->index == size >> PAGE_CACHE_SHIFT) 2973cf108bcaSJan Kara len = size & ~PAGE_CACHE_MASK; 2974cf108bcaSJan Kara else 2975cf108bcaSJan Kara len = PAGE_CACHE_SIZE; 2976f0e6c985SAneesh Kumar K.V 2977f0e6c985SAneesh Kumar K.V if (page_has_buffers(page)) { 2978f0e6c985SAneesh Kumar K.V /* if page has buffers it should all be mapped 2979f0e6c985SAneesh Kumar K.V * and allocated. If there are no buffers attached 2980f0e6c985SAneesh Kumar K.V * to the page we know the page is dirty but it lost 2981f0e6c985SAneesh Kumar K.V * buffers. That means that at some moment in time 2982f0e6c985SAneesh Kumar K.V * after write_begin() / write_end() has been called 2983f0e6c985SAneesh Kumar K.V * all buffers have been clean and thus they must have been 2984f0e6c985SAneesh Kumar K.V * written at least once.
So they are all mapped and we can 2985f0e6c985SAneesh Kumar K.V * happily proceed with mapping them and writing the page. 2986f0e6c985SAneesh Kumar K.V */ 2987cf108bcaSJan Kara BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 2988cf108bcaSJan Kara ext4_bh_unmapped_or_delay)); 2989f0e6c985SAneesh Kumar K.V } 2990cf108bcaSJan Kara 2991cf108bcaSJan Kara if (!ext4_journal_current_handle()) 2992678aaf48SJan Kara return __ext4_normal_writepage(page, wbc); 2993cf108bcaSJan Kara 2994cf108bcaSJan Kara redirty_page_for_writepage(wbc, page); 2995cf108bcaSJan Kara unlock_page(page); 2996cf108bcaSJan Kara return 0; 2997cf108bcaSJan Kara } 2998cf108bcaSJan Kara 2999cf108bcaSJan Kara static int __ext4_journalled_writepage(struct page *page, 3000cf108bcaSJan Kara struct writeback_control *wbc) 3001cf108bcaSJan Kara { 3002cf108bcaSJan Kara struct address_space *mapping = page->mapping; 3003cf108bcaSJan Kara struct inode *inode = mapping->host; 3004cf108bcaSJan Kara struct buffer_head *page_bufs; 3005ac27a0ecSDave Kleikamp handle_t *handle = NULL; 3006ac27a0ecSDave Kleikamp int ret = 0; 3007ac27a0ecSDave Kleikamp int err; 3008ac27a0ecSDave Kleikamp 3009f0e6c985SAneesh Kumar K.V ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, 3010f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write); 3011cf108bcaSJan Kara if (ret != 0) 3012cf108bcaSJan Kara goto out_unlock; 3013cf108bcaSJan Kara 3014cf108bcaSJan Kara page_bufs = page_buffers(page); 3015cf108bcaSJan Kara walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL, 3016cf108bcaSJan Kara bget_one); 3017cf108bcaSJan Kara /* As soon as we unlock the page, it can go away, but we have 3018cf108bcaSJan Kara * references to buffers so we are safe */ 3019cf108bcaSJan Kara unlock_page(page); 3020ac27a0ecSDave Kleikamp 3021617ba13bSMingming Cao handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 3022ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 3023ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 3024cf108bcaSJan Kara goto out; 3025ac27a0ecSDave Kleikamp } 3026ac27a0ecSDave Kleikamp 3027cf108bcaSJan Kara ret = walk_page_buffers(handle, page_bufs, 0, 3028cf108bcaSJan Kara PAGE_CACHE_SIZE, NULL, do_journal_get_write_access); 3029ac27a0ecSDave Kleikamp 3030cf108bcaSJan Kara err = walk_page_buffers(handle, page_bufs, 0, 3031cf108bcaSJan Kara PAGE_CACHE_SIZE, NULL, write_end_fn); 3032cf108bcaSJan Kara if (ret == 0) 3033cf108bcaSJan Kara ret = err; 3034617ba13bSMingming Cao err = ext4_journal_stop(handle); 3035ac27a0ecSDave Kleikamp if (!ret) 3036ac27a0ecSDave Kleikamp ret = err; 3037ac27a0ecSDave Kleikamp 3038cf108bcaSJan Kara walk_page_buffers(handle, page_bufs, 0, 3039cf108bcaSJan Kara PAGE_CACHE_SIZE, NULL, bput_one); 3040cf108bcaSJan Kara EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 3041cf108bcaSJan Kara goto out; 3042cf108bcaSJan Kara 3043cf108bcaSJan Kara out_unlock: 3044ac27a0ecSDave Kleikamp unlock_page(page); 3045cf108bcaSJan Kara out: 3046ac27a0ecSDave Kleikamp return ret; 3047ac27a0ecSDave Kleikamp } 3048ac27a0ecSDave Kleikamp 3049617ba13bSMingming Cao static int ext4_journalled_writepage(struct page *page, 3050ac27a0ecSDave Kleikamp struct writeback_control *wbc) 3051ac27a0ecSDave Kleikamp { 3052ac27a0ecSDave Kleikamp struct inode *inode = page->mapping->host; 3053cf108bcaSJan Kara loff_t size = i_size_read(inode); 3054cf108bcaSJan Kara loff_t len; 3055cf108bcaSJan Kara 3056ba80b101STheodore Ts'o trace_mark(ext4_journalled_writepage, 3057ba80b101STheodore Ts'o "dev %s ino %lu page_index %lu", 3058ba80b101STheodore Ts'o 
inode->i_sb->s_id, inode->i_ino, page->index); 3059cf108bcaSJan Kara J_ASSERT(PageLocked(page)); 3060cf108bcaSJan Kara if (page->index == size >> PAGE_CACHE_SHIFT) 3061cf108bcaSJan Kara len = size & ~PAGE_CACHE_MASK; 3062cf108bcaSJan Kara else 3063cf108bcaSJan Kara len = PAGE_CACHE_SIZE; 3064f0e6c985SAneesh Kumar K.V 3065f0e6c985SAneesh Kumar K.V if (page_has_buffers(page)) { 3066f0e6c985SAneesh Kumar K.V /* if page has buffers it should all be mapped 3067f0e6c985SAneesh Kumar K.V * and allocated. If there are not buffers attached 3068f0e6c985SAneesh Kumar K.V * to the page we know the page is dirty but it lost 3069f0e6c985SAneesh Kumar K.V * buffers. That means that at some moment in time 3070f0e6c985SAneesh Kumar K.V * after write_begin() / write_end() has been called 3071f0e6c985SAneesh Kumar K.V * all buffers have been clean and thus they must have been 3072f0e6c985SAneesh Kumar K.V * written at least once. So they are all mapped and we can 3073f0e6c985SAneesh Kumar K.V * happily proceed with mapping them and writing the page. 3074f0e6c985SAneesh Kumar K.V */ 3075cf108bcaSJan Kara BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 3076cf108bcaSJan Kara ext4_bh_unmapped_or_delay)); 3077f0e6c985SAneesh Kumar K.V } 3078ac27a0ecSDave Kleikamp 3079617ba13bSMingming Cao if (ext4_journal_current_handle()) 3080ac27a0ecSDave Kleikamp goto no_write; 3081ac27a0ecSDave Kleikamp 3082cf108bcaSJan Kara if (PageChecked(page)) { 3083ac27a0ecSDave Kleikamp /* 3084ac27a0ecSDave Kleikamp * It's mmapped pagecache. Add buffers and journal it. There 3085ac27a0ecSDave Kleikamp * doesn't seem much point in redirtying the page here. 3086ac27a0ecSDave Kleikamp */ 3087ac27a0ecSDave Kleikamp ClearPageChecked(page); 3088cf108bcaSJan Kara return __ext4_journalled_writepage(page, wbc); 3089ac27a0ecSDave Kleikamp } else { 3090ac27a0ecSDave Kleikamp /* 3091ac27a0ecSDave Kleikamp * It may be a page full of checkpoint-mode buffers. We don't 3092ac27a0ecSDave Kleikamp * really know unless we go poke around in the buffer_heads. 3093ac27a0ecSDave Kleikamp * But block_write_full_page will do the right thing. 
3094ac27a0ecSDave Kleikamp */ 3095f0e6c985SAneesh Kumar K.V return block_write_full_page(page, 3096f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write, 3097f0e6c985SAneesh Kumar K.V wbc); 3098ac27a0ecSDave Kleikamp } 3099ac27a0ecSDave Kleikamp no_write: 3100ac27a0ecSDave Kleikamp redirty_page_for_writepage(wbc, page); 3101ac27a0ecSDave Kleikamp unlock_page(page); 3102cf108bcaSJan Kara return 0; 3103ac27a0ecSDave Kleikamp } 3104ac27a0ecSDave Kleikamp 3105617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page) 3106ac27a0ecSDave Kleikamp { 3107617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block); 3108ac27a0ecSDave Kleikamp } 3109ac27a0ecSDave Kleikamp 3110ac27a0ecSDave Kleikamp static int 3111617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping, 3112ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages) 3113ac27a0ecSDave Kleikamp { 3114617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 3115ac27a0ecSDave Kleikamp } 3116ac27a0ecSDave Kleikamp 3117617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset) 3118ac27a0ecSDave Kleikamp { 3119617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3120ac27a0ecSDave Kleikamp 3121ac27a0ecSDave Kleikamp /* 3122ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying 3123ac27a0ecSDave Kleikamp */ 3124ac27a0ecSDave Kleikamp if (offset == 0) 3125ac27a0ecSDave Kleikamp ClearPageChecked(page); 3126ac27a0ecSDave Kleikamp 31270390131bSFrank Mayhar if (journal) 3128dab291afSMingming Cao jbd2_journal_invalidatepage(journal, page, offset); 31290390131bSFrank Mayhar else 31300390131bSFrank Mayhar block_invalidatepage(page, offset); 3131ac27a0ecSDave Kleikamp } 3132ac27a0ecSDave Kleikamp 3133617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait) 3134ac27a0ecSDave Kleikamp { 3135617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3136ac27a0ecSDave Kleikamp 3137ac27a0ecSDave Kleikamp WARN_ON(PageChecked(page)); 3138ac27a0ecSDave Kleikamp if (!page_has_buffers(page)) 3139ac27a0ecSDave Kleikamp return 0; 31400390131bSFrank Mayhar if (journal) 3141dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait); 31420390131bSFrank Mayhar else 31430390131bSFrank Mayhar return try_to_free_buffers(page); 3144ac27a0ecSDave Kleikamp } 3145ac27a0ecSDave Kleikamp 3146ac27a0ecSDave Kleikamp /* 3147ac27a0ecSDave Kleikamp * If the O_DIRECT write will extend the file then add this inode to the 3148ac27a0ecSDave Kleikamp * orphan list. So recovery will truncate it back to the original size 3149ac27a0ecSDave Kleikamp * if the machine crashes during the write. 3150ac27a0ecSDave Kleikamp * 3151ac27a0ecSDave Kleikamp * If the O_DIRECT write is instantiating holes inside i_size and the machine 31527fb5409dSJan Kara * crashes then stale disk data _may_ be exposed inside the file. But current 31537fb5409dSJan Kara * VFS code falls back into the buffered path in that case so we are safe.
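 * For example, if an O_DIRECT write extends a file from 4 KB to 1 MB and the
 * machine crashes midway, recovery finds the inode on the orphan list and
 * truncates it back to 4 KB.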
3154ac27a0ecSDave Kleikamp */ 3155617ba13bSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 3156ac27a0ecSDave Kleikamp const struct iovec *iov, loff_t offset, 3157ac27a0ecSDave Kleikamp unsigned long nr_segs) 3158ac27a0ecSDave Kleikamp { 3159ac27a0ecSDave Kleikamp struct file *file = iocb->ki_filp; 3160ac27a0ecSDave Kleikamp struct inode *inode = file->f_mapping->host; 3161617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 31627fb5409dSJan Kara handle_t *handle; 3163ac27a0ecSDave Kleikamp ssize_t ret; 3164ac27a0ecSDave Kleikamp int orphan = 0; 3165ac27a0ecSDave Kleikamp size_t count = iov_length(iov, nr_segs); 3166ac27a0ecSDave Kleikamp 3167ac27a0ecSDave Kleikamp if (rw == WRITE) { 3168ac27a0ecSDave Kleikamp loff_t final_size = offset + count; 3169ac27a0ecSDave Kleikamp 31707fb5409dSJan Kara if (final_size > inode->i_size) { 31717fb5409dSJan Kara /* Credits for sb + inode write */ 31727fb5409dSJan Kara handle = ext4_journal_start(inode, 2); 3173ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 3174ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 3175ac27a0ecSDave Kleikamp goto out; 3176ac27a0ecSDave Kleikamp } 3177617ba13bSMingming Cao ret = ext4_orphan_add(handle, inode); 31787fb5409dSJan Kara if (ret) { 31797fb5409dSJan Kara ext4_journal_stop(handle); 31807fb5409dSJan Kara goto out; 31817fb5409dSJan Kara } 3182ac27a0ecSDave Kleikamp orphan = 1; 3183ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 31847fb5409dSJan Kara ext4_journal_stop(handle); 3185ac27a0ecSDave Kleikamp } 3186ac27a0ecSDave Kleikamp } 3187ac27a0ecSDave Kleikamp 3188ac27a0ecSDave Kleikamp ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 3189ac27a0ecSDave Kleikamp offset, nr_segs, 3190617ba13bSMingming Cao ext4_get_block, NULL); 3191ac27a0ecSDave Kleikamp 31927fb5409dSJan Kara if (orphan) { 3193ac27a0ecSDave Kleikamp int err; 3194ac27a0ecSDave Kleikamp 31957fb5409dSJan Kara /* Credits for sb + inode write */ 31967fb5409dSJan Kara handle = ext4_journal_start(inode, 2); 31977fb5409dSJan Kara if (IS_ERR(handle)) { 31987fb5409dSJan Kara /* This is really bad luck. We've written the data 31997fb5409dSJan Kara * but cannot extend i_size. Bail out and pretend 32007fb5409dSJan Kara * the write failed... */ 32017fb5409dSJan Kara ret = PTR_ERR(handle); 32027fb5409dSJan Kara goto out; 32037fb5409dSJan Kara } 32047fb5409dSJan Kara if (inode->i_nlink) 3205617ba13bSMingming Cao ext4_orphan_del(handle, inode); 32067fb5409dSJan Kara if (ret > 0) { 3207ac27a0ecSDave Kleikamp loff_t end = offset + ret; 3208ac27a0ecSDave Kleikamp if (end > inode->i_size) { 3209ac27a0ecSDave Kleikamp ei->i_disksize = end; 3210ac27a0ecSDave Kleikamp i_size_write(inode, end); 3211ac27a0ecSDave Kleikamp /* 3212ac27a0ecSDave Kleikamp * We're going to return a positive `ret' 3213ac27a0ecSDave Kleikamp * here due to non-zero-length I/O, so there's 3214ac27a0ecSDave Kleikamp * no way of reporting error returns from 3215617ba13bSMingming Cao * ext4_mark_inode_dirty() to userspace. So 3216ac27a0ecSDave Kleikamp * ignore it. 
3217ac27a0ecSDave Kleikamp */ 3218617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3219ac27a0ecSDave Kleikamp } 3220ac27a0ecSDave Kleikamp } 3221617ba13bSMingming Cao err = ext4_journal_stop(handle); 3222ac27a0ecSDave Kleikamp if (ret == 0) 3223ac27a0ecSDave Kleikamp ret = err; 3224ac27a0ecSDave Kleikamp } 3225ac27a0ecSDave Kleikamp out: 3226ac27a0ecSDave Kleikamp return ret; 3227ac27a0ecSDave Kleikamp } 3228ac27a0ecSDave Kleikamp 3229ac27a0ecSDave Kleikamp /* 3230617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling 3231ac27a0ecSDave Kleikamp * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3232ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is 3233ac27a0ecSDave Kleikamp * not necessarily locked. 3234ac27a0ecSDave Kleikamp * 3235ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the 3236ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3237ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 3238ac27a0ecSDave Kleikamp * 3239ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 3240ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 3241ac27a0ecSDave Kleikamp */ 3242617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 3243ac27a0ecSDave Kleikamp { 3244ac27a0ecSDave Kleikamp SetPageChecked(page); 3245ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 3246ac27a0ecSDave Kleikamp } 3247ac27a0ecSDave Kleikamp 3248617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = { 3249617ba13bSMingming Cao .readpage = ext4_readpage, 3250617ba13bSMingming Cao .readpages = ext4_readpages, 3251678aaf48SJan Kara .writepage = ext4_normal_writepage, 3252ac27a0ecSDave Kleikamp .sync_page = block_sync_page, 3253bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3254bfc1af65SNick Piggin .write_end = ext4_ordered_write_end, 3255617ba13bSMingming Cao .bmap = ext4_bmap, 3256617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3257617ba13bSMingming Cao .releasepage = ext4_releasepage, 3258617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3259ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 32608ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3261ac27a0ecSDave Kleikamp }; 3262ac27a0ecSDave Kleikamp 3263617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = { 3264617ba13bSMingming Cao .readpage = ext4_readpage, 3265617ba13bSMingming Cao .readpages = ext4_readpages, 3266678aaf48SJan Kara .writepage = ext4_normal_writepage, 3267ac27a0ecSDave Kleikamp .sync_page = block_sync_page, 3268bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3269bfc1af65SNick Piggin .write_end = ext4_writeback_write_end, 3270617ba13bSMingming Cao .bmap = ext4_bmap, 3271617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3272617ba13bSMingming Cao .releasepage = ext4_releasepage, 3273617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3274ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 32758ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3276ac27a0ecSDave Kleikamp }; 3277ac27a0ecSDave Kleikamp 3278617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = { 3279617ba13bSMingming Cao 
.readpage = ext4_readpage,
3280617ba13bSMingming Cao .readpages = ext4_readpages,
3281617ba13bSMingming Cao .writepage = ext4_journalled_writepage,
3282ac27a0ecSDave Kleikamp .sync_page = block_sync_page,
3283bfc1af65SNick Piggin .write_begin = ext4_write_begin,
3284bfc1af65SNick Piggin .write_end = ext4_journalled_write_end,
3285617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty,
3286617ba13bSMingming Cao .bmap = ext4_bmap,
3287617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage,
3288617ba13bSMingming Cao .releasepage = ext4_releasepage,
32898ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate,
3290ac27a0ecSDave Kleikamp };
3291ac27a0ecSDave Kleikamp
329264769240SAlex Tomas static const struct address_space_operations ext4_da_aops = {
329364769240SAlex Tomas .readpage = ext4_readpage,
329464769240SAlex Tomas .readpages = ext4_readpages,
329564769240SAlex Tomas .writepage = ext4_da_writepage,
329664769240SAlex Tomas .writepages = ext4_da_writepages,
329764769240SAlex Tomas .sync_page = block_sync_page,
329864769240SAlex Tomas .write_begin = ext4_da_write_begin,
329964769240SAlex Tomas .write_end = ext4_da_write_end,
330064769240SAlex Tomas .bmap = ext4_bmap,
330164769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage,
330264769240SAlex Tomas .releasepage = ext4_releasepage,
330364769240SAlex Tomas .direct_IO = ext4_direct_IO,
330464769240SAlex Tomas .migratepage = buffer_migrate_page,
33058ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate,
330664769240SAlex Tomas };
330764769240SAlex Tomas
3308617ba13bSMingming Cao void ext4_set_aops(struct inode *inode)
3309ac27a0ecSDave Kleikamp {
3310cd1aac32SAneesh Kumar K.V if (ext4_should_order_data(inode) &&
3311cd1aac32SAneesh Kumar K.V test_opt(inode->i_sb, DELALLOC))
3312cd1aac32SAneesh Kumar K.V inode->i_mapping->a_ops = &ext4_da_aops;
3313cd1aac32SAneesh Kumar K.V else if (ext4_should_order_data(inode))
3314617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_ordered_aops;
331564769240SAlex Tomas else if (ext4_should_writeback_data(inode) &&
331664769240SAlex Tomas test_opt(inode->i_sb, DELALLOC))
331764769240SAlex Tomas inode->i_mapping->a_ops = &ext4_da_aops;
3318617ba13bSMingming Cao else if (ext4_should_writeback_data(inode))
3319617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_writeback_aops;
3320ac27a0ecSDave Kleikamp else
3321617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops;
3322ac27a0ecSDave Kleikamp }
3323ac27a0ecSDave Kleikamp
3324ac27a0ecSDave Kleikamp /*
3325617ba13bSMingming Cao * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3326ac27a0ecSDave Kleikamp * up to the end of the block which corresponds to `from'.
3327ac27a0ecSDave Kleikamp * This is required during truncate. We need to physically zero the tail end
3328ac27a0ecSDave Kleikamp * of that block so it doesn't yield old data if the file is later grown.
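 *
 * For example (with 4096-byte blocks): truncating to from == 5000 lands in
 * the block covering file offsets 4096..8191, so the 3192 bytes at offsets
 * 5000..8191 are zeroed here and a later extension of the file cannot
 * expose the old contents of that tail.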
3329ac27a0ecSDave Kleikamp */ 3330cf108bcaSJan Kara int ext4_block_truncate_page(handle_t *handle, 3331ac27a0ecSDave Kleikamp struct address_space *mapping, loff_t from) 3332ac27a0ecSDave Kleikamp { 3333617ba13bSMingming Cao ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3334ac27a0ecSDave Kleikamp unsigned offset = from & (PAGE_CACHE_SIZE-1); 3335725d26d3SAneesh Kumar K.V unsigned blocksize, length, pos; 3336725d26d3SAneesh Kumar K.V ext4_lblk_t iblock; 3337ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 3338ac27a0ecSDave Kleikamp struct buffer_head *bh; 3339cf108bcaSJan Kara struct page *page; 3340ac27a0ecSDave Kleikamp int err = 0; 3341ac27a0ecSDave Kleikamp 3342cf108bcaSJan Kara page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT); 3343cf108bcaSJan Kara if (!page) 3344cf108bcaSJan Kara return -EINVAL; 3345cf108bcaSJan Kara 3346ac27a0ecSDave Kleikamp blocksize = inode->i_sb->s_blocksize; 3347ac27a0ecSDave Kleikamp length = blocksize - (offset & (blocksize - 1)); 3348ac27a0ecSDave Kleikamp iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3349ac27a0ecSDave Kleikamp 3350ac27a0ecSDave Kleikamp /* 3351ac27a0ecSDave Kleikamp * For "nobh" option, we can only work if we don't need to 3352ac27a0ecSDave Kleikamp * read-in the page - otherwise we create buffers to do the IO. 3353ac27a0ecSDave Kleikamp */ 3354ac27a0ecSDave Kleikamp if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 3355617ba13bSMingming Cao ext4_should_writeback_data(inode) && PageUptodate(page)) { 3356eebd2aa3SChristoph Lameter zero_user(page, offset, length); 3357ac27a0ecSDave Kleikamp set_page_dirty(page); 3358ac27a0ecSDave Kleikamp goto unlock; 3359ac27a0ecSDave Kleikamp } 3360ac27a0ecSDave Kleikamp 3361ac27a0ecSDave Kleikamp if (!page_has_buffers(page)) 3362ac27a0ecSDave Kleikamp create_empty_buffers(page, blocksize, 0); 3363ac27a0ecSDave Kleikamp 3364ac27a0ecSDave Kleikamp /* Find the buffer that contains "offset" */ 3365ac27a0ecSDave Kleikamp bh = page_buffers(page); 3366ac27a0ecSDave Kleikamp pos = blocksize; 3367ac27a0ecSDave Kleikamp while (offset >= pos) { 3368ac27a0ecSDave Kleikamp bh = bh->b_this_page; 3369ac27a0ecSDave Kleikamp iblock++; 3370ac27a0ecSDave Kleikamp pos += blocksize; 3371ac27a0ecSDave Kleikamp } 3372ac27a0ecSDave Kleikamp 3373ac27a0ecSDave Kleikamp err = 0; 3374ac27a0ecSDave Kleikamp if (buffer_freed(bh)) { 3375ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "freed: skip"); 3376ac27a0ecSDave Kleikamp goto unlock; 3377ac27a0ecSDave Kleikamp } 3378ac27a0ecSDave Kleikamp 3379ac27a0ecSDave Kleikamp if (!buffer_mapped(bh)) { 3380ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "unmapped"); 3381617ba13bSMingming Cao ext4_get_block(inode, iblock, bh, 0); 3382ac27a0ecSDave Kleikamp /* unmapped? It's a hole - nothing to do */ 3383ac27a0ecSDave Kleikamp if (!buffer_mapped(bh)) { 3384ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "still unmapped"); 3385ac27a0ecSDave Kleikamp goto unlock; 3386ac27a0ecSDave Kleikamp } 3387ac27a0ecSDave Kleikamp } 3388ac27a0ecSDave Kleikamp 3389ac27a0ecSDave Kleikamp /* Ok, it's mapped. Make sure it's up-to-date */ 3390ac27a0ecSDave Kleikamp if (PageUptodate(page)) 3391ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3392ac27a0ecSDave Kleikamp 3393ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3394ac27a0ecSDave Kleikamp err = -EIO; 3395ac27a0ecSDave Kleikamp ll_rw_block(READ, 1, &bh); 3396ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3397ac27a0ecSDave Kleikamp /* Uhhuh. Read error. Complain and punt. 
*/ 3398ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) 3399ac27a0ecSDave Kleikamp goto unlock; 3400ac27a0ecSDave Kleikamp } 3401ac27a0ecSDave Kleikamp 3402617ba13bSMingming Cao if (ext4_should_journal_data(inode)) { 3403ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "get write access"); 3404617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, bh); 3405ac27a0ecSDave Kleikamp if (err) 3406ac27a0ecSDave Kleikamp goto unlock; 3407ac27a0ecSDave Kleikamp } 3408ac27a0ecSDave Kleikamp 3409eebd2aa3SChristoph Lameter zero_user(page, offset, length); 3410ac27a0ecSDave Kleikamp 3411ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "zeroed end of block"); 3412ac27a0ecSDave Kleikamp 3413ac27a0ecSDave Kleikamp err = 0; 3414617ba13bSMingming Cao if (ext4_should_journal_data(inode)) { 34150390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 3416ac27a0ecSDave Kleikamp } else { 3417617ba13bSMingming Cao if (ext4_should_order_data(inode)) 3418678aaf48SJan Kara err = ext4_jbd2_file_inode(handle, inode); 3419ac27a0ecSDave Kleikamp mark_buffer_dirty(bh); 3420ac27a0ecSDave Kleikamp } 3421ac27a0ecSDave Kleikamp 3422ac27a0ecSDave Kleikamp unlock: 3423ac27a0ecSDave Kleikamp unlock_page(page); 3424ac27a0ecSDave Kleikamp page_cache_release(page); 3425ac27a0ecSDave Kleikamp return err; 3426ac27a0ecSDave Kleikamp } 3427ac27a0ecSDave Kleikamp 3428ac27a0ecSDave Kleikamp /* 3429ac27a0ecSDave Kleikamp * Probably it should be a library function... search for first non-zero word 3430ac27a0ecSDave Kleikamp * or memcmp with zero_page, whatever is better for particular architecture. 3431ac27a0ecSDave Kleikamp * Linus? 3432ac27a0ecSDave Kleikamp */ 3433ac27a0ecSDave Kleikamp static inline int all_zeroes(__le32 *p, __le32 *q) 3434ac27a0ecSDave Kleikamp { 3435ac27a0ecSDave Kleikamp while (p < q) 3436ac27a0ecSDave Kleikamp if (*p++) 3437ac27a0ecSDave Kleikamp return 0; 3438ac27a0ecSDave Kleikamp return 1; 3439ac27a0ecSDave Kleikamp } 3440ac27a0ecSDave Kleikamp 3441ac27a0ecSDave Kleikamp /** 3442617ba13bSMingming Cao * ext4_find_shared - find the indirect blocks for partial truncation. 3443ac27a0ecSDave Kleikamp * @inode: inode in question 3444ac27a0ecSDave Kleikamp * @depth: depth of the affected branch 3445617ba13bSMingming Cao * @offsets: offsets of pointers in that branch (see ext4_block_to_path) 3446ac27a0ecSDave Kleikamp * @chain: place to store the pointers to partial indirect blocks 3447ac27a0ecSDave Kleikamp * @top: place to the (detached) top of branch 3448ac27a0ecSDave Kleikamp * 3449617ba13bSMingming Cao * This is a helper function used by ext4_truncate(). 3450ac27a0ecSDave Kleikamp * 3451ac27a0ecSDave Kleikamp * When we do truncate() we may have to clean the ends of several 3452ac27a0ecSDave Kleikamp * indirect blocks but leave the blocks themselves alive. Block is 3453ac27a0ecSDave Kleikamp * partially truncated if some data below the new i_size is refered 3454ac27a0ecSDave Kleikamp * from it (and it is on the path to the first completely truncated 3455ac27a0ecSDave Kleikamp * data block, indeed). We have to free the top of that path along 3456ac27a0ecSDave Kleikamp * with everything to the right of the path. Since no allocation 3457617ba13bSMingming Cao * past the truncation point is possible until ext4_truncate() 3458ac27a0ecSDave Kleikamp * finishes, we may safely do the latter, but top of branch may 3459ac27a0ecSDave Kleikamp * require special attention - pageout below the truncation point 3460ac27a0ecSDave Kleikamp * might try to populate it. 
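 *
 * For example, truncating into the middle of a doubly-indirect subtree
 * keeps the indirect blocks on the path down to the new last block alive,
 * while every pointer to the right of that path, at every level, refers
 * only to blocks beyond the new i_size and can therefore be freed.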
3461ac27a0ecSDave Kleikamp * 3462ac27a0ecSDave Kleikamp * We atomically detach the top of branch from the tree, store the 3463ac27a0ecSDave Kleikamp * block number of its root in *@top, pointers to buffer_heads of 3464ac27a0ecSDave Kleikamp * partially truncated blocks - in @chain[].bh and pointers to 3465ac27a0ecSDave Kleikamp * their last elements that should not be removed - in 3466ac27a0ecSDave Kleikamp * @chain[].p. Return value is the pointer to last filled element 3467ac27a0ecSDave Kleikamp * of @chain. 3468ac27a0ecSDave Kleikamp * 3469ac27a0ecSDave Kleikamp * The work left to caller to do the actual freeing of subtrees: 3470ac27a0ecSDave Kleikamp * a) free the subtree starting from *@top 3471ac27a0ecSDave Kleikamp * b) free the subtrees whose roots are stored in 3472ac27a0ecSDave Kleikamp * (@chain[i].p+1 .. end of @chain[i].bh->b_data) 3473ac27a0ecSDave Kleikamp * c) free the subtrees growing from the inode past the @chain[0]. 3474ac27a0ecSDave Kleikamp * (no partially truncated stuff there). */ 3475ac27a0ecSDave Kleikamp 3476617ba13bSMingming Cao static Indirect *ext4_find_shared(struct inode *inode, int depth, 3477725d26d3SAneesh Kumar K.V ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top) 3478ac27a0ecSDave Kleikamp { 3479ac27a0ecSDave Kleikamp Indirect *partial, *p; 3480ac27a0ecSDave Kleikamp int k, err; 3481ac27a0ecSDave Kleikamp 3482ac27a0ecSDave Kleikamp *top = 0; 3483ac27a0ecSDave Kleikamp /* Make k index the deepest non-null offest + 1 */ 3484ac27a0ecSDave Kleikamp for (k = depth; k > 1 && !offsets[k-1]; k--) 3485ac27a0ecSDave Kleikamp ; 3486617ba13bSMingming Cao partial = ext4_get_branch(inode, k, offsets, chain, &err); 3487ac27a0ecSDave Kleikamp /* Writer: pointers */ 3488ac27a0ecSDave Kleikamp if (!partial) 3489ac27a0ecSDave Kleikamp partial = chain + k-1; 3490ac27a0ecSDave Kleikamp /* 3491ac27a0ecSDave Kleikamp * If the branch acquired continuation since we've looked at it - 3492ac27a0ecSDave Kleikamp * fine, it should all survive and (new) top doesn't belong to us. 3493ac27a0ecSDave Kleikamp */ 3494ac27a0ecSDave Kleikamp if (!partial->key && *partial->p) 3495ac27a0ecSDave Kleikamp /* Writer: end */ 3496ac27a0ecSDave Kleikamp goto no_top; 3497af5bc92dSTheodore Ts'o for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) 3498ac27a0ecSDave Kleikamp ; 3499ac27a0ecSDave Kleikamp /* 3500ac27a0ecSDave Kleikamp * OK, we've found the last block that must survive. The rest of our 3501ac27a0ecSDave Kleikamp * branch should be detached before unlocking. However, if that rest 3502ac27a0ecSDave Kleikamp * of branch is all ours and does not grow immediately from the inode 3503ac27a0ecSDave Kleikamp * it's easier to cheat and just decrement partial->p. 3504ac27a0ecSDave Kleikamp */ 3505ac27a0ecSDave Kleikamp if (p == chain + k - 1 && p > chain) { 3506ac27a0ecSDave Kleikamp p->p--; 3507ac27a0ecSDave Kleikamp } else { 3508ac27a0ecSDave Kleikamp *top = *p->p; 3509617ba13bSMingming Cao /* Nope, don't do this in ext4. 
Must leave the tree intact */ 3510ac27a0ecSDave Kleikamp #if 0 3511ac27a0ecSDave Kleikamp *p->p = 0; 3512ac27a0ecSDave Kleikamp #endif 3513ac27a0ecSDave Kleikamp } 3514ac27a0ecSDave Kleikamp /* Writer: end */ 3515ac27a0ecSDave Kleikamp 3516ac27a0ecSDave Kleikamp while (partial > p) { 3517ac27a0ecSDave Kleikamp brelse(partial->bh); 3518ac27a0ecSDave Kleikamp partial--; 3519ac27a0ecSDave Kleikamp } 3520ac27a0ecSDave Kleikamp no_top: 3521ac27a0ecSDave Kleikamp return partial; 3522ac27a0ecSDave Kleikamp } 3523ac27a0ecSDave Kleikamp 3524ac27a0ecSDave Kleikamp /* 3525ac27a0ecSDave Kleikamp * Zero a number of block pointers in either an inode or an indirect block. 3526ac27a0ecSDave Kleikamp * If we restart the transaction we must again get write access to the 3527ac27a0ecSDave Kleikamp * indirect block for further modification. 3528ac27a0ecSDave Kleikamp * 3529ac27a0ecSDave Kleikamp * We release `count' blocks on disk, but (last - first) may be greater 3530ac27a0ecSDave Kleikamp * than `count' because there can be holes in there. 3531ac27a0ecSDave Kleikamp */ 3532617ba13bSMingming Cao static void ext4_clear_blocks(handle_t *handle, struct inode *inode, 3533617ba13bSMingming Cao struct buffer_head *bh, ext4_fsblk_t block_to_free, 3534ac27a0ecSDave Kleikamp unsigned long count, __le32 *first, __le32 *last) 3535ac27a0ecSDave Kleikamp { 3536ac27a0ecSDave Kleikamp __le32 *p; 3537ac27a0ecSDave Kleikamp if (try_to_extend_transaction(handle, inode)) { 3538ac27a0ecSDave Kleikamp if (bh) { 35390390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 35400390131bSFrank Mayhar ext4_handle_dirty_metadata(handle, inode, bh); 3541ac27a0ecSDave Kleikamp } 3542617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3543617ba13bSMingming Cao ext4_journal_test_restart(handle, inode); 3544ac27a0ecSDave Kleikamp if (bh) { 3545ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "retaking write access"); 3546617ba13bSMingming Cao ext4_journal_get_write_access(handle, bh); 3547ac27a0ecSDave Kleikamp } 3548ac27a0ecSDave Kleikamp } 3549ac27a0ecSDave Kleikamp 3550ac27a0ecSDave Kleikamp /* 3551ac27a0ecSDave Kleikamp * Any buffers which are on the journal will be in memory. We find 3552dab291afSMingming Cao * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget() 3553ac27a0ecSDave Kleikamp * on them. We've already detached each block from the file, so 3554dab291afSMingming Cao * bforget() in jbd2_journal_forget() should be safe. 3555ac27a0ecSDave Kleikamp * 3556dab291afSMingming Cao * AKPM: turn on bforget in jbd2_journal_forget()!!! 
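 *
 * The loop below clears each pointer in [first, last), looks the block up
 * in the buffer cache and runs ext4_forget() on it, and only then is the
 * whole contiguous run handed to ext4_free_blocks().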
3557ac27a0ecSDave Kleikamp */ 3558ac27a0ecSDave Kleikamp for (p = first; p < last; p++) { 3559ac27a0ecSDave Kleikamp u32 nr = le32_to_cpu(*p); 3560ac27a0ecSDave Kleikamp if (nr) { 35611d03ec98SAneesh Kumar K.V struct buffer_head *tbh; 3562ac27a0ecSDave Kleikamp 3563ac27a0ecSDave Kleikamp *p = 0; 35641d03ec98SAneesh Kumar K.V tbh = sb_find_get_block(inode->i_sb, nr); 35651d03ec98SAneesh Kumar K.V ext4_forget(handle, 0, inode, tbh, nr); 3566ac27a0ecSDave Kleikamp } 3567ac27a0ecSDave Kleikamp } 3568ac27a0ecSDave Kleikamp 3569c9de560dSAlex Tomas ext4_free_blocks(handle, inode, block_to_free, count, 0); 3570ac27a0ecSDave Kleikamp } 3571ac27a0ecSDave Kleikamp 3572ac27a0ecSDave Kleikamp /** 3573617ba13bSMingming Cao * ext4_free_data - free a list of data blocks 3574ac27a0ecSDave Kleikamp * @handle: handle for this transaction 3575ac27a0ecSDave Kleikamp * @inode: inode we are dealing with 3576ac27a0ecSDave Kleikamp * @this_bh: indirect buffer_head which contains *@first and *@last 3577ac27a0ecSDave Kleikamp * @first: array of block numbers 3578ac27a0ecSDave Kleikamp * @last: points immediately past the end of array 3579ac27a0ecSDave Kleikamp * 3580ac27a0ecSDave Kleikamp * We are freeing all blocks refered from that array (numbers are stored as 3581ac27a0ecSDave Kleikamp * little-endian 32-bit) and updating @inode->i_blocks appropriately. 3582ac27a0ecSDave Kleikamp * 3583ac27a0ecSDave Kleikamp * We accumulate contiguous runs of blocks to free. Conveniently, if these 3584ac27a0ecSDave Kleikamp * blocks are contiguous then releasing them at one time will only affect one 3585ac27a0ecSDave Kleikamp * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 3586ac27a0ecSDave Kleikamp * actually use a lot of journal space. 3587ac27a0ecSDave Kleikamp * 3588ac27a0ecSDave Kleikamp * @this_bh will be %NULL if @first and @last point into the inode's direct 3589ac27a0ecSDave Kleikamp * block pointers. 3590ac27a0ecSDave Kleikamp */ 3591617ba13bSMingming Cao static void ext4_free_data(handle_t *handle, struct inode *inode, 3592ac27a0ecSDave Kleikamp struct buffer_head *this_bh, 3593ac27a0ecSDave Kleikamp __le32 *first, __le32 *last) 3594ac27a0ecSDave Kleikamp { 3595617ba13bSMingming Cao ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ 3596ac27a0ecSDave Kleikamp unsigned long count = 0; /* Number of blocks in the run */ 3597ac27a0ecSDave Kleikamp __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 3598ac27a0ecSDave Kleikamp corresponding to 3599ac27a0ecSDave Kleikamp block_to_free */ 3600617ba13bSMingming Cao ext4_fsblk_t nr; /* Current block # */ 3601ac27a0ecSDave Kleikamp __le32 *p; /* Pointer into inode/ind 3602ac27a0ecSDave Kleikamp for current block */ 3603ac27a0ecSDave Kleikamp int err; 3604ac27a0ecSDave Kleikamp 3605ac27a0ecSDave Kleikamp if (this_bh) { /* For indirect block */ 3606ac27a0ecSDave Kleikamp BUFFER_TRACE(this_bh, "get_write_access"); 3607617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, this_bh); 3608ac27a0ecSDave Kleikamp /* Important: if we can't update the indirect pointers 3609ac27a0ecSDave Kleikamp * to the blocks, we can't free them. 
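 * Freeing them regardless would leave live on-disk pointers to blocks
 * that could be reallocated to another file, so we simply bail out.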
*/ 3610ac27a0ecSDave Kleikamp if (err) 3611ac27a0ecSDave Kleikamp return; 3612ac27a0ecSDave Kleikamp } 3613ac27a0ecSDave Kleikamp 3614ac27a0ecSDave Kleikamp for (p = first; p < last; p++) { 3615ac27a0ecSDave Kleikamp nr = le32_to_cpu(*p); 3616ac27a0ecSDave Kleikamp if (nr) { 3617ac27a0ecSDave Kleikamp /* accumulate blocks to free if they're contiguous */ 3618ac27a0ecSDave Kleikamp if (count == 0) { 3619ac27a0ecSDave Kleikamp block_to_free = nr; 3620ac27a0ecSDave Kleikamp block_to_free_p = p; 3621ac27a0ecSDave Kleikamp count = 1; 3622ac27a0ecSDave Kleikamp } else if (nr == block_to_free + count) { 3623ac27a0ecSDave Kleikamp count++; 3624ac27a0ecSDave Kleikamp } else { 3625617ba13bSMingming Cao ext4_clear_blocks(handle, inode, this_bh, 3626ac27a0ecSDave Kleikamp block_to_free, 3627ac27a0ecSDave Kleikamp count, block_to_free_p, p); 3628ac27a0ecSDave Kleikamp block_to_free = nr; 3629ac27a0ecSDave Kleikamp block_to_free_p = p; 3630ac27a0ecSDave Kleikamp count = 1; 3631ac27a0ecSDave Kleikamp } 3632ac27a0ecSDave Kleikamp } 3633ac27a0ecSDave Kleikamp } 3634ac27a0ecSDave Kleikamp 3635ac27a0ecSDave Kleikamp if (count > 0) 3636617ba13bSMingming Cao ext4_clear_blocks(handle, inode, this_bh, block_to_free, 3637ac27a0ecSDave Kleikamp count, block_to_free_p, p); 3638ac27a0ecSDave Kleikamp 3639ac27a0ecSDave Kleikamp if (this_bh) { 36400390131bSFrank Mayhar BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); 364171dc8fbcSDuane Griffin 364271dc8fbcSDuane Griffin /* 364371dc8fbcSDuane Griffin * The buffer head should have an attached journal head at this 364471dc8fbcSDuane Griffin * point. However, if the data is corrupted and an indirect 364571dc8fbcSDuane Griffin * block pointed to itself, it would have been detached when 364671dc8fbcSDuane Griffin * the block was cleared. Check for this instead of OOPSing. 364771dc8fbcSDuane Griffin */ 3648e7f07968STheodore Ts'o if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh)) 36490390131bSFrank Mayhar ext4_handle_dirty_metadata(handle, inode, this_bh); 365071dc8fbcSDuane Griffin else 365171dc8fbcSDuane Griffin ext4_error(inode->i_sb, __func__, 365271dc8fbcSDuane Griffin "circular indirect block detected, " 365371dc8fbcSDuane Griffin "inode=%lu, block=%llu", 365471dc8fbcSDuane Griffin inode->i_ino, 365571dc8fbcSDuane Griffin (unsigned long long) this_bh->b_blocknr); 3656ac27a0ecSDave Kleikamp } 3657ac27a0ecSDave Kleikamp } 3658ac27a0ecSDave Kleikamp 3659ac27a0ecSDave Kleikamp /** 3660617ba13bSMingming Cao * ext4_free_branches - free an array of branches 3661ac27a0ecSDave Kleikamp * @handle: JBD handle for this transaction 3662ac27a0ecSDave Kleikamp * @inode: inode we are dealing with 3663ac27a0ecSDave Kleikamp * @parent_bh: the buffer_head which contains *@first and *@last 3664ac27a0ecSDave Kleikamp * @first: array of block numbers 3665ac27a0ecSDave Kleikamp * @last: pointer immediately past the end of array 3666ac27a0ecSDave Kleikamp * @depth: depth of the branches to free 3667ac27a0ecSDave Kleikamp * 3668ac27a0ecSDave Kleikamp * We are freeing all blocks refered from these branches (numbers are 3669ac27a0ecSDave Kleikamp * stored as little-endian 32-bit) and updating @inode->i_blocks 3670ac27a0ecSDave Kleikamp * appropriately. 
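 *
 * Each level is handled by recursing with @depth reduced by one; once the
 * bottom of the tree is reached the remaining pointers are plain data
 * blocks and are released via ext4_free_data().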
3671ac27a0ecSDave Kleikamp */ 3672617ba13bSMingming Cao static void ext4_free_branches(handle_t *handle, struct inode *inode, 3673ac27a0ecSDave Kleikamp struct buffer_head *parent_bh, 3674ac27a0ecSDave Kleikamp __le32 *first, __le32 *last, int depth) 3675ac27a0ecSDave Kleikamp { 3676617ba13bSMingming Cao ext4_fsblk_t nr; 3677ac27a0ecSDave Kleikamp __le32 *p; 3678ac27a0ecSDave Kleikamp 36790390131bSFrank Mayhar if (ext4_handle_is_aborted(handle)) 3680ac27a0ecSDave Kleikamp return; 3681ac27a0ecSDave Kleikamp 3682ac27a0ecSDave Kleikamp if (depth--) { 3683ac27a0ecSDave Kleikamp struct buffer_head *bh; 3684617ba13bSMingming Cao int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 3685ac27a0ecSDave Kleikamp p = last; 3686ac27a0ecSDave Kleikamp while (--p >= first) { 3687ac27a0ecSDave Kleikamp nr = le32_to_cpu(*p); 3688ac27a0ecSDave Kleikamp if (!nr) 3689ac27a0ecSDave Kleikamp continue; /* A hole */ 3690ac27a0ecSDave Kleikamp 3691ac27a0ecSDave Kleikamp /* Go read the buffer for the next level down */ 3692ac27a0ecSDave Kleikamp bh = sb_bread(inode->i_sb, nr); 3693ac27a0ecSDave Kleikamp 3694ac27a0ecSDave Kleikamp /* 3695ac27a0ecSDave Kleikamp * A read failure? Report error and clear slot 3696ac27a0ecSDave Kleikamp * (should be rare). 3697ac27a0ecSDave Kleikamp */ 3698ac27a0ecSDave Kleikamp if (!bh) { 3699617ba13bSMingming Cao ext4_error(inode->i_sb, "ext4_free_branches", 37002ae02107SMingming Cao "Read failure, inode=%lu, block=%llu", 3701ac27a0ecSDave Kleikamp inode->i_ino, nr); 3702ac27a0ecSDave Kleikamp continue; 3703ac27a0ecSDave Kleikamp } 3704ac27a0ecSDave Kleikamp 3705ac27a0ecSDave Kleikamp /* This zaps the entire block. Bottom up. */ 3706ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "free child branches"); 3707617ba13bSMingming Cao ext4_free_branches(handle, inode, bh, 3708ac27a0ecSDave Kleikamp (__le32 *) bh->b_data, 3709ac27a0ecSDave Kleikamp (__le32 *) bh->b_data + addr_per_block, 3710ac27a0ecSDave Kleikamp depth); 3711ac27a0ecSDave Kleikamp 3712ac27a0ecSDave Kleikamp /* 3713ac27a0ecSDave Kleikamp * We've probably journalled the indirect block several 3714ac27a0ecSDave Kleikamp * times during the truncate. But it's no longer 3715ac27a0ecSDave Kleikamp * needed and we now drop it from the transaction via 3716dab291afSMingming Cao * jbd2_journal_revoke(). 3717ac27a0ecSDave Kleikamp * 3718ac27a0ecSDave Kleikamp * That's easy if it's exclusively part of this 3719ac27a0ecSDave Kleikamp * transaction. But if it's part of the committing 3720dab291afSMingming Cao * transaction then jbd2_journal_forget() will simply 3721ac27a0ecSDave Kleikamp * brelse() it. That means that if the underlying 3722617ba13bSMingming Cao * block is reallocated in ext4_get_block(), 3723ac27a0ecSDave Kleikamp * unmap_underlying_metadata() will find this block 3724ac27a0ecSDave Kleikamp * and will try to get rid of it. damn, damn. 3725ac27a0ecSDave Kleikamp * 3726ac27a0ecSDave Kleikamp * If this block has already been committed to the 3727ac27a0ecSDave Kleikamp * journal, a revoke record will be written. And 3728ac27a0ecSDave Kleikamp * revoke records must be emitted *before* clearing 3729ac27a0ecSDave Kleikamp * this block's bit in the bitmaps. 3730ac27a0ecSDave Kleikamp */ 3731617ba13bSMingming Cao ext4_forget(handle, 1, inode, bh, bh->b_blocknr); 3732ac27a0ecSDave Kleikamp 3733ac27a0ecSDave Kleikamp /* 3734ac27a0ecSDave Kleikamp * Everything below this this pointer has been 3735ac27a0ecSDave Kleikamp * released. Now let this top-of-subtree go. 
3736ac27a0ecSDave Kleikamp * 3737ac27a0ecSDave Kleikamp * We want the freeing of this indirect block to be 3738ac27a0ecSDave Kleikamp * atomic in the journal with the updating of the 3739ac27a0ecSDave Kleikamp * bitmap block which owns it. So make some room in 3740ac27a0ecSDave Kleikamp * the journal. 3741ac27a0ecSDave Kleikamp * 3742ac27a0ecSDave Kleikamp * We zero the parent pointer *after* freeing its 3743ac27a0ecSDave Kleikamp * pointee in the bitmaps, so if extend_transaction() 3744ac27a0ecSDave Kleikamp * for some reason fails to put the bitmap changes and 3745ac27a0ecSDave Kleikamp * the release into the same transaction, recovery 3746ac27a0ecSDave Kleikamp * will merely complain about releasing a free block, 3747ac27a0ecSDave Kleikamp * rather than leaking blocks. 3748ac27a0ecSDave Kleikamp */ 37490390131bSFrank Mayhar if (ext4_handle_is_aborted(handle)) 3750ac27a0ecSDave Kleikamp return; 3751ac27a0ecSDave Kleikamp if (try_to_extend_transaction(handle, inode)) { 3752617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3753617ba13bSMingming Cao ext4_journal_test_restart(handle, inode); 3754ac27a0ecSDave Kleikamp } 3755ac27a0ecSDave Kleikamp 3756c9de560dSAlex Tomas ext4_free_blocks(handle, inode, nr, 1, 1); 3757ac27a0ecSDave Kleikamp 3758ac27a0ecSDave Kleikamp if (parent_bh) { 3759ac27a0ecSDave Kleikamp /* 3760ac27a0ecSDave Kleikamp * The block which we have just freed is 3761ac27a0ecSDave Kleikamp * pointed to by an indirect block: journal it 3762ac27a0ecSDave Kleikamp */ 3763ac27a0ecSDave Kleikamp BUFFER_TRACE(parent_bh, "get_write_access"); 3764617ba13bSMingming Cao if (!ext4_journal_get_write_access(handle, 3765ac27a0ecSDave Kleikamp parent_bh)){ 3766ac27a0ecSDave Kleikamp *p = 0; 3767ac27a0ecSDave Kleikamp BUFFER_TRACE(parent_bh, 37680390131bSFrank Mayhar "call ext4_handle_dirty_metadata"); 37690390131bSFrank Mayhar ext4_handle_dirty_metadata(handle, 37700390131bSFrank Mayhar inode, 3771ac27a0ecSDave Kleikamp parent_bh); 3772ac27a0ecSDave Kleikamp } 3773ac27a0ecSDave Kleikamp } 3774ac27a0ecSDave Kleikamp } 3775ac27a0ecSDave Kleikamp } else { 3776ac27a0ecSDave Kleikamp /* We have reached the bottom of the tree. */ 3777ac27a0ecSDave Kleikamp BUFFER_TRACE(parent_bh, "free data blocks"); 3778617ba13bSMingming Cao ext4_free_data(handle, inode, parent_bh, first, last); 3779ac27a0ecSDave Kleikamp } 3780ac27a0ecSDave Kleikamp } 3781ac27a0ecSDave Kleikamp 378291ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode) 378391ef4cafSDuane Griffin { 378491ef4cafSDuane Griffin if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 378591ef4cafSDuane Griffin return 0; 378691ef4cafSDuane Griffin if (S_ISREG(inode->i_mode)) 378791ef4cafSDuane Griffin return 1; 378891ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode)) 378991ef4cafSDuane Griffin return 1; 379091ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode)) 379191ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode); 379291ef4cafSDuane Griffin return 0; 379391ef4cafSDuane Griffin } 379491ef4cafSDuane Griffin 3795ac27a0ecSDave Kleikamp /* 3796617ba13bSMingming Cao * ext4_truncate() 3797ac27a0ecSDave Kleikamp * 3798617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3799617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3800ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 
3801ac27a0ecSDave Kleikamp *
3802ac27a0ecSDave Kleikamp * As we work through the truncate and commit bits of it to the journal there
3803ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on
3804ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash.
3805ac27a0ecSDave Kleikamp *
3806ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it
3807ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction,
3808ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and
3809ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although
3810ac27a0ecSDave Kleikamp * left-to-right works OK too).
3811ac27a0ecSDave Kleikamp *
3812ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of
3813ac27a0ecSDave Kleikamp * truncate against the orphan inode list.
3814ac27a0ecSDave Kleikamp *
3815ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as
3816617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
3817ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call
3818617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks
3819617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But
3820ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash
3821617ba13bSMingming Cao * ext4_truncate() run will find them and release them.
3822ac27a0ecSDave Kleikamp */
3823617ba13bSMingming Cao void ext4_truncate(struct inode *inode)
3824ac27a0ecSDave Kleikamp {
3825ac27a0ecSDave Kleikamp handle_t *handle;
3826617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode);
3827ac27a0ecSDave Kleikamp __le32 *i_data = ei->i_data;
3828617ba13bSMingming Cao int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
3829ac27a0ecSDave Kleikamp struct address_space *mapping = inode->i_mapping;
3830725d26d3SAneesh Kumar K.V ext4_lblk_t offsets[4];
3831ac27a0ecSDave Kleikamp Indirect chain[4];
3832ac27a0ecSDave Kleikamp Indirect *partial;
3833ac27a0ecSDave Kleikamp __le32 nr = 0;
3834ac27a0ecSDave Kleikamp int n;
3835725d26d3SAneesh Kumar K.V ext4_lblk_t last_block;
3836ac27a0ecSDave Kleikamp unsigned blocksize = inode->i_sb->s_blocksize;
3837ac27a0ecSDave Kleikamp
383891ef4cafSDuane Griffin if (!ext4_can_truncate(inode))
3839ac27a0ecSDave Kleikamp return;
3840ac27a0ecSDave Kleikamp
38411d03ec98SAneesh Kumar K.V if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
3842cf108bcaSJan Kara ext4_ext_truncate(inode);
38431d03ec98SAneesh Kumar K.V return;
38441d03ec98SAneesh Kumar K.V }
3845a86c6181SAlex Tomas
3846ac27a0ecSDave Kleikamp handle = start_transaction(inode);
3847cf108bcaSJan Kara if (IS_ERR(handle))
3848ac27a0ecSDave Kleikamp return; /* AKPM: return what?
*/ 3849ac27a0ecSDave Kleikamp 3850ac27a0ecSDave Kleikamp last_block = (inode->i_size + blocksize-1) 3851617ba13bSMingming Cao >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 3852ac27a0ecSDave Kleikamp 3853cf108bcaSJan Kara if (inode->i_size & (blocksize - 1)) 3854cf108bcaSJan Kara if (ext4_block_truncate_page(handle, mapping, inode->i_size)) 3855cf108bcaSJan Kara goto out_stop; 3856ac27a0ecSDave Kleikamp 3857617ba13bSMingming Cao n = ext4_block_to_path(inode, last_block, offsets, NULL); 3858ac27a0ecSDave Kleikamp if (n == 0) 3859ac27a0ecSDave Kleikamp goto out_stop; /* error */ 3860ac27a0ecSDave Kleikamp 3861ac27a0ecSDave Kleikamp /* 3862ac27a0ecSDave Kleikamp * OK. This truncate is going to happen. We add the inode to the 3863ac27a0ecSDave Kleikamp * orphan list, so that if this truncate spans multiple transactions, 3864ac27a0ecSDave Kleikamp * and we crash, we will resume the truncate when the filesystem 3865ac27a0ecSDave Kleikamp * recovers. It also marks the inode dirty, to catch the new size. 3866ac27a0ecSDave Kleikamp * 3867ac27a0ecSDave Kleikamp * Implication: the file must always be in a sane, consistent 3868ac27a0ecSDave Kleikamp * truncatable state while each transaction commits. 3869ac27a0ecSDave Kleikamp */ 3870617ba13bSMingming Cao if (ext4_orphan_add(handle, inode)) 3871ac27a0ecSDave Kleikamp goto out_stop; 3872ac27a0ecSDave Kleikamp 3873ac27a0ecSDave Kleikamp /* 3874632eaeabSMingming Cao * From here we block out all ext4_get_block() callers who want to 3875632eaeabSMingming Cao * modify the block allocation tree. 3876632eaeabSMingming Cao */ 3877632eaeabSMingming Cao down_write(&ei->i_data_sem); 3878b4df2030STheodore Ts'o 3879c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 3880b4df2030STheodore Ts'o 3881632eaeabSMingming Cao /* 3882ac27a0ecSDave Kleikamp * The orphan list entry will now protect us from any crash which 3883ac27a0ecSDave Kleikamp * occurs before the truncate completes, so it is now safe to propagate 3884ac27a0ecSDave Kleikamp * the new, shorter inode size (held for now in i_size) into the 3885ac27a0ecSDave Kleikamp * on-disk inode. We do this via i_disksize, which is the value which 3886617ba13bSMingming Cao * ext4 *really* writes onto the disk inode. 3887ac27a0ecSDave Kleikamp */ 3888ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3889ac27a0ecSDave Kleikamp 3890ac27a0ecSDave Kleikamp if (n == 1) { /* direct blocks */ 3891617ba13bSMingming Cao ext4_free_data(handle, inode, NULL, i_data+offsets[0], 3892617ba13bSMingming Cao i_data + EXT4_NDIR_BLOCKS); 3893ac27a0ecSDave Kleikamp goto do_indirects; 3894ac27a0ecSDave Kleikamp } 3895ac27a0ecSDave Kleikamp 3896617ba13bSMingming Cao partial = ext4_find_shared(inode, n, offsets, chain, &nr); 3897ac27a0ecSDave Kleikamp /* Kill the top of shared branch (not detached) */ 3898ac27a0ecSDave Kleikamp if (nr) { 3899ac27a0ecSDave Kleikamp if (partial == chain) { 3900ac27a0ecSDave Kleikamp /* Shared branch grows from the inode */ 3901617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, 3902ac27a0ecSDave Kleikamp &nr, &nr+1, (chain+n-1) - partial); 3903ac27a0ecSDave Kleikamp *partial->p = 0; 3904ac27a0ecSDave Kleikamp /* 3905ac27a0ecSDave Kleikamp * We mark the inode dirty prior to restart, 3906ac27a0ecSDave Kleikamp * and prior to stop. No need for it here. 
3907ac27a0ecSDave Kleikamp */ 3908ac27a0ecSDave Kleikamp } else { 3909ac27a0ecSDave Kleikamp /* Shared branch grows from an indirect block */ 3910ac27a0ecSDave Kleikamp BUFFER_TRACE(partial->bh, "get_write_access"); 3911617ba13bSMingming Cao ext4_free_branches(handle, inode, partial->bh, 3912ac27a0ecSDave Kleikamp partial->p, 3913ac27a0ecSDave Kleikamp partial->p+1, (chain+n-1) - partial); 3914ac27a0ecSDave Kleikamp } 3915ac27a0ecSDave Kleikamp } 3916ac27a0ecSDave Kleikamp /* Clear the ends of indirect blocks on the shared branch */ 3917ac27a0ecSDave Kleikamp while (partial > chain) { 3918617ba13bSMingming Cao ext4_free_branches(handle, inode, partial->bh, partial->p + 1, 3919ac27a0ecSDave Kleikamp (__le32*)partial->bh->b_data+addr_per_block, 3920ac27a0ecSDave Kleikamp (chain+n-1) - partial); 3921ac27a0ecSDave Kleikamp BUFFER_TRACE(partial->bh, "call brelse"); 3922ac27a0ecSDave Kleikamp brelse (partial->bh); 3923ac27a0ecSDave Kleikamp partial--; 3924ac27a0ecSDave Kleikamp } 3925ac27a0ecSDave Kleikamp do_indirects: 3926ac27a0ecSDave Kleikamp /* Kill the remaining (whole) subtrees */ 3927ac27a0ecSDave Kleikamp switch (offsets[0]) { 3928ac27a0ecSDave Kleikamp default: 3929617ba13bSMingming Cao nr = i_data[EXT4_IND_BLOCK]; 3930ac27a0ecSDave Kleikamp if (nr) { 3931617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 3932617ba13bSMingming Cao i_data[EXT4_IND_BLOCK] = 0; 3933ac27a0ecSDave Kleikamp } 3934617ba13bSMingming Cao case EXT4_IND_BLOCK: 3935617ba13bSMingming Cao nr = i_data[EXT4_DIND_BLOCK]; 3936ac27a0ecSDave Kleikamp if (nr) { 3937617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 3938617ba13bSMingming Cao i_data[EXT4_DIND_BLOCK] = 0; 3939ac27a0ecSDave Kleikamp } 3940617ba13bSMingming Cao case EXT4_DIND_BLOCK: 3941617ba13bSMingming Cao nr = i_data[EXT4_TIND_BLOCK]; 3942ac27a0ecSDave Kleikamp if (nr) { 3943617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 3944617ba13bSMingming Cao i_data[EXT4_TIND_BLOCK] = 0; 3945ac27a0ecSDave Kleikamp } 3946617ba13bSMingming Cao case EXT4_TIND_BLOCK: 3947ac27a0ecSDave Kleikamp ; 3948ac27a0ecSDave Kleikamp } 3949ac27a0ecSDave Kleikamp 39500e855ac8SAneesh Kumar K.V up_write(&ei->i_data_sem); 3951ef7f3835SKalpak Shah inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 3952617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3953ac27a0ecSDave Kleikamp 3954ac27a0ecSDave Kleikamp /* 3955ac27a0ecSDave Kleikamp * In a multi-transaction truncate, we only make the final transaction 3956ac27a0ecSDave Kleikamp * synchronous 3957ac27a0ecSDave Kleikamp */ 3958ac27a0ecSDave Kleikamp if (IS_SYNC(inode)) 39590390131bSFrank Mayhar ext4_handle_sync(handle); 3960ac27a0ecSDave Kleikamp out_stop: 3961ac27a0ecSDave Kleikamp /* 3962ac27a0ecSDave Kleikamp * If this was a simple ftruncate(), and the file will remain alive 3963ac27a0ecSDave Kleikamp * then we need to clear up the orphan record which we created above. 3964ac27a0ecSDave Kleikamp * However, if this was a real unlink then we were called by 3965617ba13bSMingming Cao * ext4_delete_inode(), and we allow that function to clean up the 3966ac27a0ecSDave Kleikamp * orphan info for us. 
3967ac27a0ecSDave Kleikamp */ 3968ac27a0ecSDave Kleikamp if (inode->i_nlink) 3969617ba13bSMingming Cao ext4_orphan_del(handle, inode); 3970ac27a0ecSDave Kleikamp 3971617ba13bSMingming Cao ext4_journal_stop(handle); 3972ac27a0ecSDave Kleikamp } 3973ac27a0ecSDave Kleikamp 3974ac27a0ecSDave Kleikamp /* 3975617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3976ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3977ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3978ac27a0ecSDave Kleikamp * inode. 3979ac27a0ecSDave Kleikamp */ 3980617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3981617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3982ac27a0ecSDave Kleikamp { 3983240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3984ac27a0ecSDave Kleikamp struct buffer_head *bh; 3985240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3986240799cdSTheodore Ts'o ext4_fsblk_t block; 3987240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3988ac27a0ecSDave Kleikamp 39893a06d778SAneesh Kumar K.V iloc->bh = NULL; 3990240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3991ac27a0ecSDave Kleikamp return -EIO; 3992ac27a0ecSDave Kleikamp 3993240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3994240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3995240799cdSTheodore Ts'o if (!gdp) 3996240799cdSTheodore Ts'o return -EIO; 3997240799cdSTheodore Ts'o 3998240799cdSTheodore Ts'o /* 3999240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 4000240799cdSTheodore Ts'o */ 4001240799cdSTheodore Ts'o inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb)); 4002240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 4003240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 4004240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 4005240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 4006240799cdSTheodore Ts'o 4007240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 4008ac27a0ecSDave Kleikamp if (!bh) { 4009240799cdSTheodore Ts'o ext4_error(sb, "ext4_get_inode_loc", "unable to read " 4010240799cdSTheodore Ts'o "inode block - inode=%lu, block=%llu", 4011ac27a0ecSDave Kleikamp inode->i_ino, block); 4012ac27a0ecSDave Kleikamp return -EIO; 4013ac27a0ecSDave Kleikamp } 4014ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 4015ac27a0ecSDave Kleikamp lock_buffer(bh); 40169c83a923SHidehiro Kawai 40179c83a923SHidehiro Kawai /* 40189c83a923SHidehiro Kawai * If the buffer has the write error flag, we have failed 40199c83a923SHidehiro Kawai * to write out another inode in the same block. In this 40209c83a923SHidehiro Kawai * case, we don't have to read the block because we may 40219c83a923SHidehiro Kawai * read the old inode data successfully. 
40229c83a923SHidehiro Kawai */ 40239c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 40249c83a923SHidehiro Kawai set_buffer_uptodate(bh); 40259c83a923SHidehiro Kawai 4026ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 4027ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 4028ac27a0ecSDave Kleikamp unlock_buffer(bh); 4029ac27a0ecSDave Kleikamp goto has_buffer; 4030ac27a0ecSDave Kleikamp } 4031ac27a0ecSDave Kleikamp 4032ac27a0ecSDave Kleikamp /* 4033ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 4034ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 4035ac27a0ecSDave Kleikamp * block. 4036ac27a0ecSDave Kleikamp */ 4037ac27a0ecSDave Kleikamp if (in_mem) { 4038ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 4039240799cdSTheodore Ts'o int i, start; 4040ac27a0ecSDave Kleikamp 4041240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 4042ac27a0ecSDave Kleikamp 4043ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? */ 4044240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 4045ac27a0ecSDave Kleikamp if (!bitmap_bh) 4046ac27a0ecSDave Kleikamp goto make_io; 4047ac27a0ecSDave Kleikamp 4048ac27a0ecSDave Kleikamp /* 4049ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 4050ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 4051ac27a0ecSDave Kleikamp * of one, so skip it. 4052ac27a0ecSDave Kleikamp */ 4053ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 4054ac27a0ecSDave Kleikamp brelse(bitmap_bh); 4055ac27a0ecSDave Kleikamp goto make_io; 4056ac27a0ecSDave Kleikamp } 4057240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 4058ac27a0ecSDave Kleikamp if (i == inode_offset) 4059ac27a0ecSDave Kleikamp continue; 4060617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 4061ac27a0ecSDave Kleikamp break; 4062ac27a0ecSDave Kleikamp } 4063ac27a0ecSDave Kleikamp brelse(bitmap_bh); 4064240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 4065ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 4066ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 4067ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 4068ac27a0ecSDave Kleikamp unlock_buffer(bh); 4069ac27a0ecSDave Kleikamp goto has_buffer; 4070ac27a0ecSDave Kleikamp } 4071ac27a0ecSDave Kleikamp } 4072ac27a0ecSDave Kleikamp 4073ac27a0ecSDave Kleikamp make_io: 4074ac27a0ecSDave Kleikamp /* 4075240799cdSTheodore Ts'o * If we need to do any I/O, try to pre-readahead extra 4076240799cdSTheodore Ts'o * blocks from the inode table. 
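 *
 * For example (illustrative numbers): with s_inode_readahead_blks == 32
 * and the wanted inode block at block 1000, the aligned window starts at
 * block 992 and nominally covers 32 blocks, clipped below so that it
 * neither starts before the inode table nor runs past the part of the
 * table that holds in-use inodes.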
4077240799cdSTheodore Ts'o */ 4078240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 4079240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 4080240799cdSTheodore Ts'o unsigned num; 4081240799cdSTheodore Ts'o 4082240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 4083240799cdSTheodore Ts'o /* Make sure s_inode_readahead_blks is a power of 2 */ 4084240799cdSTheodore Ts'o while (EXT4_SB(sb)->s_inode_readahead_blks & 4085240799cdSTheodore Ts'o (EXT4_SB(sb)->s_inode_readahead_blks-1)) 4086240799cdSTheodore Ts'o EXT4_SB(sb)->s_inode_readahead_blks = 4087240799cdSTheodore Ts'o (EXT4_SB(sb)->s_inode_readahead_blks & 4088240799cdSTheodore Ts'o (EXT4_SB(sb)->s_inode_readahead_blks-1)); 4089240799cdSTheodore Ts'o b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 4090240799cdSTheodore Ts'o if (table > b) 4091240799cdSTheodore Ts'o b = table; 4092240799cdSTheodore Ts'o end = b + EXT4_SB(sb)->s_inode_readahead_blks; 4093240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 4094240799cdSTheodore Ts'o if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4095240799cdSTheodore Ts'o EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 4096560671a0SAneesh Kumar K.V num -= ext4_itable_unused_count(sb, gdp); 4097240799cdSTheodore Ts'o table += num / inodes_per_block; 4098240799cdSTheodore Ts'o if (end > table) 4099240799cdSTheodore Ts'o end = table; 4100240799cdSTheodore Ts'o while (b <= end) 4101240799cdSTheodore Ts'o sb_breadahead(sb, b++); 4102240799cdSTheodore Ts'o } 4103240799cdSTheodore Ts'o 4104240799cdSTheodore Ts'o /* 4105ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 4106ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 4107ac27a0ecSDave Kleikamp * Read the block from disk. 4108ac27a0ecSDave Kleikamp */ 4109ac27a0ecSDave Kleikamp get_bh(bh); 4110ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 4111ac27a0ecSDave Kleikamp submit_bh(READ_META, bh); 4112ac27a0ecSDave Kleikamp wait_on_buffer(bh); 4113ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 4114240799cdSTheodore Ts'o ext4_error(sb, __func__, 4115240799cdSTheodore Ts'o "unable to read inode block - inode=%lu, " 4116240799cdSTheodore Ts'o "block=%llu", inode->i_ino, block); 4117ac27a0ecSDave Kleikamp brelse(bh); 4118ac27a0ecSDave Kleikamp return -EIO; 4119ac27a0ecSDave Kleikamp } 4120ac27a0ecSDave Kleikamp } 4121ac27a0ecSDave Kleikamp has_buffer: 4122ac27a0ecSDave Kleikamp iloc->bh = bh; 4123ac27a0ecSDave Kleikamp return 0; 4124ac27a0ecSDave Kleikamp } 4125ac27a0ecSDave Kleikamp 4126617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4127ac27a0ecSDave Kleikamp { 4128ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. 
*/ 4129617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 4130617ba13bSMingming Cao !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)); 4131ac27a0ecSDave Kleikamp } 4132ac27a0ecSDave Kleikamp 4133617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 4134ac27a0ecSDave Kleikamp { 4135617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 4136ac27a0ecSDave Kleikamp 4137ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 4138617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 4139ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 4140617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 4141ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 4142617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 4143ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 4144617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 4145ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 4146617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 4147ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 4148ac27a0ecSDave Kleikamp } 4149ac27a0ecSDave Kleikamp 4150ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 4151ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 4152ff9ddf7eSJan Kara { 4153ff9ddf7eSJan Kara unsigned int flags = ei->vfs_inode.i_flags; 4154ff9ddf7eSJan Kara 4155ff9ddf7eSJan Kara ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 4156ff9ddf7eSJan Kara EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL); 4157ff9ddf7eSJan Kara if (flags & S_SYNC) 4158ff9ddf7eSJan Kara ei->i_flags |= EXT4_SYNC_FL; 4159ff9ddf7eSJan Kara if (flags & S_APPEND) 4160ff9ddf7eSJan Kara ei->i_flags |= EXT4_APPEND_FL; 4161ff9ddf7eSJan Kara if (flags & S_IMMUTABLE) 4162ff9ddf7eSJan Kara ei->i_flags |= EXT4_IMMUTABLE_FL; 4163ff9ddf7eSJan Kara if (flags & S_NOATIME) 4164ff9ddf7eSJan Kara ei->i_flags |= EXT4_NOATIME_FL; 4165ff9ddf7eSJan Kara if (flags & S_DIRSYNC) 4166ff9ddf7eSJan Kara ei->i_flags |= EXT4_DIRSYNC_FL; 4167ff9ddf7eSJan Kara } 41680fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 41690fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 41700fc1b451SAneesh Kumar K.V { 41710fc1b451SAneesh Kumar K.V blkcnt_t i_blocks ; 41728180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 41738180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 41740fc1b451SAneesh Kumar K.V 41750fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 41760fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 41770fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 41780fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 41790fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 41808180a562SAneesh Kumar K.V if (ei->i_flags & EXT4_HUGE_FILE_FL) { 41818180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 41828180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 41838180a562SAneesh Kumar K.V } else { 41840fc1b451SAneesh Kumar K.V return i_blocks; 41858180a562SAneesh Kumar K.V } 41860fc1b451SAneesh Kumar K.V } else { 41870fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 41880fc1b451SAneesh Kumar K.V } 41890fc1b451SAneesh Kumar K.V } 4190ff9ddf7eSJan Kara 41911d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 4192ac27a0ecSDave Kleikamp { 4193617ba13bSMingming Cao struct ext4_iloc iloc; 4194617ba13bSMingming Cao struct ext4_inode 
*raw_inode; 41951d1fe1eeSDavid Howells struct ext4_inode_info *ei; 4196ac27a0ecSDave Kleikamp struct buffer_head *bh; 41971d1fe1eeSDavid Howells struct inode *inode; 41981d1fe1eeSDavid Howells long ret; 4199ac27a0ecSDave Kleikamp int block; 4200ac27a0ecSDave Kleikamp 42011d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 42021d1fe1eeSDavid Howells if (!inode) 42031d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 42041d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 42051d1fe1eeSDavid Howells return inode; 42061d1fe1eeSDavid Howells 42071d1fe1eeSDavid Howells ei = EXT4_I(inode); 420803010a33STheodore Ts'o #ifdef CONFIG_EXT4_FS_POSIX_ACL 4209617ba13bSMingming Cao ei->i_acl = EXT4_ACL_NOT_CACHED; 4210617ba13bSMingming Cao ei->i_default_acl = EXT4_ACL_NOT_CACHED; 4211ac27a0ecSDave Kleikamp #endif 4212ac27a0ecSDave Kleikamp 42131d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 42141d1fe1eeSDavid Howells if (ret < 0) 4215ac27a0ecSDave Kleikamp goto bad_inode; 4216ac27a0ecSDave Kleikamp bh = iloc.bh; 4217617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 4218ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4219ac27a0ecSDave Kleikamp inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4220ac27a0ecSDave Kleikamp inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4221ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 4222ac27a0ecSDave Kleikamp inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 4223ac27a0ecSDave Kleikamp inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4224ac27a0ecSDave Kleikamp } 4225ac27a0ecSDave Kleikamp inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 4226ac27a0ecSDave Kleikamp 4227ac27a0ecSDave Kleikamp ei->i_state = 0; 4228ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 4229ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4230ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 4231ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes 4232ac27a0ecSDave Kleikamp * the test is that same one that e2fsck uses 4233ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 4234ac27a0ecSDave Kleikamp */ 4235ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 4236ac27a0ecSDave Kleikamp if (inode->i_mode == 0 || 4237617ba13bSMingming Cao !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 4238ac27a0ecSDave Kleikamp /* this inode is deleted */ 4239ac27a0ecSDave Kleikamp brelse(bh); 42401d1fe1eeSDavid Howells ret = -ESTALE; 4241ac27a0ecSDave Kleikamp goto bad_inode; 4242ac27a0ecSDave Kleikamp } 4243ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 4244ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 4245ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 4246ac27a0ecSDave Kleikamp * the process of deleting those. 
*/ 4247ac27a0ecSDave Kleikamp } 4248ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 42490fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 42507973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 42519b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 4252a48380f7SAneesh Kumar K.V cpu_to_le32(EXT4_OS_HURD)) { 4253a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 4254a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4255ac27a0ecSDave Kleikamp } 4256a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 4257ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 4258ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4259ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 4260ac27a0ecSDave Kleikamp /* 4261ac27a0ecSDave Kleikamp * NOTE! The in-memory inode i_data array is in little-endian order 4262ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 4263ac27a0ecSDave Kleikamp */ 4264617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 4265ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 4266ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 4267ac27a0ecSDave Kleikamp 42680040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4269ac27a0ecSDave Kleikamp ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4270617ba13bSMingming Cao if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4271e5d2861fSKirill Korotaev EXT4_INODE_SIZE(inode->i_sb)) { 4272e5d2861fSKirill Korotaev brelse(bh); 42731d1fe1eeSDavid Howells ret = -EIO; 4274ac27a0ecSDave Kleikamp goto bad_inode; 4275e5d2861fSKirill Korotaev } 4276ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 4277ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. 
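 * Claiming all of it means i_extra_isize spans from the end of the
 * original 128-byte inode to the end of struct ext4_inode, so any
 * in-inode extended attributes start only after that point.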
*/ 4278617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 4279617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 4280ac27a0ecSDave Kleikamp } else { 4281ac27a0ecSDave Kleikamp __le32 *magic = (void *)raw_inode + 4282617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE + 4283ac27a0ecSDave Kleikamp ei->i_extra_isize; 4284617ba13bSMingming Cao if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 4285617ba13bSMingming Cao ei->i_state |= EXT4_STATE_XATTR; 4286ac27a0ecSDave Kleikamp } 4287ac27a0ecSDave Kleikamp } else 4288ac27a0ecSDave Kleikamp ei->i_extra_isize = 0; 4289ac27a0ecSDave Kleikamp 4290ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4291ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4292ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4293ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4294ef7f3835SKalpak Shah 429525ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 429625ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 429725ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 429825ec56b5SJean Noel Cordenner inode->i_version |= 429925ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 430025ec56b5SJean Noel Cordenner } 430125ec56b5SJean Noel Cordenner 4302ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 4303617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 4304617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 4305617ba13bSMingming Cao ext4_set_aops(inode); 4306ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 4307617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 4308617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 4309ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 4310e83c1397SDuane Griffin if (ext4_inode_is_fast_symlink(inode)) { 4311617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 4312e83c1397SDuane Griffin nd_terminate_link(ei->i_data, inode->i_size, 4313e83c1397SDuane Griffin sizeof(ei->i_data) - 1); 4314e83c1397SDuane Griffin } else { 4315617ba13bSMingming Cao inode->i_op = &ext4_symlink_inode_operations; 4316617ba13bSMingming Cao ext4_set_aops(inode); 4317ac27a0ecSDave Kleikamp } 4318ac27a0ecSDave Kleikamp } else { 4319617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 4320ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 4321ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4322ac27a0ecSDave Kleikamp old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4323ac27a0ecSDave Kleikamp else 4324ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4325ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4326ac27a0ecSDave Kleikamp } 4327ac27a0ecSDave Kleikamp brelse(iloc.bh); 4328617ba13bSMingming Cao ext4_set_inode_flags(inode); 43291d1fe1eeSDavid Howells unlock_new_inode(inode); 43301d1fe1eeSDavid Howells return inode; 4331ac27a0ecSDave Kleikamp 4332ac27a0ecSDave Kleikamp bad_inode: 43331d1fe1eeSDavid Howells iget_failed(inode); 43341d1fe1eeSDavid Howells return ERR_PTR(ret); 4335ac27a0ecSDave Kleikamp } 4336ac27a0ecSDave Kleikamp 43370fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 43380fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 43390fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 43400fc1b451SAneesh Kumar K.V { 43410fc1b451SAneesh Kumar K.V struct inode 
*inode = &(ei->vfs_inode);
43420fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks;
43430fc1b451SAneesh Kumar K.V struct super_block *sb = inode->i_sb;
43440fc1b451SAneesh Kumar K.V 
43450fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) {
43460fc1b451SAneesh Kumar K.V /*
43470fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 32 bit variable
43480fc1b451SAneesh Kumar K.V * as multiple of 512 bytes
43490fc1b451SAneesh Kumar K.V */
43508180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
43510fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0;
43528180a562SAneesh Kumar K.V ei->i_flags &= ~EXT4_HUGE_FILE_FL;
4353f287a1a5STheodore Ts'o return 0;
4354f287a1a5STheodore Ts'o }
4355f287a1a5STheodore Ts'o if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4356f287a1a5STheodore Ts'o return -EFBIG;
4357f287a1a5STheodore Ts'o 
4358f287a1a5STheodore Ts'o if (i_blocks <= 0xffffffffffffULL) {
43590fc1b451SAneesh Kumar K.V /*
43600fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable
43610fc1b451SAneesh Kumar K.V * as multiple of 512 bytes
43620fc1b451SAneesh Kumar K.V */
43638180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
43640fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
43658180a562SAneesh Kumar K.V ei->i_flags &= ~EXT4_HUGE_FILE_FL;
43660fc1b451SAneesh Kumar K.V } else {
43678180a562SAneesh Kumar K.V ei->i_flags |= EXT4_HUGE_FILE_FL;
43688180a562SAneesh Kumar K.V /* i_blocks is stored in file system block size */
43698180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9);
43708180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
43718180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
43720fc1b451SAneesh Kumar K.V }
4373f287a1a5STheodore Ts'o return 0;
43740fc1b451SAneesh Kumar K.V }
43750fc1b451SAneesh Kumar K.V 
4376ac27a0ecSDave Kleikamp /*
4377ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the
4378ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the
4379ac27a0ecSDave Kleikamp * buffer_head in the inode location struct.
4380ac27a0ecSDave Kleikamp *
4381ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh.
4382ac27a0ecSDave Kleikamp */
4383617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle,
4384ac27a0ecSDave Kleikamp struct inode *inode,
4385617ba13bSMingming Cao struct ext4_iloc *iloc)
4386ac27a0ecSDave Kleikamp {
4387617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4388617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode);
4389ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh;
4390ac27a0ecSDave Kleikamp int err = 0, rc, block;
4391ac27a0ecSDave Kleikamp 
4392ac27a0ecSDave Kleikamp /* For fields not tracked in the in-memory inode,
4393ac27a0ecSDave Kleikamp * initialise them to zero for new inodes.
*/ 4394617ba13bSMingming Cao if (ei->i_state & EXT4_STATE_NEW) 4395617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4396ac27a0ecSDave Kleikamp 4397ff9ddf7eSJan Kara ext4_get_inode_flags(ei); 4398ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode); 4399ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 4400ac27a0ecSDave Kleikamp raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); 4401ac27a0ecSDave Kleikamp raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); 4402ac27a0ecSDave Kleikamp /* 4403ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels. Otherwise, old inodes get 4404ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact 4405ac27a0ecSDave Kleikamp */ 4406ac27a0ecSDave Kleikamp if (!ei->i_dtime) { 4407ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 4408ac27a0ecSDave Kleikamp cpu_to_le16(high_16_bits(inode->i_uid)); 4409ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 4410ac27a0ecSDave Kleikamp cpu_to_le16(high_16_bits(inode->i_gid)); 4411ac27a0ecSDave Kleikamp } else { 4412ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4413ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4414ac27a0ecSDave Kleikamp } 4415ac27a0ecSDave Kleikamp } else { 4416ac27a0ecSDave Kleikamp raw_inode->i_uid_low = 4417ac27a0ecSDave Kleikamp cpu_to_le16(fs_high2lowuid(inode->i_uid)); 4418ac27a0ecSDave Kleikamp raw_inode->i_gid_low = 4419ac27a0ecSDave Kleikamp cpu_to_le16(fs_high2lowgid(inode->i_gid)); 4420ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4421ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4422ac27a0ecSDave Kleikamp } 4423ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4424ef7f3835SKalpak Shah 4425ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4426ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4427ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4428ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4429ef7f3835SKalpak Shah 44300fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 44310fc1b451SAneesh Kumar K.V goto out_brelse; 4432ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4433267e4db9SAneesh Kumar K.V /* clear the migrate flag in the raw_inode */ 4434267e4db9SAneesh Kumar K.V raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE); 44359b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 44369b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 4437a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 4438a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 44397973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4440a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 4441ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 4442ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 4443617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4444617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4445617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 4446617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4447ac27a0ecSDave Kleikamp /* If this is the first large file 4448ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
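 * Setting an RO_COMPAT feature flag means older kernels that do not
 * understand large files can still mount this filesystem read-only,
 * but will refuse to mount it read-write.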
4449ac27a0ecSDave Kleikamp */
4450617ba13bSMingming Cao err = ext4_journal_get_write_access(handle,
4451617ba13bSMingming Cao EXT4_SB(sb)->s_sbh);
4452ac27a0ecSDave Kleikamp if (err)
4453ac27a0ecSDave Kleikamp goto out_brelse;
4454617ba13bSMingming Cao ext4_update_dynamic_rev(sb);
4455617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb,
4456617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4457ac27a0ecSDave Kleikamp sb->s_dirt = 1;
44580390131bSFrank Mayhar ext4_handle_sync(handle);
44590390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode,
4460617ba13bSMingming Cao EXT4_SB(sb)->s_sbh);
4461ac27a0ecSDave Kleikamp }
4462ac27a0ecSDave Kleikamp }
4463ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4464ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4465ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) {
4466ac27a0ecSDave Kleikamp raw_inode->i_block[0] =
4467ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev));
4468ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0;
4469ac27a0ecSDave Kleikamp } else {
4470ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0;
4471ac27a0ecSDave Kleikamp raw_inode->i_block[1] =
4472ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev));
4473ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0;
4474ac27a0ecSDave Kleikamp }
4475617ba13bSMingming Cao } else for (block = 0; block < EXT4_N_BLOCKS; block++)
4476ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block];
4477ac27a0ecSDave Kleikamp 
447825ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
447925ec56b5SJean Noel Cordenner if (ei->i_extra_isize) {
448025ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
448125ec56b5SJean Noel Cordenner raw_inode->i_version_hi =
448225ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32);
4483ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
448425ec56b5SJean Noel Cordenner }
448525ec56b5SJean Noel Cordenner 
44860390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
44870390131bSFrank Mayhar rc = ext4_handle_dirty_metadata(handle, inode, bh);
4488ac27a0ecSDave Kleikamp if (!err)
4489ac27a0ecSDave Kleikamp err = rc;
4490617ba13bSMingming Cao ei->i_state &= ~EXT4_STATE_NEW;
4491ac27a0ecSDave Kleikamp 
4492ac27a0ecSDave Kleikamp out_brelse:
4493ac27a0ecSDave Kleikamp brelse(bh);
4494617ba13bSMingming Cao ext4_std_error(inode->i_sb, err);
4495ac27a0ecSDave Kleikamp return err;
4496ac27a0ecSDave Kleikamp }
4497ac27a0ecSDave Kleikamp 
4498ac27a0ecSDave Kleikamp /*
4499617ba13bSMingming Cao * ext4_write_inode()
4500ac27a0ecSDave Kleikamp *
4501ac27a0ecSDave Kleikamp * We are called from a few places:
4502ac27a0ecSDave Kleikamp *
4503ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files.
4504ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running
4505ac27a0ecSDave Kleikamp * transaction to commit.
4506ac27a0ecSDave Kleikamp *
4507ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such.
4508ac27a0ecSDave Kleikamp * We wait on commit, if told to.
4509ac27a0ecSDave Kleikamp *
4510ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true)
4511ac27a0ecSDave Kleikamp * Here we simply return. We can't afford to block kswapd on the
4512ac27a0ecSDave Kleikamp * journal commit.
4513ac27a0ecSDave Kleikamp * 4514ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything, 4515ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in 4516617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4517ac27a0ecSDave Kleikamp * knfsd. 4518ac27a0ecSDave Kleikamp * 4519ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the 4520ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in 4521ac27a0ecSDave Kleikamp * which we are interested. 4522ac27a0ecSDave Kleikamp * 4523ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code: 4524ac27a0ecSDave Kleikamp * 4525ac27a0ecSDave Kleikamp * mark_inode_dirty(inode) 4526ac27a0ecSDave Kleikamp * stuff(); 4527ac27a0ecSDave Kleikamp * inode->i_size = expr; 4528ac27a0ecSDave Kleikamp * 4529ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while 4530ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode 4531ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list. 4532ac27a0ecSDave Kleikamp */ 4533617ba13bSMingming Cao int ext4_write_inode(struct inode *inode, int wait) 4534ac27a0ecSDave Kleikamp { 4535ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC) 4536ac27a0ecSDave Kleikamp return 0; 4537ac27a0ecSDave Kleikamp 4538617ba13bSMingming Cao if (ext4_journal_current_handle()) { 4539b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4540ac27a0ecSDave Kleikamp dump_stack(); 4541ac27a0ecSDave Kleikamp return -EIO; 4542ac27a0ecSDave Kleikamp } 4543ac27a0ecSDave Kleikamp 4544ac27a0ecSDave Kleikamp if (!wait) 4545ac27a0ecSDave Kleikamp return 0; 4546ac27a0ecSDave Kleikamp 4547617ba13bSMingming Cao return ext4_force_commit(inode->i_sb); 4548ac27a0ecSDave Kleikamp } 4549ac27a0ecSDave Kleikamp 45500390131bSFrank Mayhar int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh) 45510390131bSFrank Mayhar { 45520390131bSFrank Mayhar int err = 0; 45530390131bSFrank Mayhar 45540390131bSFrank Mayhar mark_buffer_dirty(bh); 45550390131bSFrank Mayhar if (inode && inode_needs_sync(inode)) { 45560390131bSFrank Mayhar sync_dirty_buffer(bh); 45570390131bSFrank Mayhar if (buffer_req(bh) && !buffer_uptodate(bh)) { 45580390131bSFrank Mayhar ext4_error(inode->i_sb, __func__, 45590390131bSFrank Mayhar "IO error syncing inode, " 45600390131bSFrank Mayhar "inode=%lu, block=%llu", 45610390131bSFrank Mayhar inode->i_ino, 45620390131bSFrank Mayhar (unsigned long long)bh->b_blocknr); 45630390131bSFrank Mayhar err = -EIO; 45640390131bSFrank Mayhar } 45650390131bSFrank Mayhar } 45660390131bSFrank Mayhar return err; 45670390131bSFrank Mayhar } 45680390131bSFrank Mayhar 4569ac27a0ecSDave Kleikamp /* 4570617ba13bSMingming Cao * ext4_setattr() 4571ac27a0ecSDave Kleikamp * 4572ac27a0ecSDave Kleikamp * Called from notify_change. 4573ac27a0ecSDave Kleikamp * 4574ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as 4575ac27a0ecSDave Kleikamp * possible. 
In particular, we want to make sure that when the VFS
4576ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify
4577ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of
4578ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any
4579ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on
4580ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will
4581ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will
4582ac27a0ecSDave Kleikamp * leave these blocks visible to the user.)
4583ac27a0ecSDave Kleikamp *
4584678aaf48SJan Kara * Another thing we have to assure is that if we are in ordered mode
4585678aaf48SJan Kara * and the inode is still attached to the committing transaction, we must
4586678aaf48SJan Kara * start writeout of all the dirty pages which are being truncated.
4587678aaf48SJan Kara * This way we are sure that all the data written in the previous
4588678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under
4589678aaf48SJan Kara * writeback).
4590678aaf48SJan Kara *
4591678aaf48SJan Kara * Called with inode->i_mutex down.
4592ac27a0ecSDave Kleikamp */
4593617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4594ac27a0ecSDave Kleikamp {
4595ac27a0ecSDave Kleikamp struct inode *inode = dentry->d_inode;
4596ac27a0ecSDave Kleikamp int error, rc = 0;
4597ac27a0ecSDave Kleikamp const unsigned int ia_valid = attr->ia_valid;
4598ac27a0ecSDave Kleikamp 
4599ac27a0ecSDave Kleikamp error = inode_change_ok(inode, attr);
4600ac27a0ecSDave Kleikamp if (error)
4601ac27a0ecSDave Kleikamp return error;
4602ac27a0ecSDave Kleikamp 
4603ac27a0ecSDave Kleikamp if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
4604ac27a0ecSDave Kleikamp (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
4605ac27a0ecSDave Kleikamp handle_t *handle;
4606ac27a0ecSDave Kleikamp 
4607ac27a0ecSDave Kleikamp /* (user+group)*(old+new) structure, inode write (sb,
4608ac27a0ecSDave Kleikamp * inode block, ? - but truncate inode update has it) */
4609617ba13bSMingming Cao handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
4610617ba13bSMingming Cao EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
4611ac27a0ecSDave Kleikamp if (IS_ERR(handle)) {
4612ac27a0ecSDave Kleikamp error = PTR_ERR(handle);
4613ac27a0ecSDave Kleikamp goto err_out;
4614ac27a0ecSDave Kleikamp }
4615ac27a0ecSDave Kleikamp error = DQUOT_TRANSFER(inode, attr) ?
-EDQUOT : 0; 4616ac27a0ecSDave Kleikamp if (error) { 4617617ba13bSMingming Cao ext4_journal_stop(handle); 4618ac27a0ecSDave Kleikamp return error; 4619ac27a0ecSDave Kleikamp } 4620ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4621ac27a0ecSDave Kleikamp * one transaction */ 4622ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4623ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4624ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4625ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4626617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4627617ba13bSMingming Cao ext4_journal_stop(handle); 4628ac27a0ecSDave Kleikamp } 4629ac27a0ecSDave Kleikamp 4630e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4631e2b46574SEric Sandeen if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { 4632e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4633e2b46574SEric Sandeen 4634e2b46574SEric Sandeen if (attr->ia_size > sbi->s_bitmap_maxbytes) { 4635e2b46574SEric Sandeen error = -EFBIG; 4636e2b46574SEric Sandeen goto err_out; 4637e2b46574SEric Sandeen } 4638e2b46574SEric Sandeen } 4639e2b46574SEric Sandeen } 4640e2b46574SEric Sandeen 4641ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4642ac27a0ecSDave Kleikamp attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { 4643ac27a0ecSDave Kleikamp handle_t *handle; 4644ac27a0ecSDave Kleikamp 4645617ba13bSMingming Cao handle = ext4_journal_start(inode, 3); 4646ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4647ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4648ac27a0ecSDave Kleikamp goto err_out; 4649ac27a0ecSDave Kleikamp } 4650ac27a0ecSDave Kleikamp 4651617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 4652617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 4653617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 4654ac27a0ecSDave Kleikamp if (!error) 4655ac27a0ecSDave Kleikamp error = rc; 4656617ba13bSMingming Cao ext4_journal_stop(handle); 4657678aaf48SJan Kara 4658678aaf48SJan Kara if (ext4_should_order_data(inode)) { 4659678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode, 4660678aaf48SJan Kara attr->ia_size); 4661678aaf48SJan Kara if (error) { 4662678aaf48SJan Kara /* Do as much error cleanup as possible */ 4663678aaf48SJan Kara handle = ext4_journal_start(inode, 3); 4664678aaf48SJan Kara if (IS_ERR(handle)) { 4665678aaf48SJan Kara ext4_orphan_del(NULL, inode); 4666678aaf48SJan Kara goto err_out; 4667678aaf48SJan Kara } 4668678aaf48SJan Kara ext4_orphan_del(handle, inode); 4669678aaf48SJan Kara ext4_journal_stop(handle); 4670678aaf48SJan Kara goto err_out; 4671678aaf48SJan Kara } 4672678aaf48SJan Kara } 4673ac27a0ecSDave Kleikamp } 4674ac27a0ecSDave Kleikamp 4675ac27a0ecSDave Kleikamp rc = inode_setattr(inode, attr); 4676ac27a0ecSDave Kleikamp 4677617ba13bSMingming Cao /* If inode_setattr's call to ext4_truncate failed to get a 4678ac27a0ecSDave Kleikamp * transaction handle at all, we need to clean up the in-core 4679ac27a0ecSDave Kleikamp * orphan list manually. 
*/
4680ac27a0ecSDave Kleikamp if (inode->i_nlink)
4681617ba13bSMingming Cao ext4_orphan_del(NULL, inode);
4682ac27a0ecSDave Kleikamp 
4683ac27a0ecSDave Kleikamp if (!rc && (ia_valid & ATTR_MODE))
4684617ba13bSMingming Cao rc = ext4_acl_chmod(inode);
4685ac27a0ecSDave Kleikamp 
4686ac27a0ecSDave Kleikamp err_out:
4687617ba13bSMingming Cao ext4_std_error(inode->i_sb, error);
4688ac27a0ecSDave Kleikamp if (!error)
4689ac27a0ecSDave Kleikamp error = rc;
4690ac27a0ecSDave Kleikamp return error;
4691ac27a0ecSDave Kleikamp }
4692ac27a0ecSDave Kleikamp 
46933e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
46943e3398a0SMingming Cao struct kstat *stat)
46953e3398a0SMingming Cao {
46963e3398a0SMingming Cao struct inode *inode;
46973e3398a0SMingming Cao unsigned long delalloc_blocks;
46983e3398a0SMingming Cao 
46993e3398a0SMingming Cao inode = dentry->d_inode;
47003e3398a0SMingming Cao generic_fillattr(inode, stat);
47013e3398a0SMingming Cao 
47023e3398a0SMingming Cao /*
47033e3398a0SMingming Cao * We can't update i_blocks if the block allocation is delayed;
47043e3398a0SMingming Cao * otherwise, in the case of a system crash before the real block
47053e3398a0SMingming Cao * allocation is done, we would have i_blocks inconsistent with
47063e3398a0SMingming Cao * the on-disk file blocks.
47073e3398a0SMingming Cao * We always keep i_blocks updated together with the real
47083e3398a0SMingming Cao * allocation. But so as not to confuse userspace, stat
47093e3398a0SMingming Cao * will return the blocks that include the delayed allocation
47103e3398a0SMingming Cao * blocks for this file.
47113e3398a0SMingming Cao */
47123e3398a0SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
47133e3398a0SMingming Cao delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
47143e3398a0SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
47153e3398a0SMingming Cao 
47163e3398a0SMingming Cao stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
47173e3398a0SMingming Cao return 0;
47183e3398a0SMingming Cao }
4719ac27a0ecSDave Kleikamp 
4720a02908f1SMingming Cao static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
4721a02908f1SMingming Cao int chunk)
4722ac27a0ecSDave Kleikamp {
4723a02908f1SMingming Cao int indirects;
4724ac27a0ecSDave Kleikamp 
4725a02908f1SMingming Cao /* if nrblocks are contiguous */
4726a02908f1SMingming Cao if (chunk) {
4727a02908f1SMingming Cao /*
4728a02908f1SMingming Cao * With N contiguous data blocks, we need at most
4729a02908f1SMingming Cao * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks,
4730a02908f1SMingming Cao * 2 dindirect blocks, and
4731a02908f1SMingming Cao * 1 tindirect block
4732a02908f1SMingming Cao */
4733a02908f1SMingming Cao indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
4734a02908f1SMingming Cao return indirects + 3;
4735a02908f1SMingming Cao }
4736a02908f1SMingming Cao /*
4737a02908f1SMingming Cao * If nrblocks are not contiguous, then in the worst case each block
4738a02908f1SMingming Cao * touches an indirect block, and each indirect block touches a double
4739a02908f1SMingming Cao * indirect block, plus a triple indirect block
4740a02908f1SMingming Cao */
4741a02908f1SMingming Cao indirects = nrblocks * 2 + 1;
4742a02908f1SMingming Cao return indirects;
4743a02908f1SMingming Cao }
4744a86c6181SAlex Tomas 
4745a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4746a02908f1SMingming Cao {
4747a02908f1SMingming Cao if (!(EXT4_I(inode)->i_flags &
EXT4_EXTENTS_FL))
4748ac51d837STheodore Ts'o return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
4749ac51d837STheodore Ts'o return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4750a02908f1SMingming Cao }
4751ac51d837STheodore Ts'o 
4752a02908f1SMingming Cao /*
4753a02908f1SMingming Cao * Account for index blocks, block group bitmaps and block group
4754a02908f1SMingming Cao * descriptor blocks if we modify data blocks and index blocks;
4755a02908f1SMingming Cao * in the worst case, the index blocks are spread over different block groups.
4756a02908f1SMingming Cao *
4757a02908f1SMingming Cao * If data blocks are discontiguous, they may be spread over
4758a02908f1SMingming Cao * different block groups too. If they are contiguous, with flexbg,
4759a02908f1SMingming Cao * they could still cross a block group boundary.
4760a02908f1SMingming Cao *
4761a02908f1SMingming Cao * Also account for superblock, inode, quota and xattr blocks.
4762a02908f1SMingming Cao */
4763a02908f1SMingming Cao int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4764a02908f1SMingming Cao {
4765a02908f1SMingming Cao int groups, gdpblocks;
4766a02908f1SMingming Cao int idxblocks;
4767a02908f1SMingming Cao int ret = 0;
4768a02908f1SMingming Cao 
4769a02908f1SMingming Cao /*
4770a02908f1SMingming Cao * How many index blocks do we need to touch to modify nrblocks?
4771a02908f1SMingming Cao * The "Chunk" flag indicates whether the nrblocks are
4772a02908f1SMingming Cao * physically contiguous on disk.
4773a02908f1SMingming Cao *
4774a02908f1SMingming Cao * Direct IO and fallocate call get_block to allocate
4775a02908f1SMingming Cao * a single extent at a time, so they can set the "Chunk" flag.
4776a02908f1SMingming Cao */
4777a02908f1SMingming Cao idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4778a02908f1SMingming Cao 
4779a02908f1SMingming Cao ret = idxblocks;
4780a02908f1SMingming Cao 
4781a02908f1SMingming Cao /*
4782a02908f1SMingming Cao * Now let's see how many group bitmaps and group descriptors we need
4783a02908f1SMingming Cao * to account for.
4784a02908f1SMingming Cao */
4785a02908f1SMingming Cao groups = idxblocks;
4786a02908f1SMingming Cao if (chunk)
4787a02908f1SMingming Cao groups += 1;
4788ac27a0ecSDave Kleikamp else
4789a02908f1SMingming Cao groups += nrblocks;
4790ac27a0ecSDave Kleikamp 
4791a02908f1SMingming Cao gdpblocks = groups;
4792a02908f1SMingming Cao if (groups > EXT4_SB(inode->i_sb)->s_groups_count)
4793a02908f1SMingming Cao groups = EXT4_SB(inode->i_sb)->s_groups_count;
4794a02908f1SMingming Cao if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4795a02908f1SMingming Cao gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4796a02908f1SMingming Cao 
4797a02908f1SMingming Cao /* bitmaps and block group descriptor blocks */
4798a02908f1SMingming Cao ret += groups + gdpblocks;
4799a02908f1SMingming Cao 
4800a02908f1SMingming Cao /* Blocks for super block, inode, quota and xattr blocks */
4801a02908f1SMingming Cao ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4802ac27a0ecSDave Kleikamp 
4803ac27a0ecSDave Kleikamp return ret;
4804ac27a0ecSDave Kleikamp }
4805ac27a0ecSDave Kleikamp 
4806ac27a0ecSDave Kleikamp /*
4807a02908f1SMingming Cao * Calculate the total number of credits to reserve to fit
4808f3bd1f3fSMingming Cao * the modification of a single page into a single transaction,
4809f3bd1f3fSMingming Cao * which may include multiple chunks of block allocations.
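 * A rough worked example (illustrative only, assuming a 4K page with
 * 1K blocks, i.e. bpp = 4, on a non-extent file): ext4_meta_trans_blocks()
 * counts 4 * 2 + 1 = 9 index blocks, then 9 + 4 = 13 bitmap blocks and
 * 13 group descriptor blocks (both capped by the group and gdb counts),
 * plus EXT4_META_TRANS_BLOCKS() for sb/inode/quota/xattr; data
 * journalling adds another bpp credits on top of that.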
4810a02908f1SMingming Cao *
4811525f4ed8SMingming Cao * This could be called via ext4_write_begin()
4812a02908f1SMingming Cao *
4813525f4ed8SMingming Cao * We need to consider the worst case, when
4814a02908f1SMingming Cao * each extent requires one new block.
4815a02908f1SMingming Cao */
4816a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode)
4817a02908f1SMingming Cao {
4818a02908f1SMingming Cao int bpp = ext4_journal_blocks_per_page(inode);
4819a02908f1SMingming Cao int ret;
4820a02908f1SMingming Cao 
4821a02908f1SMingming Cao ret = ext4_meta_trans_blocks(inode, bpp, 0);
4822a02908f1SMingming Cao 
4823a02908f1SMingming Cao /* Account for data blocks for journalled mode */
4824a02908f1SMingming Cao if (ext4_should_journal_data(inode))
4825a02908f1SMingming Cao ret += bpp;
4826a02908f1SMingming Cao return ret;
4827a02908f1SMingming Cao }
4828f3bd1f3fSMingming Cao 
4829f3bd1f3fSMingming Cao /*
4830f3bd1f3fSMingming Cao * Calculate the journal credits for a chunk of data modification.
4831f3bd1f3fSMingming Cao *
4832f3bd1f3fSMingming Cao * This is called from DIO, fallocate or whoever calls
4833f3bd1f3fSMingming Cao * ext4_get_blocks_wrap() to map/allocate a chunk of contiguous disk blocks.
4834f3bd1f3fSMingming Cao *
4835f3bd1f3fSMingming Cao * Journal buffers for data blocks are not included here, as DIO
4836f3bd1f3fSMingming Cao * and fallocate do not need to journal data buffers.
4837f3bd1f3fSMingming Cao */
4838f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4839f3bd1f3fSMingming Cao {
4840f3bd1f3fSMingming Cao return ext4_meta_trans_blocks(inode, nrblocks, 1);
4841f3bd1f3fSMingming Cao }
4842f3bd1f3fSMingming Cao 
4843a02908f1SMingming Cao /*
4844617ba13bSMingming Cao * The caller must have previously called ext4_reserve_inode_write().
4845ac27a0ecSDave Kleikamp * Given this, we know that the caller already has write access to iloc->bh.
4846ac27a0ecSDave Kleikamp */
4847617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
4848617ba13bSMingming Cao struct inode *inode, struct ext4_iloc *iloc)
4849ac27a0ecSDave Kleikamp {
4850ac27a0ecSDave Kleikamp int err = 0;
4851ac27a0ecSDave Kleikamp 
485225ec56b5SJean Noel Cordenner if (test_opt(inode->i_sb, I_VERSION))
485325ec56b5SJean Noel Cordenner inode_inc_iversion(inode);
485425ec56b5SJean Noel Cordenner 
4855ac27a0ecSDave Kleikamp /* the do_update_inode consumes one bh->b_count */
4856ac27a0ecSDave Kleikamp get_bh(iloc->bh);
4857ac27a0ecSDave Kleikamp 
4858dab291afSMingming Cao /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4859617ba13bSMingming Cao err = ext4_do_update_inode(handle, inode, iloc);
4860ac27a0ecSDave Kleikamp put_bh(iloc->bh);
4861ac27a0ecSDave Kleikamp return err;
4862ac27a0ecSDave Kleikamp }
4863ac27a0ecSDave Kleikamp 
4864ac27a0ecSDave Kleikamp /*
4865ac27a0ecSDave Kleikamp * On success, we end up with an outstanding reference count against
4866ac27a0ecSDave Kleikamp * iloc->bh. This _must_ be cleaned up later.
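 * A minimal sketch of the usual calling pattern (this is essentially what
 * ext4_mark_inode_dirty() below does):
 *
 *	struct ext4_iloc iloc;
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err)
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *
 * ext4_mark_iloc_dirty() consumes the buffer_head reference taken here.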
4867ac27a0ecSDave Kleikamp */ 4868ac27a0ecSDave Kleikamp 4869ac27a0ecSDave Kleikamp int 4870617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 4871617ba13bSMingming Cao struct ext4_iloc *iloc) 4872ac27a0ecSDave Kleikamp { 48730390131bSFrank Mayhar int err; 48740390131bSFrank Mayhar 4875617ba13bSMingming Cao err = ext4_get_inode_loc(inode, iloc); 4876ac27a0ecSDave Kleikamp if (!err) { 4877ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc->bh, "get_write_access"); 4878617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, iloc->bh); 4879ac27a0ecSDave Kleikamp if (err) { 4880ac27a0ecSDave Kleikamp brelse(iloc->bh); 4881ac27a0ecSDave Kleikamp iloc->bh = NULL; 4882ac27a0ecSDave Kleikamp } 4883ac27a0ecSDave Kleikamp } 4884617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4885ac27a0ecSDave Kleikamp return err; 4886ac27a0ecSDave Kleikamp } 4887ac27a0ecSDave Kleikamp 4888ac27a0ecSDave Kleikamp /* 48896dd4ee7cSKalpak Shah * Expand an inode by new_extra_isize bytes. 48906dd4ee7cSKalpak Shah * Returns 0 on success or negative error number on failure. 48916dd4ee7cSKalpak Shah */ 48921d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode, 48931d03ec98SAneesh Kumar K.V unsigned int new_extra_isize, 48941d03ec98SAneesh Kumar K.V struct ext4_iloc iloc, 48951d03ec98SAneesh Kumar K.V handle_t *handle) 48966dd4ee7cSKalpak Shah { 48976dd4ee7cSKalpak Shah struct ext4_inode *raw_inode; 48986dd4ee7cSKalpak Shah struct ext4_xattr_ibody_header *header; 48996dd4ee7cSKalpak Shah struct ext4_xattr_entry *entry; 49006dd4ee7cSKalpak Shah 49016dd4ee7cSKalpak Shah if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 49026dd4ee7cSKalpak Shah return 0; 49036dd4ee7cSKalpak Shah 49046dd4ee7cSKalpak Shah raw_inode = ext4_raw_inode(&iloc); 49056dd4ee7cSKalpak Shah 49066dd4ee7cSKalpak Shah header = IHDR(inode, raw_inode); 49076dd4ee7cSKalpak Shah entry = IFIRST(header); 49086dd4ee7cSKalpak Shah 49096dd4ee7cSKalpak Shah /* No extended attributes present */ 49106dd4ee7cSKalpak Shah if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) || 49116dd4ee7cSKalpak Shah header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 49126dd4ee7cSKalpak Shah memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 49136dd4ee7cSKalpak Shah new_extra_isize); 49146dd4ee7cSKalpak Shah EXT4_I(inode)->i_extra_isize = new_extra_isize; 49156dd4ee7cSKalpak Shah return 0; 49166dd4ee7cSKalpak Shah } 49176dd4ee7cSKalpak Shah 49186dd4ee7cSKalpak Shah /* try to expand with EAs present */ 49196dd4ee7cSKalpak Shah return ext4_expand_extra_isize_ea(inode, new_extra_isize, 49206dd4ee7cSKalpak Shah raw_inode, handle); 49216dd4ee7cSKalpak Shah } 49226dd4ee7cSKalpak Shah 49236dd4ee7cSKalpak Shah /* 4924ac27a0ecSDave Kleikamp * What we do here is to mark the in-core inode as clean with respect to inode 4925ac27a0ecSDave Kleikamp * dirtiness (it may still be data-dirty). 4926ac27a0ecSDave Kleikamp * This means that the in-core inode may be reaped by prune_icache 4927ac27a0ecSDave Kleikamp * without having to perform any I/O. This is a very good thing, 4928ac27a0ecSDave Kleikamp * because *any* task may call prune_icache - even ones which 4929ac27a0ecSDave Kleikamp * have a transaction open against a different journal. 4930ac27a0ecSDave Kleikamp * 4931ac27a0ecSDave Kleikamp * Is this cheating? Not really. Sure, we haven't written the 4932ac27a0ecSDave Kleikamp * inode out, but prune_icache isn't a user-visible syncing function. 
4933ac27a0ecSDave Kleikamp * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 4934ac27a0ecSDave Kleikamp * we start and wait on commits. 4935ac27a0ecSDave Kleikamp * 4936ac27a0ecSDave Kleikamp * Is this efficient/effective? Well, we're being nice to the system 4937ac27a0ecSDave Kleikamp * by cleaning up our inodes proactively so they can be reaped 4938ac27a0ecSDave Kleikamp * without I/O. But we are potentially leaving up to five seconds' 4939ac27a0ecSDave Kleikamp * worth of inodes floating about which prune_icache wants us to 4940ac27a0ecSDave Kleikamp * write out. One way to fix that would be to get prune_icache() 4941ac27a0ecSDave Kleikamp * to do a write_super() to free up some memory. It has the desired 4942ac27a0ecSDave Kleikamp * effect. 4943ac27a0ecSDave Kleikamp */ 4944617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 4945ac27a0ecSDave Kleikamp { 4946617ba13bSMingming Cao struct ext4_iloc iloc; 49476dd4ee7cSKalpak Shah struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 49486dd4ee7cSKalpak Shah static unsigned int mnt_count; 49496dd4ee7cSKalpak Shah int err, ret; 4950ac27a0ecSDave Kleikamp 4951ac27a0ecSDave Kleikamp might_sleep(); 4952617ba13bSMingming Cao err = ext4_reserve_inode_write(handle, inode, &iloc); 49530390131bSFrank Mayhar if (ext4_handle_valid(handle) && 49540390131bSFrank Mayhar EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 49556dd4ee7cSKalpak Shah !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { 49566dd4ee7cSKalpak Shah /* 49576dd4ee7cSKalpak Shah * We need extra buffer credits since we may write into EA block 49586dd4ee7cSKalpak Shah * with this same handle. If journal_extend fails, then it will 49596dd4ee7cSKalpak Shah * only result in a minor loss of functionality for that inode. 49606dd4ee7cSKalpak Shah * If this is felt to be critical, then e2fsck should be run to 49616dd4ee7cSKalpak Shah * force a large enough s_min_extra_isize. 49626dd4ee7cSKalpak Shah */ 49636dd4ee7cSKalpak Shah if ((jbd2_journal_extend(handle, 49646dd4ee7cSKalpak Shah EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 49656dd4ee7cSKalpak Shah ret = ext4_expand_extra_isize(inode, 49666dd4ee7cSKalpak Shah sbi->s_want_extra_isize, 49676dd4ee7cSKalpak Shah iloc, handle); 49686dd4ee7cSKalpak Shah if (ret) { 49696dd4ee7cSKalpak Shah EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; 4970c1bddad9SAneesh Kumar K.V if (mnt_count != 4971c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count)) { 497246e665e9SHarvey Harrison ext4_warning(inode->i_sb, __func__, 49736dd4ee7cSKalpak Shah "Unable to expand inode %lu. Delete" 49746dd4ee7cSKalpak Shah " some EAs or run e2fsck.", 49756dd4ee7cSKalpak Shah inode->i_ino); 4976c1bddad9SAneesh Kumar K.V mnt_count = 4977c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count); 49786dd4ee7cSKalpak Shah } 49796dd4ee7cSKalpak Shah } 49806dd4ee7cSKalpak Shah } 49816dd4ee7cSKalpak Shah } 4982ac27a0ecSDave Kleikamp if (!err) 4983617ba13bSMingming Cao err = ext4_mark_iloc_dirty(handle, inode, &iloc); 4984ac27a0ecSDave Kleikamp return err; 4985ac27a0ecSDave Kleikamp } 4986ac27a0ecSDave Kleikamp 4987ac27a0ecSDave Kleikamp /* 4988617ba13bSMingming Cao * ext4_dirty_inode() is called from __mark_inode_dirty() 4989ac27a0ecSDave Kleikamp * 4990ac27a0ecSDave Kleikamp * We're really interested in the case where a file is being extended. 
4991ac27a0ecSDave Kleikamp * i_size has been changed by generic_commit_write() and we thus need 4992ac27a0ecSDave Kleikamp * to include the updated inode in the current transaction. 4993ac27a0ecSDave Kleikamp * 4994ac27a0ecSDave Kleikamp * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks 4995ac27a0ecSDave Kleikamp * are allocated to the file. 4996ac27a0ecSDave Kleikamp * 4997ac27a0ecSDave Kleikamp * If the inode is marked synchronous, we don't honour that here - doing 4998ac27a0ecSDave Kleikamp * so would cause a commit on atime updates, which we don't bother doing. 4999ac27a0ecSDave Kleikamp * We handle synchronous inodes at the highest possible level. 5000ac27a0ecSDave Kleikamp */ 5001617ba13bSMingming Cao void ext4_dirty_inode(struct inode *inode) 5002ac27a0ecSDave Kleikamp { 5003617ba13bSMingming Cao handle_t *current_handle = ext4_journal_current_handle(); 5004ac27a0ecSDave Kleikamp handle_t *handle; 5005ac27a0ecSDave Kleikamp 50060390131bSFrank Mayhar if (!ext4_handle_valid(current_handle)) { 50070390131bSFrank Mayhar ext4_mark_inode_dirty(current_handle, inode); 50080390131bSFrank Mayhar return; 50090390131bSFrank Mayhar } 50100390131bSFrank Mayhar 5011617ba13bSMingming Cao handle = ext4_journal_start(inode, 2); 5012ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 5013ac27a0ecSDave Kleikamp goto out; 5014ac27a0ecSDave Kleikamp if (current_handle && 5015ac27a0ecSDave Kleikamp current_handle->h_transaction != handle->h_transaction) { 5016ac27a0ecSDave Kleikamp /* This task has a transaction open against a different fs */ 5017ac27a0ecSDave Kleikamp printk(KERN_EMERG "%s: transactions do not match!\n", 501846e665e9SHarvey Harrison __func__); 5019ac27a0ecSDave Kleikamp } else { 5020ac27a0ecSDave Kleikamp jbd_debug(5, "marking dirty. outer handle=%p\n", 5021ac27a0ecSDave Kleikamp current_handle); 5022617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 5023ac27a0ecSDave Kleikamp } 5024617ba13bSMingming Cao ext4_journal_stop(handle); 5025ac27a0ecSDave Kleikamp out: 5026ac27a0ecSDave Kleikamp return; 5027ac27a0ecSDave Kleikamp } 5028ac27a0ecSDave Kleikamp 5029ac27a0ecSDave Kleikamp #if 0 5030ac27a0ecSDave Kleikamp /* 5031ac27a0ecSDave Kleikamp * Bind an inode's backing buffer_head into this transaction, to prevent 5032ac27a0ecSDave Kleikamp * it from being flushed to disk early. Unlike 5033617ba13bSMingming Cao * ext4_reserve_inode_write, this leaves behind no bh reference and 5034ac27a0ecSDave Kleikamp * returns no iloc structure, so the caller needs to repeat the iloc 5035ac27a0ecSDave Kleikamp * lookup to mark the inode dirty later. 
5036ac27a0ecSDave Kleikamp */ 5037617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode) 5038ac27a0ecSDave Kleikamp { 5039617ba13bSMingming Cao struct ext4_iloc iloc; 5040ac27a0ecSDave Kleikamp 5041ac27a0ecSDave Kleikamp int err = 0; 5042ac27a0ecSDave Kleikamp if (handle) { 5043617ba13bSMingming Cao err = ext4_get_inode_loc(inode, &iloc); 5044ac27a0ecSDave Kleikamp if (!err) { 5045ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc.bh, "get_write_access"); 5046dab291afSMingming Cao err = jbd2_journal_get_write_access(handle, iloc.bh); 5047ac27a0ecSDave Kleikamp if (!err) 50480390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, 50490390131bSFrank Mayhar inode, 5050ac27a0ecSDave Kleikamp iloc.bh); 5051ac27a0ecSDave Kleikamp brelse(iloc.bh); 5052ac27a0ecSDave Kleikamp } 5053ac27a0ecSDave Kleikamp } 5054617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 5055ac27a0ecSDave Kleikamp return err; 5056ac27a0ecSDave Kleikamp } 5057ac27a0ecSDave Kleikamp #endif 5058ac27a0ecSDave Kleikamp 5059617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val) 5060ac27a0ecSDave Kleikamp { 5061ac27a0ecSDave Kleikamp journal_t *journal; 5062ac27a0ecSDave Kleikamp handle_t *handle; 5063ac27a0ecSDave Kleikamp int err; 5064ac27a0ecSDave Kleikamp 5065ac27a0ecSDave Kleikamp /* 5066ac27a0ecSDave Kleikamp * We have to be very careful here: changing a data block's 5067ac27a0ecSDave Kleikamp * journaling status dynamically is dangerous. If we write a 5068ac27a0ecSDave Kleikamp * data block to the journal, change the status and then delete 5069ac27a0ecSDave Kleikamp * that block, we risk forgetting to revoke the old log record 5070ac27a0ecSDave Kleikamp * from the journal and so a subsequent replay can corrupt data. 5071ac27a0ecSDave Kleikamp * So, first we make sure that the journal is empty and that 5072ac27a0ecSDave Kleikamp * nobody is changing anything. 5073ac27a0ecSDave Kleikamp */ 5074ac27a0ecSDave Kleikamp 5075617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 50760390131bSFrank Mayhar if (!journal) 50770390131bSFrank Mayhar return 0; 5078d699594dSDave Hansen if (is_journal_aborted(journal)) 5079ac27a0ecSDave Kleikamp return -EROFS; 5080ac27a0ecSDave Kleikamp 5081dab291afSMingming Cao jbd2_journal_lock_updates(journal); 5082dab291afSMingming Cao jbd2_journal_flush(journal); 5083ac27a0ecSDave Kleikamp 5084ac27a0ecSDave Kleikamp /* 5085ac27a0ecSDave Kleikamp * OK, there are no updates running now, and all cached data is 5086ac27a0ecSDave Kleikamp * synced to disk. We are now in a completely consistent state 5087ac27a0ecSDave Kleikamp * which doesn't have anything in the journal, and we know that 5088ac27a0ecSDave Kleikamp * no filesystem updates are running, so it is safe to modify 5089ac27a0ecSDave Kleikamp * the inode's in-core data-journaling state flag now. 5090ac27a0ecSDave Kleikamp */ 5091ac27a0ecSDave Kleikamp 5092ac27a0ecSDave Kleikamp if (val) 5093617ba13bSMingming Cao EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; 5094ac27a0ecSDave Kleikamp else 5095617ba13bSMingming Cao EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; 5096617ba13bSMingming Cao ext4_set_aops(inode); 5097ac27a0ecSDave Kleikamp 5098dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 5099ac27a0ecSDave Kleikamp 5100ac27a0ecSDave Kleikamp /* Finally we can mark the inode as dirty. 
*/
5101ac27a0ecSDave Kleikamp 
5102617ba13bSMingming Cao handle = ext4_journal_start(inode, 1);
5103ac27a0ecSDave Kleikamp if (IS_ERR(handle))
5104ac27a0ecSDave Kleikamp return PTR_ERR(handle);
5105ac27a0ecSDave Kleikamp 
5106617ba13bSMingming Cao err = ext4_mark_inode_dirty(handle, inode);
51070390131bSFrank Mayhar ext4_handle_sync(handle);
5108617ba13bSMingming Cao ext4_journal_stop(handle);
5109617ba13bSMingming Cao ext4_std_error(inode->i_sb, err);
5110ac27a0ecSDave Kleikamp 
5111ac27a0ecSDave Kleikamp return err;
5112ac27a0ecSDave Kleikamp }
51132e9ee850SAneesh Kumar K.V 
51142e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
51152e9ee850SAneesh Kumar K.V {
51162e9ee850SAneesh Kumar K.V return !buffer_mapped(bh);
51172e9ee850SAneesh Kumar K.V }
51182e9ee850SAneesh Kumar K.V 
51192e9ee850SAneesh Kumar K.V int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
51202e9ee850SAneesh Kumar K.V {
51212e9ee850SAneesh Kumar K.V loff_t size;
51222e9ee850SAneesh Kumar K.V unsigned long len;
51232e9ee850SAneesh Kumar K.V int ret = -EINVAL;
512479f0be8dSAneesh Kumar K.V void *fsdata;
51252e9ee850SAneesh Kumar K.V struct file *file = vma->vm_file;
51262e9ee850SAneesh Kumar K.V struct inode *inode = file->f_path.dentry->d_inode;
51272e9ee850SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping;
51282e9ee850SAneesh Kumar K.V 
51292e9ee850SAneesh Kumar K.V /*
51302e9ee850SAneesh Kumar K.V * Get i_alloc_sem to stop truncates messing with the inode. We cannot
51312e9ee850SAneesh Kumar K.V * get i_mutex because we are already holding mmap_sem.
51322e9ee850SAneesh Kumar K.V */
51332e9ee850SAneesh Kumar K.V down_read(&inode->i_alloc_sem);
51342e9ee850SAneesh Kumar K.V size = i_size_read(inode);
51352e9ee850SAneesh Kumar K.V if (page->mapping != mapping || size <= page_offset(page)
51362e9ee850SAneesh Kumar K.V || !PageUptodate(page)) {
51372e9ee850SAneesh Kumar K.V /* page got truncated from under us? */
51382e9ee850SAneesh Kumar K.V goto out_unlock;
51392e9ee850SAneesh Kumar K.V }
51402e9ee850SAneesh Kumar K.V ret = 0;
51412e9ee850SAneesh Kumar K.V if (PageMappedToDisk(page))
51422e9ee850SAneesh Kumar K.V goto out_unlock;
51432e9ee850SAneesh Kumar K.V 
51442e9ee850SAneesh Kumar K.V if (page->index == size >> PAGE_CACHE_SHIFT)
51452e9ee850SAneesh Kumar K.V len = size & ~PAGE_CACHE_MASK;
51462e9ee850SAneesh Kumar K.V else
51472e9ee850SAneesh Kumar K.V len = PAGE_CACHE_SIZE;
51482e9ee850SAneesh Kumar K.V 
51492e9ee850SAneesh Kumar K.V if (page_has_buffers(page)) {
51502e9ee850SAneesh Kumar K.V /* return if we have all the buffers mapped */
51512e9ee850SAneesh Kumar K.V if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
51522e9ee850SAneesh Kumar K.V ext4_bh_unmapped))
51532e9ee850SAneesh Kumar K.V goto out_unlock;
51542e9ee850SAneesh Kumar K.V }
51552e9ee850SAneesh Kumar K.V /*
51562e9ee850SAneesh Kumar K.V * OK, we need to fill the hole... Do write_begin write_end
51572e9ee850SAneesh Kumar K.V * to do block allocation/reservation. We are not holding
51582e9ee850SAneesh Kumar K.V * inode->i_mutex here. That allows parallel write_begin,
51592e9ee850SAneesh Kumar K.V * write_end calls. lock_page prevents this from happening
51602e9ee850SAneesh Kumar K.V * on the same page, though.
51612e9ee850SAneesh Kumar K.V */
51622e9ee850SAneesh Kumar K.V ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
516379f0be8dSAneesh Kumar K.V len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
51642e9ee850SAneesh Kumar K.V if (ret < 0)
51652e9ee850SAneesh Kumar K.V goto out_unlock;
51662e9ee850SAneesh Kumar K.V ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
516779f0be8dSAneesh Kumar K.V len, len, page, fsdata);
51682e9ee850SAneesh Kumar K.V if (ret < 0)
51692e9ee850SAneesh Kumar K.V goto out_unlock;
51702e9ee850SAneesh Kumar K.V ret = 0;
51712e9ee850SAneesh Kumar K.V out_unlock:
51722e9ee850SAneesh Kumar K.V up_read(&inode->i_alloc_sem);
51732e9ee850SAneesh Kumar K.V return ret;
51742e9ee850SAneesh Kumar K.V }
5175