/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel.
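	 *
	 * As a rough illustration (the numbers are assumed for this
	 * example, not taken from the code): on a filesystem with 4KB
	 * blocks, i_blocks counts 512-byte sectors, so an inode with
	 * i_blocks == 80 yields needed = 80 >> (12 - 9) = 10 here, which
	 * is then clamped against EXT4_MAX_TRANS_DATA below.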
	 */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext4_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode) + 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (handle->h_buffer_credits < 3) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 * ext4_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data, ext4 uses a data structure
 * common to UNIX filesystems - a tree of pointers anchored in the inode,
 * with data blocks at the leaves and indirect blocks in the intermediate
 * nodes.  This function translates the block number into a path in that
 * tree - the return value is the path length and @offsets[n] is the offset
 * of the pointer to the (n+1)-th node in the n-th one.  If @block is out
 * of range (negative or too large) a warning is printed and zero is
 * returned.
 *
 * Note: the function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
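 *
 * A worked example (illustrative only; it assumes a 4KB block size, i.e.
 * 1024 pointers per indirect block and 12 direct blocks in the inode):
 * for i_block = 5000 we have 5000 - 12 = 4988 past the direct blocks and
 * 4988 - 1024 = 3964 past the single-indirect range, so the function
 * returns a depth of 3 with offsets = {EXT4_DIND_BLOCK, 3964 >> 10 = 3,
 * 3964 & 1023 = 892}.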
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			ext4_lblk_t i_block,
			ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
				"block %lu > max",
				i_block + direct_blocks +
				indirect_blocks + double_blocks);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}

/**
 * ext4_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise.  Upon return, chain[i].key contains
 * the number of the (i+1)-th block in the chain (as it is stored in
 * memory, i.e. little-endian 32-bit), chain[i].p contains the address of
 * that number (it points into struct inode for i==0 and into the
 * bh->b_data for i>0) and chain[i].bh points to the buffer_head of the
 * i-th indirect block for i>0 and NULL for i==0.  In other words, it
 * holds the block numbers of the chain, the addresses they were taken
 * from (so we can verify that the chain did not change) and the
 * buffer_heads hosting these numbers.
 *
 * Function stops when it stumbles upon a zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 * ext4_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if the pointer will live in an indirect block - allocate near that
 *     block.
 *   + if the pointer will live in the inode - allocate in the same
 *     cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different
 * inode in the same block group.  The PID is used here so that
 * functionally related files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}

/**
 * ext4_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
		Indirect *partial)
{
	struct ext4_block_alloc_info *block_i;

	block_i = EXT4_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext4_find_near(inode, partial);
}

/**
 * ext4_blks_to_allocate - look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * Return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case: if the [t,d]indirect block(s) have not been allocated
	 * yet, then it's clear that the blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}

/**
 * ext4_alloc_blocks - allocate multiple blocks needed for a branch
 * @indirect_blks: the number of blocks that need to be allocated for
 *	indirect blocks
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block,
 * @blks: on return it will store the total number of allocated
 *	direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			ext4_lblk_t iblock, ext4_fsblk_t goal,
			int indirect_blks, int blks,
			ext4_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
							goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
						"requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	count = target;
	/* allocating blocks for data blocks */
	current_block = ext4_new_blocks(handle, inode, iblock,
						goal, &count, err);
	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += count;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
	return ret;
}

/**
 * ext4_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode.  It stores the information about that chain in branch[], in
 * the same format as ext4_get_branch() would do.  We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key).  Upon exit we have the same
 * picture as after the successful ext4_get_block(), except that in one
 * place the chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 * as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
				ext4_lblk_t iblock, int indirect_blks,
				int *blks, ext4_fsblk_t goal,
				ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n; i++) {
		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

	return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			ext4_lblk_t block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext4_block_alloc_info *block_i;
	ext4_fsblk_t current_block;

	block_i = EXT4_I(inode)->i_block_alloc_info;
	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist in finding the proper goal block
	 * for the next allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
			le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle, inode,
				le32_to_cpu(where[i-1].key), 1, 0);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 * Needs to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (i.e., create is zero).
 * Otherwise, down_write(&EXT4_I(inode)->i_data_sem)
 */
int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
		ext4_lblk_t iblock, unsigned long maxblocks,
		struct buffer_head *bh_result,
		int create, int extend_disksize)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int count = 0;
	ext4_fsblk_t first_block = 0;
	loff_t disksize;

	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || create == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
					&blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
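	 *
	 * For illustration (the numbers are assumed, not from this code):
	 * if depth is 3 and ext4_get_branch() stopped right at the inode's
	 * own slot (partial == chain), then indirect_blks above is
	 * 3 - 0 - 1 = 2, i.e. both missing indirect levels still have to
	 * be allocated in addition to the data blocks counted here.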
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
					&count, goal,
					offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					partial, indirect_blks, count);
	/*
	 * i_disksize growing is protected by i_data_sem.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext4_get_block() -bzzz
	 */
	if (!err && extend_disksize) {
		disksize = ((loff_t) iblock + count) << inode->i_blkbits;
		if (disksize > i_size_read(inode))
			disksize = i_size_read(inode);
		if (disksize > ei->i_disksize)
			ei->i_disksize = disksize;
	}
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}

/*
 * Calculate the number of metadata blocks we need to reserve
 * in order to allocate @blocks data blocks for a non-extent-based file
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ind_blks, dind_blks, tind_blks;

	/* number of new indirect blocks needed */
	ind_blks = (blocks + icap - 1) / icap;
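
	/*
	 * Worked example (the numbers are assumed for illustration): with
	 * 4KB blocks icap is 1024, so reserving 2048 data blocks gives
	 * ind_blks = 2, and the dind_blks/tind_blks terms below add one
	 * block each, for a worst-case total of 4 metadata blocks.
	 */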

	dind_blks = (ind_blks + icap - 1) / icap;

	tind_blks = 1;

	return ind_blks + dind_blks + tind_blks;
}

/*
 * Calculate the number of metadata blocks we need to reserve
 * in order to allocate the given number of blocks
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_calc_metadata_amount(inode, blocks);

	return ext4_indirect_calc_metadata_amount(inode, blocks);
}

static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int total, mdb, mdb_free;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	/* recalculate the number of metablocks that still need to be reserved */
	total = EXT4_I(inode)->i_reserved_data_blocks - used;
	mdb = ext4_calc_metadata_amount(inode, total);

	/* figure out how many metablocks to release */
	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

	/* Account for allocated meta_blocks */
	mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

	/* update fs free blocks counter for truncate case */
	percpu_counter_add(&sbi->s_freeblocks_counter, mdb_free);

	/* update per-inode reservations */
	BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
	EXT4_I(inode)->i_reserved_data_blocks -= used;

	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
	EXT4_I(inode)->i_allocated_meta_blocks = 0;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
/*
 * Number of credits we need for writing DIO_MAX_BLOCKS:
 * We need sb + group descriptor + bitmap + inode -> 4
 * For B blocks with A block pointers per block we need:
 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
10517fb5409dSJan Kara * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
10527fb5409dSJan Kara */
10537fb5409dSJan Kara #define DIO_CREDITS 25
1054ac27a0ecSDave Kleikamp
1055f5ab0d1fSMingming Cao
1056f5ab0d1fSMingming Cao /*
10572b2d6d01STheodore Ts'o * The ext4_get_blocks_wrap() function tries to look up the requested blocks,
10582b2d6d01STheodore Ts'o * and returns right away if the blocks are already mapped.
1059f5ab0d1fSMingming Cao *
1060f5ab0d1fSMingming Cao * Otherwise it takes the write lock of the i_data_sem and allocates blocks,
1061f5ab0d1fSMingming Cao * stores the allocated blocks in the result buffer head and marks it
1062f5ab0d1fSMingming Cao * mapped.
1063f5ab0d1fSMingming Cao *
1064f5ab0d1fSMingming Cao * If the file is extent based, it will call ext4_ext_get_blocks();
1065f5ab0d1fSMingming Cao * otherwise, it calls ext4_get_blocks_handle() to handle indirect-mapping
1066f5ab0d1fSMingming Cao * based files.
1067f5ab0d1fSMingming Cao *
1068f5ab0d1fSMingming Cao * On success, it returns the number of blocks mapped or allocated.
1069f5ab0d1fSMingming Cao * If create == 0 and the blocks are pre-allocated and uninitialized,
1070f5ab0d1fSMingming Cao * the result buffer head is unmapped. If create == 1, it will make sure
1071f5ab0d1fSMingming Cao * the buffer head is mapped.
1072f5ab0d1fSMingming Cao *
1073f5ab0d1fSMingming Cao * It returns 0 if a plain look up failed (blocks have not been allocated);
1074f5ab0d1fSMingming Cao * in that case the buffer head is unmapped.
1075f5ab0d1fSMingming Cao *
1076f5ab0d1fSMingming Cao * It returns the error in case of allocation failure.
1077f5ab0d1fSMingming Cao */
10780e855ac8SAneesh Kumar K.V int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
10790e855ac8SAneesh Kumar K.V unsigned long max_blocks, struct buffer_head *bh,
1080d2a17637SMingming Cao int create, int extend_disksize, int flag)
10810e855ac8SAneesh Kumar K.V {
10820e855ac8SAneesh Kumar K.V int retval;
1083f5ab0d1fSMingming Cao
1084f5ab0d1fSMingming Cao clear_buffer_mapped(bh);
1085f5ab0d1fSMingming Cao
10864df3d265SAneesh Kumar K.V /*
10874df3d265SAneesh Kumar K.V * Try to see if we can get the block without requesting
10884df3d265SAneesh Kumar K.V * a new file system block.
10894df3d265SAneesh Kumar K.V */
10900e855ac8SAneesh Kumar K.V down_read((&EXT4_I(inode)->i_data_sem));
10914df3d265SAneesh Kumar K.V if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
10924df3d265SAneesh Kumar K.V retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
10934df3d265SAneesh Kumar K.V bh, 0, 0);
10944df3d265SAneesh Kumar K.V } else {
10954df3d265SAneesh Kumar K.V retval = ext4_get_blocks_handle(handle,
10964df3d265SAneesh Kumar K.V inode, block, max_blocks, bh, 0, 0);
10970e855ac8SAneesh Kumar K.V }
10984df3d265SAneesh Kumar K.V up_read((&EXT4_I(inode)->i_data_sem));
1099f5ab0d1fSMingming Cao
1100f5ab0d1fSMingming Cao /* If it is only a block(s) look up */
1101f5ab0d1fSMingming Cao if (!create)
11024df3d265SAneesh Kumar K.V return retval;
11034df3d265SAneesh Kumar K.V
11044df3d265SAneesh Kumar K.V /*
1105f5ab0d1fSMingming Cao * Return if the blocks have already been allocated.
1106f5ab0d1fSMingming Cao *
1107f5ab0d1fSMingming Cao * Note that if blocks have been preallocated,
1108f5ab0d1fSMingming Cao * ext4_ext_get_block() returns with create = 0
1109f5ab0d1fSMingming Cao * with the buffer head unmapped.
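 * In that case we fall through the mapped-buffer check below and do the
 * actual allocation while holding the write lock on i_data_sem.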
1110f5ab0d1fSMingming Cao */
1111f5ab0d1fSMingming Cao if (retval > 0 && buffer_mapped(bh))
1112f5ab0d1fSMingming Cao return retval;
1113f5ab0d1fSMingming Cao
1114f5ab0d1fSMingming Cao /*
1115f5ab0d1fSMingming Cao * New block allocation and/or writing to an uninitialized extent
1116f5ab0d1fSMingming Cao * will possibly result in updating i_data, so we take
1117f5ab0d1fSMingming Cao * the write lock of i_data_sem, and call get_blocks()
1118f5ab0d1fSMingming Cao * with the create == 1 flag.
11194df3d265SAneesh Kumar K.V */
11204df3d265SAneesh Kumar K.V down_write((&EXT4_I(inode)->i_data_sem));
1121d2a17637SMingming Cao
1122d2a17637SMingming Cao /*
1123d2a17637SMingming Cao * If the caller is from the delayed allocation writeout path,
1124d2a17637SMingming Cao * we have already reserved fs blocks for allocation;
1125d2a17637SMingming Cao * let the underlying get_block() function know, to
1126d2a17637SMingming Cao * avoid double accounting.
1127d2a17637SMingming Cao */
1128d2a17637SMingming Cao if (flag)
1129d2a17637SMingming Cao EXT4_I(inode)->i_delalloc_reserved_flag = 1;
11304df3d265SAneesh Kumar K.V /*
11314df3d265SAneesh Kumar K.V * We need to check for EXT4 here because migrate
11324df3d265SAneesh Kumar K.V * could have changed the inode type in between.
11334df3d265SAneesh Kumar K.V */
11340e855ac8SAneesh Kumar K.V if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
11350e855ac8SAneesh Kumar K.V retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
11360e855ac8SAneesh Kumar K.V bh, create, extend_disksize);
11370e855ac8SAneesh Kumar K.V } else {
11380e855ac8SAneesh Kumar K.V retval = ext4_get_blocks_handle(handle, inode, block,
11390e855ac8SAneesh Kumar K.V max_blocks, bh, create, extend_disksize);
1140267e4db9SAneesh Kumar K.V
1141267e4db9SAneesh Kumar K.V if (retval > 0 && buffer_new(bh)) {
1142267e4db9SAneesh Kumar K.V /*
1143267e4db9SAneesh Kumar K.V * We allocated new blocks which will result in
1144267e4db9SAneesh Kumar K.V * i_data's format changing.
Force the migrate 1145267e4db9SAneesh Kumar K.V * to fail by clearing migrate flags 1146267e4db9SAneesh Kumar K.V */ 1147267e4db9SAneesh Kumar K.V EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags & 1148267e4db9SAneesh Kumar K.V ~EXT4_EXT_MIGRATE; 1149267e4db9SAneesh Kumar K.V } 11500e855ac8SAneesh Kumar K.V } 1151d2a17637SMingming Cao 1152d2a17637SMingming Cao if (flag) { 1153d2a17637SMingming Cao EXT4_I(inode)->i_delalloc_reserved_flag = 0; 1154d2a17637SMingming Cao /* 1155d2a17637SMingming Cao * Update reserved blocks/metadata blocks 1156d2a17637SMingming Cao * after successful block allocation 1157d2a17637SMingming Cao * which were deferred till now 1158d2a17637SMingming Cao */ 1159d2a17637SMingming Cao if ((retval > 0) && buffer_delay(bh)) 116012219aeaSAneesh Kumar K.V ext4_da_update_reserve_space(inode, retval); 1161d2a17637SMingming Cao } 1162d2a17637SMingming Cao 11630e855ac8SAneesh Kumar K.V up_write((&EXT4_I(inode)->i_data_sem)); 11640e855ac8SAneesh Kumar K.V return retval; 11650e855ac8SAneesh Kumar K.V } 11660e855ac8SAneesh Kumar K.V 1167617ba13bSMingming Cao static int ext4_get_block(struct inode *inode, sector_t iblock, 1168ac27a0ecSDave Kleikamp struct buffer_head *bh_result, int create) 1169ac27a0ecSDave Kleikamp { 11703e4fdaf8SDmitriy Monakhov handle_t *handle = ext4_journal_current_handle(); 11717fb5409dSJan Kara int ret = 0, started = 0; 1172ac27a0ecSDave Kleikamp unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 1173ac27a0ecSDave Kleikamp 11747fb5409dSJan Kara if (create && !handle) { 11757fb5409dSJan Kara /* Direct IO write... */ 11767fb5409dSJan Kara if (max_blocks > DIO_MAX_BLOCKS) 11777fb5409dSJan Kara max_blocks = DIO_MAX_BLOCKS; 11787fb5409dSJan Kara handle = ext4_journal_start(inode, DIO_CREDITS + 11797fb5409dSJan Kara 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb)); 11807fb5409dSJan Kara if (IS_ERR(handle)) { 1181ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 11827fb5409dSJan Kara goto out; 11837fb5409dSJan Kara } 11847fb5409dSJan Kara started = 1; 1185ac27a0ecSDave Kleikamp } 1186ac27a0ecSDave Kleikamp 1187a86c6181SAlex Tomas ret = ext4_get_blocks_wrap(handle, inode, iblock, 1188d2a17637SMingming Cao max_blocks, bh_result, create, 0, 0); 1189ac27a0ecSDave Kleikamp if (ret > 0) { 1190ac27a0ecSDave Kleikamp bh_result->b_size = (ret << inode->i_blkbits); 1191ac27a0ecSDave Kleikamp ret = 0; 1192ac27a0ecSDave Kleikamp } 11937fb5409dSJan Kara if (started) 11947fb5409dSJan Kara ext4_journal_stop(handle); 11957fb5409dSJan Kara out: 1196ac27a0ecSDave Kleikamp return ret; 1197ac27a0ecSDave Kleikamp } 1198ac27a0ecSDave Kleikamp 1199ac27a0ecSDave Kleikamp /* 1200ac27a0ecSDave Kleikamp * `handle' can be NULL if create is zero 1201ac27a0ecSDave Kleikamp */ 1202617ba13bSMingming Cao struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, 1203725d26d3SAneesh Kumar K.V ext4_lblk_t block, int create, int *errp) 1204ac27a0ecSDave Kleikamp { 1205ac27a0ecSDave Kleikamp struct buffer_head dummy; 1206ac27a0ecSDave Kleikamp int fatal = 0, err; 1207ac27a0ecSDave Kleikamp 1208ac27a0ecSDave Kleikamp J_ASSERT(handle != NULL || create == 0); 1209ac27a0ecSDave Kleikamp 1210ac27a0ecSDave Kleikamp dummy.b_state = 0; 1211ac27a0ecSDave Kleikamp dummy.b_blocknr = -1000; 1212ac27a0ecSDave Kleikamp buffer_trace_init(&dummy.b_history); 1213a86c6181SAlex Tomas err = ext4_get_blocks_wrap(handle, inode, block, 1, 1214d2a17637SMingming Cao &dummy, create, 1, 0); 1215ac27a0ecSDave Kleikamp /* 1216617ba13bSMingming Cao * ext4_get_blocks_handle() returns number of blocks 1217ac27a0ecSDave 
Kleikamp * mapped. 0 in case of a HOLE. 1218ac27a0ecSDave Kleikamp */ 1219ac27a0ecSDave Kleikamp if (err > 0) { 1220ac27a0ecSDave Kleikamp if (err > 1) 1221ac27a0ecSDave Kleikamp WARN_ON(1); 1222ac27a0ecSDave Kleikamp err = 0; 1223ac27a0ecSDave Kleikamp } 1224ac27a0ecSDave Kleikamp *errp = err; 1225ac27a0ecSDave Kleikamp if (!err && buffer_mapped(&dummy)) { 1226ac27a0ecSDave Kleikamp struct buffer_head *bh; 1227ac27a0ecSDave Kleikamp bh = sb_getblk(inode->i_sb, dummy.b_blocknr); 1228ac27a0ecSDave Kleikamp if (!bh) { 1229ac27a0ecSDave Kleikamp *errp = -EIO; 1230ac27a0ecSDave Kleikamp goto err; 1231ac27a0ecSDave Kleikamp } 1232ac27a0ecSDave Kleikamp if (buffer_new(&dummy)) { 1233ac27a0ecSDave Kleikamp J_ASSERT(create != 0); 1234ac39849dSAneesh Kumar K.V J_ASSERT(handle != NULL); 1235ac27a0ecSDave Kleikamp 1236ac27a0ecSDave Kleikamp /* 1237ac27a0ecSDave Kleikamp * Now that we do not always journal data, we should 1238ac27a0ecSDave Kleikamp * keep in mind whether this should always journal the 1239ac27a0ecSDave Kleikamp * new buffer as metadata. For now, regular file 1240617ba13bSMingming Cao * writes use ext4_get_block instead, so it's not a 1241ac27a0ecSDave Kleikamp * problem. 1242ac27a0ecSDave Kleikamp */ 1243ac27a0ecSDave Kleikamp lock_buffer(bh); 1244ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "call get_create_access"); 1245617ba13bSMingming Cao fatal = ext4_journal_get_create_access(handle, bh); 1246ac27a0ecSDave Kleikamp if (!fatal && !buffer_uptodate(bh)) { 1247ac27a0ecSDave Kleikamp memset(bh->b_data,0,inode->i_sb->s_blocksize); 1248ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 1249ac27a0ecSDave Kleikamp } 1250ac27a0ecSDave Kleikamp unlock_buffer(bh); 1251617ba13bSMingming Cao BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 1252617ba13bSMingming Cao err = ext4_journal_dirty_metadata(handle, bh); 1253ac27a0ecSDave Kleikamp if (!fatal) 1254ac27a0ecSDave Kleikamp fatal = err; 1255ac27a0ecSDave Kleikamp } else { 1256ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "not a new buffer"); 1257ac27a0ecSDave Kleikamp } 1258ac27a0ecSDave Kleikamp if (fatal) { 1259ac27a0ecSDave Kleikamp *errp = fatal; 1260ac27a0ecSDave Kleikamp brelse(bh); 1261ac27a0ecSDave Kleikamp bh = NULL; 1262ac27a0ecSDave Kleikamp } 1263ac27a0ecSDave Kleikamp return bh; 1264ac27a0ecSDave Kleikamp } 1265ac27a0ecSDave Kleikamp err: 1266ac27a0ecSDave Kleikamp return NULL; 1267ac27a0ecSDave Kleikamp } 1268ac27a0ecSDave Kleikamp 1269617ba13bSMingming Cao struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, 1270725d26d3SAneesh Kumar K.V ext4_lblk_t block, int create, int *err) 1271ac27a0ecSDave Kleikamp { 1272ac27a0ecSDave Kleikamp struct buffer_head * bh; 1273ac27a0ecSDave Kleikamp 1274617ba13bSMingming Cao bh = ext4_getblk(handle, inode, block, create, err); 1275ac27a0ecSDave Kleikamp if (!bh) 1276ac27a0ecSDave Kleikamp return bh; 1277ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) 1278ac27a0ecSDave Kleikamp return bh; 1279ac27a0ecSDave Kleikamp ll_rw_block(READ_META, 1, &bh); 1280ac27a0ecSDave Kleikamp wait_on_buffer(bh); 1281ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) 1282ac27a0ecSDave Kleikamp return bh; 1283ac27a0ecSDave Kleikamp put_bh(bh); 1284ac27a0ecSDave Kleikamp *err = -EIO; 1285ac27a0ecSDave Kleikamp return NULL; 1286ac27a0ecSDave Kleikamp } 1287ac27a0ecSDave Kleikamp 1288ac27a0ecSDave Kleikamp static int walk_page_buffers( handle_t *handle, 1289ac27a0ecSDave Kleikamp struct buffer_head *head, 1290ac27a0ecSDave Kleikamp unsigned from, 1291ac27a0ecSDave Kleikamp unsigned to, 1292ac27a0ecSDave Kleikamp 
int *partial, 1293ac27a0ecSDave Kleikamp int (*fn)( handle_t *handle, 1294ac27a0ecSDave Kleikamp struct buffer_head *bh)) 1295ac27a0ecSDave Kleikamp { 1296ac27a0ecSDave Kleikamp struct buffer_head *bh; 1297ac27a0ecSDave Kleikamp unsigned block_start, block_end; 1298ac27a0ecSDave Kleikamp unsigned blocksize = head->b_size; 1299ac27a0ecSDave Kleikamp int err, ret = 0; 1300ac27a0ecSDave Kleikamp struct buffer_head *next; 1301ac27a0ecSDave Kleikamp 1302ac27a0ecSDave Kleikamp for ( bh = head, block_start = 0; 1303ac27a0ecSDave Kleikamp ret == 0 && (bh != head || !block_start); 1304ac27a0ecSDave Kleikamp block_start = block_end, bh = next) 1305ac27a0ecSDave Kleikamp { 1306ac27a0ecSDave Kleikamp next = bh->b_this_page; 1307ac27a0ecSDave Kleikamp block_end = block_start + blocksize; 1308ac27a0ecSDave Kleikamp if (block_end <= from || block_start >= to) { 1309ac27a0ecSDave Kleikamp if (partial && !buffer_uptodate(bh)) 1310ac27a0ecSDave Kleikamp *partial = 1; 1311ac27a0ecSDave Kleikamp continue; 1312ac27a0ecSDave Kleikamp } 1313ac27a0ecSDave Kleikamp err = (*fn)(handle, bh); 1314ac27a0ecSDave Kleikamp if (!ret) 1315ac27a0ecSDave Kleikamp ret = err; 1316ac27a0ecSDave Kleikamp } 1317ac27a0ecSDave Kleikamp return ret; 1318ac27a0ecSDave Kleikamp } 1319ac27a0ecSDave Kleikamp 1320ac27a0ecSDave Kleikamp /* 1321ac27a0ecSDave Kleikamp * To preserve ordering, it is essential that the hole instantiation and 1322ac27a0ecSDave Kleikamp * the data write be encapsulated in a single transaction. We cannot 1323617ba13bSMingming Cao * close off a transaction and start a new one between the ext4_get_block() 1324dab291afSMingming Cao * and the commit_write(). So doing the jbd2_journal_start at the start of 1325ac27a0ecSDave Kleikamp * prepare_write() is the right place. 1326ac27a0ecSDave Kleikamp * 1327617ba13bSMingming Cao * Also, this function can nest inside ext4_writepage() -> 1328617ba13bSMingming Cao * block_write_full_page(). In that case, we *know* that ext4_writepage() 1329ac27a0ecSDave Kleikamp * has generated enough buffer credits to do the whole page. So we won't 1330ac27a0ecSDave Kleikamp * block on the journal in that case, which is good, because the caller may 1331ac27a0ecSDave Kleikamp * be PF_MEMALLOC. 1332ac27a0ecSDave Kleikamp * 1333617ba13bSMingming Cao * By accident, ext4 can be reentered when a transaction is open via 1334ac27a0ecSDave Kleikamp * quota file writes. If we were to commit the transaction while thus 1335ac27a0ecSDave Kleikamp * reentered, there can be a deadlock - we would be holding a quota 1336ac27a0ecSDave Kleikamp * lock, and the commit would never complete if another thread had a 1337ac27a0ecSDave Kleikamp * transaction open and was blocking on the quota lock - a ranking 1338ac27a0ecSDave Kleikamp * violation. 1339ac27a0ecSDave Kleikamp * 1340dab291afSMingming Cao * So what we do is to rely on the fact that jbd2_journal_stop/journal_start 1341ac27a0ecSDave Kleikamp * will _not_ run commit under these circumstances because handle->h_ref 1342ac27a0ecSDave Kleikamp * is elevated. We'll still have enough credits for the tiny quotafile 1343ac27a0ecSDave Kleikamp * write. 
1344ac27a0ecSDave Kleikamp */ 1345ac27a0ecSDave Kleikamp static int do_journal_get_write_access(handle_t *handle, 1346ac27a0ecSDave Kleikamp struct buffer_head *bh) 1347ac27a0ecSDave Kleikamp { 1348ac27a0ecSDave Kleikamp if (!buffer_mapped(bh) || buffer_freed(bh)) 1349ac27a0ecSDave Kleikamp return 0; 1350617ba13bSMingming Cao return ext4_journal_get_write_access(handle, bh); 1351ac27a0ecSDave Kleikamp } 1352ac27a0ecSDave Kleikamp 1353bfc1af65SNick Piggin static int ext4_write_begin(struct file *file, struct address_space *mapping, 1354bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned flags, 1355bfc1af65SNick Piggin struct page **pagep, void **fsdata) 1356ac27a0ecSDave Kleikamp { 1357bfc1af65SNick Piggin struct inode *inode = mapping->host; 13587479d2b9SAndrew Morton int ret, needed_blocks = ext4_writepage_trans_blocks(inode); 1359ac27a0ecSDave Kleikamp handle_t *handle; 1360ac27a0ecSDave Kleikamp int retries = 0; 1361bfc1af65SNick Piggin struct page *page; 1362bfc1af65SNick Piggin pgoff_t index; 1363bfc1af65SNick Piggin unsigned from, to; 1364bfc1af65SNick Piggin 1365bfc1af65SNick Piggin index = pos >> PAGE_CACHE_SHIFT; 1366bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1367bfc1af65SNick Piggin to = from + len; 1368ac27a0ecSDave Kleikamp 1369ac27a0ecSDave Kleikamp retry: 1370617ba13bSMingming Cao handle = ext4_journal_start(inode, needed_blocks); 13717479d2b9SAndrew Morton if (IS_ERR(handle)) { 13727479d2b9SAndrew Morton ret = PTR_ERR(handle); 13737479d2b9SAndrew Morton goto out; 13747479d2b9SAndrew Morton } 1375ac27a0ecSDave Kleikamp 1376cf108bcaSJan Kara page = __grab_cache_page(mapping, index); 1377cf108bcaSJan Kara if (!page) { 1378cf108bcaSJan Kara ext4_journal_stop(handle); 1379cf108bcaSJan Kara ret = -ENOMEM; 1380cf108bcaSJan Kara goto out; 1381cf108bcaSJan Kara } 1382cf108bcaSJan Kara *pagep = page; 1383cf108bcaSJan Kara 1384bfc1af65SNick Piggin ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 1385bfc1af65SNick Piggin ext4_get_block); 1386bfc1af65SNick Piggin 1387bfc1af65SNick Piggin if (!ret && ext4_should_journal_data(inode)) { 1388ac27a0ecSDave Kleikamp ret = walk_page_buffers(handle, page_buffers(page), 1389ac27a0ecSDave Kleikamp from, to, NULL, do_journal_get_write_access); 1390b46be050SAndrey Savochkin } 1391bfc1af65SNick Piggin 1392bfc1af65SNick Piggin if (ret) { 1393bfc1af65SNick Piggin unlock_page(page); 1394cf108bcaSJan Kara ext4_journal_stop(handle); 1395bfc1af65SNick Piggin page_cache_release(page); 1396bfc1af65SNick Piggin } 1397bfc1af65SNick Piggin 1398617ba13bSMingming Cao if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 1399ac27a0ecSDave Kleikamp goto retry; 14007479d2b9SAndrew Morton out: 1401ac27a0ecSDave Kleikamp return ret; 1402ac27a0ecSDave Kleikamp } 1403ac27a0ecSDave Kleikamp 1404bfc1af65SNick Piggin /* For write_end() in data=journal mode */ 1405bfc1af65SNick Piggin static int write_end_fn(handle_t *handle, struct buffer_head *bh) 1406ac27a0ecSDave Kleikamp { 1407ac27a0ecSDave Kleikamp if (!buffer_mapped(bh) || buffer_freed(bh)) 1408ac27a0ecSDave Kleikamp return 0; 1409ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 1410617ba13bSMingming Cao return ext4_journal_dirty_metadata(handle, bh); 1411ac27a0ecSDave Kleikamp } 1412ac27a0ecSDave Kleikamp 1413ac27a0ecSDave Kleikamp /* 1414ac27a0ecSDave Kleikamp * We need to pick up the new inode size which generic_commit_write gave us 1415ac27a0ecSDave Kleikamp * `file' can be NULL - eg, when called from page_symlink(). 
1416ac27a0ecSDave Kleikamp * 1417617ba13bSMingming Cao * ext4 never places buffers on inode->i_mapping->private_list. metadata 1418ac27a0ecSDave Kleikamp * buffers are managed internally. 1419ac27a0ecSDave Kleikamp */ 1420bfc1af65SNick Piggin static int ext4_ordered_write_end(struct file *file, 1421bfc1af65SNick Piggin struct address_space *mapping, 1422bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1423bfc1af65SNick Piggin struct page *page, void *fsdata) 1424ac27a0ecSDave Kleikamp { 1425617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1426cf108bcaSJan Kara struct inode *inode = mapping->host; 1427bfc1af65SNick Piggin unsigned from, to; 1428ac27a0ecSDave Kleikamp int ret = 0, ret2; 1429ac27a0ecSDave Kleikamp 1430bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1431bfc1af65SNick Piggin to = from + len; 1432bfc1af65SNick Piggin 1433678aaf48SJan Kara ret = ext4_jbd2_file_inode(handle, inode); 1434ac27a0ecSDave Kleikamp 1435ac27a0ecSDave Kleikamp if (ret == 0) { 1436ac27a0ecSDave Kleikamp /* 1437bfc1af65SNick Piggin * generic_write_end() will run mark_inode_dirty() if i_size 1438ac27a0ecSDave Kleikamp * changes. So let's piggyback the i_disksize mark_inode_dirty 1439ac27a0ecSDave Kleikamp * into that. 1440ac27a0ecSDave Kleikamp */ 1441ac27a0ecSDave Kleikamp loff_t new_i_size; 1442ac27a0ecSDave Kleikamp 1443bfc1af65SNick Piggin new_i_size = pos + copied; 1444617ba13bSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) 1445617ba13bSMingming Cao EXT4_I(inode)->i_disksize = new_i_size; 1446cf108bcaSJan Kara ret2 = generic_write_end(file, mapping, pos, len, copied, 1447bfc1af65SNick Piggin page, fsdata); 1448f8a87d89SRoel Kluin copied = ret2; 1449f8a87d89SRoel Kluin if (ret2 < 0) 1450f8a87d89SRoel Kluin ret = ret2; 1451ac27a0ecSDave Kleikamp } 1452617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1453ac27a0ecSDave Kleikamp if (!ret) 1454ac27a0ecSDave Kleikamp ret = ret2; 1455bfc1af65SNick Piggin 1456bfc1af65SNick Piggin return ret ? ret : copied; 1457ac27a0ecSDave Kleikamp } 1458ac27a0ecSDave Kleikamp 1459bfc1af65SNick Piggin static int ext4_writeback_write_end(struct file *file, 1460bfc1af65SNick Piggin struct address_space *mapping, 1461bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1462bfc1af65SNick Piggin struct page *page, void *fsdata) 1463ac27a0ecSDave Kleikamp { 1464617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1465cf108bcaSJan Kara struct inode *inode = mapping->host; 1466ac27a0ecSDave Kleikamp int ret = 0, ret2; 1467ac27a0ecSDave Kleikamp loff_t new_i_size; 1468ac27a0ecSDave Kleikamp 1469bfc1af65SNick Piggin new_i_size = pos + copied; 1470617ba13bSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) 1471617ba13bSMingming Cao EXT4_I(inode)->i_disksize = new_i_size; 1472ac27a0ecSDave Kleikamp 1473cf108bcaSJan Kara ret2 = generic_write_end(file, mapping, pos, len, copied, 1474bfc1af65SNick Piggin page, fsdata); 1475f8a87d89SRoel Kluin copied = ret2; 1476f8a87d89SRoel Kluin if (ret2 < 0) 1477f8a87d89SRoel Kluin ret = ret2; 1478ac27a0ecSDave Kleikamp 1479617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1480ac27a0ecSDave Kleikamp if (!ret) 1481ac27a0ecSDave Kleikamp ret = ret2; 1482bfc1af65SNick Piggin 1483bfc1af65SNick Piggin return ret ? 
ret : copied; 1484ac27a0ecSDave Kleikamp } 1485ac27a0ecSDave Kleikamp 1486bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file, 1487bfc1af65SNick Piggin struct address_space *mapping, 1488bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1489bfc1af65SNick Piggin struct page *page, void *fsdata) 1490ac27a0ecSDave Kleikamp { 1491617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1492bfc1af65SNick Piggin struct inode *inode = mapping->host; 1493ac27a0ecSDave Kleikamp int ret = 0, ret2; 1494ac27a0ecSDave Kleikamp int partial = 0; 1495bfc1af65SNick Piggin unsigned from, to; 1496ac27a0ecSDave Kleikamp 1497bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1498bfc1af65SNick Piggin to = from + len; 1499bfc1af65SNick Piggin 1500bfc1af65SNick Piggin if (copied < len) { 1501bfc1af65SNick Piggin if (!PageUptodate(page)) 1502bfc1af65SNick Piggin copied = 0; 1503bfc1af65SNick Piggin page_zero_new_buffers(page, from+copied, to); 1504bfc1af65SNick Piggin } 1505ac27a0ecSDave Kleikamp 1506ac27a0ecSDave Kleikamp ret = walk_page_buffers(handle, page_buffers(page), from, 1507bfc1af65SNick Piggin to, &partial, write_end_fn); 1508ac27a0ecSDave Kleikamp if (!partial) 1509ac27a0ecSDave Kleikamp SetPageUptodate(page); 1510bfc1af65SNick Piggin if (pos+copied > inode->i_size) 1511bfc1af65SNick Piggin i_size_write(inode, pos+copied); 1512617ba13bSMingming Cao EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 1513617ba13bSMingming Cao if (inode->i_size > EXT4_I(inode)->i_disksize) { 1514617ba13bSMingming Cao EXT4_I(inode)->i_disksize = inode->i_size; 1515617ba13bSMingming Cao ret2 = ext4_mark_inode_dirty(handle, inode); 1516ac27a0ecSDave Kleikamp if (!ret) 1517ac27a0ecSDave Kleikamp ret = ret2; 1518ac27a0ecSDave Kleikamp } 1519bfc1af65SNick Piggin 1520cf108bcaSJan Kara unlock_page(page); 1521617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1522ac27a0ecSDave Kleikamp if (!ret) 1523ac27a0ecSDave Kleikamp ret = ret2; 1524bfc1af65SNick Piggin page_cache_release(page); 1525bfc1af65SNick Piggin 1526bfc1af65SNick Piggin return ret ? 
ret : copied; 1527ac27a0ecSDave Kleikamp } 1528d2a17637SMingming Cao 1529d2a17637SMingming Cao static int ext4_da_reserve_space(struct inode *inode, int nrblocks) 1530d2a17637SMingming Cao { 1531d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1532d2a17637SMingming Cao unsigned long md_needed, mdblocks, total = 0; 1533d2a17637SMingming Cao 1534d2a17637SMingming Cao /* 1535d2a17637SMingming Cao * recalculate the amount of metadata blocks to reserve 1536d2a17637SMingming Cao * in order to allocate nrblocks 1537d2a17637SMingming Cao * worse case is one extent per block 1538d2a17637SMingming Cao */ 1539d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1540d2a17637SMingming Cao total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks; 1541d2a17637SMingming Cao mdblocks = ext4_calc_metadata_amount(inode, total); 1542d2a17637SMingming Cao BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks); 1543d2a17637SMingming Cao 1544d2a17637SMingming Cao md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; 1545d2a17637SMingming Cao total = md_needed + nrblocks; 1546d2a17637SMingming Cao 1547d2a17637SMingming Cao if (ext4_has_free_blocks(sbi, total) < total) { 1548d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1549d2a17637SMingming Cao return -ENOSPC; 1550d2a17637SMingming Cao } 1551d2a17637SMingming Cao /* reduce fs free blocks counter */ 1552d2a17637SMingming Cao percpu_counter_sub(&sbi->s_freeblocks_counter, total); 1553d2a17637SMingming Cao 1554d2a17637SMingming Cao EXT4_I(inode)->i_reserved_data_blocks += nrblocks; 1555d2a17637SMingming Cao EXT4_I(inode)->i_reserved_meta_blocks = mdblocks; 1556d2a17637SMingming Cao 1557d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1558d2a17637SMingming Cao return 0; /* success */ 1559d2a17637SMingming Cao } 1560d2a17637SMingming Cao 156112219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1562d2a17637SMingming Cao { 1563d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1564d2a17637SMingming Cao int total, mdb, mdb_free, release; 1565d2a17637SMingming Cao 1566d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1567d2a17637SMingming Cao /* recalculate the number of metablocks still need to be reserved */ 156812219aeaSAneesh Kumar K.V total = EXT4_I(inode)->i_reserved_data_blocks - to_free; 1569d2a17637SMingming Cao mdb = ext4_calc_metadata_amount(inode, total); 1570d2a17637SMingming Cao 1571d2a17637SMingming Cao /* figure out how many metablocks to release */ 1572d2a17637SMingming Cao BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1573d2a17637SMingming Cao mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb; 1574d2a17637SMingming Cao 1575d2a17637SMingming Cao release = to_free + mdb_free; 1576d2a17637SMingming Cao 1577d2a17637SMingming Cao /* update fs free blocks counter for truncate case */ 1578d2a17637SMingming Cao percpu_counter_add(&sbi->s_freeblocks_counter, release); 1579d2a17637SMingming Cao 1580d2a17637SMingming Cao /* update per-inode reservations */ 158112219aeaSAneesh Kumar K.V BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks); 158212219aeaSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks -= to_free; 1583d2a17637SMingming Cao 1584d2a17637SMingming Cao BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); 1585d2a17637SMingming Cao EXT4_I(inode)->i_reserved_meta_blocks = mdb; 1586d2a17637SMingming Cao 
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 1587d2a17637SMingming Cao } 1588d2a17637SMingming Cao 1589d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1590d2a17637SMingming Cao unsigned long offset) 1591d2a17637SMingming Cao { 1592d2a17637SMingming Cao int to_release = 0; 1593d2a17637SMingming Cao struct buffer_head *head, *bh; 1594d2a17637SMingming Cao unsigned int curr_off = 0; 1595d2a17637SMingming Cao 1596d2a17637SMingming Cao head = page_buffers(page); 1597d2a17637SMingming Cao bh = head; 1598d2a17637SMingming Cao do { 1599d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1600d2a17637SMingming Cao 1601d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1602d2a17637SMingming Cao to_release++; 1603d2a17637SMingming Cao clear_buffer_delay(bh); 1604d2a17637SMingming Cao } 1605d2a17637SMingming Cao curr_off = next_off; 1606d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 160712219aeaSAneesh Kumar K.V ext4_da_release_space(page->mapping->host, to_release); 1608d2a17637SMingming Cao } 1609ac27a0ecSDave Kleikamp 1610ac27a0ecSDave Kleikamp /* 161164769240SAlex Tomas * Delayed allocation stuff 161264769240SAlex Tomas */ 161364769240SAlex Tomas 161464769240SAlex Tomas struct mpage_da_data { 161564769240SAlex Tomas struct inode *inode; 161664769240SAlex Tomas struct buffer_head lbh; /* extent of blocks */ 161764769240SAlex Tomas unsigned long first_page, next_page; /* extent of pages */ 161864769240SAlex Tomas get_block_t *get_block; 161964769240SAlex Tomas struct writeback_control *wbc; 162064769240SAlex Tomas }; 162164769240SAlex Tomas 162264769240SAlex Tomas /* 162364769240SAlex Tomas * mpage_da_submit_io - walks through extent of pages and try to write 162464769240SAlex Tomas * them with __mpage_writepage() 162564769240SAlex Tomas * 162664769240SAlex Tomas * @mpd->inode: inode 162764769240SAlex Tomas * @mpd->first_page: first page of the extent 162864769240SAlex Tomas * @mpd->next_page: page after the last page of the extent 162964769240SAlex Tomas * @mpd->get_block: the filesystem's block mapper function 163064769240SAlex Tomas * 163164769240SAlex Tomas * By the time mpage_da_submit_io() is called we expect all blocks 163264769240SAlex Tomas * to be allocated. this may be wrong if allocation failed. 
163364769240SAlex Tomas * 163464769240SAlex Tomas * As pages are already locked by write_cache_pages(), we can't use it 163564769240SAlex Tomas */ 163664769240SAlex Tomas static int mpage_da_submit_io(struct mpage_da_data *mpd) 163764769240SAlex Tomas { 163864769240SAlex Tomas struct address_space *mapping = mpd->inode->i_mapping; 163964769240SAlex Tomas struct mpage_data mpd_pp = { 164064769240SAlex Tomas .bio = NULL, 164164769240SAlex Tomas .last_block_in_bio = 0, 164264769240SAlex Tomas .get_block = mpd->get_block, 164364769240SAlex Tomas .use_writepage = 1, 164464769240SAlex Tomas }; 164564769240SAlex Tomas int ret = 0, err, nr_pages, i; 164664769240SAlex Tomas unsigned long index, end; 164764769240SAlex Tomas struct pagevec pvec; 164864769240SAlex Tomas 164964769240SAlex Tomas BUG_ON(mpd->next_page <= mpd->first_page); 165064769240SAlex Tomas 165164769240SAlex Tomas pagevec_init(&pvec, 0); 165264769240SAlex Tomas index = mpd->first_page; 165364769240SAlex Tomas end = mpd->next_page - 1; 165464769240SAlex Tomas 165564769240SAlex Tomas while (index <= end) { 165664769240SAlex Tomas /* XXX: optimize tail */ 165764769240SAlex Tomas nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 165864769240SAlex Tomas if (nr_pages == 0) 165964769240SAlex Tomas break; 166064769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 166164769240SAlex Tomas struct page *page = pvec.pages[i]; 166264769240SAlex Tomas 166364769240SAlex Tomas index = page->index; 166464769240SAlex Tomas if (index > end) 166564769240SAlex Tomas break; 166664769240SAlex Tomas index++; 166764769240SAlex Tomas 166864769240SAlex Tomas err = __mpage_writepage(page, mpd->wbc, &mpd_pp); 166964769240SAlex Tomas 167064769240SAlex Tomas /* 167164769240SAlex Tomas * In error case, we have to continue because 167264769240SAlex Tomas * remaining pages are still locked 167364769240SAlex Tomas * XXX: unlock and re-dirty them? 
167464769240SAlex Tomas */ 167564769240SAlex Tomas if (ret == 0) 167664769240SAlex Tomas ret = err; 167764769240SAlex Tomas } 167864769240SAlex Tomas pagevec_release(&pvec); 167964769240SAlex Tomas } 168064769240SAlex Tomas if (mpd_pp.bio) 168164769240SAlex Tomas mpage_bio_submit(WRITE, mpd_pp.bio); 168264769240SAlex Tomas 168364769240SAlex Tomas return ret; 168464769240SAlex Tomas } 168564769240SAlex Tomas 168664769240SAlex Tomas /* 168764769240SAlex Tomas * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers 168864769240SAlex Tomas * 168964769240SAlex Tomas * @mpd->inode - inode to walk through 169064769240SAlex Tomas * @exbh->b_blocknr - first block on a disk 169164769240SAlex Tomas * @exbh->b_size - amount of space in bytes 169264769240SAlex Tomas * @logical - first logical block to start assignment with 169364769240SAlex Tomas * 169464769240SAlex Tomas * the function goes through all passed space and put actual disk 169564769240SAlex Tomas * block numbers into buffer heads, dropping BH_Delay 169664769240SAlex Tomas */ 169764769240SAlex Tomas static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, 169864769240SAlex Tomas struct buffer_head *exbh) 169964769240SAlex Tomas { 170064769240SAlex Tomas struct inode *inode = mpd->inode; 170164769240SAlex Tomas struct address_space *mapping = inode->i_mapping; 170264769240SAlex Tomas int blocks = exbh->b_size >> inode->i_blkbits; 170364769240SAlex Tomas sector_t pblock = exbh->b_blocknr, cur_logical; 170464769240SAlex Tomas struct buffer_head *head, *bh; 170564769240SAlex Tomas unsigned long index, end; 170664769240SAlex Tomas struct pagevec pvec; 170764769240SAlex Tomas int nr_pages, i; 170864769240SAlex Tomas 170964769240SAlex Tomas index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 171064769240SAlex Tomas end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 171164769240SAlex Tomas cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 171264769240SAlex Tomas 171364769240SAlex Tomas pagevec_init(&pvec, 0); 171464769240SAlex Tomas 171564769240SAlex Tomas while (index <= end) { 171664769240SAlex Tomas /* XXX: optimize tail */ 171764769240SAlex Tomas nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 171864769240SAlex Tomas if (nr_pages == 0) 171964769240SAlex Tomas break; 172064769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 172164769240SAlex Tomas struct page *page = pvec.pages[i]; 172264769240SAlex Tomas 172364769240SAlex Tomas index = page->index; 172464769240SAlex Tomas if (index > end) 172564769240SAlex Tomas break; 172664769240SAlex Tomas index++; 172764769240SAlex Tomas 172864769240SAlex Tomas BUG_ON(!PageLocked(page)); 172964769240SAlex Tomas BUG_ON(PageWriteback(page)); 173064769240SAlex Tomas BUG_ON(!page_has_buffers(page)); 173164769240SAlex Tomas 173264769240SAlex Tomas bh = page_buffers(page); 173364769240SAlex Tomas head = bh; 173464769240SAlex Tomas 173564769240SAlex Tomas /* skip blocks out of the range */ 173664769240SAlex Tomas do { 173764769240SAlex Tomas if (cur_logical >= logical) 173864769240SAlex Tomas break; 173964769240SAlex Tomas cur_logical++; 174064769240SAlex Tomas } while ((bh = bh->b_this_page) != head); 174164769240SAlex Tomas 174264769240SAlex Tomas do { 174364769240SAlex Tomas if (cur_logical >= logical + blocks) 174464769240SAlex Tomas break; 174564769240SAlex Tomas if (buffer_delay(bh)) { 174664769240SAlex Tomas bh->b_blocknr = pblock; 174764769240SAlex Tomas clear_buffer_delay(bh); 174861628a3fSMingming Cao } else if 
(buffer_mapped(bh)) 174964769240SAlex Tomas BUG_ON(bh->b_blocknr != pblock); 175064769240SAlex Tomas 175164769240SAlex Tomas cur_logical++; 175264769240SAlex Tomas pblock++; 175364769240SAlex Tomas } while ((bh = bh->b_this_page) != head); 175464769240SAlex Tomas } 175564769240SAlex Tomas pagevec_release(&pvec); 175664769240SAlex Tomas } 175764769240SAlex Tomas } 175864769240SAlex Tomas 175964769240SAlex Tomas 176064769240SAlex Tomas /* 176164769240SAlex Tomas * __unmap_underlying_blocks - just a helper function to unmap 176264769240SAlex Tomas * set of blocks described by @bh 176364769240SAlex Tomas */ 176464769240SAlex Tomas static inline void __unmap_underlying_blocks(struct inode *inode, 176564769240SAlex Tomas struct buffer_head *bh) 176664769240SAlex Tomas { 176764769240SAlex Tomas struct block_device *bdev = inode->i_sb->s_bdev; 176864769240SAlex Tomas int blocks, i; 176964769240SAlex Tomas 177064769240SAlex Tomas blocks = bh->b_size >> inode->i_blkbits; 177164769240SAlex Tomas for (i = 0; i < blocks; i++) 177264769240SAlex Tomas unmap_underlying_metadata(bdev, bh->b_blocknr + i); 177364769240SAlex Tomas } 177464769240SAlex Tomas 177564769240SAlex Tomas /* 177664769240SAlex Tomas * mpage_da_map_blocks - go through given space 177764769240SAlex Tomas * 177864769240SAlex Tomas * @mpd->lbh - bh describing space 177964769240SAlex Tomas * @mpd->get_block - the filesystem's block mapper function 178064769240SAlex Tomas * 178164769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 178264769240SAlex Tomas * 178364769240SAlex Tomas * The function ignores errors ->get_block() returns, thus real 178464769240SAlex Tomas * error handling is postponed to __mpage_writepage() 178564769240SAlex Tomas */ 178664769240SAlex Tomas static void mpage_da_map_blocks(struct mpage_da_data *mpd) 178764769240SAlex Tomas { 178864769240SAlex Tomas struct buffer_head *lbh = &mpd->lbh; 178964769240SAlex Tomas int err = 0, remain = lbh->b_size; 179064769240SAlex Tomas sector_t next = lbh->b_blocknr; 179164769240SAlex Tomas struct buffer_head new; 179264769240SAlex Tomas 179364769240SAlex Tomas /* 179464769240SAlex Tomas * We consider only non-mapped and non-allocated blocks 179564769240SAlex Tomas */ 179664769240SAlex Tomas if (buffer_mapped(lbh) && !buffer_delay(lbh)) 179764769240SAlex Tomas return; 179864769240SAlex Tomas 179964769240SAlex Tomas while (remain) { 180064769240SAlex Tomas new.b_state = lbh->b_state; 180164769240SAlex Tomas new.b_blocknr = 0; 180264769240SAlex Tomas new.b_size = remain; 180364769240SAlex Tomas err = mpd->get_block(mpd->inode, next, &new, 1); 180464769240SAlex Tomas if (err) { 180564769240SAlex Tomas /* 180664769240SAlex Tomas * Rather than implement own error handling 180764769240SAlex Tomas * here, we just leave remaining blocks 180864769240SAlex Tomas * unallocated and try again with ->writepage() 180964769240SAlex Tomas */ 181064769240SAlex Tomas break; 181164769240SAlex Tomas } 181264769240SAlex Tomas BUG_ON(new.b_size == 0); 181364769240SAlex Tomas 181464769240SAlex Tomas if (buffer_new(&new)) 181564769240SAlex Tomas __unmap_underlying_blocks(mpd->inode, &new); 181664769240SAlex Tomas 181764769240SAlex Tomas /* 181864769240SAlex Tomas * If blocks are delayed marked, we need to 181964769240SAlex Tomas * put actual blocknr and drop delayed bit 182064769240SAlex Tomas */ 182164769240SAlex Tomas if (buffer_delay(lbh)) 182264769240SAlex Tomas mpage_put_bnr_to_bhs(mpd, next, &new); 182364769240SAlex Tomas 182464769240SAlex Tomas /* go for the remaining 
blocks */ 182564769240SAlex Tomas next += new.b_size >> mpd->inode->i_blkbits; 182664769240SAlex Tomas remain -= new.b_size; 182764769240SAlex Tomas } 182864769240SAlex Tomas } 182964769240SAlex Tomas 183064769240SAlex Tomas #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | (1 << BH_Delay)) 183164769240SAlex Tomas 183264769240SAlex Tomas /* 183364769240SAlex Tomas * mpage_add_bh_to_extent - try to add one more block to extent of blocks 183464769240SAlex Tomas * 183564769240SAlex Tomas * @mpd->lbh - extent of blocks 183664769240SAlex Tomas * @logical - logical number of the block in the file 183764769240SAlex Tomas * @bh - bh of the block (used to access block's state) 183864769240SAlex Tomas * 183964769240SAlex Tomas * the function is used to collect contig. blocks in same state 184064769240SAlex Tomas */ 184164769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 184264769240SAlex Tomas sector_t logical, struct buffer_head *bh) 184364769240SAlex Tomas { 184464769240SAlex Tomas struct buffer_head *lbh = &mpd->lbh; 184564769240SAlex Tomas sector_t next; 184664769240SAlex Tomas 184764769240SAlex Tomas next = lbh->b_blocknr + (lbh->b_size >> mpd->inode->i_blkbits); 184864769240SAlex Tomas 184964769240SAlex Tomas /* 185064769240SAlex Tomas * First block in the extent 185164769240SAlex Tomas */ 185264769240SAlex Tomas if (lbh->b_size == 0) { 185364769240SAlex Tomas lbh->b_blocknr = logical; 185464769240SAlex Tomas lbh->b_size = bh->b_size; 185564769240SAlex Tomas lbh->b_state = bh->b_state & BH_FLAGS; 185664769240SAlex Tomas return; 185764769240SAlex Tomas } 185864769240SAlex Tomas 185964769240SAlex Tomas /* 186064769240SAlex Tomas * Can we merge the block to our big extent? 186164769240SAlex Tomas */ 186264769240SAlex Tomas if (logical == next && (bh->b_state & BH_FLAGS) == lbh->b_state) { 186364769240SAlex Tomas lbh->b_size += bh->b_size; 186464769240SAlex Tomas return; 186564769240SAlex Tomas } 186664769240SAlex Tomas 186764769240SAlex Tomas /* 186864769240SAlex Tomas * We couldn't merge the block to our extent, so we 186964769240SAlex Tomas * need to flush current extent and start new one 187064769240SAlex Tomas */ 187164769240SAlex Tomas mpage_da_map_blocks(mpd); 187264769240SAlex Tomas 187364769240SAlex Tomas /* 187464769240SAlex Tomas * Now start a new extent 187564769240SAlex Tomas */ 187664769240SAlex Tomas lbh->b_size = bh->b_size; 187764769240SAlex Tomas lbh->b_state = bh->b_state & BH_FLAGS; 187864769240SAlex Tomas lbh->b_blocknr = logical; 187964769240SAlex Tomas } 188064769240SAlex Tomas 188164769240SAlex Tomas /* 188264769240SAlex Tomas * __mpage_da_writepage - finds extent of pages and blocks 188364769240SAlex Tomas * 188464769240SAlex Tomas * @page: page to consider 188564769240SAlex Tomas * @wbc: not used, we just follow rules 188664769240SAlex Tomas * @data: context 188764769240SAlex Tomas * 188864769240SAlex Tomas * The function finds extents of pages and scan them for all blocks. 188964769240SAlex Tomas */ 189064769240SAlex Tomas static int __mpage_da_writepage(struct page *page, 189164769240SAlex Tomas struct writeback_control *wbc, void *data) 189264769240SAlex Tomas { 189364769240SAlex Tomas struct mpage_da_data *mpd = data; 189464769240SAlex Tomas struct inode *inode = mpd->inode; 189564769240SAlex Tomas struct buffer_head *bh, *head, fake; 189664769240SAlex Tomas sector_t logical; 189764769240SAlex Tomas 189864769240SAlex Tomas /* 189964769240SAlex Tomas * Can we merge this page to current extent? 
190064769240SAlex Tomas */ 190164769240SAlex Tomas if (mpd->next_page != page->index) { 190264769240SAlex Tomas /* 190364769240SAlex Tomas * Nope, we can't. So, we map non-allocated blocks 190464769240SAlex Tomas * and start IO on them using __mpage_writepage() 190564769240SAlex Tomas */ 190664769240SAlex Tomas if (mpd->next_page != mpd->first_page) { 190764769240SAlex Tomas mpage_da_map_blocks(mpd); 190864769240SAlex Tomas mpage_da_submit_io(mpd); 190964769240SAlex Tomas } 191064769240SAlex Tomas 191164769240SAlex Tomas /* 191264769240SAlex Tomas * Start next extent of pages ... 191364769240SAlex Tomas */ 191464769240SAlex Tomas mpd->first_page = page->index; 191564769240SAlex Tomas 191664769240SAlex Tomas /* 191764769240SAlex Tomas * ... and blocks 191864769240SAlex Tomas */ 191964769240SAlex Tomas mpd->lbh.b_size = 0; 192064769240SAlex Tomas mpd->lbh.b_state = 0; 192164769240SAlex Tomas mpd->lbh.b_blocknr = 0; 192264769240SAlex Tomas } 192364769240SAlex Tomas 192464769240SAlex Tomas mpd->next_page = page->index + 1; 192564769240SAlex Tomas logical = (sector_t) page->index << 192664769240SAlex Tomas (PAGE_CACHE_SHIFT - inode->i_blkbits); 192764769240SAlex Tomas 192864769240SAlex Tomas if (!page_has_buffers(page)) { 192964769240SAlex Tomas /* 193064769240SAlex Tomas * There is no attached buffer heads yet (mmap?) 193164769240SAlex Tomas * we treat the page asfull of dirty blocks 193264769240SAlex Tomas */ 193364769240SAlex Tomas bh = &fake; 193464769240SAlex Tomas bh->b_size = PAGE_CACHE_SIZE; 193564769240SAlex Tomas bh->b_state = 0; 193664769240SAlex Tomas set_buffer_dirty(bh); 193764769240SAlex Tomas set_buffer_uptodate(bh); 193864769240SAlex Tomas mpage_add_bh_to_extent(mpd, logical, bh); 193964769240SAlex Tomas } else { 194064769240SAlex Tomas /* 194164769240SAlex Tomas * Page with regular buffer heads, just add all dirty ones 194264769240SAlex Tomas */ 194364769240SAlex Tomas head = page_buffers(page); 194464769240SAlex Tomas bh = head; 194564769240SAlex Tomas do { 194664769240SAlex Tomas BUG_ON(buffer_locked(bh)); 194764769240SAlex Tomas if (buffer_dirty(bh)) 194864769240SAlex Tomas mpage_add_bh_to_extent(mpd, logical, bh); 194964769240SAlex Tomas logical++; 195064769240SAlex Tomas } while ((bh = bh->b_this_page) != head); 195164769240SAlex Tomas } 195264769240SAlex Tomas 195364769240SAlex Tomas return 0; 195464769240SAlex Tomas } 195564769240SAlex Tomas 195664769240SAlex Tomas /* 195764769240SAlex Tomas * mpage_da_writepages - walk the list of dirty pages of the given 195864769240SAlex Tomas * address space, allocates non-allocated blocks, maps newly-allocated 195964769240SAlex Tomas * blocks to existing bhs and issue IO them 196064769240SAlex Tomas * 196164769240SAlex Tomas * @mapping: address space structure to write 196264769240SAlex Tomas * @wbc: subtract the number of written pages from *@wbc->nr_to_write 196364769240SAlex Tomas * @get_block: the filesystem's block mapper function. 196464769240SAlex Tomas * 196564769240SAlex Tomas * This is a library function, which implements the writepages() 196664769240SAlex Tomas * address_space_operation. 
196764769240SAlex Tomas *
196864769240SAlex Tomas * In order to avoid duplication of logic that deals with partial pages,
196964769240SAlex Tomas * multiple bio per page, etc, we find non-allocated blocks, allocate
197064769240SAlex Tomas * them with minimal calls to ->get_block() and re-use __mpage_writepage()
197164769240SAlex Tomas *
197264769240SAlex Tomas * It's important that we call __mpage_writepage() only once for each
197364769240SAlex Tomas * involved page, otherwise we'd have to implement more complicated logic
197464769240SAlex Tomas * to deal with pages w/o PG_lock or w/ PG_writeback and so on.
197564769240SAlex Tomas *
197664769240SAlex Tomas * See comments to mpage_writepages()
197764769240SAlex Tomas */
197864769240SAlex Tomas static int mpage_da_writepages(struct address_space *mapping,
197964769240SAlex Tomas struct writeback_control *wbc,
198064769240SAlex Tomas get_block_t get_block)
198164769240SAlex Tomas {
198264769240SAlex Tomas struct mpage_da_data mpd;
198364769240SAlex Tomas int ret;
198464769240SAlex Tomas
198564769240SAlex Tomas if (!get_block)
198664769240SAlex Tomas return generic_writepages(mapping, wbc);
198764769240SAlex Tomas
198864769240SAlex Tomas mpd.wbc = wbc;
198964769240SAlex Tomas mpd.inode = mapping->host;
199064769240SAlex Tomas mpd.lbh.b_size = 0;
199164769240SAlex Tomas mpd.lbh.b_state = 0;
199264769240SAlex Tomas mpd.lbh.b_blocknr = 0;
199364769240SAlex Tomas mpd.first_page = 0;
199464769240SAlex Tomas mpd.next_page = 0;
199564769240SAlex Tomas mpd.get_block = get_block;
199664769240SAlex Tomas
199764769240SAlex Tomas ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, &mpd);
199864769240SAlex Tomas
199964769240SAlex Tomas /*
200064769240SAlex Tomas * Handle last extent of pages
200164769240SAlex Tomas */
200264769240SAlex Tomas if (mpd.next_page != mpd.first_page) {
200364769240SAlex Tomas mpage_da_map_blocks(&mpd);
200464769240SAlex Tomas mpage_da_submit_io(&mpd);
200564769240SAlex Tomas }
200664769240SAlex Tomas
200764769240SAlex Tomas return ret;
200864769240SAlex Tomas }
200964769240SAlex Tomas
201064769240SAlex Tomas /*
201164769240SAlex Tomas * This is a special callback for ->write_begin() only;
201264769240SAlex Tomas * its intention is to return a mapped block or reserve space.
201364769240SAlex Tomas */
201464769240SAlex Tomas static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
201564769240SAlex Tomas struct buffer_head *bh_result, int create)
201664769240SAlex Tomas {
201764769240SAlex Tomas int ret = 0;
201864769240SAlex Tomas
201964769240SAlex Tomas BUG_ON(create == 0);
202064769240SAlex Tomas BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
202164769240SAlex Tomas
202264769240SAlex Tomas /*
202364769240SAlex Tomas * First, we need to know whether the block is already allocated;
202464769240SAlex Tomas * preallocated blocks are unmapped but should be treated
202564769240SAlex Tomas * the same as allocated blocks.
202664769240SAlex Tomas */
2027d2a17637SMingming Cao ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1, bh_result, 0, 0, 0);
2028d2a17637SMingming Cao if ((ret == 0) && !buffer_delay(bh_result)) {
2029d2a17637SMingming Cao /* the block isn't (pre)allocated yet, let's reserve space */
203064769240SAlex Tomas /*
203164769240SAlex Tomas * XXX: __block_prepare_write() unmaps passed block,
203264769240SAlex Tomas * is it OK?
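 *
 * ext4_da_reserve_space() below reserves the one data block plus the
 * worst-case metadata (via ext4_calc_metadata_amount()) needed to map
 * it later.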
203364769240SAlex Tomas */ 2034d2a17637SMingming Cao ret = ext4_da_reserve_space(inode, 1); 2035d2a17637SMingming Cao if (ret) 2036d2a17637SMingming Cao /* not enough space to reserve */ 2037d2a17637SMingming Cao return ret; 2038d2a17637SMingming Cao 203964769240SAlex Tomas map_bh(bh_result, inode->i_sb, 0); 204064769240SAlex Tomas set_buffer_new(bh_result); 204164769240SAlex Tomas set_buffer_delay(bh_result); 204264769240SAlex Tomas } else if (ret > 0) { 204364769240SAlex Tomas bh_result->b_size = (ret << inode->i_blkbits); 204464769240SAlex Tomas ret = 0; 204564769240SAlex Tomas } 204664769240SAlex Tomas 204764769240SAlex Tomas return ret; 204864769240SAlex Tomas } 2049d2a17637SMingming Cao #define EXT4_DELALLOC_RSVED 1 205064769240SAlex Tomas static int ext4_da_get_block_write(struct inode *inode, sector_t iblock, 205164769240SAlex Tomas struct buffer_head *bh_result, int create) 205264769240SAlex Tomas { 205361628a3fSMingming Cao int ret; 205464769240SAlex Tomas unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 205564769240SAlex Tomas loff_t disksize = EXT4_I(inode)->i_disksize; 205664769240SAlex Tomas handle_t *handle = NULL; 205764769240SAlex Tomas 205861628a3fSMingming Cao handle = ext4_journal_current_handle(); 2059f0e6c985SAneesh Kumar K.V if (!handle) { 2060f0e6c985SAneesh Kumar K.V ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks, 2061f0e6c985SAneesh Kumar K.V bh_result, 0, 0, 0); 2062f0e6c985SAneesh Kumar K.V BUG_ON(!ret); 2063f0e6c985SAneesh Kumar K.V } else { 206464769240SAlex Tomas ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks, 2065d2a17637SMingming Cao bh_result, create, 0, EXT4_DELALLOC_RSVED); 2066f0e6c985SAneesh Kumar K.V } 2067f0e6c985SAneesh Kumar K.V 206864769240SAlex Tomas if (ret > 0) { 206964769240SAlex Tomas bh_result->b_size = (ret << inode->i_blkbits); 207064769240SAlex Tomas 207164769240SAlex Tomas /* 207264769240SAlex Tomas * Update on-disk size along with block allocation 207364769240SAlex Tomas * we don't use 'extend_disksize' as size may change 207464769240SAlex Tomas * within already allocated block -bzzz 207564769240SAlex Tomas */ 207664769240SAlex Tomas disksize = ((loff_t) iblock + ret) << inode->i_blkbits; 207764769240SAlex Tomas if (disksize > i_size_read(inode)) 207864769240SAlex Tomas disksize = i_size_read(inode); 207964769240SAlex Tomas if (disksize > EXT4_I(inode)->i_disksize) { 208064769240SAlex Tomas /* 208164769240SAlex Tomas * XXX: replace with spinlock if seen contended -bzzz 208264769240SAlex Tomas */ 208364769240SAlex Tomas down_write(&EXT4_I(inode)->i_data_sem); 208464769240SAlex Tomas if (disksize > EXT4_I(inode)->i_disksize) 208564769240SAlex Tomas EXT4_I(inode)->i_disksize = disksize; 208664769240SAlex Tomas up_write(&EXT4_I(inode)->i_data_sem); 208764769240SAlex Tomas 208864769240SAlex Tomas if (EXT4_I(inode)->i_disksize == disksize) { 208961628a3fSMingming Cao ret = ext4_mark_inode_dirty(handle, inode); 209064769240SAlex Tomas return ret; 209164769240SAlex Tomas } 209261628a3fSMingming Cao } 209361628a3fSMingming Cao ret = 0; 209461628a3fSMingming Cao } 209561628a3fSMingming Cao return ret; 209661628a3fSMingming Cao } 209761628a3fSMingming Cao 209861628a3fSMingming Cao static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh) 209961628a3fSMingming Cao { 2100f0e6c985SAneesh Kumar K.V /* 2101f0e6c985SAneesh Kumar K.V * unmapped buffer is possible for holes. 
2102f0e6c985SAneesh Kumar K.V * delay buffer is possible with delayed allocation
2103f0e6c985SAneesh Kumar K.V */
2104f0e6c985SAneesh Kumar K.V return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
2105f0e6c985SAneesh Kumar K.V }
2106f0e6c985SAneesh Kumar K.V
2107f0e6c985SAneesh Kumar K.V static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
2108f0e6c985SAneesh Kumar K.V struct buffer_head *bh_result, int create)
2109f0e6c985SAneesh Kumar K.V {
2110f0e6c985SAneesh Kumar K.V int ret = 0;
2111f0e6c985SAneesh Kumar K.V unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2112f0e6c985SAneesh Kumar K.V
2113f0e6c985SAneesh Kumar K.V /*
2114f0e6c985SAneesh Kumar K.V * we don't want to do block allocation in writepage,
2115f0e6c985SAneesh Kumar K.V * so call get_block_wrap with create = 0
2116f0e6c985SAneesh Kumar K.V */
2117f0e6c985SAneesh Kumar K.V ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
2118f0e6c985SAneesh Kumar K.V bh_result, 0, 0, 0);
2119f0e6c985SAneesh Kumar K.V if (ret > 0) {
2120f0e6c985SAneesh Kumar K.V bh_result->b_size = (ret << inode->i_blkbits);
2121f0e6c985SAneesh Kumar K.V ret = 0;
2122f0e6c985SAneesh Kumar K.V }
2123f0e6c985SAneesh Kumar K.V return ret;
212461628a3fSMingming Cao }
212561628a3fSMingming Cao
212661628a3fSMingming Cao /*
2127f0e6c985SAneesh Kumar K.V * get called via ext4_da_writepages after taking page lock (have journal handle)
2128f0e6c985SAneesh Kumar K.V * get called via journal_submit_inode_data_buffers (no journal handle)
2129f0e6c985SAneesh Kumar K.V * get called via shrink_page_list via pdflush (no journal handle)
2130f0e6c985SAneesh Kumar K.V * or grab_page_cache when doing write_begin (have journal handle)
213161628a3fSMingming Cao */
213264769240SAlex Tomas static int ext4_da_writepage(struct page *page,
213364769240SAlex Tomas struct writeback_control *wbc)
213464769240SAlex Tomas {
213564769240SAlex Tomas int ret = 0;
213661628a3fSMingming Cao loff_t size;
213761628a3fSMingming Cao unsigned long len;
213861628a3fSMingming Cao struct buffer_head *page_bufs;
213961628a3fSMingming Cao struct inode *inode = page->mapping->host;
214064769240SAlex Tomas
214161628a3fSMingming Cao size = i_size_read(inode);
214261628a3fSMingming Cao if (page->index == size >> PAGE_CACHE_SHIFT)
214361628a3fSMingming Cao len = size & ~PAGE_CACHE_MASK;
214461628a3fSMingming Cao else
214561628a3fSMingming Cao len = PAGE_CACHE_SIZE;
214661628a3fSMingming Cao
2147f0e6c985SAneesh Kumar K.V if (page_has_buffers(page)) {
2148f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page);
2149f0e6c985SAneesh Kumar K.V if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2150f0e6c985SAneesh Kumar K.V ext4_bh_unmapped_or_delay)) {
215161628a3fSMingming Cao /*
2152f0e6c985SAneesh Kumar K.V * We don't want to do block allocation,
2153f0e6c985SAneesh Kumar K.V * so redirty the page and return.
2154cd1aac32SAneesh Kumar K.V * We may reach here when we do a journal commit
2155cd1aac32SAneesh Kumar K.V * via journal_submit_inode_data_buffers.
2156cd1aac32SAneesh Kumar K.V * If we don't have a mapping block we just ignore
2157f0e6c985SAneesh Kumar K.V * them.
We can also reach here via shrink_page_list.
2158f0e6c985SAneesh Kumar K.V */
2159f0e6c985SAneesh Kumar K.V redirty_page_for_writepage(wbc, page);
2160f0e6c985SAneesh Kumar K.V unlock_page(page);
2161f0e6c985SAneesh Kumar K.V return 0;
2162f0e6c985SAneesh Kumar K.V }
2163f0e6c985SAneesh Kumar K.V } else {
2164f0e6c985SAneesh Kumar K.V /*
2165f0e6c985SAneesh Kumar K.V * The test for page_has_buffers() is subtle:
2166f0e6c985SAneesh Kumar K.V * We know the page is dirty but it lost buffers. That means
2167f0e6c985SAneesh Kumar K.V * that at some moment in time after write_begin()/write_end()
2168f0e6c985SAneesh Kumar K.V * has been called all buffers have been clean and thus they
2169f0e6c985SAneesh Kumar K.V * must have been written at least once. So they are all
2170f0e6c985SAneesh Kumar K.V * mapped and we can happily proceed with mapping them
2171f0e6c985SAneesh Kumar K.V * and writing the page.
2172f0e6c985SAneesh Kumar K.V *
2173f0e6c985SAneesh Kumar K.V * Try to initialize the buffer_heads and check whether
2174f0e6c985SAneesh Kumar K.V * all are mapped and non-delay. We don't want to
2175f0e6c985SAneesh Kumar K.V * do block allocation here.
2176f0e6c985SAneesh Kumar K.V */
2177f0e6c985SAneesh Kumar K.V ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
2178f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write);
2179f0e6c985SAneesh Kumar K.V if (!ret) {
2180f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page);
2181f0e6c985SAneesh Kumar K.V /* check whether all are mapped and non-delay */
2182f0e6c985SAneesh Kumar K.V if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2183f0e6c985SAneesh Kumar K.V ext4_bh_unmapped_or_delay)) {
2184f0e6c985SAneesh Kumar K.V redirty_page_for_writepage(wbc, page);
2185f0e6c985SAneesh Kumar K.V unlock_page(page);
2186f0e6c985SAneesh Kumar K.V return 0;
2187f0e6c985SAneesh Kumar K.V }
2188f0e6c985SAneesh Kumar K.V } else {
2189f0e6c985SAneesh Kumar K.V /*
2190f0e6c985SAneesh Kumar K.V * We can't do block allocation here,
2191f0e6c985SAneesh Kumar K.V * so just redirty the page and unlock
2192f0e6c985SAneesh Kumar K.V * and return
219361628a3fSMingming Cao */
219461628a3fSMingming Cao redirty_page_for_writepage(wbc, page);
219561628a3fSMingming Cao unlock_page(page);
219661628a3fSMingming Cao return 0;
219761628a3fSMingming Cao }
219864769240SAlex Tomas }
219964769240SAlex Tomas
220064769240SAlex Tomas if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2201f0e6c985SAneesh Kumar K.V ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
220264769240SAlex Tomas else
2203f0e6c985SAneesh Kumar K.V ret = block_write_full_page(page,
2204f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write,
2205f0e6c985SAneesh Kumar K.V wbc);
220664769240SAlex Tomas
220764769240SAlex Tomas return ret;
220864769240SAlex Tomas }
220964769240SAlex Tomas
221061628a3fSMingming Cao /*
221161628a3fSMingming Cao * For now just follow the DIO way to estimate the max credits
221261628a3fSMingming Cao * needed to write out EXT4_MAX_WRITEBACK_PAGES.
221361628a3fSMingming Cao * TODO: we need to calculate the max credits needed for
221461628a3fSMingming Cao * extent based files; currently the DIO credits are based on
221561628a3fSMingming Cao * the indirect-blocks mapping way.
221661628a3fSMingming Cao * 221761628a3fSMingming Cao * Probably should have a generic way to calculate credits 221861628a3fSMingming Cao * for DIO, writepages, and truncate 221961628a3fSMingming Cao */ 222061628a3fSMingming Cao #define EXT4_MAX_WRITEBACK_PAGES DIO_MAX_BLOCKS 222161628a3fSMingming Cao #define EXT4_MAX_WRITEBACK_CREDITS DIO_CREDITS 222261628a3fSMingming Cao 222364769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping, 222464769240SAlex Tomas struct writeback_control *wbc) 222564769240SAlex Tomas { 222661628a3fSMingming Cao struct inode *inode = mapping->host; 222761628a3fSMingming Cao handle_t *handle = NULL; 222861628a3fSMingming Cao int needed_blocks; 222961628a3fSMingming Cao int ret = 0; 223061628a3fSMingming Cao long to_write; 223161628a3fSMingming Cao loff_t range_start = 0; 223261628a3fSMingming Cao 223361628a3fSMingming Cao /* 223461628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting 223561628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput() 223661628a3fSMingming Cao * because that could violate lock ordering on umount 223761628a3fSMingming Cao */ 223861628a3fSMingming Cao if (!mapping->nrpages) 223961628a3fSMingming Cao return 0; 224061628a3fSMingming Cao 224161628a3fSMingming Cao /* 224261628a3fSMingming Cao * Estimate the worse case needed credits to write out 224361628a3fSMingming Cao * EXT4_MAX_BUF_BLOCKS pages 224461628a3fSMingming Cao */ 224561628a3fSMingming Cao needed_blocks = EXT4_MAX_WRITEBACK_CREDITS; 224661628a3fSMingming Cao 224761628a3fSMingming Cao to_write = wbc->nr_to_write; 224861628a3fSMingming Cao if (!wbc->range_cyclic) { 224961628a3fSMingming Cao /* 225061628a3fSMingming Cao * If range_cyclic is not set force range_cont 225161628a3fSMingming Cao * and save the old writeback_index 225261628a3fSMingming Cao */ 225361628a3fSMingming Cao wbc->range_cont = 1; 225461628a3fSMingming Cao range_start = wbc->range_start; 225561628a3fSMingming Cao } 225661628a3fSMingming Cao 225761628a3fSMingming Cao while (!ret && to_write) { 225861628a3fSMingming Cao /* start a new transaction*/ 225961628a3fSMingming Cao handle = ext4_journal_start(inode, needed_blocks); 226061628a3fSMingming Cao if (IS_ERR(handle)) { 226161628a3fSMingming Cao ret = PTR_ERR(handle); 226261628a3fSMingming Cao goto out_writepages; 226361628a3fSMingming Cao } 2264cd1aac32SAneesh Kumar K.V if (ext4_should_order_data(inode)) { 2265cd1aac32SAneesh Kumar K.V /* 2266cd1aac32SAneesh Kumar K.V * With ordered mode we need to add 2267cd1aac32SAneesh Kumar K.V * the inode to the journal handle 2268cd1aac32SAneesh Kumar K.V * when we do block allocation. 
2269cd1aac32SAneesh Kumar K.V */ 2270cd1aac32SAneesh Kumar K.V ret = ext4_jbd2_file_inode(handle, inode); 2271cd1aac32SAneesh Kumar K.V if (ret) { 2272cd1aac32SAneesh Kumar K.V ext4_journal_stop(handle); 2273cd1aac32SAneesh Kumar K.V goto out_writepages; 2274cd1aac32SAneesh Kumar K.V } 2275cd1aac32SAneesh Kumar K.V 2276cd1aac32SAneesh Kumar K.V } 227761628a3fSMingming Cao /* 227861628a3fSMingming Cao * set the max number of dirty pages that can be written at a time 227961628a3fSMingming Cao * to fit into the reserved transaction credits 228061628a3fSMingming Cao */ 228161628a3fSMingming Cao if (wbc->nr_to_write > EXT4_MAX_WRITEBACK_PAGES) 228261628a3fSMingming Cao wbc->nr_to_write = EXT4_MAX_WRITEBACK_PAGES; 228361628a3fSMingming Cao 228461628a3fSMingming Cao to_write -= wbc->nr_to_write; 228561628a3fSMingming Cao ret = mpage_da_writepages(mapping, wbc, 228661628a3fSMingming Cao ext4_da_get_block_write); 228761628a3fSMingming Cao ext4_journal_stop(handle); 228861628a3fSMingming Cao if (wbc->nr_to_write) { 228961628a3fSMingming Cao /* 229061628a3fSMingming Cao * There is no more writeout needed, 229161628a3fSMingming Cao * or we requested a non-blocking writeout 229261628a3fSMingming Cao * and we found the device congested 229361628a3fSMingming Cao */ 229461628a3fSMingming Cao to_write += wbc->nr_to_write; 229561628a3fSMingming Cao break; 229661628a3fSMingming Cao } 229761628a3fSMingming Cao wbc->nr_to_write = to_write; 229861628a3fSMingming Cao } 229961628a3fSMingming Cao 230061628a3fSMingming Cao out_writepages: 230161628a3fSMingming Cao wbc->nr_to_write = to_write; 230261628a3fSMingming Cao if (range_start) 230361628a3fSMingming Cao wbc->range_start = range_start; 230461628a3fSMingming Cao return ret; 230564769240SAlex Tomas } 230664769240SAlex Tomas 230764769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 230864769240SAlex Tomas loff_t pos, unsigned len, unsigned flags, 230964769240SAlex Tomas struct page **pagep, void **fsdata) 231064769240SAlex Tomas { 2311d2a17637SMingming Cao int ret, retries = 0; 231264769240SAlex Tomas struct page *page; 231364769240SAlex Tomas pgoff_t index; 231464769240SAlex Tomas unsigned from, to; 231564769240SAlex Tomas struct inode *inode = mapping->host; 231664769240SAlex Tomas handle_t *handle; 231764769240SAlex Tomas 231864769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT; 231964769240SAlex Tomas from = pos & (PAGE_CACHE_SIZE - 1); 232064769240SAlex Tomas to = from + len; 232164769240SAlex Tomas 2322d2a17637SMingming Cao retry: 232364769240SAlex Tomas /* 232464769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update 232564769240SAlex Tomas * if there is delayed block allocation. But we still need 232664769240SAlex Tomas * to journal the i_disksize update if the write to the end 232764769240SAlex Tomas * of the file lands in an already mapped buffer.
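 *
 * Illustrative example (added, not in the original; assumes 4k blocks):
 * with i_size = 1000 and block 0 of the file already mapped on disk, a
 * write() ending at offset 2000 stays inside that mapped block, so no
 * delayed allocation is pending for it and the only metadata change is
 * the larger i_disksize, hence the 1-credit handle started below.  A
 * write into a hole, by contrast, leaves i_disksize alone here; it is
 * journalled later, when the delayed blocks are actually allocated.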
232864769240SAlex Tomas */ 232964769240SAlex Tomas handle = ext4_journal_start(inode, 1); 233064769240SAlex Tomas if (IS_ERR(handle)) { 233164769240SAlex Tomas ret = PTR_ERR(handle); 233264769240SAlex Tomas goto out; 233364769240SAlex Tomas } 233464769240SAlex Tomas 233564769240SAlex Tomas page = __grab_cache_page(mapping, index); 2336d5a0d4f7SEric Sandeen if (!page) { 2337d5a0d4f7SEric Sandeen ext4_journal_stop(handle); 2338d5a0d4f7SEric Sandeen ret = -ENOMEM; 2339d5a0d4f7SEric Sandeen goto out; 2340d5a0d4f7SEric Sandeen } 234164769240SAlex Tomas *pagep = page; 234264769240SAlex Tomas 234364769240SAlex Tomas ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 234464769240SAlex Tomas ext4_da_get_block_prep); 234564769240SAlex Tomas if (ret < 0) { 234664769240SAlex Tomas unlock_page(page); 234764769240SAlex Tomas ext4_journal_stop(handle); 234864769240SAlex Tomas page_cache_release(page); 234964769240SAlex Tomas } 235064769240SAlex Tomas 2351d2a17637SMingming Cao if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 2352d2a17637SMingming Cao goto retry; 235364769240SAlex Tomas out: 235464769240SAlex Tomas return ret; 235564769240SAlex Tomas } 235664769240SAlex Tomas 2357632eaeabSMingming Cao /* 2358632eaeabSMingming Cao * Check if we should update i_disksize 2359632eaeabSMingming Cao * when write to the end of file but not require block allocation 2360632eaeabSMingming Cao */ 2361632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page, 2362632eaeabSMingming Cao unsigned long offset) 2363632eaeabSMingming Cao { 2364632eaeabSMingming Cao struct buffer_head *bh; 2365632eaeabSMingming Cao struct inode *inode = page->mapping->host; 2366632eaeabSMingming Cao unsigned int idx; 2367632eaeabSMingming Cao int i; 2368632eaeabSMingming Cao 2369632eaeabSMingming Cao bh = page_buffers(page); 2370632eaeabSMingming Cao idx = offset >> inode->i_blkbits; 2371632eaeabSMingming Cao 2372632eaeabSMingming Cao for (i=0; i < idx; i++) 2373632eaeabSMingming Cao bh = bh->b_this_page; 2374632eaeabSMingming Cao 2375632eaeabSMingming Cao if (!buffer_mapped(bh) || (buffer_delay(bh))) 2376632eaeabSMingming Cao return 0; 2377632eaeabSMingming Cao return 1; 2378632eaeabSMingming Cao } 2379632eaeabSMingming Cao 238064769240SAlex Tomas static int ext4_da_write_end(struct file *file, 238164769240SAlex Tomas struct address_space *mapping, 238264769240SAlex Tomas loff_t pos, unsigned len, unsigned copied, 238364769240SAlex Tomas struct page *page, void *fsdata) 238464769240SAlex Tomas { 238564769240SAlex Tomas struct inode *inode = mapping->host; 238664769240SAlex Tomas int ret = 0, ret2; 238764769240SAlex Tomas handle_t *handle = ext4_journal_current_handle(); 238864769240SAlex Tomas loff_t new_i_size; 2389632eaeabSMingming Cao unsigned long start, end; 2390632eaeabSMingming Cao 2391632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1); 2392632eaeabSMingming Cao end = start + copied -1; 239364769240SAlex Tomas 239464769240SAlex Tomas /* 239564769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size 239664769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty 239764769240SAlex Tomas * into that. 
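 *
 * Worked example (added, not in the original; assumes 1k blocks): a
 * copy of 600 bytes at page offset 3000 gives start = 3000 and
 * end = 3599, so ext4_da_should_update_i_disksize() above looks at the
 * buffer with idx = 3599 >> 10 = 3, the fourth buffer of the page;
 * only if that buffer is mapped and not delayed (and the write actually
 * extends the file) do we bump i_disksize without waiting for block
 * allocation.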
239864769240SAlex Tomas */ 239964769240SAlex Tomas 240064769240SAlex Tomas new_i_size = pos + copied; 2401632eaeabSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) { 2402632eaeabSMingming Cao if (ext4_da_should_update_i_disksize(page, end)) { 2403632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem); 2404632eaeabSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) { 240564769240SAlex Tomas /* 2406632eaeabSMingming Cao * Updating i_disksize when extending file 2407632eaeabSMingming Cao * without needing block allocation 240864769240SAlex Tomas */ 240964769240SAlex Tomas if (ext4_should_order_data(inode)) 2410632eaeabSMingming Cao ret = ext4_jbd2_file_inode(handle, 2411632eaeabSMingming Cao inode); 241264769240SAlex Tomas 241364769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size; 241464769240SAlex Tomas } 2415632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem); 2416632eaeabSMingming Cao } 2417632eaeabSMingming Cao } 241864769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied, 241964769240SAlex Tomas page, fsdata); 242064769240SAlex Tomas copied = ret2; 242164769240SAlex Tomas if (ret2 < 0) 242264769240SAlex Tomas ret = ret2; 242364769240SAlex Tomas ret2 = ext4_journal_stop(handle); 242464769240SAlex Tomas if (!ret) 242564769240SAlex Tomas ret = ret2; 242664769240SAlex Tomas 242764769240SAlex Tomas return ret ? ret : copied; 242864769240SAlex Tomas } 242964769240SAlex Tomas 243064769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 243164769240SAlex Tomas { 243264769240SAlex Tomas /* 243364769240SAlex Tomas * Drop reserved blocks 243464769240SAlex Tomas */ 243564769240SAlex Tomas BUG_ON(!PageLocked(page)); 243664769240SAlex Tomas if (!page_has_buffers(page)) 243764769240SAlex Tomas goto out; 243864769240SAlex Tomas 2439d2a17637SMingming Cao ext4_da_page_release_reservation(page, offset); 244064769240SAlex Tomas 244164769240SAlex Tomas out: 244264769240SAlex Tomas ext4_invalidatepage(page, offset); 244364769240SAlex Tomas 244464769240SAlex Tomas return; 244564769240SAlex Tomas } 244664769240SAlex Tomas 244764769240SAlex Tomas 244864769240SAlex Tomas /* 2449ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by 2450ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data. 2451ac27a0ecSDave Kleikamp * 2452ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the 2453617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling 2454ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the 2455ac27a0ecSDave Kleikamp * data getting swapped to that swapfile suddenly gets overwritten by 2456ac27a0ecSDave Kleikamp * the original zero's written out previously to the journal and 2457ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache. 2458ac27a0ecSDave Kleikamp * 2459ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file, 2460ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache. 
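 *
 * Illustrative note (added, not in the original): the usual way user
 * space reaches this path is the FIBMAP ioctl, roughly
 *
 *	int blk = logical_block_nr;
 *	ioctl(fd, FIBMAP, &blk);
 *
 * which fills blk with the physical block number (CAP_SYS_RAWIO is
 * required).  That is how boot loaders such as lilo learn where a
 * file's blocks live, and why the flushing below is needed before we
 * answer.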
2461ac27a0ecSDave Kleikamp */ 2462617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2463ac27a0ecSDave Kleikamp { 2464ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 2465ac27a0ecSDave Kleikamp journal_t *journal; 2466ac27a0ecSDave Kleikamp int err; 2467ac27a0ecSDave Kleikamp 246864769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 246964769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) { 247064769240SAlex Tomas /* 247164769240SAlex Tomas * With delalloc we want to sync the file 247264769240SAlex Tomas * so that we can make sure we allocate 247364769240SAlex Tomas * blocks for file 247464769240SAlex Tomas */ 247564769240SAlex Tomas filemap_write_and_wait(mapping); 247664769240SAlex Tomas } 247764769240SAlex Tomas 2478617ba13bSMingming Cao if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { 2479ac27a0ecSDave Kleikamp /* 2480ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of 2481ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare: 2482ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file 2483ac27a0ecSDave Kleikamp * do we expect this to happen. 2484ac27a0ecSDave Kleikamp * 2485ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not 2486ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be 2487ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at 2488ac27a0ecSDave Kleikamp * will.) 2489ac27a0ecSDave Kleikamp * 2490617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than 2491ac27a0ecSDave Kleikamp * regular files. If somebody wants to bmap a directory 2492ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer 2493ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve 2494ac27a0ecSDave Kleikamp * everything they get. 2495ac27a0ecSDave Kleikamp */ 2496ac27a0ecSDave Kleikamp 2497617ba13bSMingming Cao EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; 2498617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2499dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2500dab291afSMingming Cao err = jbd2_journal_flush(journal); 2501dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2502ac27a0ecSDave Kleikamp 2503ac27a0ecSDave Kleikamp if (err) 2504ac27a0ecSDave Kleikamp return 0; 2505ac27a0ecSDave Kleikamp } 2506ac27a0ecSDave Kleikamp 2507617ba13bSMingming Cao return generic_block_bmap(mapping,block,ext4_get_block); 2508ac27a0ecSDave Kleikamp } 2509ac27a0ecSDave Kleikamp 2510ac27a0ecSDave Kleikamp static int bget_one(handle_t *handle, struct buffer_head *bh) 2511ac27a0ecSDave Kleikamp { 2512ac27a0ecSDave Kleikamp get_bh(bh); 2513ac27a0ecSDave Kleikamp return 0; 2514ac27a0ecSDave Kleikamp } 2515ac27a0ecSDave Kleikamp 2516ac27a0ecSDave Kleikamp static int bput_one(handle_t *handle, struct buffer_head *bh) 2517ac27a0ecSDave Kleikamp { 2518ac27a0ecSDave Kleikamp put_bh(bh); 2519ac27a0ecSDave Kleikamp return 0; 2520ac27a0ecSDave Kleikamp } 2521ac27a0ecSDave Kleikamp 2522ac27a0ecSDave Kleikamp /* 2523678aaf48SJan Kara * Note that we don't need to start a transaction unless we're journaling data 2524678aaf48SJan Kara * because we should have holes filled from ext4_page_mkwrite(). 
We don't even 2525678aaf48SJan Kara * need to file the inode to the transaction's list in ordered mode because if 2526678aaf48SJan Kara * we are writing back data added by write(), the inode is already there and if 2527678aaf48SJan Kara * we are writing back data modified via mmap(), no one guarantees in which 2528678aaf48SJan Kara * transaction the data will hit the disk. In case we are journaling data, we 2529678aaf48SJan Kara * cannot start transaction directly because transaction start ranks above page 2530678aaf48SJan Kara * lock so we have to do some magic. 2531ac27a0ecSDave Kleikamp * 2532678aaf48SJan Kara * In all journaling modes block_write_full_page() will start the I/O. 2533ac27a0ecSDave Kleikamp * 2534ac27a0ecSDave Kleikamp * Problem: 2535ac27a0ecSDave Kleikamp * 2536617ba13bSMingming Cao * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 2537617ba13bSMingming Cao * ext4_writepage() 2538ac27a0ecSDave Kleikamp * 2539ac27a0ecSDave Kleikamp * Similar for: 2540ac27a0ecSDave Kleikamp * 2541617ba13bSMingming Cao * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ... 2542ac27a0ecSDave Kleikamp * 2543617ba13bSMingming Cao * Same applies to ext4_get_block(). We will deadlock on various things like 25440e855ac8SAneesh Kumar K.V * lock_journal and i_data_sem 2545ac27a0ecSDave Kleikamp * 2546ac27a0ecSDave Kleikamp * Setting PF_MEMALLOC here doesn't work - too many internal memory 2547ac27a0ecSDave Kleikamp * allocations fail. 2548ac27a0ecSDave Kleikamp * 2549ac27a0ecSDave Kleikamp * 16May01: If we're reentered then journal_current_handle() will be 2550ac27a0ecSDave Kleikamp * non-zero. We simply *return*. 2551ac27a0ecSDave Kleikamp * 2552ac27a0ecSDave Kleikamp * 1 July 2001: @@@ FIXME: 2553ac27a0ecSDave Kleikamp * In journalled data mode, a data buffer may be metadata against the 2554ac27a0ecSDave Kleikamp * current transaction. But the same file is part of a shared mapping 2555ac27a0ecSDave Kleikamp * and someone does a writepage() on it. 2556ac27a0ecSDave Kleikamp * 2557ac27a0ecSDave Kleikamp * We will move the buffer onto the async_data list, but *after* it has 2558ac27a0ecSDave Kleikamp * been dirtied. So there's a small window where we have dirty data on 2559ac27a0ecSDave Kleikamp * BJ_Metadata. 2560ac27a0ecSDave Kleikamp * 2561ac27a0ecSDave Kleikamp * Note that this only applies to the last partial page in the file. The 2562ac27a0ecSDave Kleikamp * bit which block_write_full_page() uses prepare/commit for. (That's 2563ac27a0ecSDave Kleikamp * broken code anyway: it's wrong for msync()). 2564ac27a0ecSDave Kleikamp * 2565ac27a0ecSDave Kleikamp * It's a rare case: affects the final partial page, for journalled data 2566ac27a0ecSDave Kleikamp * where the file is subject to both write() and writepage() in the same 2567ac27a0ecSDave Kleikamp * transaction. To fix it we'll need a custom block_write_full_page(). 2568ac27a0ecSDave Kleikamp * We'll probably need that anyway for journalling writepage() output. 2569ac27a0ecSDave Kleikamp * 2570ac27a0ecSDave Kleikamp * We don't honour synchronous mounts for writepage(). That would be 2571ac27a0ecSDave Kleikamp * disastrous. Any write() or metadata operation will sync the fs for 2572ac27a0ecSDave Kleikamp * us.
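 *
 * Added clarification (not in the original): "transaction start ranks
 * above page lock" means a handle must never be started while a page
 * lock is held.  __ext4_journalled_writepage() below therefore prepares
 * the buffers while the page is still locked, takes references on them,
 * unlocks the page and only then calls ext4_journal_start().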
2573ac27a0ecSDave Kleikamp * 2574ac27a0ecSDave Kleikamp */ 2575678aaf48SJan Kara static int __ext4_normal_writepage(struct page *page, 2576cf108bcaSJan Kara struct writeback_control *wbc) 2577cf108bcaSJan Kara { 2578cf108bcaSJan Kara struct inode *inode = page->mapping->host; 2579cf108bcaSJan Kara 2580cf108bcaSJan Kara if (test_opt(inode->i_sb, NOBH)) 2581f0e6c985SAneesh Kumar K.V return nobh_writepage(page, 2582f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write, wbc); 2583cf108bcaSJan Kara else 2584f0e6c985SAneesh Kumar K.V return block_write_full_page(page, 2585f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write, 2586f0e6c985SAneesh Kumar K.V wbc); 2587cf108bcaSJan Kara } 2588cf108bcaSJan Kara 2589678aaf48SJan Kara static int ext4_normal_writepage(struct page *page, 2590ac27a0ecSDave Kleikamp struct writeback_control *wbc) 2591ac27a0ecSDave Kleikamp { 2592ac27a0ecSDave Kleikamp struct inode *inode = page->mapping->host; 2593cf108bcaSJan Kara loff_t size = i_size_read(inode); 2594cf108bcaSJan Kara loff_t len; 2595cf108bcaSJan Kara 2596cf108bcaSJan Kara J_ASSERT(PageLocked(page)); 2597cf108bcaSJan Kara if (page->index == size >> PAGE_CACHE_SHIFT) 2598cf108bcaSJan Kara len = size & ~PAGE_CACHE_MASK; 2599cf108bcaSJan Kara else 2600cf108bcaSJan Kara len = PAGE_CACHE_SIZE; 2601f0e6c985SAneesh Kumar K.V 2602f0e6c985SAneesh Kumar K.V if (page_has_buffers(page)) { 2603f0e6c985SAneesh Kumar K.V /* if page has buffers it should all be mapped 2604f0e6c985SAneesh Kumar K.V * and allocated. If there are not buffers attached 2605f0e6c985SAneesh Kumar K.V * to the page we know the page is dirty but it lost 2606f0e6c985SAneesh Kumar K.V * buffers. That means that at some moment in time 2607f0e6c985SAneesh Kumar K.V * after write_begin() / write_end() has been called 2608f0e6c985SAneesh Kumar K.V * all buffers have been clean and thus they must have been 2609f0e6c985SAneesh Kumar K.V * written at least once. So they are all mapped and we can 2610f0e6c985SAneesh Kumar K.V * happily proceed with mapping them and writing the page. 
2611f0e6c985SAneesh Kumar K.V */ 2612cf108bcaSJan Kara BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 2613cf108bcaSJan Kara ext4_bh_unmapped_or_delay)); 2614f0e6c985SAneesh Kumar K.V } 2615cf108bcaSJan Kara 2616cf108bcaSJan Kara if (!ext4_journal_current_handle()) 2617678aaf48SJan Kara return __ext4_normal_writepage(page, wbc); 2618cf108bcaSJan Kara 2619cf108bcaSJan Kara redirty_page_for_writepage(wbc, page); 2620cf108bcaSJan Kara unlock_page(page); 2621cf108bcaSJan Kara return 0; 2622cf108bcaSJan Kara } 2623cf108bcaSJan Kara 2624cf108bcaSJan Kara static int __ext4_journalled_writepage(struct page *page, 2625cf108bcaSJan Kara struct writeback_control *wbc) 2626cf108bcaSJan Kara { 2627cf108bcaSJan Kara struct address_space *mapping = page->mapping; 2628cf108bcaSJan Kara struct inode *inode = mapping->host; 2629cf108bcaSJan Kara struct buffer_head *page_bufs; 2630ac27a0ecSDave Kleikamp handle_t *handle = NULL; 2631ac27a0ecSDave Kleikamp int ret = 0; 2632ac27a0ecSDave Kleikamp int err; 2633ac27a0ecSDave Kleikamp 2634f0e6c985SAneesh Kumar K.V ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, 2635f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write); 2636cf108bcaSJan Kara if (ret != 0) 2637cf108bcaSJan Kara goto out_unlock; 2638cf108bcaSJan Kara 2639cf108bcaSJan Kara page_bufs = page_buffers(page); 2640cf108bcaSJan Kara walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL, 2641cf108bcaSJan Kara bget_one); 2642cf108bcaSJan Kara /* As soon as we unlock the page, it can go away, but we have 2643cf108bcaSJan Kara * references to buffers so we are safe */ 2644cf108bcaSJan Kara unlock_page(page); 2645ac27a0ecSDave Kleikamp 2646617ba13bSMingming Cao handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 2647ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 2648ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 2649cf108bcaSJan Kara goto out; 2650ac27a0ecSDave Kleikamp } 2651ac27a0ecSDave Kleikamp 2652cf108bcaSJan Kara ret = walk_page_buffers(handle, page_bufs, 0, 2653cf108bcaSJan Kara PAGE_CACHE_SIZE, NULL, do_journal_get_write_access); 2654ac27a0ecSDave Kleikamp 2655cf108bcaSJan Kara err = walk_page_buffers(handle, page_bufs, 0, 2656cf108bcaSJan Kara PAGE_CACHE_SIZE, NULL, write_end_fn); 2657cf108bcaSJan Kara if (ret == 0) 2658cf108bcaSJan Kara ret = err; 2659617ba13bSMingming Cao err = ext4_journal_stop(handle); 2660ac27a0ecSDave Kleikamp if (!ret) 2661ac27a0ecSDave Kleikamp ret = err; 2662ac27a0ecSDave Kleikamp 2663cf108bcaSJan Kara walk_page_buffers(handle, page_bufs, 0, 2664cf108bcaSJan Kara PAGE_CACHE_SIZE, NULL, bput_one); 2665cf108bcaSJan Kara EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 2666cf108bcaSJan Kara goto out; 2667cf108bcaSJan Kara 2668cf108bcaSJan Kara out_unlock: 2669ac27a0ecSDave Kleikamp unlock_page(page); 2670cf108bcaSJan Kara out: 2671ac27a0ecSDave Kleikamp return ret; 2672ac27a0ecSDave Kleikamp } 2673ac27a0ecSDave Kleikamp 2674617ba13bSMingming Cao static int ext4_journalled_writepage(struct page *page, 2675ac27a0ecSDave Kleikamp struct writeback_control *wbc) 2676ac27a0ecSDave Kleikamp { 2677ac27a0ecSDave Kleikamp struct inode *inode = page->mapping->host; 2678cf108bcaSJan Kara loff_t size = i_size_read(inode); 2679cf108bcaSJan Kara loff_t len; 2680cf108bcaSJan Kara 2681cf108bcaSJan Kara J_ASSERT(PageLocked(page)); 2682cf108bcaSJan Kara if (page->index == size >> PAGE_CACHE_SHIFT) 2683cf108bcaSJan Kara len = size & ~PAGE_CACHE_MASK; 2684cf108bcaSJan Kara else 2685cf108bcaSJan Kara len = PAGE_CACHE_SIZE; 2686f0e6c985SAneesh Kumar K.V 
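	/*
	 * Worked example (added, not in the original): the len computed
	 * above only differs from PAGE_CACHE_SIZE for the last page of
	 * the file.  With 4k pages and i_size = 10000, page index 2 is
	 * the final page and len works out to 10000 mod 4096 = 1808, so
	 * only the first 1808 bytes of that page are covered by the
	 * buffer check below; the rest of the page is beyond EOF.
	 */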
2687f0e6c985SAneesh Kumar K.V if (page_has_buffers(page)) { 2688f0e6c985SAneesh Kumar K.V /* if page has buffers it should all be mapped 2689f0e6c985SAneesh Kumar K.V * and allocated. If there are not buffers attached 2690f0e6c985SAneesh Kumar K.V * to the page we know the page is dirty but it lost 2691f0e6c985SAneesh Kumar K.V * buffers. That means that at some moment in time 2692f0e6c985SAneesh Kumar K.V * after write_begin() / write_end() has been called 2693f0e6c985SAneesh Kumar K.V * all buffers have been clean and thus they must have been 2694f0e6c985SAneesh Kumar K.V * written at least once. So they are all mapped and we can 2695f0e6c985SAneesh Kumar K.V * happily proceed with mapping them and writing the page. 2696f0e6c985SAneesh Kumar K.V */ 2697cf108bcaSJan Kara BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 2698cf108bcaSJan Kara ext4_bh_unmapped_or_delay)); 2699f0e6c985SAneesh Kumar K.V } 2700ac27a0ecSDave Kleikamp 2701617ba13bSMingming Cao if (ext4_journal_current_handle()) 2702ac27a0ecSDave Kleikamp goto no_write; 2703ac27a0ecSDave Kleikamp 2704cf108bcaSJan Kara if (PageChecked(page)) { 2705ac27a0ecSDave Kleikamp /* 2706ac27a0ecSDave Kleikamp * It's mmapped pagecache. Add buffers and journal it. There 2707ac27a0ecSDave Kleikamp * doesn't seem much point in redirtying the page here. 2708ac27a0ecSDave Kleikamp */ 2709ac27a0ecSDave Kleikamp ClearPageChecked(page); 2710cf108bcaSJan Kara return __ext4_journalled_writepage(page, wbc); 2711ac27a0ecSDave Kleikamp } else { 2712ac27a0ecSDave Kleikamp /* 2713ac27a0ecSDave Kleikamp * It may be a page full of checkpoint-mode buffers. We don't 2714ac27a0ecSDave Kleikamp * really know unless we go poke around in the buffer_heads. 2715ac27a0ecSDave Kleikamp * But block_write_full_page will do the right thing. 
2716ac27a0ecSDave Kleikamp */ 2717f0e6c985SAneesh Kumar K.V return block_write_full_page(page, 2718f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write, 2719f0e6c985SAneesh Kumar K.V wbc); 2720ac27a0ecSDave Kleikamp } 2721ac27a0ecSDave Kleikamp no_write: 2722ac27a0ecSDave Kleikamp redirty_page_for_writepage(wbc, page); 2723ac27a0ecSDave Kleikamp unlock_page(page); 2724cf108bcaSJan Kara return 0; 2725ac27a0ecSDave Kleikamp } 2726ac27a0ecSDave Kleikamp 2727617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page) 2728ac27a0ecSDave Kleikamp { 2729617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block); 2730ac27a0ecSDave Kleikamp } 2731ac27a0ecSDave Kleikamp 2732ac27a0ecSDave Kleikamp static int 2733617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping, 2734ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages) 2735ac27a0ecSDave Kleikamp { 2736617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2737ac27a0ecSDave Kleikamp } 2738ac27a0ecSDave Kleikamp 2739617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset) 2740ac27a0ecSDave Kleikamp { 2741617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2742ac27a0ecSDave Kleikamp 2743ac27a0ecSDave Kleikamp /* 2744ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying 2745ac27a0ecSDave Kleikamp */ 2746ac27a0ecSDave Kleikamp if (offset == 0) 2747ac27a0ecSDave Kleikamp ClearPageChecked(page); 2748ac27a0ecSDave Kleikamp 2749dab291afSMingming Cao jbd2_journal_invalidatepage(journal, page, offset); 2750ac27a0ecSDave Kleikamp } 2751ac27a0ecSDave Kleikamp 2752617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait) 2753ac27a0ecSDave Kleikamp { 2754617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2755ac27a0ecSDave Kleikamp 2756ac27a0ecSDave Kleikamp WARN_ON(PageChecked(page)); 2757ac27a0ecSDave Kleikamp if (!page_has_buffers(page)) 2758ac27a0ecSDave Kleikamp return 0; 2759dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait); 2760ac27a0ecSDave Kleikamp } 2761ac27a0ecSDave Kleikamp 2762ac27a0ecSDave Kleikamp /* 2763ac27a0ecSDave Kleikamp * If the O_DIRECT write will extend the file then add this inode to the 2764ac27a0ecSDave Kleikamp * orphan list. So recovery will truncate it back to the original size 2765ac27a0ecSDave Kleikamp * if the machine crashes during the write. 2766ac27a0ecSDave Kleikamp * 2767ac27a0ecSDave Kleikamp * If the O_DIRECT write is intantiating holes inside i_size and the machine 27687fb5409dSJan Kara * crashes then stale disk data _may_ be exposed inside the file. But current 27697fb5409dSJan Kara * VFS code falls back into buffered path in that case so we are safe. 
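 *
 * Illustrative timeline (added, not in the original): with i_size = 4096,
 * an O_DIRECT write of 8192 bytes at offset 4096 first puts the inode on
 * the orphan list and leaves i_disksize at the old 4096.  If the machine
 * crashes while the I/O is in flight, journal recovery finds the orphan
 * and truncates the file back to 4096, so no uninitialised blocks become
 * visible past the old end of file.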
2770ac27a0ecSDave Kleikamp */ 2771617ba13bSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 2772ac27a0ecSDave Kleikamp const struct iovec *iov, loff_t offset, 2773ac27a0ecSDave Kleikamp unsigned long nr_segs) 2774ac27a0ecSDave Kleikamp { 2775ac27a0ecSDave Kleikamp struct file *file = iocb->ki_filp; 2776ac27a0ecSDave Kleikamp struct inode *inode = file->f_mapping->host; 2777617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 27787fb5409dSJan Kara handle_t *handle; 2779ac27a0ecSDave Kleikamp ssize_t ret; 2780ac27a0ecSDave Kleikamp int orphan = 0; 2781ac27a0ecSDave Kleikamp size_t count = iov_length(iov, nr_segs); 2782ac27a0ecSDave Kleikamp 2783ac27a0ecSDave Kleikamp if (rw == WRITE) { 2784ac27a0ecSDave Kleikamp loff_t final_size = offset + count; 2785ac27a0ecSDave Kleikamp 27867fb5409dSJan Kara if (final_size > inode->i_size) { 27877fb5409dSJan Kara /* Credits for sb + inode write */ 27887fb5409dSJan Kara handle = ext4_journal_start(inode, 2); 2789ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 2790ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 2791ac27a0ecSDave Kleikamp goto out; 2792ac27a0ecSDave Kleikamp } 2793617ba13bSMingming Cao ret = ext4_orphan_add(handle, inode); 27947fb5409dSJan Kara if (ret) { 27957fb5409dSJan Kara ext4_journal_stop(handle); 27967fb5409dSJan Kara goto out; 27977fb5409dSJan Kara } 2798ac27a0ecSDave Kleikamp orphan = 1; 2799ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 28007fb5409dSJan Kara ext4_journal_stop(handle); 2801ac27a0ecSDave Kleikamp } 2802ac27a0ecSDave Kleikamp } 2803ac27a0ecSDave Kleikamp 2804ac27a0ecSDave Kleikamp ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 2805ac27a0ecSDave Kleikamp offset, nr_segs, 2806617ba13bSMingming Cao ext4_get_block, NULL); 2807ac27a0ecSDave Kleikamp 28087fb5409dSJan Kara if (orphan) { 2809ac27a0ecSDave Kleikamp int err; 2810ac27a0ecSDave Kleikamp 28117fb5409dSJan Kara /* Credits for sb + inode write */ 28127fb5409dSJan Kara handle = ext4_journal_start(inode, 2); 28137fb5409dSJan Kara if (IS_ERR(handle)) { 28147fb5409dSJan Kara /* This is really bad luck. We've written the data 28157fb5409dSJan Kara * but cannot extend i_size. Bail out and pretend 28167fb5409dSJan Kara * the write failed... */ 28177fb5409dSJan Kara ret = PTR_ERR(handle); 28187fb5409dSJan Kara goto out; 28197fb5409dSJan Kara } 28207fb5409dSJan Kara if (inode->i_nlink) 2821617ba13bSMingming Cao ext4_orphan_del(handle, inode); 28227fb5409dSJan Kara if (ret > 0) { 2823ac27a0ecSDave Kleikamp loff_t end = offset + ret; 2824ac27a0ecSDave Kleikamp if (end > inode->i_size) { 2825ac27a0ecSDave Kleikamp ei->i_disksize = end; 2826ac27a0ecSDave Kleikamp i_size_write(inode, end); 2827ac27a0ecSDave Kleikamp /* 2828ac27a0ecSDave Kleikamp * We're going to return a positive `ret' 2829ac27a0ecSDave Kleikamp * here due to non-zero-length I/O, so there's 2830ac27a0ecSDave Kleikamp * no way of reporting error returns from 2831617ba13bSMingming Cao * ext4_mark_inode_dirty() to userspace. So 2832ac27a0ecSDave Kleikamp * ignore it. 
2833ac27a0ecSDave Kleikamp */ 2834617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 2835ac27a0ecSDave Kleikamp } 2836ac27a0ecSDave Kleikamp } 2837617ba13bSMingming Cao err = ext4_journal_stop(handle); 2838ac27a0ecSDave Kleikamp if (ret == 0) 2839ac27a0ecSDave Kleikamp ret = err; 2840ac27a0ecSDave Kleikamp } 2841ac27a0ecSDave Kleikamp out: 2842ac27a0ecSDave Kleikamp return ret; 2843ac27a0ecSDave Kleikamp } 2844ac27a0ecSDave Kleikamp 2845ac27a0ecSDave Kleikamp /* 2846617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling 2847ac27a0ecSDave Kleikamp * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 2848ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is 2849ac27a0ecSDave Kleikamp * not necessarily locked. 2850ac27a0ecSDave Kleikamp * 2851ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the 2852ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive". We cannot just set the buffers dirty 2853ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 2854ac27a0ecSDave Kleikamp * 2855ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 2856ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 2857ac27a0ecSDave Kleikamp */ 2858617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 2859ac27a0ecSDave Kleikamp { 2860ac27a0ecSDave Kleikamp SetPageChecked(page); 2861ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 2862ac27a0ecSDave Kleikamp } 2863ac27a0ecSDave Kleikamp 2864617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = { 2865617ba13bSMingming Cao .readpage = ext4_readpage, 2866617ba13bSMingming Cao .readpages = ext4_readpages, 2867678aaf48SJan Kara .writepage = ext4_normal_writepage, 2868ac27a0ecSDave Kleikamp .sync_page = block_sync_page, 2869bfc1af65SNick Piggin .write_begin = ext4_write_begin, 2870bfc1af65SNick Piggin .write_end = ext4_ordered_write_end, 2871617ba13bSMingming Cao .bmap = ext4_bmap, 2872617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 2873617ba13bSMingming Cao .releasepage = ext4_releasepage, 2874617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 2875ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 2876*8ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 2877ac27a0ecSDave Kleikamp }; 2878ac27a0ecSDave Kleikamp 2879617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = { 2880617ba13bSMingming Cao .readpage = ext4_readpage, 2881617ba13bSMingming Cao .readpages = ext4_readpages, 2882678aaf48SJan Kara .writepage = ext4_normal_writepage, 2883ac27a0ecSDave Kleikamp .sync_page = block_sync_page, 2884bfc1af65SNick Piggin .write_begin = ext4_write_begin, 2885bfc1af65SNick Piggin .write_end = ext4_writeback_write_end, 2886617ba13bSMingming Cao .bmap = ext4_bmap, 2887617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 2888617ba13bSMingming Cao .releasepage = ext4_releasepage, 2889617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 2890ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 2891*8ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 2892ac27a0ecSDave Kleikamp }; 2893ac27a0ecSDave Kleikamp 2894617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = { 2895617ba13bSMingming Cao 
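	/*
	 * Added observation (not in the original): unlike the ordered,
	 * writeback and delalloc tables, this data=journal table has no
	 * .direct_IO and no .migratepage entry, and it is the only one
	 * with a .set_page_dirty hook, because journalled data has to go
	 * through ext4_journalled_set_page_dirty() above.
	 */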
.readpage = ext4_readpage, 2896617ba13bSMingming Cao .readpages = ext4_readpages, 2897617ba13bSMingming Cao .writepage = ext4_journalled_writepage, 2898ac27a0ecSDave Kleikamp .sync_page = block_sync_page, 2899bfc1af65SNick Piggin .write_begin = ext4_write_begin, 2900bfc1af65SNick Piggin .write_end = ext4_journalled_write_end, 2901617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty, 2902617ba13bSMingming Cao .bmap = ext4_bmap, 2903617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 2904617ba13bSMingming Cao .releasepage = ext4_releasepage, 2905*8ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 2906ac27a0ecSDave Kleikamp }; 2907ac27a0ecSDave Kleikamp 290864769240SAlex Tomas static const struct address_space_operations ext4_da_aops = { 290964769240SAlex Tomas .readpage = ext4_readpage, 291064769240SAlex Tomas .readpages = ext4_readpages, 291164769240SAlex Tomas .writepage = ext4_da_writepage, 291264769240SAlex Tomas .writepages = ext4_da_writepages, 291364769240SAlex Tomas .sync_page = block_sync_page, 291464769240SAlex Tomas .write_begin = ext4_da_write_begin, 291564769240SAlex Tomas .write_end = ext4_da_write_end, 291664769240SAlex Tomas .bmap = ext4_bmap, 291764769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage, 291864769240SAlex Tomas .releasepage = ext4_releasepage, 291964769240SAlex Tomas .direct_IO = ext4_direct_IO, 292064769240SAlex Tomas .migratepage = buffer_migrate_page, 2921*8ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 292264769240SAlex Tomas }; 292364769240SAlex Tomas 2924617ba13bSMingming Cao void ext4_set_aops(struct inode *inode) 2925ac27a0ecSDave Kleikamp { 2926cd1aac32SAneesh Kumar K.V if (ext4_should_order_data(inode) && 2927cd1aac32SAneesh Kumar K.V test_opt(inode->i_sb, DELALLOC)) 2928cd1aac32SAneesh Kumar K.V inode->i_mapping->a_ops = &ext4_da_aops; 2929cd1aac32SAneesh Kumar K.V else if (ext4_should_order_data(inode)) 2930617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_ordered_aops; 293164769240SAlex Tomas else if (ext4_should_writeback_data(inode) && 293264769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) 293364769240SAlex Tomas inode->i_mapping->a_ops = &ext4_da_aops; 2934617ba13bSMingming Cao else if (ext4_should_writeback_data(inode)) 2935617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_writeback_aops; 2936ac27a0ecSDave Kleikamp else 2937617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops; 2938ac27a0ecSDave Kleikamp } 2939ac27a0ecSDave Kleikamp 2940ac27a0ecSDave Kleikamp /* 2941617ba13bSMingming Cao * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 2942ac27a0ecSDave Kleikamp * up to the end of the block which corresponds to `from'. 2943ac27a0ecSDave Kleikamp * This required during truncate. We need to physically zero the tail end 2944ac27a0ecSDave Kleikamp * of that block so it doesn't yield old data if the file is later grown. 
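 *
 * Worked example (added, not in the original; assumes 4k pages, 1k
 * blocks): truncating to from = 10000 gives page index 2, page offset
 * 1808 and length = 1024 - (1808 & 1023) = 240, so the code below walks
 * to the buffer covering page bytes 1024-2047 and zero_user() clears
 * bytes 1808..2047, i.e. the tail of that one block only.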
2945ac27a0ecSDave Kleikamp */ 2946cf108bcaSJan Kara int ext4_block_truncate_page(handle_t *handle, 2947ac27a0ecSDave Kleikamp struct address_space *mapping, loff_t from) 2948ac27a0ecSDave Kleikamp { 2949617ba13bSMingming Cao ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 2950ac27a0ecSDave Kleikamp unsigned offset = from & (PAGE_CACHE_SIZE-1); 2951725d26d3SAneesh Kumar K.V unsigned blocksize, length, pos; 2952725d26d3SAneesh Kumar K.V ext4_lblk_t iblock; 2953ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 2954ac27a0ecSDave Kleikamp struct buffer_head *bh; 2955cf108bcaSJan Kara struct page *page; 2956ac27a0ecSDave Kleikamp int err = 0; 2957ac27a0ecSDave Kleikamp 2958cf108bcaSJan Kara page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT); 2959cf108bcaSJan Kara if (!page) 2960cf108bcaSJan Kara return -EINVAL; 2961cf108bcaSJan Kara 2962ac27a0ecSDave Kleikamp blocksize = inode->i_sb->s_blocksize; 2963ac27a0ecSDave Kleikamp length = blocksize - (offset & (blocksize - 1)); 2964ac27a0ecSDave Kleikamp iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 2965ac27a0ecSDave Kleikamp 2966ac27a0ecSDave Kleikamp /* 2967ac27a0ecSDave Kleikamp * For "nobh" option, we can only work if we don't need to 2968ac27a0ecSDave Kleikamp * read-in the page - otherwise we create buffers to do the IO. 2969ac27a0ecSDave Kleikamp */ 2970ac27a0ecSDave Kleikamp if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 2971617ba13bSMingming Cao ext4_should_writeback_data(inode) && PageUptodate(page)) { 2972eebd2aa3SChristoph Lameter zero_user(page, offset, length); 2973ac27a0ecSDave Kleikamp set_page_dirty(page); 2974ac27a0ecSDave Kleikamp goto unlock; 2975ac27a0ecSDave Kleikamp } 2976ac27a0ecSDave Kleikamp 2977ac27a0ecSDave Kleikamp if (!page_has_buffers(page)) 2978ac27a0ecSDave Kleikamp create_empty_buffers(page, blocksize, 0); 2979ac27a0ecSDave Kleikamp 2980ac27a0ecSDave Kleikamp /* Find the buffer that contains "offset" */ 2981ac27a0ecSDave Kleikamp bh = page_buffers(page); 2982ac27a0ecSDave Kleikamp pos = blocksize; 2983ac27a0ecSDave Kleikamp while (offset >= pos) { 2984ac27a0ecSDave Kleikamp bh = bh->b_this_page; 2985ac27a0ecSDave Kleikamp iblock++; 2986ac27a0ecSDave Kleikamp pos += blocksize; 2987ac27a0ecSDave Kleikamp } 2988ac27a0ecSDave Kleikamp 2989ac27a0ecSDave Kleikamp err = 0; 2990ac27a0ecSDave Kleikamp if (buffer_freed(bh)) { 2991ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "freed: skip"); 2992ac27a0ecSDave Kleikamp goto unlock; 2993ac27a0ecSDave Kleikamp } 2994ac27a0ecSDave Kleikamp 2995ac27a0ecSDave Kleikamp if (!buffer_mapped(bh)) { 2996ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "unmapped"); 2997617ba13bSMingming Cao ext4_get_block(inode, iblock, bh, 0); 2998ac27a0ecSDave Kleikamp /* unmapped? It's a hole - nothing to do */ 2999ac27a0ecSDave Kleikamp if (!buffer_mapped(bh)) { 3000ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "still unmapped"); 3001ac27a0ecSDave Kleikamp goto unlock; 3002ac27a0ecSDave Kleikamp } 3003ac27a0ecSDave Kleikamp } 3004ac27a0ecSDave Kleikamp 3005ac27a0ecSDave Kleikamp /* Ok, it's mapped. Make sure it's up-to-date */ 3006ac27a0ecSDave Kleikamp if (PageUptodate(page)) 3007ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3008ac27a0ecSDave Kleikamp 3009ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3010ac27a0ecSDave Kleikamp err = -EIO; 3011ac27a0ecSDave Kleikamp ll_rw_block(READ, 1, &bh); 3012ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3013ac27a0ecSDave Kleikamp /* Uhhuh. Read error. Complain and punt. 
*/ 3014ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) 3015ac27a0ecSDave Kleikamp goto unlock; 3016ac27a0ecSDave Kleikamp } 3017ac27a0ecSDave Kleikamp 3018617ba13bSMingming Cao if (ext4_should_journal_data(inode)) { 3019ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "get write access"); 3020617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, bh); 3021ac27a0ecSDave Kleikamp if (err) 3022ac27a0ecSDave Kleikamp goto unlock; 3023ac27a0ecSDave Kleikamp } 3024ac27a0ecSDave Kleikamp 3025eebd2aa3SChristoph Lameter zero_user(page, offset, length); 3026ac27a0ecSDave Kleikamp 3027ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "zeroed end of block"); 3028ac27a0ecSDave Kleikamp 3029ac27a0ecSDave Kleikamp err = 0; 3030617ba13bSMingming Cao if (ext4_should_journal_data(inode)) { 3031617ba13bSMingming Cao err = ext4_journal_dirty_metadata(handle, bh); 3032ac27a0ecSDave Kleikamp } else { 3033617ba13bSMingming Cao if (ext4_should_order_data(inode)) 3034678aaf48SJan Kara err = ext4_jbd2_file_inode(handle, inode); 3035ac27a0ecSDave Kleikamp mark_buffer_dirty(bh); 3036ac27a0ecSDave Kleikamp } 3037ac27a0ecSDave Kleikamp 3038ac27a0ecSDave Kleikamp unlock: 3039ac27a0ecSDave Kleikamp unlock_page(page); 3040ac27a0ecSDave Kleikamp page_cache_release(page); 3041ac27a0ecSDave Kleikamp return err; 3042ac27a0ecSDave Kleikamp } 3043ac27a0ecSDave Kleikamp 3044ac27a0ecSDave Kleikamp /* 3045ac27a0ecSDave Kleikamp * Probably it should be a library function... search for first non-zero word 3046ac27a0ecSDave Kleikamp * or memcmp with zero_page, whatever is better for particular architecture. 3047ac27a0ecSDave Kleikamp * Linus? 3048ac27a0ecSDave Kleikamp */ 3049ac27a0ecSDave Kleikamp static inline int all_zeroes(__le32 *p, __le32 *q) 3050ac27a0ecSDave Kleikamp { 3051ac27a0ecSDave Kleikamp while (p < q) 3052ac27a0ecSDave Kleikamp if (*p++) 3053ac27a0ecSDave Kleikamp return 0; 3054ac27a0ecSDave Kleikamp return 1; 3055ac27a0ecSDave Kleikamp } 3056ac27a0ecSDave Kleikamp 3057ac27a0ecSDave Kleikamp /** 3058617ba13bSMingming Cao * ext4_find_shared - find the indirect blocks for partial truncation. 3059ac27a0ecSDave Kleikamp * @inode: inode in question 3060ac27a0ecSDave Kleikamp * @depth: depth of the affected branch 3061617ba13bSMingming Cao * @offsets: offsets of pointers in that branch (see ext4_block_to_path) 3062ac27a0ecSDave Kleikamp * @chain: place to store the pointers to partial indirect blocks 3063ac27a0ecSDave Kleikamp * @top: place to the (detached) top of branch 3064ac27a0ecSDave Kleikamp * 3065617ba13bSMingming Cao * This is a helper function used by ext4_truncate(). 3066ac27a0ecSDave Kleikamp * 3067ac27a0ecSDave Kleikamp * When we do truncate() we may have to clean the ends of several 3068ac27a0ecSDave Kleikamp * indirect blocks but leave the blocks themselves alive. Block is 3069ac27a0ecSDave Kleikamp * partially truncated if some data below the new i_size is refered 3070ac27a0ecSDave Kleikamp * from it (and it is on the path to the first completely truncated 3071ac27a0ecSDave Kleikamp * data block, indeed). We have to free the top of that path along 3072ac27a0ecSDave Kleikamp * with everything to the right of the path. Since no allocation 3073617ba13bSMingming Cao * past the truncation point is possible until ext4_truncate() 3074ac27a0ecSDave Kleikamp * finishes, we may safely do the latter, but top of branch may 3075ac27a0ecSDave Kleikamp * require special attention - pageout below the truncation point 3076ac27a0ecSDave Kleikamp * might try to populate it. 
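 *
 * Added note (not in the original): all_zeroes() above is what decides
 * how far up the detach point can move.  Walking from the deepest level
 * towards the inode, as long as an indirect block has only zero
 * pointers to the left of the truncation point nothing in it needs to
 * survive, so the split moves one level up; the loop stops at the first
 * block that still references live data, and in the common case the
 * pointer at the cut in that block is what gets returned in *@top for
 * the caller to free, as items a)-c) below describe.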
3077ac27a0ecSDave Kleikamp * 3078ac27a0ecSDave Kleikamp * We atomically detach the top of branch from the tree, store the 3079ac27a0ecSDave Kleikamp * block number of its root in *@top, pointers to buffer_heads of 3080ac27a0ecSDave Kleikamp * partially truncated blocks - in @chain[].bh and pointers to 3081ac27a0ecSDave Kleikamp * their last elements that should not be removed - in 3082ac27a0ecSDave Kleikamp * @chain[].p. Return value is the pointer to last filled element 3083ac27a0ecSDave Kleikamp * of @chain. 3084ac27a0ecSDave Kleikamp * 3085ac27a0ecSDave Kleikamp * The work left to caller to do the actual freeing of subtrees: 3086ac27a0ecSDave Kleikamp * a) free the subtree starting from *@top 3087ac27a0ecSDave Kleikamp * b) free the subtrees whose roots are stored in 3088ac27a0ecSDave Kleikamp * (@chain[i].p+1 .. end of @chain[i].bh->b_data) 3089ac27a0ecSDave Kleikamp * c) free the subtrees growing from the inode past the @chain[0]. 3090ac27a0ecSDave Kleikamp * (no partially truncated stuff there). */ 3091ac27a0ecSDave Kleikamp 3092617ba13bSMingming Cao static Indirect *ext4_find_shared(struct inode *inode, int depth, 3093725d26d3SAneesh Kumar K.V ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top) 3094ac27a0ecSDave Kleikamp { 3095ac27a0ecSDave Kleikamp Indirect *partial, *p; 3096ac27a0ecSDave Kleikamp int k, err; 3097ac27a0ecSDave Kleikamp 3098ac27a0ecSDave Kleikamp *top = 0; 3099ac27a0ecSDave Kleikamp /* Make k index the deepest non-null offest + 1 */ 3100ac27a0ecSDave Kleikamp for (k = depth; k > 1 && !offsets[k-1]; k--) 3101ac27a0ecSDave Kleikamp ; 3102617ba13bSMingming Cao partial = ext4_get_branch(inode, k, offsets, chain, &err); 3103ac27a0ecSDave Kleikamp /* Writer: pointers */ 3104ac27a0ecSDave Kleikamp if (!partial) 3105ac27a0ecSDave Kleikamp partial = chain + k-1; 3106ac27a0ecSDave Kleikamp /* 3107ac27a0ecSDave Kleikamp * If the branch acquired continuation since we've looked at it - 3108ac27a0ecSDave Kleikamp * fine, it should all survive and (new) top doesn't belong to us. 3109ac27a0ecSDave Kleikamp */ 3110ac27a0ecSDave Kleikamp if (!partial->key && *partial->p) 3111ac27a0ecSDave Kleikamp /* Writer: end */ 3112ac27a0ecSDave Kleikamp goto no_top; 3113ac27a0ecSDave Kleikamp for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--) 3114ac27a0ecSDave Kleikamp ; 3115ac27a0ecSDave Kleikamp /* 3116ac27a0ecSDave Kleikamp * OK, we've found the last block that must survive. The rest of our 3117ac27a0ecSDave Kleikamp * branch should be detached before unlocking. However, if that rest 3118ac27a0ecSDave Kleikamp * of branch is all ours and does not grow immediately from the inode 3119ac27a0ecSDave Kleikamp * it's easier to cheat and just decrement partial->p. 3120ac27a0ecSDave Kleikamp */ 3121ac27a0ecSDave Kleikamp if (p == chain + k - 1 && p > chain) { 3122ac27a0ecSDave Kleikamp p->p--; 3123ac27a0ecSDave Kleikamp } else { 3124ac27a0ecSDave Kleikamp *top = *p->p; 3125617ba13bSMingming Cao /* Nope, don't do this in ext4. 
Must leave the tree intact */ 3126ac27a0ecSDave Kleikamp #if 0 3127ac27a0ecSDave Kleikamp *p->p = 0; 3128ac27a0ecSDave Kleikamp #endif 3129ac27a0ecSDave Kleikamp } 3130ac27a0ecSDave Kleikamp /* Writer: end */ 3131ac27a0ecSDave Kleikamp 3132ac27a0ecSDave Kleikamp while(partial > p) { 3133ac27a0ecSDave Kleikamp brelse(partial->bh); 3134ac27a0ecSDave Kleikamp partial--; 3135ac27a0ecSDave Kleikamp } 3136ac27a0ecSDave Kleikamp no_top: 3137ac27a0ecSDave Kleikamp return partial; 3138ac27a0ecSDave Kleikamp } 3139ac27a0ecSDave Kleikamp 3140ac27a0ecSDave Kleikamp /* 3141ac27a0ecSDave Kleikamp * Zero a number of block pointers in either an inode or an indirect block. 3142ac27a0ecSDave Kleikamp * If we restart the transaction we must again get write access to the 3143ac27a0ecSDave Kleikamp * indirect block for further modification. 3144ac27a0ecSDave Kleikamp * 3145ac27a0ecSDave Kleikamp * We release `count' blocks on disk, but (last - first) may be greater 3146ac27a0ecSDave Kleikamp * than `count' because there can be holes in there. 3147ac27a0ecSDave Kleikamp */ 3148617ba13bSMingming Cao static void ext4_clear_blocks(handle_t *handle, struct inode *inode, 3149617ba13bSMingming Cao struct buffer_head *bh, ext4_fsblk_t block_to_free, 3150ac27a0ecSDave Kleikamp unsigned long count, __le32 *first, __le32 *last) 3151ac27a0ecSDave Kleikamp { 3152ac27a0ecSDave Kleikamp __le32 *p; 3153ac27a0ecSDave Kleikamp if (try_to_extend_transaction(handle, inode)) { 3154ac27a0ecSDave Kleikamp if (bh) { 3155617ba13bSMingming Cao BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 3156617ba13bSMingming Cao ext4_journal_dirty_metadata(handle, bh); 3157ac27a0ecSDave Kleikamp } 3158617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3159617ba13bSMingming Cao ext4_journal_test_restart(handle, inode); 3160ac27a0ecSDave Kleikamp if (bh) { 3161ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "retaking write access"); 3162617ba13bSMingming Cao ext4_journal_get_write_access(handle, bh); 3163ac27a0ecSDave Kleikamp } 3164ac27a0ecSDave Kleikamp } 3165ac27a0ecSDave Kleikamp 3166ac27a0ecSDave Kleikamp /* 3167ac27a0ecSDave Kleikamp * Any buffers which are on the journal will be in memory. We find 3168dab291afSMingming Cao * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget() 3169ac27a0ecSDave Kleikamp * on them. We've already detached each block from the file, so 3170dab291afSMingming Cao * bforget() in jbd2_journal_forget() should be safe. 3171ac27a0ecSDave Kleikamp * 3172dab291afSMingming Cao * AKPM: turn on bforget in jbd2_journal_forget()!!! 
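 *
 * Illustrative example (added, not in the original): if ext4_free_data()
 * below scanned pointers to blocks 100, 101 and 102, it calls us once
 * with block_to_free = 100 and count = 3; the loop that follows zeroes
 * and ext4_forget()s each of the three pointers individually, and a
 * single ext4_free_blocks() call then releases the whole run.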
3173ac27a0ecSDave Kleikamp */ 3174ac27a0ecSDave Kleikamp for (p = first; p < last; p++) { 3175ac27a0ecSDave Kleikamp u32 nr = le32_to_cpu(*p); 3176ac27a0ecSDave Kleikamp if (nr) { 31771d03ec98SAneesh Kumar K.V struct buffer_head *tbh; 3178ac27a0ecSDave Kleikamp 3179ac27a0ecSDave Kleikamp *p = 0; 31801d03ec98SAneesh Kumar K.V tbh = sb_find_get_block(inode->i_sb, nr); 31811d03ec98SAneesh Kumar K.V ext4_forget(handle, 0, inode, tbh, nr); 3182ac27a0ecSDave Kleikamp } 3183ac27a0ecSDave Kleikamp } 3184ac27a0ecSDave Kleikamp 3185c9de560dSAlex Tomas ext4_free_blocks(handle, inode, block_to_free, count, 0); 3186ac27a0ecSDave Kleikamp } 3187ac27a0ecSDave Kleikamp 3188ac27a0ecSDave Kleikamp /** 3189617ba13bSMingming Cao * ext4_free_data - free a list of data blocks 3190ac27a0ecSDave Kleikamp * @handle: handle for this transaction 3191ac27a0ecSDave Kleikamp * @inode: inode we are dealing with 3192ac27a0ecSDave Kleikamp * @this_bh: indirect buffer_head which contains *@first and *@last 3193ac27a0ecSDave Kleikamp * @first: array of block numbers 3194ac27a0ecSDave Kleikamp * @last: points immediately past the end of array 3195ac27a0ecSDave Kleikamp * 3196ac27a0ecSDave Kleikamp * We are freeing all blocks refered from that array (numbers are stored as 3197ac27a0ecSDave Kleikamp * little-endian 32-bit) and updating @inode->i_blocks appropriately. 3198ac27a0ecSDave Kleikamp * 3199ac27a0ecSDave Kleikamp * We accumulate contiguous runs of blocks to free. Conveniently, if these 3200ac27a0ecSDave Kleikamp * blocks are contiguous then releasing them at one time will only affect one 3201ac27a0ecSDave Kleikamp * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 3202ac27a0ecSDave Kleikamp * actually use a lot of journal space. 3203ac27a0ecSDave Kleikamp * 3204ac27a0ecSDave Kleikamp * @this_bh will be %NULL if @first and @last point into the inode's direct 3205ac27a0ecSDave Kleikamp * block pointers. 3206ac27a0ecSDave Kleikamp */ 3207617ba13bSMingming Cao static void ext4_free_data(handle_t *handle, struct inode *inode, 3208ac27a0ecSDave Kleikamp struct buffer_head *this_bh, 3209ac27a0ecSDave Kleikamp __le32 *first, __le32 *last) 3210ac27a0ecSDave Kleikamp { 3211617ba13bSMingming Cao ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ 3212ac27a0ecSDave Kleikamp unsigned long count = 0; /* Number of blocks in the run */ 3213ac27a0ecSDave Kleikamp __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 3214ac27a0ecSDave Kleikamp corresponding to 3215ac27a0ecSDave Kleikamp block_to_free */ 3216617ba13bSMingming Cao ext4_fsblk_t nr; /* Current block # */ 3217ac27a0ecSDave Kleikamp __le32 *p; /* Pointer into inode/ind 3218ac27a0ecSDave Kleikamp for current block */ 3219ac27a0ecSDave Kleikamp int err; 3220ac27a0ecSDave Kleikamp 3221ac27a0ecSDave Kleikamp if (this_bh) { /* For indirect block */ 3222ac27a0ecSDave Kleikamp BUFFER_TRACE(this_bh, "get_write_access"); 3223617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, this_bh); 3224ac27a0ecSDave Kleikamp /* Important: if we can't update the indirect pointers 3225ac27a0ecSDave Kleikamp * to the blocks, we can't free them. 
*/ 3226ac27a0ecSDave Kleikamp if (err) 3227ac27a0ecSDave Kleikamp return; 3228ac27a0ecSDave Kleikamp } 3229ac27a0ecSDave Kleikamp 3230ac27a0ecSDave Kleikamp for (p = first; p < last; p++) { 3231ac27a0ecSDave Kleikamp nr = le32_to_cpu(*p); 3232ac27a0ecSDave Kleikamp if (nr) { 3233ac27a0ecSDave Kleikamp /* accumulate blocks to free if they're contiguous */ 3234ac27a0ecSDave Kleikamp if (count == 0) { 3235ac27a0ecSDave Kleikamp block_to_free = nr; 3236ac27a0ecSDave Kleikamp block_to_free_p = p; 3237ac27a0ecSDave Kleikamp count = 1; 3238ac27a0ecSDave Kleikamp } else if (nr == block_to_free + count) { 3239ac27a0ecSDave Kleikamp count++; 3240ac27a0ecSDave Kleikamp } else { 3241617ba13bSMingming Cao ext4_clear_blocks(handle, inode, this_bh, 3242ac27a0ecSDave Kleikamp block_to_free, 3243ac27a0ecSDave Kleikamp count, block_to_free_p, p); 3244ac27a0ecSDave Kleikamp block_to_free = nr; 3245ac27a0ecSDave Kleikamp block_to_free_p = p; 3246ac27a0ecSDave Kleikamp count = 1; 3247ac27a0ecSDave Kleikamp } 3248ac27a0ecSDave Kleikamp } 3249ac27a0ecSDave Kleikamp } 3250ac27a0ecSDave Kleikamp 3251ac27a0ecSDave Kleikamp if (count > 0) 3252617ba13bSMingming Cao ext4_clear_blocks(handle, inode, this_bh, block_to_free, 3253ac27a0ecSDave Kleikamp count, block_to_free_p, p); 3254ac27a0ecSDave Kleikamp 3255ac27a0ecSDave Kleikamp if (this_bh) { 3256617ba13bSMingming Cao BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata"); 325771dc8fbcSDuane Griffin 325871dc8fbcSDuane Griffin /* 325971dc8fbcSDuane Griffin * The buffer head should have an attached journal head at this 326071dc8fbcSDuane Griffin * point. However, if the data is corrupted and an indirect 326171dc8fbcSDuane Griffin * block pointed to itself, it would have been detached when 326271dc8fbcSDuane Griffin * the block was cleared. Check for this instead of OOPSing. 326371dc8fbcSDuane Griffin */ 326471dc8fbcSDuane Griffin if (bh2jh(this_bh)) 3265617ba13bSMingming Cao ext4_journal_dirty_metadata(handle, this_bh); 326671dc8fbcSDuane Griffin else 326771dc8fbcSDuane Griffin ext4_error(inode->i_sb, __func__, 326871dc8fbcSDuane Griffin "circular indirect block detected, " 326971dc8fbcSDuane Griffin "inode=%lu, block=%llu", 327071dc8fbcSDuane Griffin inode->i_ino, 327171dc8fbcSDuane Griffin (unsigned long long) this_bh->b_blocknr); 3272ac27a0ecSDave Kleikamp } 3273ac27a0ecSDave Kleikamp } 3274ac27a0ecSDave Kleikamp 3275ac27a0ecSDave Kleikamp /** 3276617ba13bSMingming Cao * ext4_free_branches - free an array of branches 3277ac27a0ecSDave Kleikamp * @handle: JBD handle for this transaction 3278ac27a0ecSDave Kleikamp * @inode: inode we are dealing with 3279ac27a0ecSDave Kleikamp * @parent_bh: the buffer_head which contains *@first and *@last 3280ac27a0ecSDave Kleikamp * @first: array of block numbers 3281ac27a0ecSDave Kleikamp * @last: pointer immediately past the end of array 3282ac27a0ecSDave Kleikamp * @depth: depth of the branches to free 3283ac27a0ecSDave Kleikamp * 3284ac27a0ecSDave Kleikamp * We are freeing all blocks refered from these branches (numbers are 3285ac27a0ecSDave Kleikamp * stored as little-endian 32-bit) and updating @inode->i_blocks 3286ac27a0ecSDave Kleikamp * appropriately. 
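 *
 * Added note (not in the original): @depth counts the remaining levels
 * of indirection.  For a triple-indirect subtree the first call comes
 * in with depth == 3; each level of index blocks recurses with depth
 * reduced by one, and once it reaches zero the else-branch hands the
 * leaf level to ext4_free_data(), which frees the actual data blocks.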
3287ac27a0ecSDave Kleikamp */ 3288617ba13bSMingming Cao static void ext4_free_branches(handle_t *handle, struct inode *inode, 3289ac27a0ecSDave Kleikamp struct buffer_head *parent_bh, 3290ac27a0ecSDave Kleikamp __le32 *first, __le32 *last, int depth) 3291ac27a0ecSDave Kleikamp { 3292617ba13bSMingming Cao ext4_fsblk_t nr; 3293ac27a0ecSDave Kleikamp __le32 *p; 3294ac27a0ecSDave Kleikamp 3295ac27a0ecSDave Kleikamp if (is_handle_aborted(handle)) 3296ac27a0ecSDave Kleikamp return; 3297ac27a0ecSDave Kleikamp 3298ac27a0ecSDave Kleikamp if (depth--) { 3299ac27a0ecSDave Kleikamp struct buffer_head *bh; 3300617ba13bSMingming Cao int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 3301ac27a0ecSDave Kleikamp p = last; 3302ac27a0ecSDave Kleikamp while (--p >= first) { 3303ac27a0ecSDave Kleikamp nr = le32_to_cpu(*p); 3304ac27a0ecSDave Kleikamp if (!nr) 3305ac27a0ecSDave Kleikamp continue; /* A hole */ 3306ac27a0ecSDave Kleikamp 3307ac27a0ecSDave Kleikamp /* Go read the buffer for the next level down */ 3308ac27a0ecSDave Kleikamp bh = sb_bread(inode->i_sb, nr); 3309ac27a0ecSDave Kleikamp 3310ac27a0ecSDave Kleikamp /* 3311ac27a0ecSDave Kleikamp * A read failure? Report error and clear slot 3312ac27a0ecSDave Kleikamp * (should be rare). 3313ac27a0ecSDave Kleikamp */ 3314ac27a0ecSDave Kleikamp if (!bh) { 3315617ba13bSMingming Cao ext4_error(inode->i_sb, "ext4_free_branches", 33162ae02107SMingming Cao "Read failure, inode=%lu, block=%llu", 3317ac27a0ecSDave Kleikamp inode->i_ino, nr); 3318ac27a0ecSDave Kleikamp continue; 3319ac27a0ecSDave Kleikamp } 3320ac27a0ecSDave Kleikamp 3321ac27a0ecSDave Kleikamp /* This zaps the entire block. Bottom up. */ 3322ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "free child branches"); 3323617ba13bSMingming Cao ext4_free_branches(handle, inode, bh, 3324ac27a0ecSDave Kleikamp (__le32*)bh->b_data, 3325ac27a0ecSDave Kleikamp (__le32*)bh->b_data + addr_per_block, 3326ac27a0ecSDave Kleikamp depth); 3327ac27a0ecSDave Kleikamp 3328ac27a0ecSDave Kleikamp /* 3329ac27a0ecSDave Kleikamp * We've probably journalled the indirect block several 3330ac27a0ecSDave Kleikamp * times during the truncate. But it's no longer 3331ac27a0ecSDave Kleikamp * needed and we now drop it from the transaction via 3332dab291afSMingming Cao * jbd2_journal_revoke(). 3333ac27a0ecSDave Kleikamp * 3334ac27a0ecSDave Kleikamp * That's easy if it's exclusively part of this 3335ac27a0ecSDave Kleikamp * transaction. But if it's part of the committing 3336dab291afSMingming Cao * transaction then jbd2_journal_forget() will simply 3337ac27a0ecSDave Kleikamp * brelse() it. That means that if the underlying 3338617ba13bSMingming Cao * block is reallocated in ext4_get_block(), 3339ac27a0ecSDave Kleikamp * unmap_underlying_metadata() will find this block 3340ac27a0ecSDave Kleikamp * and will try to get rid of it. damn, damn. 3341ac27a0ecSDave Kleikamp * 3342ac27a0ecSDave Kleikamp * If this block has already been committed to the 3343ac27a0ecSDave Kleikamp * journal, a revoke record will be written. And 3344ac27a0ecSDave Kleikamp * revoke records must be emitted *before* clearing 3345ac27a0ecSDave Kleikamp * this block's bit in the bitmaps. 3346ac27a0ecSDave Kleikamp */ 3347617ba13bSMingming Cao ext4_forget(handle, 1, inode, bh, bh->b_blocknr); 3348ac27a0ecSDave Kleikamp 3349ac27a0ecSDave Kleikamp /* 3350ac27a0ecSDave Kleikamp * Everything below this this pointer has been 3351ac27a0ecSDave Kleikamp * released. Now let this top-of-subtree go. 
3352ac27a0ecSDave Kleikamp * 3353ac27a0ecSDave Kleikamp * We want the freeing of this indirect block to be 3354ac27a0ecSDave Kleikamp * atomic in the journal with the updating of the 3355ac27a0ecSDave Kleikamp * bitmap block which owns it. So make some room in 3356ac27a0ecSDave Kleikamp * the journal. 3357ac27a0ecSDave Kleikamp * 3358ac27a0ecSDave Kleikamp * We zero the parent pointer *after* freeing its 3359ac27a0ecSDave Kleikamp * pointee in the bitmaps, so if extend_transaction() 3360ac27a0ecSDave Kleikamp * for some reason fails to put the bitmap changes and 3361ac27a0ecSDave Kleikamp * the release into the same transaction, recovery 3362ac27a0ecSDave Kleikamp * will merely complain about releasing a free block, 3363ac27a0ecSDave Kleikamp * rather than leaking blocks. 3364ac27a0ecSDave Kleikamp */ 3365ac27a0ecSDave Kleikamp if (is_handle_aborted(handle)) 3366ac27a0ecSDave Kleikamp return; 3367ac27a0ecSDave Kleikamp if (try_to_extend_transaction(handle, inode)) { 3368617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3369617ba13bSMingming Cao ext4_journal_test_restart(handle, inode); 3370ac27a0ecSDave Kleikamp } 3371ac27a0ecSDave Kleikamp 3372c9de560dSAlex Tomas ext4_free_blocks(handle, inode, nr, 1, 1); 3373ac27a0ecSDave Kleikamp 3374ac27a0ecSDave Kleikamp if (parent_bh) { 3375ac27a0ecSDave Kleikamp /* 3376ac27a0ecSDave Kleikamp * The block which we have just freed is 3377ac27a0ecSDave Kleikamp * pointed to by an indirect block: journal it 3378ac27a0ecSDave Kleikamp */ 3379ac27a0ecSDave Kleikamp BUFFER_TRACE(parent_bh, "get_write_access"); 3380617ba13bSMingming Cao if (!ext4_journal_get_write_access(handle, 3381ac27a0ecSDave Kleikamp parent_bh)){ 3382ac27a0ecSDave Kleikamp *p = 0; 3383ac27a0ecSDave Kleikamp BUFFER_TRACE(parent_bh, 3384617ba13bSMingming Cao "call ext4_journal_dirty_metadata"); 3385617ba13bSMingming Cao ext4_journal_dirty_metadata(handle, 3386ac27a0ecSDave Kleikamp parent_bh); 3387ac27a0ecSDave Kleikamp } 3388ac27a0ecSDave Kleikamp } 3389ac27a0ecSDave Kleikamp } 3390ac27a0ecSDave Kleikamp } else { 3391ac27a0ecSDave Kleikamp /* We have reached the bottom of the tree. */ 3392ac27a0ecSDave Kleikamp BUFFER_TRACE(parent_bh, "free data blocks"); 3393617ba13bSMingming Cao ext4_free_data(handle, inode, parent_bh, first, last); 3394ac27a0ecSDave Kleikamp } 3395ac27a0ecSDave Kleikamp } 3396ac27a0ecSDave Kleikamp 339791ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode) 339891ef4cafSDuane Griffin { 339991ef4cafSDuane Griffin if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 340091ef4cafSDuane Griffin return 0; 340191ef4cafSDuane Griffin if (S_ISREG(inode->i_mode)) 340291ef4cafSDuane Griffin return 1; 340391ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode)) 340491ef4cafSDuane Griffin return 1; 340591ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode)) 340691ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode); 340791ef4cafSDuane Griffin return 0; 340891ef4cafSDuane Griffin } 340991ef4cafSDuane Griffin 3410ac27a0ecSDave Kleikamp /* 3411617ba13bSMingming Cao * ext4_truncate() 3412ac27a0ecSDave Kleikamp * 3413617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3414617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3415ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 
3416ac27a0ecSDave Kleikamp * 3417ac27a0ecSDave Kleikamp * As we work through the truncate and commmit bits of it to the journal there 3418ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3419ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3420ac27a0ecSDave Kleikamp * 3421ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3422ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3423ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3424ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3425ac27a0ecSDave Kleikamp * left-to-right works OK too). 3426ac27a0ecSDave Kleikamp * 3427ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3428ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3429ac27a0ecSDave Kleikamp * 3430ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3431617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3432ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3433617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3434617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3435ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3436617ba13bSMingming Cao * ext4_truncate() run will find them and release them. 3437ac27a0ecSDave Kleikamp */ 3438617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3439ac27a0ecSDave Kleikamp { 3440ac27a0ecSDave Kleikamp handle_t *handle; 3441617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 3442ac27a0ecSDave Kleikamp __le32 *i_data = ei->i_data; 3443617ba13bSMingming Cao int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 3444ac27a0ecSDave Kleikamp struct address_space *mapping = inode->i_mapping; 3445725d26d3SAneesh Kumar K.V ext4_lblk_t offsets[4]; 3446ac27a0ecSDave Kleikamp Indirect chain[4]; 3447ac27a0ecSDave Kleikamp Indirect *partial; 3448ac27a0ecSDave Kleikamp __le32 nr = 0; 3449ac27a0ecSDave Kleikamp int n; 3450725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 3451ac27a0ecSDave Kleikamp unsigned blocksize = inode->i_sb->s_blocksize; 3452ac27a0ecSDave Kleikamp 345391ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3454ac27a0ecSDave Kleikamp return; 3455ac27a0ecSDave Kleikamp 34561d03ec98SAneesh Kumar K.V if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 3457cf108bcaSJan Kara ext4_ext_truncate(inode); 34581d03ec98SAneesh Kumar K.V return; 34591d03ec98SAneesh Kumar K.V } 3460a86c6181SAlex Tomas 3461ac27a0ecSDave Kleikamp handle = start_transaction(inode); 3462cf108bcaSJan Kara if (IS_ERR(handle)) 3463ac27a0ecSDave Kleikamp return; /* AKPM: return what? 
*/ 3464ac27a0ecSDave Kleikamp 3465ac27a0ecSDave Kleikamp last_block = (inode->i_size + blocksize-1) 3466617ba13bSMingming Cao >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 3467ac27a0ecSDave Kleikamp 3468cf108bcaSJan Kara if (inode->i_size & (blocksize - 1)) 3469cf108bcaSJan Kara if (ext4_block_truncate_page(handle, mapping, inode->i_size)) 3470cf108bcaSJan Kara goto out_stop; 3471ac27a0ecSDave Kleikamp 3472617ba13bSMingming Cao n = ext4_block_to_path(inode, last_block, offsets, NULL); 3473ac27a0ecSDave Kleikamp if (n == 0) 3474ac27a0ecSDave Kleikamp goto out_stop; /* error */ 3475ac27a0ecSDave Kleikamp 3476ac27a0ecSDave Kleikamp /* 3477ac27a0ecSDave Kleikamp * OK. This truncate is going to happen. We add the inode to the 3478ac27a0ecSDave Kleikamp * orphan list, so that if this truncate spans multiple transactions, 3479ac27a0ecSDave Kleikamp * and we crash, we will resume the truncate when the filesystem 3480ac27a0ecSDave Kleikamp * recovers. It also marks the inode dirty, to catch the new size. 3481ac27a0ecSDave Kleikamp * 3482ac27a0ecSDave Kleikamp * Implication: the file must always be in a sane, consistent 3483ac27a0ecSDave Kleikamp * truncatable state while each transaction commits. 3484ac27a0ecSDave Kleikamp */ 3485617ba13bSMingming Cao if (ext4_orphan_add(handle, inode)) 3486ac27a0ecSDave Kleikamp goto out_stop; 3487ac27a0ecSDave Kleikamp 3488ac27a0ecSDave Kleikamp /* 3489632eaeabSMingming Cao * From here we block out all ext4_get_block() callers who want to 3490632eaeabSMingming Cao * modify the block allocation tree. 3491632eaeabSMingming Cao */ 3492632eaeabSMingming Cao down_write(&ei->i_data_sem); 3493632eaeabSMingming Cao /* 3494ac27a0ecSDave Kleikamp * The orphan list entry will now protect us from any crash which 3495ac27a0ecSDave Kleikamp * occurs before the truncate completes, so it is now safe to propagate 3496ac27a0ecSDave Kleikamp * the new, shorter inode size (held for now in i_size) into the 3497ac27a0ecSDave Kleikamp * on-disk inode. We do this via i_disksize, which is the value which 3498617ba13bSMingming Cao * ext4 *really* writes onto the disk inode. 3499ac27a0ecSDave Kleikamp */ 3500ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3501ac27a0ecSDave Kleikamp 3502ac27a0ecSDave Kleikamp if (n == 1) { /* direct blocks */ 3503617ba13bSMingming Cao ext4_free_data(handle, inode, NULL, i_data+offsets[0], 3504617ba13bSMingming Cao i_data + EXT4_NDIR_BLOCKS); 3505ac27a0ecSDave Kleikamp goto do_indirects; 3506ac27a0ecSDave Kleikamp } 3507ac27a0ecSDave Kleikamp 3508617ba13bSMingming Cao partial = ext4_find_shared(inode, n, offsets, chain, &nr); 3509ac27a0ecSDave Kleikamp /* Kill the top of shared branch (not detached) */ 3510ac27a0ecSDave Kleikamp if (nr) { 3511ac27a0ecSDave Kleikamp if (partial == chain) { 3512ac27a0ecSDave Kleikamp /* Shared branch grows from the inode */ 3513617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, 3514ac27a0ecSDave Kleikamp &nr, &nr+1, (chain+n-1) - partial); 3515ac27a0ecSDave Kleikamp *partial->p = 0; 3516ac27a0ecSDave Kleikamp /* 3517ac27a0ecSDave Kleikamp * We mark the inode dirty prior to restart, 3518ac27a0ecSDave Kleikamp * and prior to stop. No need for it here. 
3519ac27a0ecSDave Kleikamp */ 3520ac27a0ecSDave Kleikamp } else { 3521ac27a0ecSDave Kleikamp /* Shared branch grows from an indirect block */ 3522ac27a0ecSDave Kleikamp BUFFER_TRACE(partial->bh, "get_write_access"); 3523617ba13bSMingming Cao ext4_free_branches(handle, inode, partial->bh, 3524ac27a0ecSDave Kleikamp partial->p, 3525ac27a0ecSDave Kleikamp partial->p+1, (chain+n-1) - partial); 3526ac27a0ecSDave Kleikamp } 3527ac27a0ecSDave Kleikamp } 3528ac27a0ecSDave Kleikamp /* Clear the ends of indirect blocks on the shared branch */ 3529ac27a0ecSDave Kleikamp while (partial > chain) { 3530617ba13bSMingming Cao ext4_free_branches(handle, inode, partial->bh, partial->p + 1, 3531ac27a0ecSDave Kleikamp (__le32*)partial->bh->b_data+addr_per_block, 3532ac27a0ecSDave Kleikamp (chain+n-1) - partial); 3533ac27a0ecSDave Kleikamp BUFFER_TRACE(partial->bh, "call brelse"); 3534ac27a0ecSDave Kleikamp brelse (partial->bh); 3535ac27a0ecSDave Kleikamp partial--; 3536ac27a0ecSDave Kleikamp } 3537ac27a0ecSDave Kleikamp do_indirects: 3538ac27a0ecSDave Kleikamp /* Kill the remaining (whole) subtrees */ 3539ac27a0ecSDave Kleikamp switch (offsets[0]) { 3540ac27a0ecSDave Kleikamp default: 3541617ba13bSMingming Cao nr = i_data[EXT4_IND_BLOCK]; 3542ac27a0ecSDave Kleikamp if (nr) { 3543617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 3544617ba13bSMingming Cao i_data[EXT4_IND_BLOCK] = 0; 3545ac27a0ecSDave Kleikamp } 3546617ba13bSMingming Cao case EXT4_IND_BLOCK: 3547617ba13bSMingming Cao nr = i_data[EXT4_DIND_BLOCK]; 3548ac27a0ecSDave Kleikamp if (nr) { 3549617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 3550617ba13bSMingming Cao i_data[EXT4_DIND_BLOCK] = 0; 3551ac27a0ecSDave Kleikamp } 3552617ba13bSMingming Cao case EXT4_DIND_BLOCK: 3553617ba13bSMingming Cao nr = i_data[EXT4_TIND_BLOCK]; 3554ac27a0ecSDave Kleikamp if (nr) { 3555617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 3556617ba13bSMingming Cao i_data[EXT4_TIND_BLOCK] = 0; 3557ac27a0ecSDave Kleikamp } 3558617ba13bSMingming Cao case EXT4_TIND_BLOCK: 3559ac27a0ecSDave Kleikamp ; 3560ac27a0ecSDave Kleikamp } 3561ac27a0ecSDave Kleikamp 3562617ba13bSMingming Cao ext4_discard_reservation(inode); 3563ac27a0ecSDave Kleikamp 35640e855ac8SAneesh Kumar K.V up_write(&ei->i_data_sem); 3565ef7f3835SKalpak Shah inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 3566617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3567ac27a0ecSDave Kleikamp 3568ac27a0ecSDave Kleikamp /* 3569ac27a0ecSDave Kleikamp * In a multi-transaction truncate, we only make the final transaction 3570ac27a0ecSDave Kleikamp * synchronous 3571ac27a0ecSDave Kleikamp */ 3572ac27a0ecSDave Kleikamp if (IS_SYNC(inode)) 3573ac27a0ecSDave Kleikamp handle->h_sync = 1; 3574ac27a0ecSDave Kleikamp out_stop: 3575ac27a0ecSDave Kleikamp /* 3576ac27a0ecSDave Kleikamp * If this was a simple ftruncate(), and the file will remain alive 3577ac27a0ecSDave Kleikamp * then we need to clear up the orphan record which we created above. 3578ac27a0ecSDave Kleikamp * However, if this was a real unlink then we were called by 3579617ba13bSMingming Cao * ext4_delete_inode(), and we allow that function to clean up the 3580ac27a0ecSDave Kleikamp * orphan info for us. 
3581ac27a0ecSDave Kleikamp */ 3582ac27a0ecSDave Kleikamp if (inode->i_nlink) 3583617ba13bSMingming Cao ext4_orphan_del(handle, inode); 3584ac27a0ecSDave Kleikamp 3585617ba13bSMingming Cao ext4_journal_stop(handle); 3586ac27a0ecSDave Kleikamp } 3587ac27a0ecSDave Kleikamp 3588617ba13bSMingming Cao static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb, 3589617ba13bSMingming Cao unsigned long ino, struct ext4_iloc *iloc) 3590ac27a0ecSDave Kleikamp { 3591fd2d4291SAvantika Mathur ext4_group_t block_group; 3592ac27a0ecSDave Kleikamp unsigned long offset; 3593617ba13bSMingming Cao ext4_fsblk_t block; 3594617ba13bSMingming Cao struct ext4_group_desc *gdp; 3595ac27a0ecSDave Kleikamp 3596617ba13bSMingming Cao if (!ext4_valid_inum(sb, ino)) { 3597ac27a0ecSDave Kleikamp /* 3598ac27a0ecSDave Kleikamp * This error is already checked for in namei.c unless we are 3599ac27a0ecSDave Kleikamp * looking at an NFS filehandle, in which case no error 3600ac27a0ecSDave Kleikamp * report is needed 3601ac27a0ecSDave Kleikamp */ 3602ac27a0ecSDave Kleikamp return 0; 3603ac27a0ecSDave Kleikamp } 3604ac27a0ecSDave Kleikamp 3605617ba13bSMingming Cao block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb); 3606c0a4ef38SAkinobu Mita gdp = ext4_get_group_desc(sb, block_group, NULL); 3607c0a4ef38SAkinobu Mita if (!gdp) 3608ac27a0ecSDave Kleikamp return 0; 3609ac27a0ecSDave Kleikamp 3610ac27a0ecSDave Kleikamp /* 3611ac27a0ecSDave Kleikamp * Figure out the offset within the block group inode table 3612ac27a0ecSDave Kleikamp */ 3613617ba13bSMingming Cao offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) * 3614617ba13bSMingming Cao EXT4_INODE_SIZE(sb); 36158fadc143SAlexandre Ratchov block = ext4_inode_table(sb, gdp) + 36168fadc143SAlexandre Ratchov (offset >> EXT4_BLOCK_SIZE_BITS(sb)); 3617ac27a0ecSDave Kleikamp 3618ac27a0ecSDave Kleikamp iloc->block_group = block_group; 3619617ba13bSMingming Cao iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1); 3620ac27a0ecSDave Kleikamp return block; 3621ac27a0ecSDave Kleikamp } 3622ac27a0ecSDave Kleikamp 3623ac27a0ecSDave Kleikamp /* 3624617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3625ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3626ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3627ac27a0ecSDave Kleikamp * inode. 
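 * (Editor's worked example for ext4_get_inode_block() above; the geometry is
 * assumed rather than taken from this file: with 8192 inodes per group,
 * 256-byte inodes and 4 KiB blocks, inode 12000 falls in block group
 * (12000 - 1) / 8192 = 1 at index 11999 % 8192 = 3807. The byte offset into
 * that group's inode table is 3807 * 256 = 974592, so the inode lives in
 * inode-table block 974592 >> 12 = 237 and iloc->offset = 974592 & 4095 = 3840.)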
3628ac27a0ecSDave Kleikamp */ 3629617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3630617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3631ac27a0ecSDave Kleikamp { 3632617ba13bSMingming Cao ext4_fsblk_t block; 3633ac27a0ecSDave Kleikamp struct buffer_head *bh; 3634ac27a0ecSDave Kleikamp 3635617ba13bSMingming Cao block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc); 3636ac27a0ecSDave Kleikamp if (!block) 3637ac27a0ecSDave Kleikamp return -EIO; 3638ac27a0ecSDave Kleikamp 3639ac27a0ecSDave Kleikamp bh = sb_getblk(inode->i_sb, block); 3640ac27a0ecSDave Kleikamp if (!bh) { 3641617ba13bSMingming Cao ext4_error (inode->i_sb, "ext4_get_inode_loc", 3642ac27a0ecSDave Kleikamp "unable to read inode block - " 36432ae02107SMingming Cao "inode=%lu, block=%llu", 3644ac27a0ecSDave Kleikamp inode->i_ino, block); 3645ac27a0ecSDave Kleikamp return -EIO; 3646ac27a0ecSDave Kleikamp } 3647ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3648ac27a0ecSDave Kleikamp lock_buffer(bh); 36499c83a923SHidehiro Kawai 36509c83a923SHidehiro Kawai /* 36519c83a923SHidehiro Kawai * If the buffer has the write error flag, we have failed 36529c83a923SHidehiro Kawai * to write out another inode in the same block. In this 36539c83a923SHidehiro Kawai * case, we don't have to read the block because we may 36549c83a923SHidehiro Kawai * read the old inode data successfully. 36559c83a923SHidehiro Kawai */ 36569c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 36579c83a923SHidehiro Kawai set_buffer_uptodate(bh); 36589c83a923SHidehiro Kawai 3659ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3660ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3661ac27a0ecSDave Kleikamp unlock_buffer(bh); 3662ac27a0ecSDave Kleikamp goto has_buffer; 3663ac27a0ecSDave Kleikamp } 3664ac27a0ecSDave Kleikamp 3665ac27a0ecSDave Kleikamp /* 3666ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3667ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3668ac27a0ecSDave Kleikamp * block. 3669ac27a0ecSDave Kleikamp */ 3670ac27a0ecSDave Kleikamp if (in_mem) { 3671ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3672617ba13bSMingming Cao struct ext4_group_desc *desc; 3673ac27a0ecSDave Kleikamp int inodes_per_buffer; 3674ac27a0ecSDave Kleikamp int inode_offset, i; 3675fd2d4291SAvantika Mathur ext4_group_t block_group; 3676ac27a0ecSDave Kleikamp int start; 3677ac27a0ecSDave Kleikamp 3678ac27a0ecSDave Kleikamp block_group = (inode->i_ino - 1) / 3679617ba13bSMingming Cao EXT4_INODES_PER_GROUP(inode->i_sb); 3680ac27a0ecSDave Kleikamp inodes_per_buffer = bh->b_size / 3681617ba13bSMingming Cao EXT4_INODE_SIZE(inode->i_sb); 3682ac27a0ecSDave Kleikamp inode_offset = ((inode->i_ino - 1) % 3683617ba13bSMingming Cao EXT4_INODES_PER_GROUP(inode->i_sb)); 3684ac27a0ecSDave Kleikamp start = inode_offset & ~(inodes_per_buffer - 1); 3685ac27a0ecSDave Kleikamp 3686ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? 
*/ 3687617ba13bSMingming Cao desc = ext4_get_group_desc(inode->i_sb, 3688ac27a0ecSDave Kleikamp block_group, NULL); 3689ac27a0ecSDave Kleikamp if (!desc) 3690ac27a0ecSDave Kleikamp goto make_io; 3691ac27a0ecSDave Kleikamp 3692ac27a0ecSDave Kleikamp bitmap_bh = sb_getblk(inode->i_sb, 36938fadc143SAlexandre Ratchov ext4_inode_bitmap(inode->i_sb, desc)); 3694ac27a0ecSDave Kleikamp if (!bitmap_bh) 3695ac27a0ecSDave Kleikamp goto make_io; 3696ac27a0ecSDave Kleikamp 3697ac27a0ecSDave Kleikamp /* 3698ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3699ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3700ac27a0ecSDave Kleikamp * of one, so skip it. 3701ac27a0ecSDave Kleikamp */ 3702ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3703ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3704ac27a0ecSDave Kleikamp goto make_io; 3705ac27a0ecSDave Kleikamp } 3706ac27a0ecSDave Kleikamp for (i = start; i < start + inodes_per_buffer; i++) { 3707ac27a0ecSDave Kleikamp if (i == inode_offset) 3708ac27a0ecSDave Kleikamp continue; 3709617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3710ac27a0ecSDave Kleikamp break; 3711ac27a0ecSDave Kleikamp } 3712ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3713ac27a0ecSDave Kleikamp if (i == start + inodes_per_buffer) { 3714ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3715ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3716ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3717ac27a0ecSDave Kleikamp unlock_buffer(bh); 3718ac27a0ecSDave Kleikamp goto has_buffer; 3719ac27a0ecSDave Kleikamp } 3720ac27a0ecSDave Kleikamp } 3721ac27a0ecSDave Kleikamp 3722ac27a0ecSDave Kleikamp make_io: 3723ac27a0ecSDave Kleikamp /* 3724ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 3725ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 3726ac27a0ecSDave Kleikamp * Read the block from disk. 3727ac27a0ecSDave Kleikamp */ 3728ac27a0ecSDave Kleikamp get_bh(bh); 3729ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 3730ac27a0ecSDave Kleikamp submit_bh(READ_META, bh); 3731ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3732ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3733617ba13bSMingming Cao ext4_error(inode->i_sb, "ext4_get_inode_loc", 3734ac27a0ecSDave Kleikamp "unable to read inode block - " 37352ae02107SMingming Cao "inode=%lu, block=%llu", 3736ac27a0ecSDave Kleikamp inode->i_ino, block); 3737ac27a0ecSDave Kleikamp brelse(bh); 3738ac27a0ecSDave Kleikamp return -EIO; 3739ac27a0ecSDave Kleikamp } 3740ac27a0ecSDave Kleikamp } 3741ac27a0ecSDave Kleikamp has_buffer: 3742ac27a0ecSDave Kleikamp iloc->bh = bh; 3743ac27a0ecSDave Kleikamp return 0; 3744ac27a0ecSDave Kleikamp } 3745ac27a0ecSDave Kleikamp 3746617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 3747ac27a0ecSDave Kleikamp { 3748ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. 
*/ 3749617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 3750617ba13bSMingming Cao !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)); 3751ac27a0ecSDave Kleikamp } 3752ac27a0ecSDave Kleikamp 3753617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 3754ac27a0ecSDave Kleikamp { 3755617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 3756ac27a0ecSDave Kleikamp 3757ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 3758617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 3759ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 3760617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 3761ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 3762617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 3763ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 3764617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 3765ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 3766617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 3767ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 3768ac27a0ecSDave Kleikamp } 3769ac27a0ecSDave Kleikamp 3770ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 3771ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 3772ff9ddf7eSJan Kara { 3773ff9ddf7eSJan Kara unsigned int flags = ei->vfs_inode.i_flags; 3774ff9ddf7eSJan Kara 3775ff9ddf7eSJan Kara ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 3776ff9ddf7eSJan Kara EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL); 3777ff9ddf7eSJan Kara if (flags & S_SYNC) 3778ff9ddf7eSJan Kara ei->i_flags |= EXT4_SYNC_FL; 3779ff9ddf7eSJan Kara if (flags & S_APPEND) 3780ff9ddf7eSJan Kara ei->i_flags |= EXT4_APPEND_FL; 3781ff9ddf7eSJan Kara if (flags & S_IMMUTABLE) 3782ff9ddf7eSJan Kara ei->i_flags |= EXT4_IMMUTABLE_FL; 3783ff9ddf7eSJan Kara if (flags & S_NOATIME) 3784ff9ddf7eSJan Kara ei->i_flags |= EXT4_NOATIME_FL; 3785ff9ddf7eSJan Kara if (flags & S_DIRSYNC) 3786ff9ddf7eSJan Kara ei->i_flags |= EXT4_DIRSYNC_FL; 3787ff9ddf7eSJan Kara } 37880fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 37890fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 37900fc1b451SAneesh Kumar K.V { 37910fc1b451SAneesh Kumar K.V blkcnt_t i_blocks ; 37928180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 37938180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 37940fc1b451SAneesh Kumar K.V 37950fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 37960fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 37970fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 37980fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 37990fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 38008180a562SAneesh Kumar K.V if (ei->i_flags & EXT4_HUGE_FILE_FL) { 38018180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 38028180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 38038180a562SAneesh Kumar K.V } else { 38040fc1b451SAneesh Kumar K.V return i_blocks; 38058180a562SAneesh Kumar K.V } 38060fc1b451SAneesh Kumar K.V } else { 38070fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 38080fc1b451SAneesh Kumar K.V } 38090fc1b451SAneesh Kumar K.V } 3810ff9ddf7eSJan Kara 38111d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 3812ac27a0ecSDave Kleikamp { 3813617ba13bSMingming Cao struct ext4_iloc iloc; 3814617ba13bSMingming Cao struct ext4_inode 
*raw_inode; 38151d1fe1eeSDavid Howells struct ext4_inode_info *ei; 3816ac27a0ecSDave Kleikamp struct buffer_head *bh; 38171d1fe1eeSDavid Howells struct inode *inode; 38181d1fe1eeSDavid Howells long ret; 3819ac27a0ecSDave Kleikamp int block; 3820ac27a0ecSDave Kleikamp 38211d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 38221d1fe1eeSDavid Howells if (!inode) 38231d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 38241d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 38251d1fe1eeSDavid Howells return inode; 38261d1fe1eeSDavid Howells 38271d1fe1eeSDavid Howells ei = EXT4_I(inode); 3828617ba13bSMingming Cao #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL 3829617ba13bSMingming Cao ei->i_acl = EXT4_ACL_NOT_CACHED; 3830617ba13bSMingming Cao ei->i_default_acl = EXT4_ACL_NOT_CACHED; 3831ac27a0ecSDave Kleikamp #endif 3832ac27a0ecSDave Kleikamp ei->i_block_alloc_info = NULL; 3833ac27a0ecSDave Kleikamp 38341d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 38351d1fe1eeSDavid Howells if (ret < 0) 3836ac27a0ecSDave Kleikamp goto bad_inode; 3837ac27a0ecSDave Kleikamp bh = iloc.bh; 3838617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 3839ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 3840ac27a0ecSDave Kleikamp inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 3841ac27a0ecSDave Kleikamp inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 3842ac27a0ecSDave Kleikamp if(!(test_opt (inode->i_sb, NO_UID32))) { 3843ac27a0ecSDave Kleikamp inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 3844ac27a0ecSDave Kleikamp inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 3845ac27a0ecSDave Kleikamp } 3846ac27a0ecSDave Kleikamp inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 3847ac27a0ecSDave Kleikamp 3848ac27a0ecSDave Kleikamp ei->i_state = 0; 3849ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 3850ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 3851ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 3852ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes 3853ac27a0ecSDave Kleikamp * the test is that same one that e2fsck uses 3854ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 3855ac27a0ecSDave Kleikamp */ 3856ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 3857ac27a0ecSDave Kleikamp if (inode->i_mode == 0 || 3858617ba13bSMingming Cao !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 3859ac27a0ecSDave Kleikamp /* this inode is deleted */ 3860ac27a0ecSDave Kleikamp brelse (bh); 38611d1fe1eeSDavid Howells ret = -ESTALE; 3862ac27a0ecSDave Kleikamp goto bad_inode; 3863ac27a0ecSDave Kleikamp } 3864ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 3865ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 3866ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 3867ac27a0ecSDave Kleikamp * the process of deleting those. 
*/ 3868ac27a0ecSDave Kleikamp } 3869ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 38700fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 38717973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 38729b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 3873a48380f7SAneesh Kumar K.V cpu_to_le32(EXT4_OS_HURD)) { 3874a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 3875a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 3876ac27a0ecSDave Kleikamp } 3877a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 3878ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3879ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 3880ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 3881ac27a0ecSDave Kleikamp /* 3882ac27a0ecSDave Kleikamp * NOTE! The in-memory inode i_data array is in little-endian order 3883ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 3884ac27a0ecSDave Kleikamp */ 3885617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 3886ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 3887ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 3888ac27a0ecSDave Kleikamp 38890040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3890ac27a0ecSDave Kleikamp ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 3891617ba13bSMingming Cao if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 3892e5d2861fSKirill Korotaev EXT4_INODE_SIZE(inode->i_sb)) { 3893e5d2861fSKirill Korotaev brelse (bh); 38941d1fe1eeSDavid Howells ret = -EIO; 3895ac27a0ecSDave Kleikamp goto bad_inode; 3896e5d2861fSKirill Korotaev } 3897ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 3898ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. 
*/ 3899617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 3900617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 3901ac27a0ecSDave Kleikamp } else { 3902ac27a0ecSDave Kleikamp __le32 *magic = (void *)raw_inode + 3903617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE + 3904ac27a0ecSDave Kleikamp ei->i_extra_isize; 3905617ba13bSMingming Cao if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 3906617ba13bSMingming Cao ei->i_state |= EXT4_STATE_XATTR; 3907ac27a0ecSDave Kleikamp } 3908ac27a0ecSDave Kleikamp } else 3909ac27a0ecSDave Kleikamp ei->i_extra_isize = 0; 3910ac27a0ecSDave Kleikamp 3911ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 3912ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 3913ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 3914ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 3915ef7f3835SKalpak Shah 391625ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 391725ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 391825ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 391925ec56b5SJean Noel Cordenner inode->i_version |= 392025ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 392125ec56b5SJean Noel Cordenner } 392225ec56b5SJean Noel Cordenner 3923ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 3924617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 3925617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 3926617ba13bSMingming Cao ext4_set_aops(inode); 3927ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 3928617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 3929617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 3930ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 3931617ba13bSMingming Cao if (ext4_inode_is_fast_symlink(inode)) 3932617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 3933ac27a0ecSDave Kleikamp else { 3934617ba13bSMingming Cao inode->i_op = &ext4_symlink_inode_operations; 3935617ba13bSMingming Cao ext4_set_aops(inode); 3936ac27a0ecSDave Kleikamp } 3937ac27a0ecSDave Kleikamp } else { 3938617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 3939ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 3940ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3941ac27a0ecSDave Kleikamp old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 3942ac27a0ecSDave Kleikamp else 3943ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3944ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 3945ac27a0ecSDave Kleikamp } 3946ac27a0ecSDave Kleikamp brelse (iloc.bh); 3947617ba13bSMingming Cao ext4_set_inode_flags(inode); 39481d1fe1eeSDavid Howells unlock_new_inode(inode); 39491d1fe1eeSDavid Howells return inode; 3950ac27a0ecSDave Kleikamp 3951ac27a0ecSDave Kleikamp bad_inode: 39521d1fe1eeSDavid Howells iget_failed(inode); 39531d1fe1eeSDavid Howells return ERR_PTR(ret); 3954ac27a0ecSDave Kleikamp } 3955ac27a0ecSDave Kleikamp 39560fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 39570fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 39580fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 39590fc1b451SAneesh Kumar K.V { 39600fc1b451SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 39610fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks; 39620fc1b451SAneesh Kumar K.V struct 
super_block *sb = inode->i_sb; 39630fc1b451SAneesh Kumar K.V int err = 0; 39640fc1b451SAneesh Kumar K.V 39650fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) { 39660fc1b451SAneesh Kumar K.V /* 39670fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 32 bit variable 39680fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 39690fc1b451SAneesh Kumar K.V */ 39708180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39710fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0; 39728180a562SAneesh Kumar K.V ei->i_flags &= ~EXT4_HUGE_FILE_FL; 39730fc1b451SAneesh Kumar K.V } else if (i_blocks <= 0xffffffffffffULL) { 39740fc1b451SAneesh Kumar K.V /* 39750fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable 39760fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 39770fc1b451SAneesh Kumar K.V */ 39780fc1b451SAneesh Kumar K.V err = ext4_update_rocompat_feature(handle, sb, 39790fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE); 39800fc1b451SAneesh Kumar K.V if (err) 39810fc1b451SAneesh Kumar K.V goto err_out; 39820fc1b451SAneesh Kumar K.V /* i_block is stored in the split 48 bit fields */ 39838180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39840fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 39858180a562SAneesh Kumar K.V ei->i_flags &= ~EXT4_HUGE_FILE_FL; 39860fc1b451SAneesh Kumar K.V } else { 39878180a562SAneesh Kumar K.V /* 39888180a562SAneesh Kumar K.V * i_blocks should be represented in a 48 bit variable 39898180a562SAneesh Kumar K.V * as multiple of file system block size 39908180a562SAneesh Kumar K.V */ 39918180a562SAneesh Kumar K.V err = ext4_update_rocompat_feature(handle, sb, 39928180a562SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE); 39938180a562SAneesh Kumar K.V if (err) 39948180a562SAneesh Kumar K.V goto err_out; 39958180a562SAneesh Kumar K.V ei->i_flags |= EXT4_HUGE_FILE_FL; 39968180a562SAneesh Kumar K.V /* i_block is stored in file system block size */ 39978180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9); 39988180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39998180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 40000fc1b451SAneesh Kumar K.V } 40010fc1b451SAneesh Kumar K.V err_out: 40020fc1b451SAneesh Kumar K.V return err; 40030fc1b451SAneesh Kumar K.V } 40040fc1b451SAneesh Kumar K.V 4005ac27a0ecSDave Kleikamp /* 4006ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the 4007ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the 4008ac27a0ecSDave Kleikamp * buffer_head in the inode location struct. 4009ac27a0ecSDave Kleikamp * 4010ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh. 4011ac27a0ecSDave Kleikamp */ 4012617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle, 4013ac27a0ecSDave Kleikamp struct inode *inode, 4014617ba13bSMingming Cao struct ext4_iloc *iloc) 4015ac27a0ecSDave Kleikamp { 4016617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4017617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 4018ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh; 4019ac27a0ecSDave Kleikamp int err = 0, rc, block; 4020ac27a0ecSDave Kleikamp 4021ac27a0ecSDave Kleikamp /* For fields not tracked in the in-memory inode, 4022ac27a0ecSDave Kleikamp * initialise them to zero for new inodes. 
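 * (Editor's illustration for ext4_inode_blocks_set() above; the file size is
 * assumed: a fully allocated 3 TiB regular file has i_blocks = 3 * 2^31 =
 * 0x180000000 512-byte units. That exceeds ~0U but still fits in 48 bits, so
 * the middle branch enables the HUGE_FILE read-only compat feature and stores
 * i_blocks_lo = 0x80000000, i_blocks_high = 0x0001, with EXT4_HUGE_FILE_FL
 * cleared. Only a count that does not fit in 48 bits of 512-byte units takes
 * the last branch, which sets EXT4_HUGE_FILE_FL and re-expresses i_blocks in
 * filesystem blocks via the shift by (i_blkbits - 9).)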
*/ 4023617ba13bSMingming Cao if (ei->i_state & EXT4_STATE_NEW) 4024617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4025ac27a0ecSDave Kleikamp 4026ff9ddf7eSJan Kara ext4_get_inode_flags(ei); 4027ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode); 4028ac27a0ecSDave Kleikamp if(!(test_opt(inode->i_sb, NO_UID32))) { 4029ac27a0ecSDave Kleikamp raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); 4030ac27a0ecSDave Kleikamp raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); 4031ac27a0ecSDave Kleikamp /* 4032ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels. Otherwise, old inodes get 4033ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact 4034ac27a0ecSDave Kleikamp */ 4035ac27a0ecSDave Kleikamp if(!ei->i_dtime) { 4036ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 4037ac27a0ecSDave Kleikamp cpu_to_le16(high_16_bits(inode->i_uid)); 4038ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 4039ac27a0ecSDave Kleikamp cpu_to_le16(high_16_bits(inode->i_gid)); 4040ac27a0ecSDave Kleikamp } else { 4041ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4042ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4043ac27a0ecSDave Kleikamp } 4044ac27a0ecSDave Kleikamp } else { 4045ac27a0ecSDave Kleikamp raw_inode->i_uid_low = 4046ac27a0ecSDave Kleikamp cpu_to_le16(fs_high2lowuid(inode->i_uid)); 4047ac27a0ecSDave Kleikamp raw_inode->i_gid_low = 4048ac27a0ecSDave Kleikamp cpu_to_le16(fs_high2lowgid(inode->i_gid)); 4049ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4050ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4051ac27a0ecSDave Kleikamp } 4052ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4053ef7f3835SKalpak Shah 4054ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4055ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4056ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4057ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4058ef7f3835SKalpak Shah 40590fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 40600fc1b451SAneesh Kumar K.V goto out_brelse; 4061ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4062267e4db9SAneesh Kumar K.V /* clear the migrate flag in the raw_inode */ 4063267e4db9SAneesh Kumar K.V raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE); 40649b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 40659b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 4066a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 4067a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 40687973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4069a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 4070ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 4071ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 4072617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4073617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4074617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 4075617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4076ac27a0ecSDave Kleikamp /* If this is the first large file 4077ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
4078ac27a0ecSDave Kleikamp */ 4079617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, 4080617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 4081ac27a0ecSDave Kleikamp if (err) 4082ac27a0ecSDave Kleikamp goto out_brelse; 4083617ba13bSMingming Cao ext4_update_dynamic_rev(sb); 4084617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb, 4085617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 4086ac27a0ecSDave Kleikamp sb->s_dirt = 1; 4087ac27a0ecSDave Kleikamp handle->h_sync = 1; 4088617ba13bSMingming Cao err = ext4_journal_dirty_metadata(handle, 4089617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 4090ac27a0ecSDave Kleikamp } 4091ac27a0ecSDave Kleikamp } 4092ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4093ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 4094ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) { 4095ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 4096ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev)); 4097ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0; 4098ac27a0ecSDave Kleikamp } else { 4099ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0; 4100ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 4101ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev)); 4102ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0; 4103ac27a0ecSDave Kleikamp } 4104617ba13bSMingming Cao } else for (block = 0; block < EXT4_N_BLOCKS; block++) 4105ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block]; 4106ac27a0ecSDave Kleikamp 410725ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 410825ec56b5SJean Noel Cordenner if (ei->i_extra_isize) { 410925ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 411025ec56b5SJean Noel Cordenner raw_inode->i_version_hi = 411125ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32); 4112ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 411325ec56b5SJean Noel Cordenner } 411425ec56b5SJean Noel Cordenner 4115ac27a0ecSDave Kleikamp 4116617ba13bSMingming Cao BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 4117617ba13bSMingming Cao rc = ext4_journal_dirty_metadata(handle, bh); 4118ac27a0ecSDave Kleikamp if (!err) 4119ac27a0ecSDave Kleikamp err = rc; 4120617ba13bSMingming Cao ei->i_state &= ~EXT4_STATE_NEW; 4121ac27a0ecSDave Kleikamp 4122ac27a0ecSDave Kleikamp out_brelse: 4123ac27a0ecSDave Kleikamp brelse (bh); 4124617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4125ac27a0ecSDave Kleikamp return err; 4126ac27a0ecSDave Kleikamp } 4127ac27a0ecSDave Kleikamp 4128ac27a0ecSDave Kleikamp /* 4129617ba13bSMingming Cao * ext4_write_inode() 4130ac27a0ecSDave Kleikamp * 4131ac27a0ecSDave Kleikamp * We are called from a few places: 4132ac27a0ecSDave Kleikamp * 4133ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files. 4134ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running 4135ac27a0ecSDave Kleikamp * transaction to commit. 4136ac27a0ecSDave Kleikamp * 4137ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such. 4138ac27a0ecSDave Kleikamp * We wait on commit, if told to. 4139ac27a0ecSDave Kleikamp * 4140ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true) 4141ac27a0ecSDave Kleikamp * Here we simply return. We can't afford to block kswapd on the 4142ac27a0ecSDave Kleikamp * journal commit. 
4143ac27a0ecSDave Kleikamp * 4144ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything, 4145ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in 4146617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4147ac27a0ecSDave Kleikamp * knfsd. 4148ac27a0ecSDave Kleikamp * 4149ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the 4150ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in 4151ac27a0ecSDave Kleikamp * which we are interested. 4152ac27a0ecSDave Kleikamp * 4153ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code: 4154ac27a0ecSDave Kleikamp * 4155ac27a0ecSDave Kleikamp * mark_inode_dirty(inode) 4156ac27a0ecSDave Kleikamp * stuff(); 4157ac27a0ecSDave Kleikamp * inode->i_size = expr; 4158ac27a0ecSDave Kleikamp * 4159ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while 4160ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode 4161ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list. 4162ac27a0ecSDave Kleikamp */ 4163617ba13bSMingming Cao int ext4_write_inode(struct inode *inode, int wait) 4164ac27a0ecSDave Kleikamp { 4165ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC) 4166ac27a0ecSDave Kleikamp return 0; 4167ac27a0ecSDave Kleikamp 4168617ba13bSMingming Cao if (ext4_journal_current_handle()) { 4169b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4170ac27a0ecSDave Kleikamp dump_stack(); 4171ac27a0ecSDave Kleikamp return -EIO; 4172ac27a0ecSDave Kleikamp } 4173ac27a0ecSDave Kleikamp 4174ac27a0ecSDave Kleikamp if (!wait) 4175ac27a0ecSDave Kleikamp return 0; 4176ac27a0ecSDave Kleikamp 4177617ba13bSMingming Cao return ext4_force_commit(inode->i_sb); 4178ac27a0ecSDave Kleikamp } 4179ac27a0ecSDave Kleikamp 4180ac27a0ecSDave Kleikamp /* 4181617ba13bSMingming Cao * ext4_setattr() 4182ac27a0ecSDave Kleikamp * 4183ac27a0ecSDave Kleikamp * Called from notify_change. 4184ac27a0ecSDave Kleikamp * 4185ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as 4186ac27a0ecSDave Kleikamp * possible. In particular, we want to make sure that when the VFS 4187ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify 4188ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of 4189ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any 4190ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on 4191ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will 4192ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will 4193ac27a0ecSDave Kleikamp * leave these blocks visible to the user.) 4194ac27a0ecSDave Kleikamp * 4195678aaf48SJan Kara * Another thing we have to assure is that if we are in ordered mode 4196678aaf48SJan Kara * and inode is still attached to the committing transaction, we must 4197678aaf48SJan Kara * start writeout of all the dirty pages which are being truncated. 4198678aaf48SJan Kara * This way we are sure that all the data written in the previous 4199678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under 4200678aaf48SJan Kara * writeback). 
4201678aaf48SJan Kara * 4202678aaf48SJan Kara * Called with inode->i_mutex down. 4203ac27a0ecSDave Kleikamp */ 4204617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr) 4205ac27a0ecSDave Kleikamp { 4206ac27a0ecSDave Kleikamp struct inode *inode = dentry->d_inode; 4207ac27a0ecSDave Kleikamp int error, rc = 0; 4208ac27a0ecSDave Kleikamp const unsigned int ia_valid = attr->ia_valid; 4209ac27a0ecSDave Kleikamp 4210ac27a0ecSDave Kleikamp error = inode_change_ok(inode, attr); 4211ac27a0ecSDave Kleikamp if (error) 4212ac27a0ecSDave Kleikamp return error; 4213ac27a0ecSDave Kleikamp 4214ac27a0ecSDave Kleikamp if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 4215ac27a0ecSDave Kleikamp (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 4216ac27a0ecSDave Kleikamp handle_t *handle; 4217ac27a0ecSDave Kleikamp 4218ac27a0ecSDave Kleikamp /* (user+group)*(old+new) structure, inode write (sb, 4219ac27a0ecSDave Kleikamp * inode block, ? - but truncate inode update has it) */ 4220617ba13bSMingming Cao handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+ 4221617ba13bSMingming Cao EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3); 4222ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4223ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4224ac27a0ecSDave Kleikamp goto err_out; 4225ac27a0ecSDave Kleikamp } 4226ac27a0ecSDave Kleikamp error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 4227ac27a0ecSDave Kleikamp if (error) { 4228617ba13bSMingming Cao ext4_journal_stop(handle); 4229ac27a0ecSDave Kleikamp return error; 4230ac27a0ecSDave Kleikamp } 4231ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4232ac27a0ecSDave Kleikamp * one transaction */ 4233ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4234ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4235ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4236ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4237617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4238617ba13bSMingming Cao ext4_journal_stop(handle); 4239ac27a0ecSDave Kleikamp } 4240ac27a0ecSDave Kleikamp 4241e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4242e2b46574SEric Sandeen if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { 4243e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4244e2b46574SEric Sandeen 4245e2b46574SEric Sandeen if (attr->ia_size > sbi->s_bitmap_maxbytes) { 4246e2b46574SEric Sandeen error = -EFBIG; 4247e2b46574SEric Sandeen goto err_out; 4248e2b46574SEric Sandeen } 4249e2b46574SEric Sandeen } 4250e2b46574SEric Sandeen } 4251e2b46574SEric Sandeen 4252ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4253ac27a0ecSDave Kleikamp attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { 4254ac27a0ecSDave Kleikamp handle_t *handle; 4255ac27a0ecSDave Kleikamp 4256617ba13bSMingming Cao handle = ext4_journal_start(inode, 3); 4257ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4258ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4259ac27a0ecSDave Kleikamp goto err_out; 4260ac27a0ecSDave Kleikamp } 4261ac27a0ecSDave Kleikamp 4262617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 4263617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 4264617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 4265ac27a0ecSDave Kleikamp if (!error) 4266ac27a0ecSDave Kleikamp error = rc; 4267617ba13bSMingming Cao ext4_journal_stop(handle); 4268678aaf48SJan Kara 4269678aaf48SJan Kara if (ext4_should_order_data(inode)) { 
4270678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode, 4271678aaf48SJan Kara attr->ia_size); 4272678aaf48SJan Kara if (error) { 4273678aaf48SJan Kara /* Do as much error cleanup as possible */ 4274678aaf48SJan Kara handle = ext4_journal_start(inode, 3); 4275678aaf48SJan Kara if (IS_ERR(handle)) { 4276678aaf48SJan Kara ext4_orphan_del(NULL, inode); 4277678aaf48SJan Kara goto err_out; 4278678aaf48SJan Kara } 4279678aaf48SJan Kara ext4_orphan_del(handle, inode); 4280678aaf48SJan Kara ext4_journal_stop(handle); 4281678aaf48SJan Kara goto err_out; 4282678aaf48SJan Kara } 4283678aaf48SJan Kara } 4284ac27a0ecSDave Kleikamp } 4285ac27a0ecSDave Kleikamp 4286ac27a0ecSDave Kleikamp rc = inode_setattr(inode, attr); 4287ac27a0ecSDave Kleikamp 4288617ba13bSMingming Cao /* If inode_setattr's call to ext4_truncate failed to get a 4289ac27a0ecSDave Kleikamp * transaction handle at all, we need to clean up the in-core 4290ac27a0ecSDave Kleikamp * orphan list manually. */ 4291ac27a0ecSDave Kleikamp if (inode->i_nlink) 4292617ba13bSMingming Cao ext4_orphan_del(NULL, inode); 4293ac27a0ecSDave Kleikamp 4294ac27a0ecSDave Kleikamp if (!rc && (ia_valid & ATTR_MODE)) 4295617ba13bSMingming Cao rc = ext4_acl_chmod(inode); 4296ac27a0ecSDave Kleikamp 4297ac27a0ecSDave Kleikamp err_out: 4298617ba13bSMingming Cao ext4_std_error(inode->i_sb, error); 4299ac27a0ecSDave Kleikamp if (!error) 4300ac27a0ecSDave Kleikamp error = rc; 4301ac27a0ecSDave Kleikamp return error; 4302ac27a0ecSDave Kleikamp } 4303ac27a0ecSDave Kleikamp 43043e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 43053e3398a0SMingming Cao struct kstat *stat) 43063e3398a0SMingming Cao { 43073e3398a0SMingming Cao struct inode *inode; 43083e3398a0SMingming Cao unsigned long delalloc_blocks; 43093e3398a0SMingming Cao 43103e3398a0SMingming Cao inode = dentry->d_inode; 43113e3398a0SMingming Cao generic_fillattr(inode, stat); 43123e3398a0SMingming Cao 43133e3398a0SMingming Cao /* 43143e3398a0SMingming Cao * We can't update i_blocks if the block allocation is delayed 43153e3398a0SMingming Cao * otherwise in the case of system crash before the real block 43163e3398a0SMingming Cao * allocation is done, we will have i_blocks inconsistent with 43173e3398a0SMingming Cao * on-disk file blocks. 43183e3398a0SMingming Cao * We always keep i_blocks updated together with real 43193e3398a0SMingming Cao * allocation. But to not confuse with user, stat 43203e3398a0SMingming Cao * will return the blocks that include the delayed allocation 43213e3398a0SMingming Cao * blocks for this file. 43223e3398a0SMingming Cao */ 43233e3398a0SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 43243e3398a0SMingming Cao delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; 43253e3398a0SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 43263e3398a0SMingming Cao 43273e3398a0SMingming Cao stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 43283e3398a0SMingming Cao return 0; 43293e3398a0SMingming Cao } 4330ac27a0ecSDave Kleikamp 4331ac27a0ecSDave Kleikamp /* 4332ac27a0ecSDave Kleikamp * How many blocks doth make a writepage()? 
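 * (Editor's note on ext4_getattr() above, with assumed numbers: with 4 KiB
 * blocks, 10 delayed-allocation blocks add (10 << 12) >> 9 = 80 sectors to
 * stat->blocks, so st_blocks already reflects space that delayed allocation
 * has reserved but not yet mapped on disk.)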
4333ac27a0ecSDave Kleikamp * 4334ac27a0ecSDave Kleikamp * With N blocks per page, it may be: 4335ac27a0ecSDave Kleikamp * N data blocks 4336ac27a0ecSDave Kleikamp * 2 indirect blocks 4337ac27a0ecSDave Kleikamp * 2 dindirect 4338ac27a0ecSDave Kleikamp * 1 tindirect 4339ac27a0ecSDave Kleikamp * N+5 bitmap blocks (from the above) 4340ac27a0ecSDave Kleikamp * N+5 group descriptor summary blocks 4341ac27a0ecSDave Kleikamp * 1 inode block 4342ac27a0ecSDave Kleikamp * 1 superblock. 4343617ba13bSMingming Cao * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files 4344ac27a0ecSDave Kleikamp * 4345617ba13bSMingming Cao * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS 4346ac27a0ecSDave Kleikamp * 4347ac27a0ecSDave Kleikamp * With ordered or writeback data it's the same, less the N data blocks. 4348ac27a0ecSDave Kleikamp * 4349ac27a0ecSDave Kleikamp * If the inode's direct blocks can hold an integral number of pages then a 4350ac27a0ecSDave Kleikamp * page cannot straddle two indirect blocks, and we can only touch one indirect 4351ac27a0ecSDave Kleikamp * and dindirect block, and the "5" above becomes "3". 4352ac27a0ecSDave Kleikamp * 4353ac27a0ecSDave Kleikamp * This still overestimates under most circumstances. If we were to pass the 4354ac27a0ecSDave Kleikamp * start and end offsets in here as well we could do block_to_path() on each 4355ac27a0ecSDave Kleikamp * block and work out the exact number of indirects which are touched. Pah. 4356ac27a0ecSDave Kleikamp */ 4357ac27a0ecSDave Kleikamp 4358a86c6181SAlex Tomas int ext4_writepage_trans_blocks(struct inode *inode) 4359ac27a0ecSDave Kleikamp { 4360617ba13bSMingming Cao int bpp = ext4_journal_blocks_per_page(inode); 4361617ba13bSMingming Cao int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3; 4362ac27a0ecSDave Kleikamp int ret; 4363ac27a0ecSDave Kleikamp 4364a86c6181SAlex Tomas if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) 4365a86c6181SAlex Tomas return ext4_ext_writepage_trans_blocks(inode, bpp); 4366a86c6181SAlex Tomas 4367617ba13bSMingming Cao if (ext4_should_journal_data(inode)) 4368ac27a0ecSDave Kleikamp ret = 3 * (bpp + indirects) + 2; 4369ac27a0ecSDave Kleikamp else 4370ac27a0ecSDave Kleikamp ret = 2 * (bpp + indirects) + 2; 4371ac27a0ecSDave Kleikamp 4372ac27a0ecSDave Kleikamp #ifdef CONFIG_QUOTA 4373ac27a0ecSDave Kleikamp /* We know that the structure was already allocated during DQUOT_INIT so 4374ac27a0ecSDave Kleikamp * we will be updating only the data blocks + inodes */ 4375617ba13bSMingming Cao ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); 4376ac27a0ecSDave Kleikamp #endif 4377ac27a0ecSDave Kleikamp 4378ac27a0ecSDave Kleikamp return ret; 4379ac27a0ecSDave Kleikamp } 4380ac27a0ecSDave Kleikamp 4381ac27a0ecSDave Kleikamp /* 4382617ba13bSMingming Cao * The caller must have previously called ext4_reserve_inode_write(). 4383ac27a0ecSDave Kleikamp * Given this, we know that the caller already has write access to iloc->bh.
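 *
 * A minimal usage sketch, mirroring what ext4_mark_inode_dirty() below
 * does (error handling trimmed for brevity):
 *
 *	struct ext4_iloc iloc;
 *	int err;
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err)
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);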
4384ac27a0ecSDave Kleikamp */ 4385617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle, 4386617ba13bSMingming Cao struct inode *inode, struct ext4_iloc *iloc) 4387ac27a0ecSDave Kleikamp { 4388ac27a0ecSDave Kleikamp int err = 0; 4389ac27a0ecSDave Kleikamp 439025ec56b5SJean Noel Cordenner if (test_opt(inode->i_sb, I_VERSION)) 439125ec56b5SJean Noel Cordenner inode_inc_iversion(inode); 439225ec56b5SJean Noel Cordenner 4393ac27a0ecSDave Kleikamp /* ext4_do_update_inode() consumes one bh->b_count */ 4394ac27a0ecSDave Kleikamp get_bh(iloc->bh); 4395ac27a0ecSDave Kleikamp 4396dab291afSMingming Cao /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ 4397617ba13bSMingming Cao err = ext4_do_update_inode(handle, inode, iloc); 4398ac27a0ecSDave Kleikamp put_bh(iloc->bh); 4399ac27a0ecSDave Kleikamp return err; 4400ac27a0ecSDave Kleikamp } 4401ac27a0ecSDave Kleikamp 4402ac27a0ecSDave Kleikamp /* 4403ac27a0ecSDave Kleikamp * On success, we end up with an outstanding reference count against 4404ac27a0ecSDave Kleikamp * iloc->bh. This _must_ be cleaned up later. 4405ac27a0ecSDave Kleikamp */ 4406ac27a0ecSDave Kleikamp 4407ac27a0ecSDave Kleikamp int 4408617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 4409617ba13bSMingming Cao struct ext4_iloc *iloc) 4410ac27a0ecSDave Kleikamp { 4411ac27a0ecSDave Kleikamp int err = 0; 4412ac27a0ecSDave Kleikamp if (handle) { 4413617ba13bSMingming Cao err = ext4_get_inode_loc(inode, iloc); 4414ac27a0ecSDave Kleikamp if (!err) { 4415ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc->bh, "get_write_access"); 4416617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, iloc->bh); 4417ac27a0ecSDave Kleikamp if (err) { 4418ac27a0ecSDave Kleikamp brelse(iloc->bh); 4419ac27a0ecSDave Kleikamp iloc->bh = NULL; 4420ac27a0ecSDave Kleikamp } 4421ac27a0ecSDave Kleikamp } 4422ac27a0ecSDave Kleikamp } 4423617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4424ac27a0ecSDave Kleikamp return err; 4425ac27a0ecSDave Kleikamp } 4426ac27a0ecSDave Kleikamp 4427ac27a0ecSDave Kleikamp /* 44286dd4ee7cSKalpak Shah * Expand an inode by new_extra_isize bytes. 44296dd4ee7cSKalpak Shah * Returns 0 on success or negative error number on failure.
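 *
 * For reference, a sketch of the call pattern used by ext4_mark_inode_dirty()
 * below; the handle is extended first because the expansion may also write
 * into the EA block with the same handle:
 *
 *	if (jbd2_journal_extend(handle,
 *			EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) == 0)
 *		ret = ext4_expand_extra_isize(inode, sbi->s_want_extra_isize,
 *					      iloc, handle);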
44306dd4ee7cSKalpak Shah */ 44311d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode, 44321d03ec98SAneesh Kumar K.V unsigned int new_extra_isize, 44331d03ec98SAneesh Kumar K.V struct ext4_iloc iloc, 44341d03ec98SAneesh Kumar K.V handle_t *handle) 44356dd4ee7cSKalpak Shah { 44366dd4ee7cSKalpak Shah struct ext4_inode *raw_inode; 44376dd4ee7cSKalpak Shah struct ext4_xattr_ibody_header *header; 44386dd4ee7cSKalpak Shah struct ext4_xattr_entry *entry; 44396dd4ee7cSKalpak Shah 44406dd4ee7cSKalpak Shah if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 44416dd4ee7cSKalpak Shah return 0; 44426dd4ee7cSKalpak Shah 44436dd4ee7cSKalpak Shah raw_inode = ext4_raw_inode(&iloc); 44446dd4ee7cSKalpak Shah 44456dd4ee7cSKalpak Shah header = IHDR(inode, raw_inode); 44466dd4ee7cSKalpak Shah entry = IFIRST(header); 44476dd4ee7cSKalpak Shah 44486dd4ee7cSKalpak Shah /* No extended attributes present */ 44496dd4ee7cSKalpak Shah if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) || 44506dd4ee7cSKalpak Shah header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 44516dd4ee7cSKalpak Shah memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 44526dd4ee7cSKalpak Shah new_extra_isize); 44536dd4ee7cSKalpak Shah EXT4_I(inode)->i_extra_isize = new_extra_isize; 44546dd4ee7cSKalpak Shah return 0; 44556dd4ee7cSKalpak Shah } 44566dd4ee7cSKalpak Shah 44576dd4ee7cSKalpak Shah /* try to expand with EAs present */ 44586dd4ee7cSKalpak Shah return ext4_expand_extra_isize_ea(inode, new_extra_isize, 44596dd4ee7cSKalpak Shah raw_inode, handle); 44606dd4ee7cSKalpak Shah } 44616dd4ee7cSKalpak Shah 44626dd4ee7cSKalpak Shah /* 4463ac27a0ecSDave Kleikamp * What we do here is to mark the in-core inode as clean with respect to inode 4464ac27a0ecSDave Kleikamp * dirtiness (it may still be data-dirty). 4465ac27a0ecSDave Kleikamp * This means that the in-core inode may be reaped by prune_icache 4466ac27a0ecSDave Kleikamp * without having to perform any I/O. This is a very good thing, 4467ac27a0ecSDave Kleikamp * because *any* task may call prune_icache - even ones which 4468ac27a0ecSDave Kleikamp * have a transaction open against a different journal. 4469ac27a0ecSDave Kleikamp * 4470ac27a0ecSDave Kleikamp * Is this cheating? Not really. Sure, we haven't written the 4471ac27a0ecSDave Kleikamp * inode out, but prune_icache isn't a user-visible syncing function. 4472ac27a0ecSDave Kleikamp * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 4473ac27a0ecSDave Kleikamp * we start and wait on commits. 4474ac27a0ecSDave Kleikamp * 4475ac27a0ecSDave Kleikamp * Is this efficient/effective? Well, we're being nice to the system 4476ac27a0ecSDave Kleikamp * by cleaning up our inodes proactively so they can be reaped 4477ac27a0ecSDave Kleikamp * without I/O. But we are potentially leaving up to five seconds' 4478ac27a0ecSDave Kleikamp * worth of inodes floating about which prune_icache wants us to 4479ac27a0ecSDave Kleikamp * write out. One way to fix that would be to get prune_icache() 4480ac27a0ecSDave Kleikamp * to do a write_super() to free up some memory. It has the desired 4481ac27a0ecSDave Kleikamp * effect. 
4482ac27a0ecSDave Kleikamp */ 4483617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 4484ac27a0ecSDave Kleikamp { 4485617ba13bSMingming Cao struct ext4_iloc iloc; 44866dd4ee7cSKalpak Shah struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 44876dd4ee7cSKalpak Shah static unsigned int mnt_count; 44886dd4ee7cSKalpak Shah int err, ret; 4489ac27a0ecSDave Kleikamp 4490ac27a0ecSDave Kleikamp might_sleep(); 4491617ba13bSMingming Cao err = ext4_reserve_inode_write(handle, inode, &iloc); 44926dd4ee7cSKalpak Shah if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 44936dd4ee7cSKalpak Shah !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { 44946dd4ee7cSKalpak Shah /* 44956dd4ee7cSKalpak Shah * We need extra buffer credits since we may write into EA block 44966dd4ee7cSKalpak Shah * with this same handle. If journal_extend fails, then it will 44976dd4ee7cSKalpak Shah * only result in a minor loss of functionality for that inode. 44986dd4ee7cSKalpak Shah * If this is felt to be critical, then e2fsck should be run to 44996dd4ee7cSKalpak Shah * force a large enough s_min_extra_isize. 45006dd4ee7cSKalpak Shah */ 45016dd4ee7cSKalpak Shah if ((jbd2_journal_extend(handle, 45026dd4ee7cSKalpak Shah EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 45036dd4ee7cSKalpak Shah ret = ext4_expand_extra_isize(inode, 45046dd4ee7cSKalpak Shah sbi->s_want_extra_isize, 45056dd4ee7cSKalpak Shah iloc, handle); 45066dd4ee7cSKalpak Shah if (ret) { 45076dd4ee7cSKalpak Shah EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; 4508c1bddad9SAneesh Kumar K.V if (mnt_count != 4509c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count)) { 451046e665e9SHarvey Harrison ext4_warning(inode->i_sb, __func__, 45116dd4ee7cSKalpak Shah "Unable to expand inode %lu. Delete" 45126dd4ee7cSKalpak Shah " some EAs or run e2fsck.", 45136dd4ee7cSKalpak Shah inode->i_ino); 4514c1bddad9SAneesh Kumar K.V mnt_count = 4515c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count); 45166dd4ee7cSKalpak Shah } 45176dd4ee7cSKalpak Shah } 45186dd4ee7cSKalpak Shah } 45196dd4ee7cSKalpak Shah } 4520ac27a0ecSDave Kleikamp if (!err) 4521617ba13bSMingming Cao err = ext4_mark_iloc_dirty(handle, inode, &iloc); 4522ac27a0ecSDave Kleikamp return err; 4523ac27a0ecSDave Kleikamp } 4524ac27a0ecSDave Kleikamp 4525ac27a0ecSDave Kleikamp /* 4526617ba13bSMingming Cao * ext4_dirty_inode() is called from __mark_inode_dirty() 4527ac27a0ecSDave Kleikamp * 4528ac27a0ecSDave Kleikamp * We're really interested in the case where a file is being extended. 4529ac27a0ecSDave Kleikamp * i_size has been changed by generic_commit_write() and we thus need 4530ac27a0ecSDave Kleikamp * to include the updated inode in the current transaction. 4531ac27a0ecSDave Kleikamp * 4532ac27a0ecSDave Kleikamp * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks 4533ac27a0ecSDave Kleikamp * are allocated to the file. 4534ac27a0ecSDave Kleikamp * 4535ac27a0ecSDave Kleikamp * If the inode is marked synchronous, we don't honour that here - doing 4536ac27a0ecSDave Kleikamp * so would cause a commit on atime updates, which we don't bother doing. 4537ac27a0ecSDave Kleikamp * We handle synchronous inodes at the highest possible level. 
4538ac27a0ecSDave Kleikamp */ 4539617ba13bSMingming Cao void ext4_dirty_inode(struct inode *inode) 4540ac27a0ecSDave Kleikamp { 4541617ba13bSMingming Cao handle_t *current_handle = ext4_journal_current_handle(); 4542ac27a0ecSDave Kleikamp handle_t *handle; 4543ac27a0ecSDave Kleikamp 4544617ba13bSMingming Cao handle = ext4_journal_start(inode, 2); 4545ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4546ac27a0ecSDave Kleikamp goto out; 4547ac27a0ecSDave Kleikamp if (current_handle && 4548ac27a0ecSDave Kleikamp current_handle->h_transaction != handle->h_transaction) { 4549ac27a0ecSDave Kleikamp /* This task has a transaction open against a different fs */ 4550ac27a0ecSDave Kleikamp printk(KERN_EMERG "%s: transactions do not match!\n", 455146e665e9SHarvey Harrison __func__); 4552ac27a0ecSDave Kleikamp } else { 4553ac27a0ecSDave Kleikamp jbd_debug(5, "marking dirty. outer handle=%p\n", 4554ac27a0ecSDave Kleikamp current_handle); 4555617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 4556ac27a0ecSDave Kleikamp } 4557617ba13bSMingming Cao ext4_journal_stop(handle); 4558ac27a0ecSDave Kleikamp out: 4559ac27a0ecSDave Kleikamp return; 4560ac27a0ecSDave Kleikamp } 4561ac27a0ecSDave Kleikamp 4562ac27a0ecSDave Kleikamp #if 0 4563ac27a0ecSDave Kleikamp /* 4564ac27a0ecSDave Kleikamp * Bind an inode's backing buffer_head into this transaction, to prevent 4565ac27a0ecSDave Kleikamp * it from being flushed to disk early. Unlike 4566617ba13bSMingming Cao * ext4_reserve_inode_write, this leaves behind no bh reference and 4567ac27a0ecSDave Kleikamp * returns no iloc structure, so the caller needs to repeat the iloc 4568ac27a0ecSDave Kleikamp * lookup to mark the inode dirty later. 4569ac27a0ecSDave Kleikamp */ 4570617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode) 4571ac27a0ecSDave Kleikamp { 4572617ba13bSMingming Cao struct ext4_iloc iloc; 4573ac27a0ecSDave Kleikamp 4574ac27a0ecSDave Kleikamp int err = 0; 4575ac27a0ecSDave Kleikamp if (handle) { 4576617ba13bSMingming Cao err = ext4_get_inode_loc(inode, &iloc); 4577ac27a0ecSDave Kleikamp if (!err) { 4578ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc.bh, "get_write_access"); 4579dab291afSMingming Cao err = jbd2_journal_get_write_access(handle, iloc.bh); 4580ac27a0ecSDave Kleikamp if (!err) 4581617ba13bSMingming Cao err = ext4_journal_dirty_metadata(handle, 4582ac27a0ecSDave Kleikamp iloc.bh); 4583ac27a0ecSDave Kleikamp brelse(iloc.bh); 4584ac27a0ecSDave Kleikamp } 4585ac27a0ecSDave Kleikamp } 4586617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4587ac27a0ecSDave Kleikamp return err; 4588ac27a0ecSDave Kleikamp } 4589ac27a0ecSDave Kleikamp #endif 4590ac27a0ecSDave Kleikamp 4591617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val) 4592ac27a0ecSDave Kleikamp { 4593ac27a0ecSDave Kleikamp journal_t *journal; 4594ac27a0ecSDave Kleikamp handle_t *handle; 4595ac27a0ecSDave Kleikamp int err; 4596ac27a0ecSDave Kleikamp 4597ac27a0ecSDave Kleikamp /* 4598ac27a0ecSDave Kleikamp * We have to be very careful here: changing a data block's 4599ac27a0ecSDave Kleikamp * journaling status dynamically is dangerous. If we write a 4600ac27a0ecSDave Kleikamp * data block to the journal, change the status and then delete 4601ac27a0ecSDave Kleikamp * that block, we risk forgetting to revoke the old log record 4602ac27a0ecSDave Kleikamp * from the journal and so a subsequent replay can corrupt data. 
4603ac27a0ecSDave Kleikamp * So, first we make sure that the journal is empty and that 4604ac27a0ecSDave Kleikamp * nobody is changing anything. 4605ac27a0ecSDave Kleikamp */ 4606ac27a0ecSDave Kleikamp 4607617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 4608d699594dSDave Hansen if (is_journal_aborted(journal)) 4609ac27a0ecSDave Kleikamp return -EROFS; 4610ac27a0ecSDave Kleikamp 4611dab291afSMingming Cao jbd2_journal_lock_updates(journal); 4612dab291afSMingming Cao jbd2_journal_flush(journal); 4613ac27a0ecSDave Kleikamp 4614ac27a0ecSDave Kleikamp /* 4615ac27a0ecSDave Kleikamp * OK, there are no updates running now, and all cached data is 4616ac27a0ecSDave Kleikamp * synced to disk. We are now in a completely consistent state 4617ac27a0ecSDave Kleikamp * which doesn't have anything in the journal, and we know that 4618ac27a0ecSDave Kleikamp * no filesystem updates are running, so it is safe to modify 4619ac27a0ecSDave Kleikamp * the inode's in-core data-journaling state flag now. 4620ac27a0ecSDave Kleikamp */ 4621ac27a0ecSDave Kleikamp 4622ac27a0ecSDave Kleikamp if (val) 4623617ba13bSMingming Cao EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; 4624ac27a0ecSDave Kleikamp else 4625617ba13bSMingming Cao EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; 4626617ba13bSMingming Cao ext4_set_aops(inode); 4627ac27a0ecSDave Kleikamp 4628dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 4629ac27a0ecSDave Kleikamp 4630ac27a0ecSDave Kleikamp /* Finally we can mark the inode as dirty. */ 4631ac27a0ecSDave Kleikamp 4632617ba13bSMingming Cao handle = ext4_journal_start(inode, 1); 4633ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4634ac27a0ecSDave Kleikamp return PTR_ERR(handle); 4635ac27a0ecSDave Kleikamp 4636617ba13bSMingming Cao err = ext4_mark_inode_dirty(handle, inode); 4637ac27a0ecSDave Kleikamp handle->h_sync = 1; 4638617ba13bSMingming Cao ext4_journal_stop(handle); 4639617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4640ac27a0ecSDave Kleikamp 4641ac27a0ecSDave Kleikamp return err; 4642ac27a0ecSDave Kleikamp } 46432e9ee850SAneesh Kumar K.V 46442e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 46452e9ee850SAneesh Kumar K.V { 46462e9ee850SAneesh Kumar K.V return !buffer_mapped(bh); 46472e9ee850SAneesh Kumar K.V } 46482e9ee850SAneesh Kumar K.V 46492e9ee850SAneesh Kumar K.V int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page) 46502e9ee850SAneesh Kumar K.V { 46512e9ee850SAneesh Kumar K.V loff_t size; 46522e9ee850SAneesh Kumar K.V unsigned long len; 46532e9ee850SAneesh Kumar K.V int ret = -EINVAL; 46542e9ee850SAneesh Kumar K.V struct file *file = vma->vm_file; 46552e9ee850SAneesh Kumar K.V struct inode *inode = file->f_path.dentry->d_inode; 46562e9ee850SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 46572e9ee850SAneesh Kumar K.V 46582e9ee850SAneesh Kumar K.V /* 46592e9ee850SAneesh Kumar K.V * Get i_alloc_sem to stop truncates messing with the inode. We cannot 46602e9ee850SAneesh Kumar K.V * get i_mutex because we are already holding mmap_sem. 46612e9ee850SAneesh Kumar K.V */ 46622e9ee850SAneesh Kumar K.V down_read(&inode->i_alloc_sem); 46632e9ee850SAneesh Kumar K.V size = i_size_read(inode); 46642e9ee850SAneesh Kumar K.V if (page->mapping != mapping || size <= page_offset(page) 46652e9ee850SAneesh Kumar K.V || !PageUptodate(page)) { 46662e9ee850SAneesh Kumar K.V /* page got truncated from under us? 
*/ 46672e9ee850SAneesh Kumar K.V goto out_unlock; 46682e9ee850SAneesh Kumar K.V } 46692e9ee850SAneesh Kumar K.V ret = 0; 46702e9ee850SAneesh Kumar K.V if (PageMappedToDisk(page)) 46712e9ee850SAneesh Kumar K.V goto out_unlock; 46722e9ee850SAneesh Kumar K.V 46732e9ee850SAneesh Kumar K.V if (page->index == size >> PAGE_CACHE_SHIFT) 46742e9ee850SAneesh Kumar K.V len = size & ~PAGE_CACHE_MASK; 46752e9ee850SAneesh Kumar K.V else 46762e9ee850SAneesh Kumar K.V len = PAGE_CACHE_SIZE; 46772e9ee850SAneesh Kumar K.V 46782e9ee850SAneesh Kumar K.V if (page_has_buffers(page)) { 46792e9ee850SAneesh Kumar K.V /* return if we have all the buffers mapped */ 46802e9ee850SAneesh Kumar K.V if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 46812e9ee850SAneesh Kumar K.V ext4_bh_unmapped)) 46822e9ee850SAneesh Kumar K.V goto out_unlock; 46832e9ee850SAneesh Kumar K.V } 46842e9ee850SAneesh Kumar K.V /* 46852e9ee850SAneesh Kumar K.V * OK, we need to fill the hole... Do write_begin/write_end 46862e9ee850SAneesh Kumar K.V * to do the block allocation/reservation. We are not holding 46872e9ee850SAneesh Kumar K.V * inode->i_mutex here; that allows parallel write_begin and 46882e9ee850SAneesh Kumar K.V * write_end calls. lock_page prevents this from happening 46892e9ee850SAneesh Kumar K.V * on the same page, though. 46902e9ee850SAneesh Kumar K.V */ 46912e9ee850SAneesh Kumar K.V ret = mapping->a_ops->write_begin(file, mapping, page_offset(page), 46922e9ee850SAneesh Kumar K.V len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL); 46932e9ee850SAneesh Kumar K.V if (ret < 0) 46942e9ee850SAneesh Kumar K.V goto out_unlock; 46952e9ee850SAneesh Kumar K.V ret = mapping->a_ops->write_end(file, mapping, page_offset(page), 46962e9ee850SAneesh Kumar K.V len, len, page, NULL); 46972e9ee850SAneesh Kumar K.V if (ret < 0) 46982e9ee850SAneesh Kumar K.V goto out_unlock; 46992e9ee850SAneesh Kumar K.V ret = 0; 47002e9ee850SAneesh Kumar K.V out_unlock: 47012e9ee850SAneesh Kumar K.V up_read(&inode->i_alloc_sem); 47022e9ee850SAneesh Kumar K.V return ret; 47032e9ee850SAneesh Kumar K.V } 4704