/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "ext4_extents.h"

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
		struct buffer_head *bh, ext4_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext4_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call jbd2_journal_forget");
			return ext4_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext4_journal_revoke");
	err = ext4_journal_revoke(handle, blocknr, bh);
	if (err)
		ext4_abort(inode->i_sb, __func__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	ext4_lblk_t needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext4 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT4_MAX_TRANS_DATA)
		needed = EXT4_MAX_TRANS_DATA;

	return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
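/*
 * Worked example (illustrative, not in the original source): i_blocks
 * counts 512-byte sectors, so with a 4KB block size the shift above is
 * (12 - 9) = 3, turning sectors into filesystem blocks.  A 1MB file has
 * i_blocks = 2048 and gives needed = 2048 >> 3 = 256; that is then
 * clamped to EXT4_MAX_TRANS_DATA, and the truncate loop below restarts
 * the transaction in chunks rather than reserving it all at once.
 */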
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext4_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext4_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext4_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb, __func__,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (handle->h_buffer_credits < 3) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb, __func__,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

/**
 *	ext4_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data, ext4 uses a data structure
 *	common for UNIX filesystems - a tree of pointers anchored in the
 *	inode, with data blocks at the leaves and indirect blocks in the
 *	intermediate nodes.  This function translates the block number into
 *	a path in that tree - the return value is the path length and
 *	@offsets[n] is the offset of the pointer to the (n+1)th node in the
 *	nth one.  If @block is out of range (negative or too large) a
 *	warning is printed and zero is returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed.  All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
			      ext4_lblk_t i_block,
			      ext4_lblk_t offsets[4], int *boundary)
{
	int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT4_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT4_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT4_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT4_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext4_warning(inode->i_sb, "ext4_block_to_path",
			     "block %lu > max",
			     i_block + direct_blocks +
			     indirect_blocks + double_blocks);
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
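/*
 * Worked example (illustrative, not in the original source): with 4KB
 * blocks, ptrs = EXT4_ADDR_PER_BLOCK = 1024 (ptrs_bits = 10) and there
 * are EXT4_NDIR_BLOCKS = 12 direct slots.  Logical block 5000 is past
 * the 12 direct and 1024 single-indirect slots, so after the two
 * subtractions above i_block = 5000 - 12 - 1024 = 3964 and we take the
 * double-indirect branch:
 *
 *	offsets[] = { EXT4_DIND_BLOCK, 3964 >> 10 = 3, 3964 & 1023 = 892 }
 *
 * i.e. pointer 3 in the double-indirect block, then pointer 892 in that
 * indirect block, with a returned depth of 3.
 */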
/**
 *	ext4_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise.  Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0.  In other words, it holds the block
 *	numbers of the chain, the addresses they were taken from (and where
 *	we can verify that the chain did not change) and the buffer_heads
 *	hosting these numbers.
 *
 *	Function stops when it stumbles upon a zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 *
 *	Need to be called with
 *	down_read(&EXT4_I(inode)->i_data_sem)
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
				 ext4_lblk_t *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

failure:
	*err = -EIO;
no_block:
	return p;
}

/**
 *	ext4_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if the pointer will live in an indirect block - allocate near
 *	    that block.
 *	  + if the pointer will live in the inode - allocate in the same
 *	    cylinder group.
 *
 *	In the latter case we colour the starting block by the caller's PID to
 *	prevent it from clashing with concurrent allocations for a different
 *	inode in the same block group.  The PID is used here so that
 *	functionally related files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}
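/*
 * Worked example (illustrative, not in the original source): with 4KB
 * blocks a full group holds EXT4_BLOCKS_PER_GROUP = 32768 blocks, so
 * the group is carved into 16 slices of 2048 blocks.  A task with
 * pid 100 gets slice 100 % 16 = 4, i.e. colour = 4 * 2048 = 8192, and
 * allocation starts 8192 blocks into the group.  Two unrelated writers
 * therefore tend to grow their files in different slices of the same
 * group instead of interleaving block by block.
 */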
/**
 *	ext4_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Normally this function finds the preferred place for block
 *	allocation and returns it.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
				   Indirect *partial)
{
	/*
	 * XXX need to get goal block from mballoc's data structures
	 */

	return ext4_find_near(inode, partial);
}

/**
 *	ext4_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
				 int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated
	 * yet, so clearly no blocks on that path have been allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
	       le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
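/*
 * Illustrative note (not in the original source): when the branch is
 * complete down to an existing indirect block (k == 0), the loop above
 * only extends the allocation across pointer slots that are still zero,
 * stopping at the first already-mapped block or at the indirect-block
 * boundary.  E.g. with blks = 8, blocks_to_boundary = 5 and all slots
 * free, it returns 6 - the request is clipped at the boundary rather
 * than allocated across two indirect blocks.
 */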
/**
 *	ext4_alloc_blocks: allocate the multiple blocks needed for a branch
 *	@indirect_blks: the number of blocks that need to be allocated for
 *			indirect blocks
 *
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block,
 *	@blks:	on return it will store the total number of allocated
 *		direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, ext4_fsblk_t goal,
			     int indirect_blks, int blks,
			     ext4_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0, blk_allocated = 0;
	int index = 0;
	ext4_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	/* first we try to allocate the indirect blocks */
	target = indirect_blks;
	while (target > 0) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext4_new_meta_blocks(handle, inode,
						     goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}
		if (count > 0) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
			printk(KERN_INFO "%s returned more blocks than "
			       "requested\n", __func__);
			WARN_ON(1);
			break;
		}
	}

	target = blks - count;
	blk_allocated = count;
	if (!target)
		goto allocated;
	/* Now allocate data blocks */
	count = target;
	/* allocating blocks for data blocks */
	current_block = ext4_new_blocks(handle, inode, iblock,
					goal, &count, err);
	if (*err && (target == blks)) {
		/*
		 * if the allocation failed and we didn't allocate
		 * any blocks before
		 */
		goto failed_out;
	}
	if (!*err) {
		if (target == blks) {
			/*
			 * save the new block number
			 * for the first direct block
			 */
			new_blocks[index] = current_block;
		}
		blk_allocated += count;
	}
allocated:
	/* total number of blocks allocated for direct blocks */
	ret = blk_allocated;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
	return ret;
}

/**
 *	ext4_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode.  It stores the information about that chain in the branch[], in
 *	the same format as ext4_get_branch() would do.  We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key).  Upon the exit we have the same
 *	picture as after the successful ext4_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext4_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
			     ext4_lblk_t iblock, int indirect_blks,
			     int *blks, ext4_fsblk_t goal,
			     ext4_lblk_t *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext4_fsblk_t new_blocks[4];
	ext4_fsblk_t current_block;

	num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n; i++) {
		BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);

	ext4_free_blocks(handle, inode, new_blocks[i], num, 0);

	return err;
}
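/*
 * Illustrative sketch (not in the original source) of what
 * ext4_alloc_branch() leaves behind for indirect_blks = 2 and num = 3
 * direct blocks:
 *
 *	branch[0].key = new top-level indirect block #
 *	                (the slot *branch[0].p in the parent is still unset)
 *	branch[1]     = new indirect block #, linked from branch[0]'s block
 *	branch[2]     = first data block #, linked from branch[1]'s block,
 *	                followed by two more consecutive data block numbers
 *
 * The only missing link is *branch[0].p in the inode or parent indirect
 * block; ext4_splice_branch() fills it in atomically, so truncate never
 * sees a half-built branch.
 */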
/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
			      ext4_lblk_t block, Indirect *where,
			      int num, int blks)
{
	int i;
	int err = 0;
	ext4_fsblk_t current_block;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just
	 * allocated direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = ext4_current_time(inode);
	ext4_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
		ext4_journal_forget(handle, where[i].bh);
		ext4_free_blocks(handle, inode,
				 le32_to_cpu(where[i-1].key), 1, 0);
	}
	ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

	return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
 *
 * Need to be called with
 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
 * (ie, create is zero).
 * Otherwise down_write(&EXT4_I(inode)->i_data_sem)
 */
int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
			   ext4_lblk_t iblock, unsigned long maxblocks,
			   struct buffer_head *bh_result,
			   int create, int extend_disksize)
{
	int err = -EIO;
	ext4_lblk_t offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext4_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int count = 0;
	ext4_fsblk_t first_block = 0;
	loff_t disksize;


	J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
	J_ASSERT(handle != NULL || create == 0);
	depth = ext4_block_to_path(inode, iblock, offsets,
				   &blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext4_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext4_fsblk_t blk;

			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	/*
	 * Okay, we need to do block allocation.
	 */
	goal = ext4_find_goal(inode, iblock, partial);

	/* the number of blocks needed to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext4_blks_to_allocate(partial, indirect_blks,
				      maxblocks, blocks_to_boundary);
	/*
	 * Block out ext4_truncate while we alter the tree
	 */
	err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
				&count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext4_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct
	 */
	if (!err)
		err = ext4_splice_branch(handle, inode, iblock,
					 partial, indirect_blks, count);
	/*
	 * i_disksize growing is protected by i_data_sem.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext4_get_block() -bzzz
	 */
	if (!err && extend_disksize) {
		disksize = ((loff_t) iblock + count) << inode->i_blkbits;
		if (disksize > i_size_read(inode))
			disksize = i_size_read(inode);
		if (disksize > ei->i_disksize)
			ei->i_disksize = disksize;
	}
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}

/*
 * Calculate the number of metadata blocks that need to be reserved
 * in order to allocate @blocks for a non-extent-based file
 */
static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
{
	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
	int ind_blks, dind_blks, tind_blks;

	/* number of new indirect blocks needed */
	ind_blks = (blocks + icap - 1) / icap;

	dind_blks = (ind_blks + icap - 1) / icap;

	tind_blks = 1;

	return ind_blks + dind_blks + tind_blks;
}
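/*
 * Worked example (illustrative, not in the original source): with 4KB
 * blocks, icap = 1024.  Reserving for blocks = 1000 data blocks gives
 *
 *	ind_blks  = (1000 + 1023) / 1024 = 1
 *	dind_blks = (1    + 1023) / 1024 = 1
 *	tind_blks = 1
 *
 * i.e. 3 metadata blocks.  This is a worst-case estimate - the writes
 * may well land under already-allocated indirect blocks - and any
 * over-reservation is handed back by ext4_da_update_reserve_space().
 */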
/*
 * Calculate the number of metadata blocks that need to be reserved
 * in order to allocate the given number of blocks
 */
static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
{
	if (!blocks)
		return 0;

	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
		return ext4_ext_calc_metadata_amount(inode, blocks);

	return ext4_indirect_calc_metadata_amount(inode, blocks);
}

static void ext4_da_update_reserve_space(struct inode *inode, int used)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int total, mdb, mdb_free;

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	/* recalculate the number of metablocks still to be reserved */
	total = EXT4_I(inode)->i_reserved_data_blocks - used;
	mdb = ext4_calc_metadata_amount(inode, total);

	/* figure out how many metablocks to release */
	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

	if (mdb_free) {
		/* Account for allocated meta_blocks */
		mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

		/* update fs dirty blocks counter */
		percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
		EXT4_I(inode)->i_allocated_meta_blocks = 0;
		EXT4_I(inode)->i_reserved_meta_blocks = mdb;
	}

	/* update per-inode reservations */
	BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
	EXT4_I(inode)->i_reserved_data_blocks -= used;

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}

/*
 * The ext4_get_blocks_wrap() function tries to look up the requested
 * blocks, and returns them if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file type is extent based, it calls ext4_ext_get_blocks();
 * otherwise it calls ext4_get_blocks_handle() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized, the
 * result buffer head is unmapped.  If create == 1, it will make sure that
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
			 unsigned long max_blocks, struct buffer_head *bh,
			 int create, int extend_disksize, int flag)
{
	int retval;

	clear_buffer_mapped(bh);

	/*
	 * Try to see if we can get the block without requesting
	 * a new file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
		retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
					     bh, 0, 0);
	} else {
		retval = ext4_get_blocks_handle(handle,
						inode, block, max_blocks, bh, 0, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/* If it is only a block(s) look up */
	if (!create)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_blocks() called with create = 0
	 * returns with the buffer head unmapped.
	 */
	if (retval > 0 && buffer_mapped(bh))
		return retval;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
10874df3d265SAneesh Kumar K.V */ 10884df3d265SAneesh Kumar K.V down_write((&EXT4_I(inode)->i_data_sem)); 1089d2a17637SMingming Cao 1090d2a17637SMingming Cao /* 1091d2a17637SMingming Cao * if the caller is from delayed allocation writeout path 1092d2a17637SMingming Cao * we have already reserved fs blocks for allocation 1093d2a17637SMingming Cao * let the underlying get_block() function know to 1094d2a17637SMingming Cao * avoid double accounting 1095d2a17637SMingming Cao */ 1096d2a17637SMingming Cao if (flag) 1097d2a17637SMingming Cao EXT4_I(inode)->i_delalloc_reserved_flag = 1; 10984df3d265SAneesh Kumar K.V /* 10994df3d265SAneesh Kumar K.V * We need to check for EXT4 here because migrate 11004df3d265SAneesh Kumar K.V * could have changed the inode type in between 11014df3d265SAneesh Kumar K.V */ 11020e855ac8SAneesh Kumar K.V if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 11030e855ac8SAneesh Kumar K.V retval = ext4_ext_get_blocks(handle, inode, block, max_blocks, 11040e855ac8SAneesh Kumar K.V bh, create, extend_disksize); 11050e855ac8SAneesh Kumar K.V } else { 11060e855ac8SAneesh Kumar K.V retval = ext4_get_blocks_handle(handle, inode, block, 11070e855ac8SAneesh Kumar K.V max_blocks, bh, create, extend_disksize); 1108267e4db9SAneesh Kumar K.V 1109267e4db9SAneesh Kumar K.V if (retval > 0 && buffer_new(bh)) { 1110267e4db9SAneesh Kumar K.V /* 1111267e4db9SAneesh Kumar K.V * We allocated new blocks which will result in 1112267e4db9SAneesh Kumar K.V * i_data's format changing. Force the migrate 1113267e4db9SAneesh Kumar K.V * to fail by clearing migrate flags 1114267e4db9SAneesh Kumar K.V */ 1115267e4db9SAneesh Kumar K.V EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags & 1116267e4db9SAneesh Kumar K.V ~EXT4_EXT_MIGRATE; 1117267e4db9SAneesh Kumar K.V } 11180e855ac8SAneesh Kumar K.V } 1119d2a17637SMingming Cao 1120d2a17637SMingming Cao if (flag) { 1121d2a17637SMingming Cao EXT4_I(inode)->i_delalloc_reserved_flag = 0; 1122d2a17637SMingming Cao /* 1123d2a17637SMingming Cao * Update reserved blocks/metadata blocks 1124d2a17637SMingming Cao * after successful block allocation 1125d2a17637SMingming Cao * which were deferred till now 1126d2a17637SMingming Cao */ 1127d2a17637SMingming Cao if ((retval > 0) && buffer_delay(bh)) 112812219aeaSAneesh Kumar K.V ext4_da_update_reserve_space(inode, retval); 1129d2a17637SMingming Cao } 1130d2a17637SMingming Cao 11310e855ac8SAneesh Kumar K.V up_write((&EXT4_I(inode)->i_data_sem)); 11320e855ac8SAneesh Kumar K.V return retval; 11330e855ac8SAneesh Kumar K.V } 11340e855ac8SAneesh Kumar K.V 1135f3bd1f3fSMingming Cao /* Maximum number of blocks we map for direct IO at once. */ 1136f3bd1f3fSMingming Cao #define DIO_MAX_BLOCKS 4096 1137f3bd1f3fSMingming Cao 11386873fa0dSEric Sandeen int ext4_get_block(struct inode *inode, sector_t iblock, 1139ac27a0ecSDave Kleikamp struct buffer_head *bh_result, int create) 1140ac27a0ecSDave Kleikamp { 11413e4fdaf8SDmitriy Monakhov handle_t *handle = ext4_journal_current_handle(); 11427fb5409dSJan Kara int ret = 0, started = 0; 1143ac27a0ecSDave Kleikamp unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 1144f3bd1f3fSMingming Cao int dio_credits; 1145ac27a0ecSDave Kleikamp 11467fb5409dSJan Kara if (create && !handle) { 11477fb5409dSJan Kara /* Direct IO write... 
*/ 11487fb5409dSJan Kara if (max_blocks > DIO_MAX_BLOCKS) 11497fb5409dSJan Kara max_blocks = DIO_MAX_BLOCKS; 1150f3bd1f3fSMingming Cao dio_credits = ext4_chunk_trans_blocks(inode, max_blocks); 1151f3bd1f3fSMingming Cao handle = ext4_journal_start(inode, dio_credits); 11527fb5409dSJan Kara if (IS_ERR(handle)) { 1153ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 11547fb5409dSJan Kara goto out; 11557fb5409dSJan Kara } 11567fb5409dSJan Kara started = 1; 1157ac27a0ecSDave Kleikamp } 1158ac27a0ecSDave Kleikamp 1159a86c6181SAlex Tomas ret = ext4_get_blocks_wrap(handle, inode, iblock, 1160d2a17637SMingming Cao max_blocks, bh_result, create, 0, 0); 1161ac27a0ecSDave Kleikamp if (ret > 0) { 1162ac27a0ecSDave Kleikamp bh_result->b_size = (ret << inode->i_blkbits); 1163ac27a0ecSDave Kleikamp ret = 0; 1164ac27a0ecSDave Kleikamp } 11657fb5409dSJan Kara if (started) 11667fb5409dSJan Kara ext4_journal_stop(handle); 11677fb5409dSJan Kara out: 1168ac27a0ecSDave Kleikamp return ret; 1169ac27a0ecSDave Kleikamp } 1170ac27a0ecSDave Kleikamp 1171ac27a0ecSDave Kleikamp /* 1172ac27a0ecSDave Kleikamp * `handle' can be NULL if create is zero 1173ac27a0ecSDave Kleikamp */ 1174617ba13bSMingming Cao struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, 1175725d26d3SAneesh Kumar K.V ext4_lblk_t block, int create, int *errp) 1176ac27a0ecSDave Kleikamp { 1177ac27a0ecSDave Kleikamp struct buffer_head dummy; 1178ac27a0ecSDave Kleikamp int fatal = 0, err; 1179ac27a0ecSDave Kleikamp 1180ac27a0ecSDave Kleikamp J_ASSERT(handle != NULL || create == 0); 1181ac27a0ecSDave Kleikamp 1182ac27a0ecSDave Kleikamp dummy.b_state = 0; 1183ac27a0ecSDave Kleikamp dummy.b_blocknr = -1000; 1184ac27a0ecSDave Kleikamp buffer_trace_init(&dummy.b_history); 1185a86c6181SAlex Tomas err = ext4_get_blocks_wrap(handle, inode, block, 1, 1186d2a17637SMingming Cao &dummy, create, 1, 0); 1187ac27a0ecSDave Kleikamp /* 1188617ba13bSMingming Cao * ext4_get_blocks_handle() returns number of blocks 1189ac27a0ecSDave Kleikamp * mapped. 0 in case of a HOLE. 1190ac27a0ecSDave Kleikamp */ 1191ac27a0ecSDave Kleikamp if (err > 0) { 1192ac27a0ecSDave Kleikamp if (err > 1) 1193ac27a0ecSDave Kleikamp WARN_ON(1); 1194ac27a0ecSDave Kleikamp err = 0; 1195ac27a0ecSDave Kleikamp } 1196ac27a0ecSDave Kleikamp *errp = err; 1197ac27a0ecSDave Kleikamp if (!err && buffer_mapped(&dummy)) { 1198ac27a0ecSDave Kleikamp struct buffer_head *bh; 1199ac27a0ecSDave Kleikamp bh = sb_getblk(inode->i_sb, dummy.b_blocknr); 1200ac27a0ecSDave Kleikamp if (!bh) { 1201ac27a0ecSDave Kleikamp *errp = -EIO; 1202ac27a0ecSDave Kleikamp goto err; 1203ac27a0ecSDave Kleikamp } 1204ac27a0ecSDave Kleikamp if (buffer_new(&dummy)) { 1205ac27a0ecSDave Kleikamp J_ASSERT(create != 0); 1206ac39849dSAneesh Kumar K.V J_ASSERT(handle != NULL); 1207ac27a0ecSDave Kleikamp 1208ac27a0ecSDave Kleikamp /* 1209ac27a0ecSDave Kleikamp * Now that we do not always journal data, we should 1210ac27a0ecSDave Kleikamp * keep in mind whether this should always journal the 1211ac27a0ecSDave Kleikamp * new buffer as metadata. For now, regular file 1212617ba13bSMingming Cao * writes use ext4_get_block instead, so it's not a 1213ac27a0ecSDave Kleikamp * problem. 
1214ac27a0ecSDave Kleikamp */ 1215ac27a0ecSDave Kleikamp lock_buffer(bh); 1216ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "call get_create_access"); 1217617ba13bSMingming Cao fatal = ext4_journal_get_create_access(handle, bh); 1218ac27a0ecSDave Kleikamp if (!fatal && !buffer_uptodate(bh)) { 1219ac27a0ecSDave Kleikamp memset(bh->b_data, 0, inode->i_sb->s_blocksize); 1220ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 1221ac27a0ecSDave Kleikamp } 1222ac27a0ecSDave Kleikamp unlock_buffer(bh); 1223617ba13bSMingming Cao BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 1224617ba13bSMingming Cao err = ext4_journal_dirty_metadata(handle, bh); 1225ac27a0ecSDave Kleikamp if (!fatal) 1226ac27a0ecSDave Kleikamp fatal = err; 1227ac27a0ecSDave Kleikamp } else { 1228ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "not a new buffer"); 1229ac27a0ecSDave Kleikamp } 1230ac27a0ecSDave Kleikamp if (fatal) { 1231ac27a0ecSDave Kleikamp *errp = fatal; 1232ac27a0ecSDave Kleikamp brelse(bh); 1233ac27a0ecSDave Kleikamp bh = NULL; 1234ac27a0ecSDave Kleikamp } 1235ac27a0ecSDave Kleikamp return bh; 1236ac27a0ecSDave Kleikamp } 1237ac27a0ecSDave Kleikamp err: 1238ac27a0ecSDave Kleikamp return NULL; 1239ac27a0ecSDave Kleikamp } 1240ac27a0ecSDave Kleikamp 1241617ba13bSMingming Cao struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, 1242725d26d3SAneesh Kumar K.V ext4_lblk_t block, int create, int *err) 1243ac27a0ecSDave Kleikamp { 1244ac27a0ecSDave Kleikamp struct buffer_head *bh; 1245ac27a0ecSDave Kleikamp 1246617ba13bSMingming Cao bh = ext4_getblk(handle, inode, block, create, err); 1247ac27a0ecSDave Kleikamp if (!bh) 1248ac27a0ecSDave Kleikamp return bh; 1249ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) 1250ac27a0ecSDave Kleikamp return bh; 1251ac27a0ecSDave Kleikamp ll_rw_block(READ_META, 1, &bh); 1252ac27a0ecSDave Kleikamp wait_on_buffer(bh); 1253ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) 1254ac27a0ecSDave Kleikamp return bh; 1255ac27a0ecSDave Kleikamp put_bh(bh); 1256ac27a0ecSDave Kleikamp *err = -EIO; 1257ac27a0ecSDave Kleikamp return NULL; 1258ac27a0ecSDave Kleikamp } 1259ac27a0ecSDave Kleikamp 1260ac27a0ecSDave Kleikamp static int walk_page_buffers(handle_t *handle, 1261ac27a0ecSDave Kleikamp struct buffer_head *head, 1262ac27a0ecSDave Kleikamp unsigned from, 1263ac27a0ecSDave Kleikamp unsigned to, 1264ac27a0ecSDave Kleikamp int *partial, 1265ac27a0ecSDave Kleikamp int (*fn)(handle_t *handle, 1266ac27a0ecSDave Kleikamp struct buffer_head *bh)) 1267ac27a0ecSDave Kleikamp { 1268ac27a0ecSDave Kleikamp struct buffer_head *bh; 1269ac27a0ecSDave Kleikamp unsigned block_start, block_end; 1270ac27a0ecSDave Kleikamp unsigned blocksize = head->b_size; 1271ac27a0ecSDave Kleikamp int err, ret = 0; 1272ac27a0ecSDave Kleikamp struct buffer_head *next; 1273ac27a0ecSDave Kleikamp 1274ac27a0ecSDave Kleikamp for (bh = head, block_start = 0; 1275ac27a0ecSDave Kleikamp ret == 0 && (bh != head || !block_start); 1276ac27a0ecSDave Kleikamp block_start = block_end, bh = next) 1277ac27a0ecSDave Kleikamp { 1278ac27a0ecSDave Kleikamp next = bh->b_this_page; 1279ac27a0ecSDave Kleikamp block_end = block_start + blocksize; 1280ac27a0ecSDave Kleikamp if (block_end <= from || block_start >= to) { 1281ac27a0ecSDave Kleikamp if (partial && !buffer_uptodate(bh)) 1282ac27a0ecSDave Kleikamp *partial = 1; 1283ac27a0ecSDave Kleikamp continue; 1284ac27a0ecSDave Kleikamp } 1285ac27a0ecSDave Kleikamp err = (*fn)(handle, bh); 1286ac27a0ecSDave Kleikamp if (!ret) 1287ac27a0ecSDave Kleikamp ret = err; 1288ac27a0ecSDave Kleikamp } 
1289ac27a0ecSDave Kleikamp return ret; 1290ac27a0ecSDave Kleikamp } 1291ac27a0ecSDave Kleikamp 1292ac27a0ecSDave Kleikamp /* 1293ac27a0ecSDave Kleikamp * To preserve ordering, it is essential that the hole instantiation and 1294ac27a0ecSDave Kleikamp * the data write be encapsulated in a single transaction. We cannot 1295617ba13bSMingming Cao * close off a transaction and start a new one between the ext4_get_block() 1296dab291afSMingming Cao * and the commit_write(). So doing the jbd2_journal_start at the start of 1297ac27a0ecSDave Kleikamp * prepare_write() is the right place. 1298ac27a0ecSDave Kleikamp * 1299617ba13bSMingming Cao * Also, this function can nest inside ext4_writepage() -> 1300617ba13bSMingming Cao * block_write_full_page(). In that case, we *know* that ext4_writepage() 1301ac27a0ecSDave Kleikamp * has generated enough buffer credits to do the whole page. So we won't 1302ac27a0ecSDave Kleikamp * block on the journal in that case, which is good, because the caller may 1303ac27a0ecSDave Kleikamp * be PF_MEMALLOC. 1304ac27a0ecSDave Kleikamp * 1305617ba13bSMingming Cao * By accident, ext4 can be reentered when a transaction is open via 1306ac27a0ecSDave Kleikamp * quota file writes. If we were to commit the transaction while thus 1307ac27a0ecSDave Kleikamp * reentered, there can be a deadlock - we would be holding a quota 1308ac27a0ecSDave Kleikamp * lock, and the commit would never complete if another thread had a 1309ac27a0ecSDave Kleikamp * transaction open and was blocking on the quota lock - a ranking 1310ac27a0ecSDave Kleikamp * violation. 1311ac27a0ecSDave Kleikamp * 1312dab291afSMingming Cao * So what we do is to rely on the fact that jbd2_journal_stop/journal_start 1313ac27a0ecSDave Kleikamp * will _not_ run commit under these circumstances because handle->h_ref 1314ac27a0ecSDave Kleikamp * is elevated. We'll still have enough credits for the tiny quotafile 1315ac27a0ecSDave Kleikamp * write. 
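 *
 * A sketch of that reentrancy, assuming a hypothetical quota write
 * happening while a handle is already open on this task (illustrative,
 * relying on jbd2 returning the current handle with h_ref elevated):
 *
 *	handle = ext4_journal_start(inode, credits);	h_ref == 1
 *	  ext4_journal_start(inode, 1);			same handle, h_ref == 2
 *	  ext4_journal_stop(handle);			h_ref == 1, no commit runs
 *	ext4_journal_stop(handle);			h_ref == 0, handle really ends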
1316ac27a0ecSDave Kleikamp */ 1317ac27a0ecSDave Kleikamp static int do_journal_get_write_access(handle_t *handle, 1318ac27a0ecSDave Kleikamp struct buffer_head *bh) 1319ac27a0ecSDave Kleikamp { 1320ac27a0ecSDave Kleikamp if (!buffer_mapped(bh) || buffer_freed(bh)) 1321ac27a0ecSDave Kleikamp return 0; 1322617ba13bSMingming Cao return ext4_journal_get_write_access(handle, bh); 1323ac27a0ecSDave Kleikamp } 1324ac27a0ecSDave Kleikamp 1325bfc1af65SNick Piggin static int ext4_write_begin(struct file *file, struct address_space *mapping, 1326bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned flags, 1327bfc1af65SNick Piggin struct page **pagep, void **fsdata) 1328ac27a0ecSDave Kleikamp { 1329bfc1af65SNick Piggin struct inode *inode = mapping->host; 13307479d2b9SAndrew Morton int ret, needed_blocks = ext4_writepage_trans_blocks(inode); 1331ac27a0ecSDave Kleikamp handle_t *handle; 1332ac27a0ecSDave Kleikamp int retries = 0; 1333bfc1af65SNick Piggin struct page *page; 1334bfc1af65SNick Piggin pgoff_t index; 1335bfc1af65SNick Piggin unsigned from, to; 1336bfc1af65SNick Piggin 1337bfc1af65SNick Piggin index = pos >> PAGE_CACHE_SHIFT; 1338bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1339bfc1af65SNick Piggin to = from + len; 1340ac27a0ecSDave Kleikamp 1341ac27a0ecSDave Kleikamp retry: 1342617ba13bSMingming Cao handle = ext4_journal_start(inode, needed_blocks); 13437479d2b9SAndrew Morton if (IS_ERR(handle)) { 13447479d2b9SAndrew Morton ret = PTR_ERR(handle); 13457479d2b9SAndrew Morton goto out; 13467479d2b9SAndrew Morton } 1347ac27a0ecSDave Kleikamp 1348cf108bcaSJan Kara page = __grab_cache_page(mapping, index); 1349cf108bcaSJan Kara if (!page) { 1350cf108bcaSJan Kara ext4_journal_stop(handle); 1351cf108bcaSJan Kara ret = -ENOMEM; 1352cf108bcaSJan Kara goto out; 1353cf108bcaSJan Kara } 1354cf108bcaSJan Kara *pagep = page; 1355cf108bcaSJan Kara 1356bfc1af65SNick Piggin ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, 1357bfc1af65SNick Piggin ext4_get_block); 1358bfc1af65SNick Piggin 1359bfc1af65SNick Piggin if (!ret && ext4_should_journal_data(inode)) { 1360ac27a0ecSDave Kleikamp ret = walk_page_buffers(handle, page_buffers(page), 1361ac27a0ecSDave Kleikamp from, to, NULL, do_journal_get_write_access); 1362b46be050SAndrey Savochkin } 1363bfc1af65SNick Piggin 1364bfc1af65SNick Piggin if (ret) { 1365bfc1af65SNick Piggin unlock_page(page); 1366cf108bcaSJan Kara ext4_journal_stop(handle); 1367bfc1af65SNick Piggin page_cache_release(page); 1368ae4d5372SAneesh Kumar K.V /* 1369ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks 1370ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need 1371ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex. 
1372ae4d5372SAneesh Kumar K.V 		 */
1373ae4d5372SAneesh Kumar K.V 		if (pos + len > inode->i_size)
1374ae4d5372SAneesh Kumar K.V 			vmtruncate(inode, inode->i_size);
1375bfc1af65SNick Piggin 	}
1376bfc1af65SNick Piggin 
1377617ba13bSMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1378ac27a0ecSDave Kleikamp 		goto retry;
13797479d2b9SAndrew Morton out:
1380ac27a0ecSDave Kleikamp 	return ret;
1381ac27a0ecSDave Kleikamp }
1382ac27a0ecSDave Kleikamp 
1383bfc1af65SNick Piggin /* For write_end() in data=journal mode */
1384bfc1af65SNick Piggin static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1385ac27a0ecSDave Kleikamp {
1386ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
1387ac27a0ecSDave Kleikamp 		return 0;
1388ac27a0ecSDave Kleikamp 	set_buffer_uptodate(bh);
1389617ba13bSMingming Cao 	return ext4_journal_dirty_metadata(handle, bh);
1390ac27a0ecSDave Kleikamp }
1391ac27a0ecSDave Kleikamp 
1392ac27a0ecSDave Kleikamp /*
1393ac27a0ecSDave Kleikamp  * We need to pick up the new inode size which generic_commit_write gave us;
1394ac27a0ecSDave Kleikamp  * `file' can be NULL - eg, when called from page_symlink().
1395ac27a0ecSDave Kleikamp  *
1396617ba13bSMingming Cao  * ext4 never places buffers on inode->i_mapping->private_list; metadata
1397ac27a0ecSDave Kleikamp  * buffers are managed internally.
1398ac27a0ecSDave Kleikamp  */
1399bfc1af65SNick Piggin static int ext4_ordered_write_end(struct file *file,
1400bfc1af65SNick Piggin 				struct address_space *mapping,
1401bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned copied,
1402bfc1af65SNick Piggin 				struct page *page, void *fsdata)
1403ac27a0ecSDave Kleikamp {
1404617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1405cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1406ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1407ac27a0ecSDave Kleikamp 
1408678aaf48SJan Kara 	ret = ext4_jbd2_file_inode(handle, inode);
1409ac27a0ecSDave Kleikamp 
1410ac27a0ecSDave Kleikamp 	if (ret == 0) {
1411ac27a0ecSDave Kleikamp 		loff_t new_i_size;
1412ac27a0ecSDave Kleikamp 
1413bfc1af65SNick Piggin 		new_i_size = pos + copied;
1414cf17fea6SAneesh Kumar K.V 		if (new_i_size > EXT4_I(inode)->i_disksize) {
1415cf17fea6SAneesh Kumar K.V 			ext4_update_i_disksize(inode, new_i_size);
1416cf17fea6SAneesh Kumar K.V 			/* We need to mark the inode dirty even if
1417cf17fea6SAneesh Kumar K.V 			 * new_i_size is less than inode->i_size
1418cf17fea6SAneesh Kumar K.V 			 * but greater than i_disksize (hint: delalloc)
1419cf17fea6SAneesh Kumar K.V 			 */
1420cf17fea6SAneesh Kumar K.V 			ext4_mark_inode_dirty(handle, inode);
1421cf17fea6SAneesh Kumar K.V 		}
1422cf17fea6SAneesh Kumar K.V 
1423cf108bcaSJan Kara 		ret2 = generic_write_end(file, mapping, pos, len, copied,
1424bfc1af65SNick Piggin 							page, fsdata);
1425f8a87d89SRoel Kluin 		copied = ret2;
1426f8a87d89SRoel Kluin 		if (ret2 < 0)
1427f8a87d89SRoel Kluin 			ret = ret2;
1428ac27a0ecSDave Kleikamp 	}
1429617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1430ac27a0ecSDave Kleikamp 	if (!ret)
1431ac27a0ecSDave Kleikamp 		ret = ret2;
1432bfc1af65SNick Piggin 
1433bfc1af65SNick Piggin 	return ret ?
ret : copied;
1434ac27a0ecSDave Kleikamp }
1435ac27a0ecSDave Kleikamp 
1436bfc1af65SNick Piggin static int ext4_writeback_write_end(struct file *file,
1437bfc1af65SNick Piggin 				struct address_space *mapping,
1438bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned copied,
1439bfc1af65SNick Piggin 				struct page *page, void *fsdata)
1440ac27a0ecSDave Kleikamp {
1441617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1442cf108bcaSJan Kara 	struct inode *inode = mapping->host;
1443ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1444ac27a0ecSDave Kleikamp 	loff_t new_i_size;
1445ac27a0ecSDave Kleikamp 
1446bfc1af65SNick Piggin 	new_i_size = pos + copied;
1447cf17fea6SAneesh Kumar K.V 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1448cf17fea6SAneesh Kumar K.V 		ext4_update_i_disksize(inode, new_i_size);
1449cf17fea6SAneesh Kumar K.V 		/* We need to mark the inode dirty even if
1450cf17fea6SAneesh Kumar K.V 		 * new_i_size is less than inode->i_size
1451cf17fea6SAneesh Kumar K.V 		 * but greater than i_disksize (hint: delalloc)
1452cf17fea6SAneesh Kumar K.V 		 */
1453cf17fea6SAneesh Kumar K.V 		ext4_mark_inode_dirty(handle, inode);
1454cf17fea6SAneesh Kumar K.V 	}
1455ac27a0ecSDave Kleikamp 
1456cf108bcaSJan Kara 	ret2 = generic_write_end(file, mapping, pos, len, copied,
1457bfc1af65SNick Piggin 							page, fsdata);
1458f8a87d89SRoel Kluin 	copied = ret2;
1459f8a87d89SRoel Kluin 	if (ret2 < 0)
1460f8a87d89SRoel Kluin 		ret = ret2;
1461ac27a0ecSDave Kleikamp 
1462617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1463ac27a0ecSDave Kleikamp 	if (!ret)
1464ac27a0ecSDave Kleikamp 		ret = ret2;
1465bfc1af65SNick Piggin 
1466bfc1af65SNick Piggin 	return ret ? ret : copied;
1467ac27a0ecSDave Kleikamp }
1468ac27a0ecSDave Kleikamp 
1469bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file,
1470bfc1af65SNick Piggin 				struct address_space *mapping,
1471bfc1af65SNick Piggin 				loff_t pos, unsigned len, unsigned copied,
1472bfc1af65SNick Piggin 				struct page *page, void *fsdata)
1473ac27a0ecSDave Kleikamp {
1474617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1475bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
1476ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1477ac27a0ecSDave Kleikamp 	int partial = 0;
1478bfc1af65SNick Piggin 	unsigned from, to;
1479cf17fea6SAneesh Kumar K.V 	loff_t new_i_size;
1480ac27a0ecSDave Kleikamp 
1481bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
1482bfc1af65SNick Piggin 	to = from + len;
1483bfc1af65SNick Piggin 
1484bfc1af65SNick Piggin 	if (copied < len) {
1485bfc1af65SNick Piggin 		if (!PageUptodate(page))
1486bfc1af65SNick Piggin 			copied = 0;
1487bfc1af65SNick Piggin 		page_zero_new_buffers(page, from+copied, to);
1488bfc1af65SNick Piggin 	}
1489ac27a0ecSDave Kleikamp 
1490ac27a0ecSDave Kleikamp 	ret = walk_page_buffers(handle, page_buffers(page), from,
1491bfc1af65SNick Piggin 				to, &partial, write_end_fn);
1492ac27a0ecSDave Kleikamp 	if (!partial)
1493ac27a0ecSDave Kleikamp 		SetPageUptodate(page);
1494cf17fea6SAneesh Kumar K.V 	new_i_size = pos + copied;
1495cf17fea6SAneesh Kumar K.V 	if (new_i_size > inode->i_size)
1496bfc1af65SNick Piggin 		i_size_write(inode, pos+copied);
1497617ba13bSMingming Cao 	EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1498cf17fea6SAneesh Kumar K.V 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1499cf17fea6SAneesh Kumar K.V 		ext4_update_i_disksize(inode, new_i_size);
1500617ba13bSMingming Cao 		ret2 = ext4_mark_inode_dirty(handle, inode);
1501ac27a0ecSDave Kleikamp 		if (!ret)
1502ac27a0ecSDave Kleikamp 			ret = ret2;
1503ac27a0ecSDave Kleikamp 	}
1504bfc1af65SNick Piggin 
1505cf108bcaSJan Kara 	unlock_page(page);
1506617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1507ac27a0ecSDave Kleikamp 	if (!ret)
1508ac27a0ecSDave Kleikamp 		ret = ret2;
1509bfc1af65SNick Piggin 	page_cache_release(page);
1510bfc1af65SNick Piggin 
1511bfc1af65SNick Piggin 	return ret ? ret : copied;
1512ac27a0ecSDave Kleikamp }
1513d2a17637SMingming Cao 
1514d2a17637SMingming Cao static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
1515d2a17637SMingming Cao {
1516030ba6bcSAneesh Kumar K.V 	int retries = 0;
1517d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1518d2a17637SMingming Cao 	unsigned long md_needed, mdblocks, total = 0;
1519d2a17637SMingming Cao 
1520d2a17637SMingming Cao 	/*
1521d2a17637SMingming Cao 	 * recalculate the amount of metadata blocks to reserve
1522d2a17637SMingming Cao 	 * in order to allocate nrblocks
1523d2a17637SMingming Cao 	 * worst case is one extent per block
1524d2a17637SMingming Cao 	 */
1525030ba6bcSAneesh Kumar K.V repeat:
1526d2a17637SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1527d2a17637SMingming Cao 	total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
1528d2a17637SMingming Cao 	mdblocks = ext4_calc_metadata_amount(inode, total);
1529d2a17637SMingming Cao 	BUG_ON(mdblocks < EXT4_I(inode)->i_reserved_meta_blocks);
1530d2a17637SMingming Cao 
1531d2a17637SMingming Cao 	md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
1532d2a17637SMingming Cao 	total = md_needed + nrblocks;
1533d2a17637SMingming Cao 
1534a30d542aSAneesh Kumar K.V 	if (ext4_claim_free_blocks(sbi, total)) {
1535d2a17637SMingming Cao 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1536030ba6bcSAneesh Kumar K.V 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1537030ba6bcSAneesh Kumar K.V 			yield();
1538030ba6bcSAneesh Kumar K.V 			goto repeat;
1539030ba6bcSAneesh Kumar K.V 		}
1540d2a17637SMingming Cao 		return -ENOSPC;
1541d2a17637SMingming Cao 	}
1542d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
1543d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
1544d2a17637SMingming Cao 
1545d2a17637SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1546d2a17637SMingming Cao 	return 0;       /* success */
1547d2a17637SMingming Cao }
1548d2a17637SMingming Cao 
154912219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free)
1550d2a17637SMingming Cao {
1551d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1552d2a17637SMingming Cao 	int total, mdb, mdb_free, release;
1553d2a17637SMingming Cao 
1554cd213226SMingming Cao 	if (!to_free)
1555cd213226SMingming Cao 		return;		/* Nothing to release, exit */
1556cd213226SMingming Cao 
1557d2a17637SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1558cd213226SMingming Cao 
1559cd213226SMingming Cao 	if (!EXT4_I(inode)->i_reserved_data_blocks) {
1560cd213226SMingming Cao 		/*
1561cd213226SMingming Cao 		 * if there are no reserved blocks, but we try to free some,
1562cd213226SMingming Cao 		 * then the counter is messed up somewhere.
1563cd213226SMingming Cao 		 * but since this function is called from the
1564cd213226SMingming Cao 		 * invalidatepage path, it's harmless to return without any action
1565cd213226SMingming Cao 		 */
1566cd213226SMingming Cao 		printk(KERN_INFO "ext4 delalloc try to release %d reserved "
1567cd213226SMingming Cao 			    "blocks for inode %lu, but there is no reserved "
1568cd213226SMingming Cao 			    "data blocks\n", to_free, inode->i_ino);
1569cd213226SMingming Cao 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1570cd213226SMingming Cao 		return;
1571cd213226SMingming Cao 	}
1572cd213226SMingming Cao 
1573d2a17637SMingming Cao 	/* recalculate the number of metablocks that still need to be reserved */
157412219aeaSAneesh Kumar K.V 	total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
1575d2a17637SMingming Cao 	mdb = ext4_calc_metadata_amount(inode, total);
1576d2a17637SMingming Cao 
1577d2a17637SMingming Cao 	/* figure out how many metablocks to release */
1578d2a17637SMingming Cao 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1579d2a17637SMingming Cao 	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
1580d2a17637SMingming Cao 
1581d2a17637SMingming Cao 	release = to_free + mdb_free;
1582d2a17637SMingming Cao 
15836bc6e63fSAneesh Kumar K.V 	/* update fs dirty blocks counter for truncate case */
15846bc6e63fSAneesh Kumar K.V 	percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
1585d2a17637SMingming Cao 
1586d2a17637SMingming Cao 	/* update per-inode reservations */
158712219aeaSAneesh Kumar K.V 	BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
158812219aeaSAneesh Kumar K.V 	EXT4_I(inode)->i_reserved_data_blocks -= to_free;
1589d2a17637SMingming Cao 
1590d2a17637SMingming Cao 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
1591d2a17637SMingming Cao 	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1592d2a17637SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1593d2a17637SMingming Cao }
1594d2a17637SMingming Cao 
1595d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page,
1596d2a17637SMingming Cao 						unsigned long offset)
1597d2a17637SMingming Cao {
1598d2a17637SMingming Cao 	int to_release = 0;
1599d2a17637SMingming Cao 	struct buffer_head *head, *bh;
1600d2a17637SMingming Cao 	unsigned int curr_off = 0;
1601d2a17637SMingming Cao 
1602d2a17637SMingming Cao 	head = page_buffers(page);
1603d2a17637SMingming Cao 	bh = head;
1604d2a17637SMingming Cao 	do {
1605d2a17637SMingming Cao 		unsigned int next_off = curr_off + bh->b_size;
1606d2a17637SMingming Cao 
1607d2a17637SMingming Cao 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1608d2a17637SMingming Cao 			to_release++;
1609d2a17637SMingming Cao 			clear_buffer_delay(bh);
1610d2a17637SMingming Cao 		}
1611d2a17637SMingming Cao 		curr_off = next_off;
1612d2a17637SMingming Cao 	} while ((bh = bh->b_this_page) != head);
161312219aeaSAneesh Kumar K.V 	ext4_da_release_space(page->mapping->host, to_release);
1614d2a17637SMingming Cao }
1615ac27a0ecSDave Kleikamp 
1616ac27a0ecSDave Kleikamp /*
161764769240SAlex Tomas  * Delayed allocation stuff
161864769240SAlex Tomas  */
161964769240SAlex Tomas 
162064769240SAlex Tomas struct mpage_da_data {
162164769240SAlex Tomas 	struct inode *inode;
162264769240SAlex Tomas 	struct buffer_head lbh;			/* extent of blocks */
162364769240SAlex Tomas 	unsigned long first_page, next_page;	/* extent of pages */
162464769240SAlex Tomas 	get_block_t *get_block;
162564769240SAlex Tomas 	struct writeback_control *wbc;
1626a1d6cc56SAneesh Kumar K.V 	int io_done;
1627a1d6cc56SAneesh Kumar K.V 	long pages_written;
1628df22291fSAneesh Kumar K.V 	int retval;
162964769240SAlex Tomas };
163064769240SAlex Tomas 
163164769240SAlex Tomas /*
163264769240SAlex Tomas  * mpage_da_submit_io - walks through the extent of pages and tries to write
1633a1d6cc56SAneesh Kumar K.V  * them with the writepage() callback
163464769240SAlex Tomas  *
163564769240SAlex Tomas  * @mpd->inode: inode
163664769240SAlex Tomas  * @mpd->first_page: first page of the extent
163764769240SAlex Tomas  * @mpd->next_page: page after the last page of the extent
163864769240SAlex Tomas  * @mpd->get_block: the filesystem's block mapper function
163964769240SAlex Tomas  *
164064769240SAlex Tomas  * By the time mpage_da_submit_io() is called we expect all blocks
164164769240SAlex Tomas  * to be allocated. This may be wrong if allocation failed.
164264769240SAlex Tomas  *
164364769240SAlex Tomas  * As pages are already locked by write_cache_pages(), we can't use it.
164464769240SAlex Tomas  */
164564769240SAlex Tomas static int mpage_da_submit_io(struct mpage_da_data *mpd)
164664769240SAlex Tomas {
164764769240SAlex Tomas 	struct address_space *mapping = mpd->inode->i_mapping;
164864769240SAlex Tomas 	int ret = 0, err, nr_pages, i;
164964769240SAlex Tomas 	unsigned long index, end;
165064769240SAlex Tomas 	struct pagevec pvec;
1651*22208dedSAneesh Kumar K.V 	long pages_skipped;
165264769240SAlex Tomas 
165364769240SAlex Tomas 	BUG_ON(mpd->next_page <= mpd->first_page);
165464769240SAlex Tomas 	pagevec_init(&pvec, 0);
165564769240SAlex Tomas 	index = mpd->first_page;
165664769240SAlex Tomas 	end = mpd->next_page - 1;
165764769240SAlex Tomas 
165864769240SAlex Tomas 	while (index <= end) {
1659af6f029dSAneesh Kumar K.V 		/*
1660af6f029dSAneesh Kumar K.V 		 * We can use PAGECACHE_TAG_DIRTY lookup here because
1661af6f029dSAneesh Kumar K.V 		 * even though we have cleared the dirty flag on the page,
1662af6f029dSAneesh Kumar K.V 		 * we still keep the page in the radix tree with the tag
1663af6f029dSAneesh Kumar K.V 		 * PAGECACHE_TAG_DIRTY. See clear_page_dirty_for_io.
1664af6f029dSAneesh Kumar K.V 		 * PAGECACHE_TAG_DIRTY is cleared in set_page_writeback,
1665af6f029dSAneesh Kumar K.V 		 * which is called via the below writepage callback.
1666af6f029dSAneesh Kumar K.V 		 */
1667af6f029dSAneesh Kumar K.V 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1668af6f029dSAneesh Kumar K.V 					PAGECACHE_TAG_DIRTY,
1669af6f029dSAneesh Kumar K.V 					min(end - index,
1670af6f029dSAneesh Kumar K.V 					  (pgoff_t)PAGEVEC_SIZE-1) + 1);
167164769240SAlex Tomas 		if (nr_pages == 0)
167264769240SAlex Tomas 			break;
167364769240SAlex Tomas 		for (i = 0; i < nr_pages; i++) {
167464769240SAlex Tomas 			struct page *page = pvec.pages[i];
167564769240SAlex Tomas 
1676*22208dedSAneesh Kumar K.V 			pages_skipped = mpd->wbc->pages_skipped;
1677a1d6cc56SAneesh Kumar K.V 			err = mapping->a_ops->writepage(page, mpd->wbc);
1678*22208dedSAneesh Kumar K.V 			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
1679*22208dedSAneesh Kumar K.V 				/*
1680*22208dedSAneesh Kumar K.V 				 * We have successfully written the page
1681*22208dedSAneesh Kumar K.V 				 * without skipping it
1682*22208dedSAneesh Kumar K.V 				 */
1683a1d6cc56SAneesh Kumar K.V 				mpd->pages_written++;
168464769240SAlex Tomas 			/*
168564769240SAlex Tomas 			 * In the error case, we have to continue because
168664769240SAlex Tomas 			 * remaining pages are still locked
168764769240SAlex Tomas 			 * XXX: unlock and re-dirty them?
168864769240SAlex Tomas */ 168964769240SAlex Tomas if (ret == 0) 169064769240SAlex Tomas ret = err; 169164769240SAlex Tomas } 169264769240SAlex Tomas pagevec_release(&pvec); 169364769240SAlex Tomas } 169464769240SAlex Tomas return ret; 169564769240SAlex Tomas } 169664769240SAlex Tomas 169764769240SAlex Tomas /* 169864769240SAlex Tomas * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers 169964769240SAlex Tomas * 170064769240SAlex Tomas * @mpd->inode - inode to walk through 170164769240SAlex Tomas * @exbh->b_blocknr - first block on a disk 170264769240SAlex Tomas * @exbh->b_size - amount of space in bytes 170364769240SAlex Tomas * @logical - first logical block to start assignment with 170464769240SAlex Tomas * 170564769240SAlex Tomas * the function goes through all passed space and put actual disk 170664769240SAlex Tomas * block numbers into buffer heads, dropping BH_Delay 170764769240SAlex Tomas */ 170864769240SAlex Tomas static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical, 170964769240SAlex Tomas struct buffer_head *exbh) 171064769240SAlex Tomas { 171164769240SAlex Tomas struct inode *inode = mpd->inode; 171264769240SAlex Tomas struct address_space *mapping = inode->i_mapping; 171364769240SAlex Tomas int blocks = exbh->b_size >> inode->i_blkbits; 171464769240SAlex Tomas sector_t pblock = exbh->b_blocknr, cur_logical; 171564769240SAlex Tomas struct buffer_head *head, *bh; 1716a1d6cc56SAneesh Kumar K.V pgoff_t index, end; 171764769240SAlex Tomas struct pagevec pvec; 171864769240SAlex Tomas int nr_pages, i; 171964769240SAlex Tomas 172064769240SAlex Tomas index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 172164769240SAlex Tomas end = (logical + blocks - 1) >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 172264769240SAlex Tomas cur_logical = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 172364769240SAlex Tomas 172464769240SAlex Tomas pagevec_init(&pvec, 0); 172564769240SAlex Tomas 172664769240SAlex Tomas while (index <= end) { 172764769240SAlex Tomas /* XXX: optimize tail */ 172864769240SAlex Tomas nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 172964769240SAlex Tomas if (nr_pages == 0) 173064769240SAlex Tomas break; 173164769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 173264769240SAlex Tomas struct page *page = pvec.pages[i]; 173364769240SAlex Tomas 173464769240SAlex Tomas index = page->index; 173564769240SAlex Tomas if (index > end) 173664769240SAlex Tomas break; 173764769240SAlex Tomas index++; 173864769240SAlex Tomas 173964769240SAlex Tomas BUG_ON(!PageLocked(page)); 174064769240SAlex Tomas BUG_ON(PageWriteback(page)); 174164769240SAlex Tomas BUG_ON(!page_has_buffers(page)); 174264769240SAlex Tomas 174364769240SAlex Tomas bh = page_buffers(page); 174464769240SAlex Tomas head = bh; 174564769240SAlex Tomas 174664769240SAlex Tomas /* skip blocks out of the range */ 174764769240SAlex Tomas do { 174864769240SAlex Tomas if (cur_logical >= logical) 174964769240SAlex Tomas break; 175064769240SAlex Tomas cur_logical++; 175164769240SAlex Tomas } while ((bh = bh->b_this_page) != head); 175264769240SAlex Tomas 175364769240SAlex Tomas do { 175464769240SAlex Tomas if (cur_logical >= logical + blocks) 175564769240SAlex Tomas break; 175664769240SAlex Tomas if (buffer_delay(bh)) { 175764769240SAlex Tomas bh->b_blocknr = pblock; 175864769240SAlex Tomas clear_buffer_delay(bh); 1759bf068ee2SAneesh Kumar K.V bh->b_bdev = inode->i_sb->s_bdev; 1760bf068ee2SAneesh Kumar K.V } else if (buffer_unwritten(bh)) { 1761bf068ee2SAneesh Kumar K.V 
bh->b_blocknr = pblock; 1762bf068ee2SAneesh Kumar K.V clear_buffer_unwritten(bh); 1763bf068ee2SAneesh Kumar K.V set_buffer_mapped(bh); 1764bf068ee2SAneesh Kumar K.V set_buffer_new(bh); 1765bf068ee2SAneesh Kumar K.V bh->b_bdev = inode->i_sb->s_bdev; 176661628a3fSMingming Cao } else if (buffer_mapped(bh)) 176764769240SAlex Tomas BUG_ON(bh->b_blocknr != pblock); 176864769240SAlex Tomas 176964769240SAlex Tomas cur_logical++; 177064769240SAlex Tomas pblock++; 177164769240SAlex Tomas } while ((bh = bh->b_this_page) != head); 177264769240SAlex Tomas } 177364769240SAlex Tomas pagevec_release(&pvec); 177464769240SAlex Tomas } 177564769240SAlex Tomas } 177664769240SAlex Tomas 177764769240SAlex Tomas 177864769240SAlex Tomas /* 177964769240SAlex Tomas * __unmap_underlying_blocks - just a helper function to unmap 178064769240SAlex Tomas * set of blocks described by @bh 178164769240SAlex Tomas */ 178264769240SAlex Tomas static inline void __unmap_underlying_blocks(struct inode *inode, 178364769240SAlex Tomas struct buffer_head *bh) 178464769240SAlex Tomas { 178564769240SAlex Tomas struct block_device *bdev = inode->i_sb->s_bdev; 178664769240SAlex Tomas int blocks, i; 178764769240SAlex Tomas 178864769240SAlex Tomas blocks = bh->b_size >> inode->i_blkbits; 178964769240SAlex Tomas for (i = 0; i < blocks; i++) 179064769240SAlex Tomas unmap_underlying_metadata(bdev, bh->b_blocknr + i); 179164769240SAlex Tomas } 179264769240SAlex Tomas 1793c4a0c46eSAneesh Kumar K.V static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, 1794c4a0c46eSAneesh Kumar K.V sector_t logical, long blk_cnt) 1795c4a0c46eSAneesh Kumar K.V { 1796c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1797c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1798c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1799c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1800c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 1801c4a0c46eSAneesh Kumar K.V 1802c4a0c46eSAneesh Kumar K.V index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 1803c4a0c46eSAneesh Kumar K.V end = (logical + blk_cnt - 1) >> 1804c4a0c46eSAneesh Kumar K.V (PAGE_CACHE_SHIFT - inode->i_blkbits); 1805c4a0c46eSAneesh Kumar K.V while (index <= end) { 1806c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1807c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1808c4a0c46eSAneesh Kumar K.V break; 1809c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1810c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 1811c4a0c46eSAneesh Kumar K.V index = page->index; 1812c4a0c46eSAneesh Kumar K.V if (index > end) 1813c4a0c46eSAneesh Kumar K.V break; 1814c4a0c46eSAneesh Kumar K.V index++; 1815c4a0c46eSAneesh Kumar K.V 1816c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1817c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1818c4a0c46eSAneesh Kumar K.V block_invalidatepage(page, 0); 1819c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 1820c4a0c46eSAneesh Kumar K.V unlock_page(page); 1821c4a0c46eSAneesh Kumar K.V } 1822c4a0c46eSAneesh Kumar K.V } 1823c4a0c46eSAneesh Kumar K.V return; 1824c4a0c46eSAneesh Kumar K.V } 1825c4a0c46eSAneesh Kumar K.V 1826df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1827df22291fSAneesh Kumar K.V { 1828df22291fSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1829df22291fSAneesh Kumar K.V printk(KERN_EMERG "Total free blocks count %lld\n", 1830df22291fSAneesh Kumar K.V ext4_count_free_blocks(inode->i_sb)); 1831df22291fSAneesh 
Kumar K.V printk(KERN_EMERG "Free/Dirty block details\n"); 1832df22291fSAneesh Kumar K.V printk(KERN_EMERG "free_blocks=%lld\n", 1833df22291fSAneesh Kumar K.V percpu_counter_sum(&sbi->s_freeblocks_counter)); 1834df22291fSAneesh Kumar K.V printk(KERN_EMERG "dirty_blocks=%lld\n", 1835df22291fSAneesh Kumar K.V percpu_counter_sum(&sbi->s_dirtyblocks_counter)); 1836df22291fSAneesh Kumar K.V printk(KERN_EMERG "Block reservation details\n"); 1837df22291fSAneesh Kumar K.V printk(KERN_EMERG "i_reserved_data_blocks=%lu\n", 1838df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks); 1839df22291fSAneesh Kumar K.V printk(KERN_EMERG "i_reserved_meta_blocks=%lu\n", 1840df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_meta_blocks); 1841df22291fSAneesh Kumar K.V return; 1842df22291fSAneesh Kumar K.V } 1843df22291fSAneesh Kumar K.V 184464769240SAlex Tomas /* 184564769240SAlex Tomas * mpage_da_map_blocks - go through given space 184664769240SAlex Tomas * 184764769240SAlex Tomas * @mpd->lbh - bh describing space 184864769240SAlex Tomas * @mpd->get_block - the filesystem's block mapper function 184964769240SAlex Tomas * 185064769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 185164769240SAlex Tomas * 185264769240SAlex Tomas */ 1853c4a0c46eSAneesh Kumar K.V static int mpage_da_map_blocks(struct mpage_da_data *mpd) 185464769240SAlex Tomas { 1855a1d6cc56SAneesh Kumar K.V int err = 0; 1856030ba6bcSAneesh Kumar K.V struct buffer_head new; 185764769240SAlex Tomas struct buffer_head *lbh = &mpd->lbh; 1858df22291fSAneesh Kumar K.V sector_t next; 185964769240SAlex Tomas 186064769240SAlex Tomas /* 186164769240SAlex Tomas * We consider only non-mapped and non-allocated blocks 186264769240SAlex Tomas */ 186364769240SAlex Tomas if (buffer_mapped(lbh) && !buffer_delay(lbh)) 1864c4a0c46eSAneesh Kumar K.V return 0; 186564769240SAlex Tomas new.b_state = lbh->b_state; 186664769240SAlex Tomas new.b_blocknr = 0; 1867a1d6cc56SAneesh Kumar K.V new.b_size = lbh->b_size; 1868df22291fSAneesh Kumar K.V next = lbh->b_blocknr; 186964769240SAlex Tomas /* 1870a1d6cc56SAneesh Kumar K.V * If we didn't accumulate anything 1871a1d6cc56SAneesh Kumar K.V * to write simply return 187264769240SAlex Tomas */ 1873a1d6cc56SAneesh Kumar K.V if (!new.b_size) 1874c4a0c46eSAneesh Kumar K.V return 0; 1875a1d6cc56SAneesh Kumar K.V err = mpd->get_block(mpd->inode, next, &new, 1); 1876c4a0c46eSAneesh Kumar K.V if (err) { 1877c4a0c46eSAneesh Kumar K.V 1878c4a0c46eSAneesh Kumar K.V /* If get block returns with error 1879c4a0c46eSAneesh Kumar K.V * we simply return. Later writepage 1880c4a0c46eSAneesh Kumar K.V * will redirty the page and writepages 1881c4a0c46eSAneesh Kumar K.V * will find the dirty page again 1882c4a0c46eSAneesh Kumar K.V */ 1883c4a0c46eSAneesh Kumar K.V if (err == -EAGAIN) 1884c4a0c46eSAneesh Kumar K.V return 0; 1885df22291fSAneesh Kumar K.V 1886df22291fSAneesh Kumar K.V if (err == -ENOSPC && 1887df22291fSAneesh Kumar K.V ext4_count_free_blocks(mpd->inode->i_sb)) { 1888df22291fSAneesh Kumar K.V mpd->retval = err; 1889df22291fSAneesh Kumar K.V return 0; 1890df22291fSAneesh Kumar K.V } 1891df22291fSAneesh Kumar K.V 1892c4a0c46eSAneesh Kumar K.V /* 1893c4a0c46eSAneesh Kumar K.V * get block failure will cause us 1894c4a0c46eSAneesh Kumar K.V * to loop in writepages. Because 1895c4a0c46eSAneesh Kumar K.V * a_ops->writepage won't be able to 1896c4a0c46eSAneesh Kumar K.V * make progress. 
The page will be redirtied
1897c4a0c46eSAneesh Kumar K.V 			 * by writepage and writepages will
1898c4a0c46eSAneesh Kumar K.V 			 * try to write it again.
1899c4a0c46eSAneesh Kumar K.V 			 */
1900c4a0c46eSAneesh Kumar K.V 		printk(KERN_EMERG "%s block allocation failed for inode %lu "
1901c4a0c46eSAneesh Kumar K.V 				  "at logical offset %llu with max blocks "
1902c4a0c46eSAneesh Kumar K.V 				  "%zd with error %d\n",
1903c4a0c46eSAneesh Kumar K.V 				  __func__, mpd->inode->i_ino,
1904c4a0c46eSAneesh Kumar K.V 				  (unsigned long long)next,
1905c4a0c46eSAneesh Kumar K.V 				  lbh->b_size >> mpd->inode->i_blkbits, err);
1906c4a0c46eSAneesh Kumar K.V 		printk(KERN_EMERG "This should not happen!! "
1907c4a0c46eSAneesh Kumar K.V 				  "Data will be lost\n");
1908030ba6bcSAneesh Kumar K.V 		if (err == -ENOSPC) {
1909df22291fSAneesh Kumar K.V 			ext4_print_free_blocks(mpd->inode);
1910030ba6bcSAneesh Kumar K.V 		}
1911c4a0c46eSAneesh Kumar K.V 		/* invalidate all the pages */
1912c4a0c46eSAneesh Kumar K.V 		ext4_da_block_invalidatepages(mpd, next,
1913c4a0c46eSAneesh Kumar K.V 				lbh->b_size >> mpd->inode->i_blkbits);
1914c4a0c46eSAneesh Kumar K.V 		return err;
1915c4a0c46eSAneesh Kumar K.V 	}
191664769240SAlex Tomas 	BUG_ON(new.b_size == 0);
191764769240SAlex Tomas 
191864769240SAlex Tomas 	if (buffer_new(&new))
191964769240SAlex Tomas 		__unmap_underlying_blocks(mpd->inode, &new);
192064769240SAlex Tomas 
192164769240SAlex Tomas 	/*
192264769240SAlex Tomas 	 * If the blocks are marked delayed, we need to
192364769240SAlex Tomas 	 * put in the actual blocknr and drop the delayed bit
192464769240SAlex Tomas 	 */
1925bf068ee2SAneesh Kumar K.V 	if (buffer_delay(lbh) || buffer_unwritten(lbh))
192664769240SAlex Tomas 		mpage_put_bnr_to_bhs(mpd, next, &new);
192764769240SAlex Tomas 
1928c4a0c46eSAneesh Kumar K.V 	return 0;
192964769240SAlex Tomas }
193064769240SAlex Tomas 
1931bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
1932bf068ee2SAneesh Kumar K.V 		(1 << BH_Delay) | (1 << BH_Unwritten))
193364769240SAlex Tomas 
193364769240SAlex Tomas /*
193564769240SAlex Tomas  * mpage_add_bh_to_extent - try to add one more block to the extent of blocks
193664769240SAlex Tomas  *
193764769240SAlex Tomas  * @mpd->lbh - extent of blocks
193864769240SAlex Tomas  * @logical - logical number of the block in the file
193964769240SAlex Tomas  * @bh - bh of the block (used to access block's state)
194064769240SAlex Tomas  *
194164769240SAlex Tomas  * the function is used to collect contiguous blocks in the same state
194264769240SAlex Tomas  */
194364769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
194464769240SAlex Tomas 				   sector_t logical, struct buffer_head *bh)
194564769240SAlex Tomas {
194664769240SAlex Tomas 	sector_t next;
1947525f4ed8SMingming Cao 	size_t b_size = bh->b_size;
1948525f4ed8SMingming Cao 	struct buffer_head *lbh = &mpd->lbh;
1949525f4ed8SMingming Cao 	int nrblocks = lbh->b_size >> mpd->inode->i_blkbits;
195064769240SAlex Tomas 
1951525f4ed8SMingming Cao 	/* check if the reserved journal credits might overflow */
1952525f4ed8SMingming Cao 	if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
1953525f4ed8SMingming Cao 		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1954525f4ed8SMingming Cao 			/*
1955525f4ed8SMingming Cao 			 * With non-extent format we are limited by the journal
1956525f4ed8SMingming Cao 			 * credits available. The total credit needed to insert
1957525f4ed8SMingming Cao 			 * nrblocks contiguous blocks is dependent on the
1958525f4ed8SMingming Cao 			 * nrblocks. So limit nrblocks.
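			 *
			 * Illustrative arithmetic, assuming for the sake of
			 * the example that EXT4_MAX_TRANS_DATA is 64 blocks:
			 * with nrblocks == 60 already collected, a 16-block
			 * bh would grow the extent to 76 > 64, so in the
			 * branch below b_size is clipped to (64 - 60) == 4
			 * blocks and the remainder is picked up after the
			 * extent is flushed by mpage_da_submit_io().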
1959525f4ed8SMingming Cao 			 */
1960525f4ed8SMingming Cao 			goto flush_it;
1961525f4ed8SMingming Cao 		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
1962525f4ed8SMingming Cao 				EXT4_MAX_TRANS_DATA) {
1963525f4ed8SMingming Cao 			/*
1964525f4ed8SMingming Cao 			 * Adding the new buffer_head would make it cross the
1965525f4ed8SMingming Cao 			 * allowed limit for which we have journal credits
1966525f4ed8SMingming Cao 			 * reserved. So limit the new bh->b_size
1967525f4ed8SMingming Cao 			 */
1968525f4ed8SMingming Cao 			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
1969525f4ed8SMingming Cao 						mpd->inode->i_blkbits;
1970525f4ed8SMingming Cao 			/* we will do mpage_da_submit_io in the next loop */
1971525f4ed8SMingming Cao 		}
1972525f4ed8SMingming Cao 	}
197364769240SAlex Tomas 	/*
197464769240SAlex Tomas 	 * First block in the extent
197564769240SAlex Tomas 	 */
197664769240SAlex Tomas 	if (lbh->b_size == 0) {
197764769240SAlex Tomas 		lbh->b_blocknr = logical;
1978525f4ed8SMingming Cao 		lbh->b_size = b_size;
197964769240SAlex Tomas 		lbh->b_state = bh->b_state & BH_FLAGS;
198064769240SAlex Tomas 		return;
198164769240SAlex Tomas 	}
198264769240SAlex Tomas 
1983525f4ed8SMingming Cao 	next = lbh->b_blocknr + nrblocks;
198464769240SAlex Tomas 	/*
198564769240SAlex Tomas 	 * Can we merge the block to our big extent?
198664769240SAlex Tomas 	 */
198764769240SAlex Tomas 	if (logical == next && (bh->b_state & BH_FLAGS) == lbh->b_state) {
1988525f4ed8SMingming Cao 		lbh->b_size += b_size;
198964769240SAlex Tomas 		return;
199064769240SAlex Tomas 	}
199164769240SAlex Tomas 
1992525f4ed8SMingming Cao flush_it:
199364769240SAlex Tomas 	/*
199464769240SAlex Tomas 	 * We couldn't merge the block to our extent, so we
199564769240SAlex Tomas 	 * need to flush the current extent and start a new one
199664769240SAlex Tomas 	 */
1997c4a0c46eSAneesh Kumar K.V 	if (mpage_da_map_blocks(mpd) == 0)
1998a1d6cc56SAneesh Kumar K.V 		mpage_da_submit_io(mpd);
1999a1d6cc56SAneesh Kumar K.V 	mpd->io_done = 1;
2000a1d6cc56SAneesh Kumar K.V 	return;
200164769240SAlex Tomas }
200264769240SAlex Tomas 
200364769240SAlex Tomas /*
200464769240SAlex Tomas  * __mpage_da_writepage - finds extent of pages and blocks
200564769240SAlex Tomas  *
200664769240SAlex Tomas  * @page: page to consider
200764769240SAlex Tomas  * @wbc: not used, we just follow rules
200864769240SAlex Tomas  * @data: context
200964769240SAlex Tomas  *
201064769240SAlex Tomas  * The function finds extents of pages and scans them for all blocks.
201164769240SAlex Tomas  */
201264769240SAlex Tomas static int __mpage_da_writepage(struct page *page,
201364769240SAlex Tomas 				struct writeback_control *wbc, void *data)
201464769240SAlex Tomas {
201564769240SAlex Tomas 	struct mpage_da_data *mpd = data;
201664769240SAlex Tomas 	struct inode *inode = mpd->inode;
201764769240SAlex Tomas 	struct buffer_head *bh, *head, fake;
201864769240SAlex Tomas 	sector_t logical;
201964769240SAlex Tomas 
2020a1d6cc56SAneesh Kumar K.V 	if (mpd->io_done) {
2021a1d6cc56SAneesh Kumar K.V 		/*
2022a1d6cc56SAneesh Kumar K.V 		 * Redirty the rest of the pages in the
2023a1d6cc56SAneesh Kumar K.V 		 * page_vec and skip them. We will try to
2024a1d6cc56SAneesh Kumar K.V 		 * write them again after starting
2025a1d6cc56SAneesh Kumar K.V 		 * a new transaction
2026a1d6cc56SAneesh Kumar K.V 		 */
2027a1d6cc56SAneesh Kumar K.V 		redirty_page_for_writepage(wbc, page);
2028a1d6cc56SAneesh Kumar K.V 		unlock_page(page);
2029a1d6cc56SAneesh Kumar K.V 		return MPAGE_DA_EXTENT_TAIL;
2030a1d6cc56SAneesh Kumar K.V 	}
203164769240SAlex Tomas 	/*
203264769240SAlex Tomas 	 * Can we merge this page to the current extent?
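	 *
	 * (Pages can only be appended while they remain contiguous in the
	 * file, i.e. while page->index matches mpd->next_page; otherwise
	 * the extent collected so far is mapped and submitted first, as
	 * the code below shows.)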
203364769240SAlex Tomas 	 */
203464769240SAlex Tomas 	if (mpd->next_page != page->index) {
203564769240SAlex Tomas 		/*
203664769240SAlex Tomas 		 * Nope, we can't. So, we map non-allocated blocks
2037a1d6cc56SAneesh Kumar K.V 		 * and start IO on them using writepage()
203864769240SAlex Tomas 		 */
203964769240SAlex Tomas 		if (mpd->next_page != mpd->first_page) {
2040c4a0c46eSAneesh Kumar K.V 			if (mpage_da_map_blocks(mpd) == 0)
204164769240SAlex Tomas 				mpage_da_submit_io(mpd);
2042a1d6cc56SAneesh Kumar K.V 			/*
2043a1d6cc56SAneesh Kumar K.V 			 * skip the rest of the pages in the page_vec
2044a1d6cc56SAneesh Kumar K.V 			 */
2045a1d6cc56SAneesh Kumar K.V 			mpd->io_done = 1;
2046a1d6cc56SAneesh Kumar K.V 			redirty_page_for_writepage(wbc, page);
2047a1d6cc56SAneesh Kumar K.V 			unlock_page(page);
2048a1d6cc56SAneesh Kumar K.V 			return MPAGE_DA_EXTENT_TAIL;
204964769240SAlex Tomas 		}
205064769240SAlex Tomas 
205164769240SAlex Tomas 		/*
205264769240SAlex Tomas 		 * Start next extent of pages ...
205364769240SAlex Tomas 		 */
205464769240SAlex Tomas 		mpd->first_page = page->index;
205564769240SAlex Tomas 
205664769240SAlex Tomas 		/*
205764769240SAlex Tomas 		 * ... and blocks
205864769240SAlex Tomas 		 */
205964769240SAlex Tomas 		mpd->lbh.b_size = 0;
206064769240SAlex Tomas 		mpd->lbh.b_state = 0;
206164769240SAlex Tomas 		mpd->lbh.b_blocknr = 0;
206264769240SAlex Tomas 	}
206364769240SAlex Tomas 
206464769240SAlex Tomas 	mpd->next_page = page->index + 1;
206564769240SAlex Tomas 	logical = (sector_t) page->index <<
206664769240SAlex Tomas 		  (PAGE_CACHE_SHIFT - inode->i_blkbits);
206764769240SAlex Tomas 
206864769240SAlex Tomas 	if (!page_has_buffers(page)) {
206964769240SAlex Tomas 		/*
207064769240SAlex Tomas 		 * There are no attached buffer heads yet (mmap?);
207164769240SAlex Tomas 		 * we treat the page as full of dirty blocks
207264769240SAlex Tomas 		 */
207364769240SAlex Tomas 		bh = &fake;
207464769240SAlex Tomas 		bh->b_size = PAGE_CACHE_SIZE;
207564769240SAlex Tomas 		bh->b_state = 0;
207664769240SAlex Tomas 		set_buffer_dirty(bh);
207764769240SAlex Tomas 		set_buffer_uptodate(bh);
207864769240SAlex Tomas 		mpage_add_bh_to_extent(mpd, logical, bh);
2079a1d6cc56SAneesh Kumar K.V 		if (mpd->io_done)
2080a1d6cc56SAneesh Kumar K.V 			return MPAGE_DA_EXTENT_TAIL;
208164769240SAlex Tomas 	} else {
208264769240SAlex Tomas 		/*
208364769240SAlex Tomas 		 * Page with regular buffer heads, just add all dirty ones
208464769240SAlex Tomas 		 */
208564769240SAlex Tomas 		head = page_buffers(page);
208664769240SAlex Tomas 		bh = head;
208764769240SAlex Tomas 		do {
208864769240SAlex Tomas 			BUG_ON(buffer_locked(bh));
2089a1d6cc56SAneesh Kumar K.V 			if (buffer_dirty(bh) &&
2090a1d6cc56SAneesh Kumar K.V 				(!buffer_mapped(bh) || buffer_delay(bh))) {
209164769240SAlex Tomas 				mpage_add_bh_to_extent(mpd, logical, bh);
2092a1d6cc56SAneesh Kumar K.V 				if (mpd->io_done)
2093a1d6cc56SAneesh Kumar K.V 					return MPAGE_DA_EXTENT_TAIL;
2094a1d6cc56SAneesh Kumar K.V 			}
209564769240SAlex Tomas 			logical++;
209664769240SAlex Tomas 		} while ((bh = bh->b_this_page) != head);
209764769240SAlex Tomas 	}
209864769240SAlex Tomas 
209964769240SAlex Tomas 	return 0;
210064769240SAlex Tomas }
210164769240SAlex Tomas 
210264769240SAlex Tomas /*
210364769240SAlex Tomas  * mpage_da_writepages - walks the list of dirty pages of the given
210464769240SAlex Tomas  * address space, allocates non-allocated blocks, maps newly-allocated
210564769240SAlex Tomas  * blocks to existing bhs and issues IO on them
210664769240SAlex Tomas  *
210764769240SAlex Tomas  * @mapping: address space structure to write
210864769240SAlex Tomas  * @wbc: subtract the number of written pages from
*@wbc->nr_to_write
210964769240SAlex Tomas  * @get_block: the filesystem's block mapper function.
211064769240SAlex Tomas  *
211164769240SAlex Tomas  * This is a library function, which implements the writepages()
211264769240SAlex Tomas  * address_space_operation.
211364769240SAlex Tomas  */
211464769240SAlex Tomas static int mpage_da_writepages(struct address_space *mapping,
211564769240SAlex Tomas 			       struct writeback_control *wbc,
2116df22291fSAneesh Kumar K.V 			       struct mpage_da_data *mpd)
211764769240SAlex Tomas {
211864769240SAlex Tomas 	int ret;
211964769240SAlex Tomas 
2120df22291fSAneesh Kumar K.V 	if (!mpd->get_block)
212164769240SAlex Tomas 		return generic_writepages(mapping, wbc);
212264769240SAlex Tomas 
2123df22291fSAneesh Kumar K.V 	mpd->lbh.b_size = 0;
2124df22291fSAneesh Kumar K.V 	mpd->lbh.b_state = 0;
2125df22291fSAneesh Kumar K.V 	mpd->lbh.b_blocknr = 0;
2126df22291fSAneesh Kumar K.V 	mpd->first_page = 0;
2127df22291fSAneesh Kumar K.V 	mpd->next_page = 0;
2128df22291fSAneesh Kumar K.V 	mpd->io_done = 0;
2129df22291fSAneesh Kumar K.V 	mpd->pages_written = 0;
2130df22291fSAneesh Kumar K.V 	mpd->retval = 0;
2131a1d6cc56SAneesh Kumar K.V 
2132df22291fSAneesh Kumar K.V 	ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, mpd);
213364769240SAlex Tomas 	/*
213364769240SAlex Tomas 	 * Handle the last extent of pages
213564769240SAlex Tomas 	 */
2136df22291fSAneesh Kumar K.V 	if (!mpd->io_done && mpd->next_page != mpd->first_page) {
2137df22291fSAneesh Kumar K.V 		if (mpage_da_map_blocks(mpd) == 0)
2138df22291fSAneesh Kumar K.V 			mpage_da_submit_io(mpd);
213964769240SAlex Tomas 
2140*22208dedSAneesh Kumar K.V 		mpd->io_done = 1;
2141*22208dedSAneesh Kumar K.V 		ret = MPAGE_DA_EXTENT_TAIL;
2142*22208dedSAneesh Kumar K.V 	}
2143*22208dedSAneesh Kumar K.V 	wbc->nr_to_write -= mpd->pages_written;
214464769240SAlex Tomas 	return ret;
214564769240SAlex Tomas }
214664769240SAlex Tomas 
214764769240SAlex Tomas /*
214864769240SAlex Tomas  * this is a special callback for ->write_begin() only;
214964769240SAlex Tomas  * its intention is to return a mapped block or reserve space
215064769240SAlex Tomas  */
215164769240SAlex Tomas static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
215264769240SAlex Tomas 				  struct buffer_head *bh_result, int create)
215364769240SAlex Tomas {
215464769240SAlex Tomas 	int ret = 0;
215564769240SAlex Tomas 
215664769240SAlex Tomas 	BUG_ON(create == 0);
215764769240SAlex Tomas 	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
215864769240SAlex Tomas 
215964769240SAlex Tomas 	/*
216064769240SAlex Tomas 	 * first, we need to know whether the block is allocated already;
216164769240SAlex Tomas 	 * preallocated blocks are unmapped but should be treated
216264769240SAlex Tomas 	 * the same as allocated blocks.
216364769240SAlex Tomas 	 */
2164d2a17637SMingming Cao 	ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1,  bh_result, 0, 0, 0);
2165d2a17637SMingming Cao 	if ((ret == 0) && !buffer_delay(bh_result)) {
2166d2a17637SMingming Cao 		/* the block isn't (pre)allocated yet, let's reserve space */
216764769240SAlex Tomas 		/*
216864769240SAlex Tomas 		 * XXX: __block_prepare_write() unmaps passed block,
216964769240SAlex Tomas 		 * is it OK?
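		 *
		 * When we do take the reservation path, the buffer is left
		 * without a real block on purpose; roughly the state it
		 * ends up in (see the actual code that follows):
		 *
		 *	map_bh(bh_result, inode->i_sb, 0);	no physical block yet
		 *	set_buffer_new(bh_result);
		 *	set_buffer_delay(bh_result);		allocate at writeout time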
217064769240SAlex Tomas */ 2171d2a17637SMingming Cao ret = ext4_da_reserve_space(inode, 1); 2172d2a17637SMingming Cao if (ret) 2173d2a17637SMingming Cao /* not enough space to reserve */ 2174d2a17637SMingming Cao return ret; 2175d2a17637SMingming Cao 217664769240SAlex Tomas map_bh(bh_result, inode->i_sb, 0); 217764769240SAlex Tomas set_buffer_new(bh_result); 217864769240SAlex Tomas set_buffer_delay(bh_result); 217964769240SAlex Tomas } else if (ret > 0) { 218064769240SAlex Tomas bh_result->b_size = (ret << inode->i_blkbits); 218164769240SAlex Tomas ret = 0; 218264769240SAlex Tomas } 218364769240SAlex Tomas 218464769240SAlex Tomas return ret; 218564769240SAlex Tomas } 2186d2a17637SMingming Cao #define EXT4_DELALLOC_RSVED 1 218764769240SAlex Tomas static int ext4_da_get_block_write(struct inode *inode, sector_t iblock, 218864769240SAlex Tomas struct buffer_head *bh_result, int create) 218964769240SAlex Tomas { 219061628a3fSMingming Cao int ret; 219164769240SAlex Tomas unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 219264769240SAlex Tomas loff_t disksize = EXT4_I(inode)->i_disksize; 219364769240SAlex Tomas handle_t *handle = NULL; 219464769240SAlex Tomas 219561628a3fSMingming Cao handle = ext4_journal_current_handle(); 2196166348ddSAneesh Kumar K.V BUG_ON(!handle); 219764769240SAlex Tomas ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks, 2198d2a17637SMingming Cao bh_result, create, 0, EXT4_DELALLOC_RSVED); 219964769240SAlex Tomas if (ret > 0) { 2200166348ddSAneesh Kumar K.V 220164769240SAlex Tomas bh_result->b_size = (ret << inode->i_blkbits); 220264769240SAlex Tomas 2203166348ddSAneesh Kumar K.V if (ext4_should_order_data(inode)) { 2204166348ddSAneesh Kumar K.V int retval; 2205166348ddSAneesh Kumar K.V retval = ext4_jbd2_file_inode(handle, inode); 2206166348ddSAneesh Kumar K.V if (retval) 2207166348ddSAneesh Kumar K.V /* 2208166348ddSAneesh Kumar K.V * Failed to add inode for ordered 2209166348ddSAneesh Kumar K.V * mode. Don't update file size 2210166348ddSAneesh Kumar K.V */ 2211166348ddSAneesh Kumar K.V return retval; 2212166348ddSAneesh Kumar K.V } 2213166348ddSAneesh Kumar K.V 221464769240SAlex Tomas /* 221564769240SAlex Tomas * Update on-disk size along with block allocation 221664769240SAlex Tomas * we don't use 'extend_disksize' as size may change 221764769240SAlex Tomas * within already allocated block -bzzz 221864769240SAlex Tomas */ 221964769240SAlex Tomas disksize = ((loff_t) iblock + ret) << inode->i_blkbits; 222064769240SAlex Tomas if (disksize > i_size_read(inode)) 222164769240SAlex Tomas disksize = i_size_read(inode); 222264769240SAlex Tomas if (disksize > EXT4_I(inode)->i_disksize) { 2223cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, disksize); 222461628a3fSMingming Cao ret = ext4_mark_inode_dirty(handle, inode); 222564769240SAlex Tomas return ret; 222664769240SAlex Tomas } 222761628a3fSMingming Cao ret = 0; 222861628a3fSMingming Cao } 222961628a3fSMingming Cao return ret; 223061628a3fSMingming Cao } 223161628a3fSMingming Cao 223261628a3fSMingming Cao static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh) 223361628a3fSMingming Cao { 2234f0e6c985SAneesh Kumar K.V /* 2235f0e6c985SAneesh Kumar K.V * unmapped buffer is possible for holes. 
2236f0e6c985SAneesh Kumar K.V * delay buffer is possible with delayed allocation
2237f0e6c985SAneesh Kumar K.V */
2238f0e6c985SAneesh Kumar K.V return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
2239f0e6c985SAneesh Kumar K.V }
2240f0e6c985SAneesh Kumar K.V
2241f0e6c985SAneesh Kumar K.V static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
2242f0e6c985SAneesh Kumar K.V struct buffer_head *bh_result, int create)
2243f0e6c985SAneesh Kumar K.V {
2244f0e6c985SAneesh Kumar K.V int ret = 0;
2245f0e6c985SAneesh Kumar K.V unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
2246f0e6c985SAneesh Kumar K.V
2247f0e6c985SAneesh Kumar K.V /*
2248f0e6c985SAneesh Kumar K.V * we don't want to do block allocation in writepage
2249f0e6c985SAneesh Kumar K.V * so call get_block_wrap with create = 0
2250f0e6c985SAneesh Kumar K.V */
2251f0e6c985SAneesh Kumar K.V ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
2252f0e6c985SAneesh Kumar K.V bh_result, 0, 0, 0);
2253f0e6c985SAneesh Kumar K.V if (ret > 0) {
2254f0e6c985SAneesh Kumar K.V bh_result->b_size = (ret << inode->i_blkbits);
2255f0e6c985SAneesh Kumar K.V ret = 0;
2256f0e6c985SAneesh Kumar K.V }
2257f0e6c985SAneesh Kumar K.V return ret;
225861628a3fSMingming Cao }
225961628a3fSMingming Cao
226061628a3fSMingming Cao /*
2261f0e6c985SAneesh Kumar K.V * get called via ext4_da_writepages after taking page lock (have journal handle)
2262f0e6c985SAneesh Kumar K.V * get called via journal_submit_inode_data_buffers (no journal handle)
2263f0e6c985SAneesh Kumar K.V * get called via shrink_page_list via pdflush (no journal handle)
2264f0e6c985SAneesh Kumar K.V * or grab_page_cache when doing write_begin (have journal handle)
226561628a3fSMingming Cao */
226664769240SAlex Tomas static int ext4_da_writepage(struct page *page,
226764769240SAlex Tomas struct writeback_control *wbc)
226864769240SAlex Tomas {
226964769240SAlex Tomas int ret = 0;
227061628a3fSMingming Cao loff_t size;
227161628a3fSMingming Cao unsigned long len;
227261628a3fSMingming Cao struct buffer_head *page_bufs;
227361628a3fSMingming Cao struct inode *inode = page->mapping->host;
227464769240SAlex Tomas
227561628a3fSMingming Cao size = i_size_read(inode);
227661628a3fSMingming Cao if (page->index == size >> PAGE_CACHE_SHIFT)
227761628a3fSMingming Cao len = size & ~PAGE_CACHE_MASK;
227861628a3fSMingming Cao else
227961628a3fSMingming Cao len = PAGE_CACHE_SIZE;
228061628a3fSMingming Cao
2281f0e6c985SAneesh Kumar K.V if (page_has_buffers(page)) {
2282f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page);
2283f0e6c985SAneesh Kumar K.V if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2284f0e6c985SAneesh Kumar K.V ext4_bh_unmapped_or_delay)) {
228561628a3fSMingming Cao /*
2286f0e6c985SAneesh Kumar K.V * We don't want to do block allocation
2287f0e6c985SAneesh Kumar K.V * So redirty the page and return
2288cd1aac32SAneesh Kumar K.V * We may reach here when we do a journal commit
2289cd1aac32SAneesh Kumar K.V * via journal_submit_inode_data_buffers.
2290cd1aac32SAneesh Kumar K.V * If we don't have mapping block we just ignore
2291f0e6c985SAneesh Kumar K.V * them. We can also reach here via shrink_page_list
2292f0e6c985SAneesh Kumar K.V */
2293f0e6c985SAneesh Kumar K.V redirty_page_for_writepage(wbc, page);
2294f0e6c985SAneesh Kumar K.V unlock_page(page);
2295f0e6c985SAneesh Kumar K.V return 0;
2296f0e6c985SAneesh Kumar K.V }
2297f0e6c985SAneesh Kumar K.V } else {
2298f0e6c985SAneesh Kumar K.V /*
2299f0e6c985SAneesh Kumar K.V * The test for page_has_buffers() is subtle:
2300f0e6c985SAneesh Kumar K.V * We know the page is dirty but it lost buffers. That means
2301f0e6c985SAneesh Kumar K.V * that at some moment in time after write_begin()/write_end()
2302f0e6c985SAneesh Kumar K.V * has been called all buffers have been clean and thus they
2303f0e6c985SAneesh Kumar K.V * must have been written at least once. So they are all
2304f0e6c985SAneesh Kumar K.V * mapped and we can happily proceed with mapping them
2305f0e6c985SAneesh Kumar K.V * and writing the page.
2306f0e6c985SAneesh Kumar K.V *
2307f0e6c985SAneesh Kumar K.V * Try to initialize the buffer_heads and check whether
2308f0e6c985SAneesh Kumar K.V * all are mapped and non delay. We don't want to
2309f0e6c985SAneesh Kumar K.V * do block allocation here.
2310f0e6c985SAneesh Kumar K.V */
2311f0e6c985SAneesh Kumar K.V ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
2312f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write);
2313f0e6c985SAneesh Kumar K.V if (!ret) {
2314f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page);
2315f0e6c985SAneesh Kumar K.V /* check whether all are mapped and non delay */
2316f0e6c985SAneesh Kumar K.V if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2317f0e6c985SAneesh Kumar K.V ext4_bh_unmapped_or_delay)) {
2318f0e6c985SAneesh Kumar K.V redirty_page_for_writepage(wbc, page);
2319f0e6c985SAneesh Kumar K.V unlock_page(page);
2320f0e6c985SAneesh Kumar K.V return 0;
2321f0e6c985SAneesh Kumar K.V }
2322f0e6c985SAneesh Kumar K.V } else {
2323f0e6c985SAneesh Kumar K.V /*
2324f0e6c985SAneesh Kumar K.V * We can't do block allocation here
2325f0e6c985SAneesh Kumar K.V * so just redirty the page and unlock
2326f0e6c985SAneesh Kumar K.V * and return
232761628a3fSMingming Cao */
232861628a3fSMingming Cao redirty_page_for_writepage(wbc, page);
232961628a3fSMingming Cao unlock_page(page);
233061628a3fSMingming Cao return 0;
233161628a3fSMingming Cao }
233264769240SAlex Tomas }
233364769240SAlex Tomas
233464769240SAlex Tomas if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
2335f0e6c985SAneesh Kumar K.V ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
233664769240SAlex Tomas else
2337f0e6c985SAneesh Kumar K.V ret = block_write_full_page(page,
2338f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write,
2339f0e6c985SAneesh Kumar K.V wbc);
234064769240SAlex Tomas
234164769240SAlex Tomas return ret;
234264769240SAlex Tomas }
234364769240SAlex Tomas
234461628a3fSMingming Cao /*
2345525f4ed8SMingming Cao * This is called via ext4_da_writepages() to
2346525f4ed8SMingming Cao * calculate the total number of credits to reserve to fit
2347525f4ed8SMingming Cao * a single extent allocation into a single transaction,
2348525f4ed8SMingming Cao * ext4_da_writepages() will loop calling this before
2349525f4ed8SMingming Cao * the block allocation.
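*
* (Illustrative sketch of the credit sizing, not exact math: for a
* non-extent file whose i_reserved_data_blocks exceeds
* EXT4_MAX_TRANS_DATA, max_blocks is clamped to EXT4_MAX_TRANS_DATA
* and ext4_chunk_trans_blocks() then sizes one transaction for the
* inode, bitmap, group descriptor and index blocks that allocating
* that many contiguous blocks may dirty.)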
235061628a3fSMingming Cao */
2351525f4ed8SMingming Cao
2352525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode)
2353525f4ed8SMingming Cao {
2354525f4ed8SMingming Cao int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2355525f4ed8SMingming Cao
2356525f4ed8SMingming Cao /*
2357525f4ed8SMingming Cao * With non-extent format the journal credit needed to
2358525f4ed8SMingming Cao * insert nrblocks contiguous block is dependent on
2359525f4ed8SMingming Cao * number of contiguous block. So we will limit
2360525f4ed8SMingming Cao * number of contiguous block to a sane value
2361525f4ed8SMingming Cao */
2362525f4ed8SMingming Cao if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
2363525f4ed8SMingming Cao (max_blocks > EXT4_MAX_TRANS_DATA))
2364525f4ed8SMingming Cao max_blocks = EXT4_MAX_TRANS_DATA;
2365525f4ed8SMingming Cao
2366525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, max_blocks);
2367525f4ed8SMingming Cao }
236861628a3fSMingming Cao
236964769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping,
237064769240SAlex Tomas struct writeback_control *wbc)
237164769240SAlex Tomas {
2372*22208dedSAneesh Kumar K.V pgoff_t index;
2373*22208dedSAneesh Kumar K.V int range_whole = 0;
237461628a3fSMingming Cao handle_t *handle = NULL;
2375df22291fSAneesh Kumar K.V struct mpage_da_data mpd;
23765e745b04SAneesh Kumar K.V struct inode *inode = mapping->host;
2377*22208dedSAneesh Kumar K.V int no_nrwrite_index_update;
2378*22208dedSAneesh Kumar K.V long pages_written = 0, pages_skipped;
23795e745b04SAneesh Kumar K.V int needed_blocks, ret = 0, nr_to_writebump = 0;
23805e745b04SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
238161628a3fSMingming Cao
238261628a3fSMingming Cao /*
238361628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting
238461628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput()
238561628a3fSMingming Cao * because that could violate lock ordering on umount
238661628a3fSMingming Cao */
2387a1d6cc56SAneesh Kumar K.V if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
238861628a3fSMingming Cao return 0;
23895e745b04SAneesh Kumar K.V /*
23905e745b04SAneesh Kumar K.V * Make sure nr_to_write is >= sbi->s_mb_stream_request
23915e745b04SAneesh Kumar K.V * This makes sure small file blocks are allocated in a
23925e745b04SAneesh Kumar K.V * single attempt. This ensures that small files
23935e745b04SAneesh Kumar K.V * get less fragmented.
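*
* (Worked example with illustrative numbers: if s_mb_stream_request
* is 16 and the caller passed nr_to_write = 4, nr_to_writebump
* becomes 12 and nr_to_write is raised to 16; the bump is
* subtracted back out at out_writepages so the caller's accounting
* sees at most its original budget consumed.)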
23945e745b04SAneesh Kumar K.V */
23955e745b04SAneesh Kumar K.V if (wbc->nr_to_write < sbi->s_mb_stream_request) {
23965e745b04SAneesh Kumar K.V nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
23975e745b04SAneesh Kumar K.V wbc->nr_to_write = sbi->s_mb_stream_request;
23985e745b04SAneesh Kumar K.V }
2399*22208dedSAneesh Kumar K.V if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2400*22208dedSAneesh Kumar K.V range_whole = 1;
240161628a3fSMingming Cao
2402*22208dedSAneesh Kumar K.V if (wbc->range_cyclic)
2403*22208dedSAneesh Kumar K.V index = mapping->writeback_index;
2404*22208dedSAneesh Kumar K.V else
2405*22208dedSAneesh Kumar K.V index = wbc->range_start >> PAGE_CACHE_SHIFT;
2406a1d6cc56SAneesh Kumar K.V
2407df22291fSAneesh Kumar K.V mpd.wbc = wbc;
2408df22291fSAneesh Kumar K.V mpd.inode = mapping->host;
2409df22291fSAneesh Kumar K.V
2410*22208dedSAneesh Kumar K.V /*
2411*22208dedSAneesh Kumar K.V * we don't want write_cache_pages to update
2412*22208dedSAneesh Kumar K.V * nr_to_write and writeback_index
2413*22208dedSAneesh Kumar K.V */
2414*22208dedSAneesh Kumar K.V no_nrwrite_index_update = wbc->no_nrwrite_index_update;
2415*22208dedSAneesh Kumar K.V wbc->no_nrwrite_index_update = 1;
2416*22208dedSAneesh Kumar K.V pages_skipped = wbc->pages_skipped;
2417*22208dedSAneesh Kumar K.V
2418*22208dedSAneesh Kumar K.V while (!ret && wbc->nr_to_write > 0) {
2419a1d6cc56SAneesh Kumar K.V
2420a1d6cc56SAneesh Kumar K.V /*
2421a1d6cc56SAneesh Kumar K.V * we insert one extent at a time. So we need
2422a1d6cc56SAneesh Kumar K.V * the credits needed for a single extent allocation.
2423a1d6cc56SAneesh Kumar K.V * journalled mode is currently not supported
2424a1d6cc56SAneesh Kumar K.V * by delalloc
2425a1d6cc56SAneesh Kumar K.V */
2426a1d6cc56SAneesh Kumar K.V BUG_ON(ext4_should_journal_data(inode));
2427525f4ed8SMingming Cao needed_blocks = ext4_da_writepages_trans_blocks(inode);
2428a1d6cc56SAneesh Kumar K.V
242961628a3fSMingming Cao /* start a new transaction */
243061628a3fSMingming Cao handle = ext4_journal_start(inode, needed_blocks);
243161628a3fSMingming Cao if (IS_ERR(handle)) {
243261628a3fSMingming Cao ret = PTR_ERR(handle);
2433a1d6cc56SAneesh Kumar K.V printk(KERN_EMERG "%s: jbd2_start: "
2434a1d6cc56SAneesh Kumar K.V "%ld pages, ino %lu; err %d\n", __func__,
2435a1d6cc56SAneesh Kumar K.V wbc->nr_to_write, inode->i_ino, ret);
2436a1d6cc56SAneesh Kumar K.V dump_stack();
243761628a3fSMingming Cao goto out_writepages;
243861628a3fSMingming Cao }
2439df22291fSAneesh Kumar K.V mpd.get_block = ext4_da_get_block_write;
2440df22291fSAneesh Kumar K.V ret = mpage_da_writepages(mapping, wbc, &mpd);
2441df22291fSAneesh Kumar K.V
244261628a3fSMingming Cao ext4_journal_stop(handle);
2443df22291fSAneesh Kumar K.V
2444*22208dedSAneesh Kumar K.V if (mpd.retval == -ENOSPC) {
2445*22208dedSAneesh Kumar K.V /* commit the transaction which would
2446*22208dedSAneesh Kumar K.V * free blocks released in the transaction
2447*22208dedSAneesh Kumar K.V * and try again
2448*22208dedSAneesh Kumar K.V */
2449df22291fSAneesh Kumar K.V jbd2_journal_force_commit_nested(sbi->s_journal);
2450*22208dedSAneesh Kumar K.V wbc->pages_skipped = pages_skipped;
2451*22208dedSAneesh Kumar K.V ret = 0;
2452*22208dedSAneesh Kumar K.V } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2453a1d6cc56SAneesh Kumar K.V /*
2454a1d6cc56SAneesh Kumar K.V * got one extent now try with
2455a1d6cc56SAneesh Kumar K.V * rest of the pages
2456a1d6cc56SAneesh Kumar K.V */
2457*22208dedSAneesh Kumar K.V pages_written += mpd.pages_written;
2458*22208dedSAneesh Kumar K.V wbc->pages_skipped = pages_skipped;
2459a1d6cc56SAneesh Kumar K.V ret = 0;
2460*22208dedSAneesh Kumar K.V } else if (wbc->nr_to_write)
246161628a3fSMingming Cao /*
246261628a3fSMingming Cao * There is no more writeout needed
246361628a3fSMingming Cao * or we requested for a nonblocking writeout
246461628a3fSMingming Cao * and we found the device congested
246561628a3fSMingming Cao */
246661628a3fSMingming Cao break;
246761628a3fSMingming Cao }
2468*22208dedSAneesh Kumar K.V if (pages_skipped != wbc->pages_skipped)
2469*22208dedSAneesh Kumar K.V printk(KERN_EMERG "This should not happen leaving %s "
2470*22208dedSAneesh Kumar K.V "with nr_to_write = %ld ret = %d\n",
2471*22208dedSAneesh Kumar K.V __func__, wbc->nr_to_write, ret);
247261628a3fSMingming Cao
2473*22208dedSAneesh Kumar K.V /* Update index */
2474*22208dedSAneesh Kumar K.V index += pages_written;
2475*22208dedSAneesh Kumar K.V if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2476*22208dedSAneesh Kumar K.V /*
2477*22208dedSAneesh Kumar K.V * set the writeback_index so that range_cyclic
2478*22208dedSAneesh Kumar K.V * mode will write it back later
2479*22208dedSAneesh Kumar K.V */
2480*22208dedSAneesh Kumar K.V mapping->writeback_index = index;
2481a1d6cc56SAneesh Kumar K.V
248261628a3fSMingming Cao out_writepages:
2483*22208dedSAneesh Kumar K.V if (!no_nrwrite_index_update)
2484*22208dedSAneesh Kumar K.V wbc->no_nrwrite_index_update = 0;
2485*22208dedSAneesh Kumar K.V wbc->nr_to_write -= nr_to_writebump;
248661628a3fSMingming Cao return ret;
248764769240SAlex Tomas }
248864769240SAlex Tomas
248979f0be8dSAneesh Kumar K.V #define FALL_BACK_TO_NONDELALLOC 1
249079f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb)
249179f0be8dSAneesh Kumar K.V {
249279f0be8dSAneesh Kumar K.V s64 free_blocks, dirty_blocks;
249379f0be8dSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb);
249479f0be8dSAneesh Kumar K.V
249579f0be8dSAneesh Kumar K.V /*
249679f0be8dSAneesh Kumar K.V * switch to non delalloc mode if we are running low
249779f0be8dSAneesh Kumar K.V * on free blocks. The free block accounting via percpu
249879f0be8dSAneesh Kumar K.V * counters can get slightly wrong with FBC_BATCH getting
249979f0be8dSAneesh Kumar K.V * accumulated on each CPU without updating global counters.
250079f0be8dSAneesh Kumar K.V * Delalloc needs an accurate free block accounting. So switch
250179f0be8dSAneesh Kumar K.V * to non delalloc when we are near the error range.
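*
* (Worked example, illustrative numbers only: with free_blocks = 90
* and dirty_blocks = 80, 2 * 90 = 180 < 3 * 80 = 240, i.e. free
* blocks sit below 150% of dirty blocks, so we fall back to
* non-delalloc; with free_blocks = 200 the ratio test passes and
* only the watermark check can still force the fallback.)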
250279f0be8dSAneesh Kumar K.V */
250379f0be8dSAneesh Kumar K.V free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
250479f0be8dSAneesh Kumar K.V dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
250579f0be8dSAneesh Kumar K.V if (2 * free_blocks < 3 * dirty_blocks ||
250679f0be8dSAneesh Kumar K.V free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
250779f0be8dSAneesh Kumar K.V /*
250879f0be8dSAneesh Kumar K.V * free block count is less than 150% of dirty blocks
250979f0be8dSAneesh Kumar K.V * or free blocks is less than the watermark
251079f0be8dSAneesh Kumar K.V */
251179f0be8dSAneesh Kumar K.V return 1;
251279f0be8dSAneesh Kumar K.V }
251379f0be8dSAneesh Kumar K.V return 0;
251479f0be8dSAneesh Kumar K.V }
251579f0be8dSAneesh Kumar K.V
251664769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
251764769240SAlex Tomas loff_t pos, unsigned len, unsigned flags,
251864769240SAlex Tomas struct page **pagep, void **fsdata)
251964769240SAlex Tomas {
2520d2a17637SMingming Cao int ret, retries = 0;
252164769240SAlex Tomas struct page *page;
252264769240SAlex Tomas pgoff_t index;
252364769240SAlex Tomas unsigned from, to;
252464769240SAlex Tomas struct inode *inode = mapping->host;
252564769240SAlex Tomas handle_t *handle;
252664769240SAlex Tomas
252764769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT;
252864769240SAlex Tomas from = pos & (PAGE_CACHE_SIZE - 1);
252964769240SAlex Tomas to = from + len;
253079f0be8dSAneesh Kumar K.V
253179f0be8dSAneesh Kumar K.V if (ext4_nonda_switch(inode->i_sb)) {
253279f0be8dSAneesh Kumar K.V *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
253379f0be8dSAneesh Kumar K.V return ext4_write_begin(file, mapping, pos,
253479f0be8dSAneesh Kumar K.V len, flags, pagep, fsdata);
253579f0be8dSAneesh Kumar K.V }
253679f0be8dSAneesh Kumar K.V *fsdata = (void *)0;
2537d2a17637SMingming Cao retry:
253864769240SAlex Tomas /*
253964769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update
254064769240SAlex Tomas * if there is delayed block allocation. But we still need
254164769240SAlex Tomas * to journal the i_disksize update if we write to the end
254264769240SAlex Tomas * of a file which has an already mapped buffer.
254364769240SAlex Tomas */
254464769240SAlex Tomas handle = ext4_journal_start(inode, 1);
254564769240SAlex Tomas if (IS_ERR(handle)) {
254664769240SAlex Tomas ret = PTR_ERR(handle);
254764769240SAlex Tomas goto out;
254864769240SAlex Tomas }
254964769240SAlex Tomas
255064769240SAlex Tomas page = __grab_cache_page(mapping, index);
2551d5a0d4f7SEric Sandeen if (!page) {
2552d5a0d4f7SEric Sandeen ext4_journal_stop(handle);
2553d5a0d4f7SEric Sandeen ret = -ENOMEM;
2554d5a0d4f7SEric Sandeen goto out;
2555d5a0d4f7SEric Sandeen }
255664769240SAlex Tomas *pagep = page;
255764769240SAlex Tomas
255864769240SAlex Tomas ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
255964769240SAlex Tomas ext4_da_get_block_prep);
256064769240SAlex Tomas if (ret < 0) {
256164769240SAlex Tomas unlock_page(page);
256264769240SAlex Tomas ext4_journal_stop(handle);
256364769240SAlex Tomas page_cache_release(page);
2564ae4d5372SAneesh Kumar K.V /*
2565ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks
2566ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need
2567ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex.
2568ae4d5372SAneesh Kumar K.V */
2569ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size)
2570ae4d5372SAneesh Kumar K.V vmtruncate(inode, inode->i_size);
257164769240SAlex Tomas }
257264769240SAlex Tomas
2573d2a17637SMingming Cao if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2574d2a17637SMingming Cao goto retry;
257564769240SAlex Tomas out:
257664769240SAlex Tomas return ret;
257764769240SAlex Tomas }
257864769240SAlex Tomas
2579632eaeabSMingming Cao /*
2580632eaeabSMingming Cao * Check if we should update i_disksize
2581632eaeabSMingming Cao * when a write to the end of file does not require block allocation
2582632eaeabSMingming Cao */
2583632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page,
2584632eaeabSMingming Cao unsigned long offset)
2585632eaeabSMingming Cao {
2586632eaeabSMingming Cao struct buffer_head *bh;
2587632eaeabSMingming Cao struct inode *inode = page->mapping->host;
2588632eaeabSMingming Cao unsigned int idx;
2589632eaeabSMingming Cao int i;
2590632eaeabSMingming Cao
2591632eaeabSMingming Cao bh = page_buffers(page);
2592632eaeabSMingming Cao idx = offset >> inode->i_blkbits;
2593632eaeabSMingming Cao
2594632eaeabSMingming Cao for (i = 0; i < idx; i++)
2595632eaeabSMingming Cao bh = bh->b_this_page;
2596632eaeabSMingming Cao
2597632eaeabSMingming Cao if (!buffer_mapped(bh) || (buffer_delay(bh)))
2598632eaeabSMingming Cao return 0;
2599632eaeabSMingming Cao return 1;
2600632eaeabSMingming Cao }
2601632eaeabSMingming Cao
260264769240SAlex Tomas static int ext4_da_write_end(struct file *file,
260364769240SAlex Tomas struct address_space *mapping,
260464769240SAlex Tomas loff_t pos, unsigned len, unsigned copied,
260564769240SAlex Tomas struct page *page, void *fsdata)
260664769240SAlex Tomas {
260764769240SAlex Tomas struct inode *inode = mapping->host;
260864769240SAlex Tomas int ret = 0, ret2;
260964769240SAlex Tomas handle_t *handle = ext4_journal_current_handle();
261064769240SAlex Tomas loff_t new_i_size;
2611632eaeabSMingming Cao unsigned long start, end;
261279f0be8dSAneesh Kumar K.V int write_mode = (int)(unsigned long)fsdata;
261379f0be8dSAneesh Kumar K.V
261479f0be8dSAneesh Kumar K.V if (write_mode == FALL_BACK_TO_NONDELALLOC) {
261579f0be8dSAneesh Kumar K.V if (ext4_should_order_data(inode)) {
261679f0be8dSAneesh Kumar K.V return ext4_ordered_write_end(file, mapping, pos,
261779f0be8dSAneesh Kumar K.V len, copied, page, fsdata);
261879f0be8dSAneesh Kumar K.V } else if (ext4_should_writeback_data(inode)) {
261979f0be8dSAneesh Kumar K.V return ext4_writeback_write_end(file, mapping, pos,
262079f0be8dSAneesh Kumar K.V len, copied, page, fsdata);
262179f0be8dSAneesh Kumar K.V } else {
262279f0be8dSAneesh Kumar K.V BUG();
262379f0be8dSAneesh Kumar K.V }
262479f0be8dSAneesh Kumar K.V }
2625632eaeabSMingming Cao
2626632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1);
2627632eaeabSMingming Cao end = start + copied - 1;
262864769240SAlex Tomas
262964769240SAlex Tomas /*
263064769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size
263164769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty
263264769240SAlex Tomas * into that.
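*
* (Illustration of why this matters with delalloc: i_size may run
* ahead of i_disksize while blocks are merely reserved, and crash
* recovery must not expose bytes past the on-disk size, so
* i_disksize is only advanced here when the tail of the write sits
* in an already mapped, non-delay buffer.)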
263364769240SAlex Tomas */
263464769240SAlex Tomas
263564769240SAlex Tomas new_i_size = pos + copied;
2636632eaeabSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) {
2637632eaeabSMingming Cao if (ext4_da_should_update_i_disksize(page, end)) {
2638632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem);
2639632eaeabSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) {
264064769240SAlex Tomas /*
2641632eaeabSMingming Cao * Updating i_disksize when extending file
2642632eaeabSMingming Cao * without needing block allocation
264364769240SAlex Tomas */
264464769240SAlex Tomas if (ext4_should_order_data(inode))
2645632eaeabSMingming Cao ret = ext4_jbd2_file_inode(handle,
2646632eaeabSMingming Cao inode);
264764769240SAlex Tomas
264864769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size;
264964769240SAlex Tomas }
2650632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem);
2651cf17fea6SAneesh Kumar K.V /* We need to mark inode dirty even if
2652cf17fea6SAneesh Kumar K.V * new_i_size is less than inode->i_size
2653cf17fea6SAneesh Kumar K.V * but greater than i_disksize. (hint: delalloc)
2654cf17fea6SAneesh Kumar K.V */
2655cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode);
2656632eaeabSMingming Cao }
2657632eaeabSMingming Cao }
265864769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied,
265964769240SAlex Tomas page, fsdata);
266064769240SAlex Tomas copied = ret2;
266164769240SAlex Tomas if (ret2 < 0)
266264769240SAlex Tomas ret = ret2;
266364769240SAlex Tomas ret2 = ext4_journal_stop(handle);
266464769240SAlex Tomas if (!ret)
266564769240SAlex Tomas ret = ret2;
266664769240SAlex Tomas
266764769240SAlex Tomas return ret ? ret : copied;
266864769240SAlex Tomas }
266964769240SAlex Tomas
267064769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
267164769240SAlex Tomas {
267264769240SAlex Tomas /*
267364769240SAlex Tomas * Drop reserved blocks
267464769240SAlex Tomas */
267564769240SAlex Tomas BUG_ON(!PageLocked(page));
267664769240SAlex Tomas if (!page_has_buffers(page))
267764769240SAlex Tomas goto out;
267864769240SAlex Tomas
2679d2a17637SMingming Cao ext4_da_page_release_reservation(page, offset);
268064769240SAlex Tomas
268164769240SAlex Tomas out:
268264769240SAlex Tomas ext4_invalidatepage(page, offset);
268364769240SAlex Tomas
268464769240SAlex Tomas return;
268564769240SAlex Tomas }
268664769240SAlex Tomas
268764769240SAlex Tomas
268864769240SAlex Tomas /*
2689ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by
2690ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data.
2691ac27a0ecSDave Kleikamp *
2692ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the
2693617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling
2694ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the
2695ac27a0ecSDave Kleikamp * data getting swapped to that swapfile suddenly gets overwritten by
2696ac27a0ecSDave Kleikamp * the original zeros written out previously to the journal and
2697ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache.
2698ac27a0ecSDave Kleikamp *
2699ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file,
2700ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache.
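*
* (Concrete instance of the rare case above, as a sketch: swapon on
* a freshly written file on a data=journal filesystem; the journal
* is flushed below before generic_block_bmap() reports block
* numbers, so swap I/O never reads blocks whose latest contents
* still sit only in the journal.)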
2701ac27a0ecSDave Kleikamp */ 2702617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2703ac27a0ecSDave Kleikamp { 2704ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 2705ac27a0ecSDave Kleikamp journal_t *journal; 2706ac27a0ecSDave Kleikamp int err; 2707ac27a0ecSDave Kleikamp 270864769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 270964769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) { 271064769240SAlex Tomas /* 271164769240SAlex Tomas * With delalloc we want to sync the file 271264769240SAlex Tomas * so that we can make sure we allocate 271364769240SAlex Tomas * blocks for file 271464769240SAlex Tomas */ 271564769240SAlex Tomas filemap_write_and_wait(mapping); 271664769240SAlex Tomas } 271764769240SAlex Tomas 2718617ba13bSMingming Cao if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) { 2719ac27a0ecSDave Kleikamp /* 2720ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of 2721ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare: 2722ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file 2723ac27a0ecSDave Kleikamp * do we expect this to happen. 2724ac27a0ecSDave Kleikamp * 2725ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not 2726ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be 2727ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at 2728ac27a0ecSDave Kleikamp * will.) 2729ac27a0ecSDave Kleikamp * 2730617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than 2731ac27a0ecSDave Kleikamp * regular files. If somebody wants to bmap a directory 2732ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer 2733ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve 2734ac27a0ecSDave Kleikamp * everything they get. 2735ac27a0ecSDave Kleikamp */ 2736ac27a0ecSDave Kleikamp 2737617ba13bSMingming Cao EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA; 2738617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2739dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2740dab291afSMingming Cao err = jbd2_journal_flush(journal); 2741dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2742ac27a0ecSDave Kleikamp 2743ac27a0ecSDave Kleikamp if (err) 2744ac27a0ecSDave Kleikamp return 0; 2745ac27a0ecSDave Kleikamp } 2746ac27a0ecSDave Kleikamp 2747617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block); 2748ac27a0ecSDave Kleikamp } 2749ac27a0ecSDave Kleikamp 2750ac27a0ecSDave Kleikamp static int bget_one(handle_t *handle, struct buffer_head *bh) 2751ac27a0ecSDave Kleikamp { 2752ac27a0ecSDave Kleikamp get_bh(bh); 2753ac27a0ecSDave Kleikamp return 0; 2754ac27a0ecSDave Kleikamp } 2755ac27a0ecSDave Kleikamp 2756ac27a0ecSDave Kleikamp static int bput_one(handle_t *handle, struct buffer_head *bh) 2757ac27a0ecSDave Kleikamp { 2758ac27a0ecSDave Kleikamp put_bh(bh); 2759ac27a0ecSDave Kleikamp return 0; 2760ac27a0ecSDave Kleikamp } 2761ac27a0ecSDave Kleikamp 2762ac27a0ecSDave Kleikamp /* 2763678aaf48SJan Kara * Note that we don't need to start a transaction unless we're journaling data 2764678aaf48SJan Kara * because we should have holes filled from ext4_page_mkwrite(). 
We don't even
2765678aaf48SJan Kara * need to file the inode to the transaction's list in ordered mode because if
2766678aaf48SJan Kara * we are writing back data added by write(), the inode is already there and if
2767678aaf48SJan Kara * we are writing back data modified via mmap(), no one guarantees in which
2768678aaf48SJan Kara * transaction the data will hit the disk. In case we are journaling data, we
2769678aaf48SJan Kara * cannot start a transaction directly because transaction start ranks above page
2770678aaf48SJan Kara * lock so we have to do some magic.
2771ac27a0ecSDave Kleikamp *
2772678aaf48SJan Kara * In all journaling modes block_write_full_page() will start the I/O.
2773ac27a0ecSDave Kleikamp *
2774ac27a0ecSDave Kleikamp * Problem:
2775ac27a0ecSDave Kleikamp *
2776617ba13bSMingming Cao * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2777617ba13bSMingming Cao * ext4_writepage()
2778ac27a0ecSDave Kleikamp *
2779ac27a0ecSDave Kleikamp * Similar for:
2780ac27a0ecSDave Kleikamp *
2781617ba13bSMingming Cao * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
2782ac27a0ecSDave Kleikamp *
2783617ba13bSMingming Cao * Same applies to ext4_get_block(). We will deadlock on various things like
27840e855ac8SAneesh Kumar K.V * lock_journal and i_data_sem
2785ac27a0ecSDave Kleikamp *
2786ac27a0ecSDave Kleikamp * Setting PF_MEMALLOC here doesn't work - too many internal memory
2787ac27a0ecSDave Kleikamp * allocations fail.
2788ac27a0ecSDave Kleikamp *
2789ac27a0ecSDave Kleikamp * 16May01: If we're reentered then journal_current_handle() will be
2790ac27a0ecSDave Kleikamp * non-zero. We simply *return*.
2791ac27a0ecSDave Kleikamp *
2792ac27a0ecSDave Kleikamp * 1 July 2001: @@@ FIXME:
2793ac27a0ecSDave Kleikamp * In journalled data mode, a data buffer may be metadata against the
2794ac27a0ecSDave Kleikamp * current transaction. But the same file is part of a shared mapping
2795ac27a0ecSDave Kleikamp * and someone does a writepage() on it.
2796ac27a0ecSDave Kleikamp *
2797ac27a0ecSDave Kleikamp * We will move the buffer onto the async_data list, but *after* it has
2798ac27a0ecSDave Kleikamp * been dirtied. So there's a small window where we have dirty data on
2799ac27a0ecSDave Kleikamp * BJ_Metadata.
2800ac27a0ecSDave Kleikamp *
2801ac27a0ecSDave Kleikamp * Note that this only applies to the last partial page in the file. The
2802ac27a0ecSDave Kleikamp * bit which block_write_full_page() uses prepare/commit for. (That's
2803ac27a0ecSDave Kleikamp * broken code anyway: it's wrong for msync()).
2804ac27a0ecSDave Kleikamp *
2805ac27a0ecSDave Kleikamp * It's a rare case: affects the final partial page, for journalled data
2806ac27a0ecSDave Kleikamp * where the file is subject to both write() and writepage() in the same
2807ac27a0ecSDave Kleikamp * transaction. To fix it we'll need a custom block_write_full_page().
2808ac27a0ecSDave Kleikamp * We'll probably need that anyway for journalling writepage() output.
2809ac27a0ecSDave Kleikamp *
2810ac27a0ecSDave Kleikamp * We don't honour synchronous mounts for writepage(). That would be
2811ac27a0ecSDave Kleikamp * disastrous. Any write() or metadata operation will sync the fs for
2812ac27a0ecSDave Kleikamp * us.
2813ac27a0ecSDave Kleikamp *
2814ac27a0ecSDave Kleikamp */
2815678aaf48SJan Kara static int __ext4_normal_writepage(struct page *page,
2816cf108bcaSJan Kara struct writeback_control *wbc)
2817cf108bcaSJan Kara {
2818cf108bcaSJan Kara struct inode *inode = page->mapping->host;
2819cf108bcaSJan Kara
2820cf108bcaSJan Kara if (test_opt(inode->i_sb, NOBH))
2821f0e6c985SAneesh Kumar K.V return nobh_writepage(page,
2822f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write, wbc);
2823cf108bcaSJan Kara else
2824f0e6c985SAneesh Kumar K.V return block_write_full_page(page,
2825f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write,
2826f0e6c985SAneesh Kumar K.V wbc);
2827cf108bcaSJan Kara }
2828cf108bcaSJan Kara
2829678aaf48SJan Kara static int ext4_normal_writepage(struct page *page,
2830ac27a0ecSDave Kleikamp struct writeback_control *wbc)
2831ac27a0ecSDave Kleikamp {
2832ac27a0ecSDave Kleikamp struct inode *inode = page->mapping->host;
2833cf108bcaSJan Kara loff_t size = i_size_read(inode);
2834cf108bcaSJan Kara loff_t len;
2835cf108bcaSJan Kara
2836cf108bcaSJan Kara J_ASSERT(PageLocked(page));
2837cf108bcaSJan Kara if (page->index == size >> PAGE_CACHE_SHIFT)
2838cf108bcaSJan Kara len = size & ~PAGE_CACHE_MASK;
2839cf108bcaSJan Kara else
2840cf108bcaSJan Kara len = PAGE_CACHE_SIZE;
2841f0e6c985SAneesh Kumar K.V
2842f0e6c985SAneesh Kumar K.V if (page_has_buffers(page)) {
2843f0e6c985SAneesh Kumar K.V /* if the page has buffers they should all be mapped
2844f0e6c985SAneesh Kumar K.V * and allocated. If there are no buffers attached
2845f0e6c985SAneesh Kumar K.V * to the page we know the page is dirty but it lost
2846f0e6c985SAneesh Kumar K.V * buffers. That means that at some moment in time
2847f0e6c985SAneesh Kumar K.V * after write_begin() / write_end() has been called
2848f0e6c985SAneesh Kumar K.V * all buffers have been clean and thus they must have been
2849f0e6c985SAneesh Kumar K.V * written at least once. So they are all mapped and we can
2850f0e6c985SAneesh Kumar K.V * happily proceed with mapping them and writing the page.
2851f0e6c985SAneesh Kumar K.V */ 2852cf108bcaSJan Kara BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, 2853cf108bcaSJan Kara ext4_bh_unmapped_or_delay)); 2854f0e6c985SAneesh Kumar K.V } 2855cf108bcaSJan Kara 2856cf108bcaSJan Kara if (!ext4_journal_current_handle()) 2857678aaf48SJan Kara return __ext4_normal_writepage(page, wbc); 2858cf108bcaSJan Kara 2859cf108bcaSJan Kara redirty_page_for_writepage(wbc, page); 2860cf108bcaSJan Kara unlock_page(page); 2861cf108bcaSJan Kara return 0; 2862cf108bcaSJan Kara } 2863cf108bcaSJan Kara 2864cf108bcaSJan Kara static int __ext4_journalled_writepage(struct page *page, 2865cf108bcaSJan Kara struct writeback_control *wbc) 2866cf108bcaSJan Kara { 2867cf108bcaSJan Kara struct address_space *mapping = page->mapping; 2868cf108bcaSJan Kara struct inode *inode = mapping->host; 2869cf108bcaSJan Kara struct buffer_head *page_bufs; 2870ac27a0ecSDave Kleikamp handle_t *handle = NULL; 2871ac27a0ecSDave Kleikamp int ret = 0; 2872ac27a0ecSDave Kleikamp int err; 2873ac27a0ecSDave Kleikamp 2874f0e6c985SAneesh Kumar K.V ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, 2875f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write); 2876cf108bcaSJan Kara if (ret != 0) 2877cf108bcaSJan Kara goto out_unlock; 2878cf108bcaSJan Kara 2879cf108bcaSJan Kara page_bufs = page_buffers(page); 2880cf108bcaSJan Kara walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL, 2881cf108bcaSJan Kara bget_one); 2882cf108bcaSJan Kara /* As soon as we unlock the page, it can go away, but we have 2883cf108bcaSJan Kara * references to buffers so we are safe */ 2884cf108bcaSJan Kara unlock_page(page); 2885ac27a0ecSDave Kleikamp 2886617ba13bSMingming Cao handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 2887ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 2888ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 2889cf108bcaSJan Kara goto out; 2890ac27a0ecSDave Kleikamp } 2891ac27a0ecSDave Kleikamp 2892cf108bcaSJan Kara ret = walk_page_buffers(handle, page_bufs, 0, 2893cf108bcaSJan Kara PAGE_CACHE_SIZE, NULL, do_journal_get_write_access); 2894ac27a0ecSDave Kleikamp 2895cf108bcaSJan Kara err = walk_page_buffers(handle, page_bufs, 0, 2896cf108bcaSJan Kara PAGE_CACHE_SIZE, NULL, write_end_fn); 2897cf108bcaSJan Kara if (ret == 0) 2898cf108bcaSJan Kara ret = err; 2899617ba13bSMingming Cao err = ext4_journal_stop(handle); 2900ac27a0ecSDave Kleikamp if (!ret) 2901ac27a0ecSDave Kleikamp ret = err; 2902ac27a0ecSDave Kleikamp 2903cf108bcaSJan Kara walk_page_buffers(handle, page_bufs, 0, 2904cf108bcaSJan Kara PAGE_CACHE_SIZE, NULL, bput_one); 2905cf108bcaSJan Kara EXT4_I(inode)->i_state |= EXT4_STATE_JDATA; 2906cf108bcaSJan Kara goto out; 2907cf108bcaSJan Kara 2908cf108bcaSJan Kara out_unlock: 2909ac27a0ecSDave Kleikamp unlock_page(page); 2910cf108bcaSJan Kara out: 2911ac27a0ecSDave Kleikamp return ret; 2912ac27a0ecSDave Kleikamp } 2913ac27a0ecSDave Kleikamp 2914617ba13bSMingming Cao static int ext4_journalled_writepage(struct page *page, 2915ac27a0ecSDave Kleikamp struct writeback_control *wbc) 2916ac27a0ecSDave Kleikamp { 2917ac27a0ecSDave Kleikamp struct inode *inode = page->mapping->host; 2918cf108bcaSJan Kara loff_t size = i_size_read(inode); 2919cf108bcaSJan Kara loff_t len; 2920cf108bcaSJan Kara 2921cf108bcaSJan Kara J_ASSERT(PageLocked(page)); 2922cf108bcaSJan Kara if (page->index == size >> PAGE_CACHE_SHIFT) 2923cf108bcaSJan Kara len = size & ~PAGE_CACHE_MASK; 2924cf108bcaSJan Kara else 2925cf108bcaSJan Kara len = PAGE_CACHE_SIZE; 2926f0e6c985SAneesh Kumar K.V 
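/*
 * (Worked example, illustrative: with 4K pages and i_size = 10000,
 * the page at index 2 covers bytes 8192..12287 and is the last,
 * partial page, so len = 10000 & 4095 = 1808; every earlier page
 * uses the full PAGE_CACHE_SIZE.)
 */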
2927f0e6c985SAneesh Kumar K.V if (page_has_buffers(page)) {
2928f0e6c985SAneesh Kumar K.V /* if the page has buffers they should all be mapped
2929f0e6c985SAneesh Kumar K.V * and allocated. If there are no buffers attached
2930f0e6c985SAneesh Kumar K.V * to the page we know the page is dirty but it lost
2931f0e6c985SAneesh Kumar K.V * buffers. That means that at some moment in time
2932f0e6c985SAneesh Kumar K.V * after write_begin() / write_end() has been called
2933f0e6c985SAneesh Kumar K.V * all buffers have been clean and thus they must have been
2934f0e6c985SAneesh Kumar K.V * written at least once. So they are all mapped and we can
2935f0e6c985SAneesh Kumar K.V * happily proceed with mapping them and writing the page.
2936f0e6c985SAneesh Kumar K.V */
2937cf108bcaSJan Kara BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
2938cf108bcaSJan Kara ext4_bh_unmapped_or_delay));
2939f0e6c985SAneesh Kumar K.V }
2940ac27a0ecSDave Kleikamp
2941617ba13bSMingming Cao if (ext4_journal_current_handle())
2942ac27a0ecSDave Kleikamp goto no_write;
2943ac27a0ecSDave Kleikamp
2944cf108bcaSJan Kara if (PageChecked(page)) {
2945ac27a0ecSDave Kleikamp /*
2946ac27a0ecSDave Kleikamp * It's mmapped pagecache. Add buffers and journal it. There
2947ac27a0ecSDave Kleikamp * doesn't seem much point in redirtying the page here.
2948ac27a0ecSDave Kleikamp */
2949ac27a0ecSDave Kleikamp ClearPageChecked(page);
2950cf108bcaSJan Kara return __ext4_journalled_writepage(page, wbc);
2951ac27a0ecSDave Kleikamp } else {
2952ac27a0ecSDave Kleikamp /*
2953ac27a0ecSDave Kleikamp * It may be a page full of checkpoint-mode buffers. We don't
2954ac27a0ecSDave Kleikamp * really know unless we go poke around in the buffer_heads.
2955ac27a0ecSDave Kleikamp * But block_write_full_page will do the right thing.
2956ac27a0ecSDave Kleikamp */
2957f0e6c985SAneesh Kumar K.V return block_write_full_page(page,
2958f0e6c985SAneesh Kumar K.V ext4_normal_get_block_write,
2959f0e6c985SAneesh Kumar K.V wbc);
2960ac27a0ecSDave Kleikamp }
2961ac27a0ecSDave Kleikamp no_write:
2962ac27a0ecSDave Kleikamp redirty_page_for_writepage(wbc, page);
2963ac27a0ecSDave Kleikamp unlock_page(page);
2964cf108bcaSJan Kara return 0;
2965ac27a0ecSDave Kleikamp }
2966ac27a0ecSDave Kleikamp
2967617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page)
2968ac27a0ecSDave Kleikamp {
2969617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block);
2970ac27a0ecSDave Kleikamp }
2971ac27a0ecSDave Kleikamp
2972ac27a0ecSDave Kleikamp static int
2973617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping,
2974ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages)
2975ac27a0ecSDave Kleikamp {
2976617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2977ac27a0ecSDave Kleikamp }
2978ac27a0ecSDave Kleikamp
2979617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset)
2980ac27a0ecSDave Kleikamp {
2981617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2982ac27a0ecSDave Kleikamp
2983ac27a0ecSDave Kleikamp /*
2984ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying
2985ac27a0ecSDave Kleikamp */
2986ac27a0ecSDave Kleikamp if (offset == 0)
2987ac27a0ecSDave Kleikamp ClearPageChecked(page);
2988ac27a0ecSDave Kleikamp
2989dab291afSMingming Cao jbd2_journal_invalidatepage(journal, page, offset);
2990ac27a0ecSDave Kleikamp }
2991ac27a0ecSDave Kleikamp
2992617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait)
2993ac27a0ecSDave Kleikamp {
2994617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2995ac27a0ecSDave Kleikamp
2996ac27a0ecSDave Kleikamp WARN_ON(PageChecked(page));
2997ac27a0ecSDave Kleikamp if (!page_has_buffers(page))
2998ac27a0ecSDave Kleikamp return 0;
2999dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait);
3000ac27a0ecSDave Kleikamp }
3001ac27a0ecSDave Kleikamp
3002ac27a0ecSDave Kleikamp /*
3003ac27a0ecSDave Kleikamp * If the O_DIRECT write will extend the file then add this inode to the
3004ac27a0ecSDave Kleikamp * orphan list. So recovery will truncate it back to the original size
3005ac27a0ecSDave Kleikamp * if the machine crashes during the write.
3006ac27a0ecSDave Kleikamp *
3007ac27a0ecSDave Kleikamp * If the O_DIRECT write is instantiating holes inside i_size and the machine
30087fb5409dSJan Kara * crashes then stale disk data _may_ be exposed inside the file. But current
30097fb5409dSJan Kara * VFS code falls back into buffered path in that case so we are safe.
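*
* (Sketch of the extending-write case: the inode sits on the orphan
* list with i_disksize still at the old size while the O_DIRECT
* write runs, so a crash mid-write makes journal recovery truncate
* the file back to that size instead of exposing a partially
* written tail.)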
3010ac27a0ecSDave Kleikamp */ 3011617ba13bSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 3012ac27a0ecSDave Kleikamp const struct iovec *iov, loff_t offset, 3013ac27a0ecSDave Kleikamp unsigned long nr_segs) 3014ac27a0ecSDave Kleikamp { 3015ac27a0ecSDave Kleikamp struct file *file = iocb->ki_filp; 3016ac27a0ecSDave Kleikamp struct inode *inode = file->f_mapping->host; 3017617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 30187fb5409dSJan Kara handle_t *handle; 3019ac27a0ecSDave Kleikamp ssize_t ret; 3020ac27a0ecSDave Kleikamp int orphan = 0; 3021ac27a0ecSDave Kleikamp size_t count = iov_length(iov, nr_segs); 3022ac27a0ecSDave Kleikamp 3023ac27a0ecSDave Kleikamp if (rw == WRITE) { 3024ac27a0ecSDave Kleikamp loff_t final_size = offset + count; 3025ac27a0ecSDave Kleikamp 30267fb5409dSJan Kara if (final_size > inode->i_size) { 30277fb5409dSJan Kara /* Credits for sb + inode write */ 30287fb5409dSJan Kara handle = ext4_journal_start(inode, 2); 3029ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 3030ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 3031ac27a0ecSDave Kleikamp goto out; 3032ac27a0ecSDave Kleikamp } 3033617ba13bSMingming Cao ret = ext4_orphan_add(handle, inode); 30347fb5409dSJan Kara if (ret) { 30357fb5409dSJan Kara ext4_journal_stop(handle); 30367fb5409dSJan Kara goto out; 30377fb5409dSJan Kara } 3038ac27a0ecSDave Kleikamp orphan = 1; 3039ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 30407fb5409dSJan Kara ext4_journal_stop(handle); 3041ac27a0ecSDave Kleikamp } 3042ac27a0ecSDave Kleikamp } 3043ac27a0ecSDave Kleikamp 3044ac27a0ecSDave Kleikamp ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 3045ac27a0ecSDave Kleikamp offset, nr_segs, 3046617ba13bSMingming Cao ext4_get_block, NULL); 3047ac27a0ecSDave Kleikamp 30487fb5409dSJan Kara if (orphan) { 3049ac27a0ecSDave Kleikamp int err; 3050ac27a0ecSDave Kleikamp 30517fb5409dSJan Kara /* Credits for sb + inode write */ 30527fb5409dSJan Kara handle = ext4_journal_start(inode, 2); 30537fb5409dSJan Kara if (IS_ERR(handle)) { 30547fb5409dSJan Kara /* This is really bad luck. We've written the data 30557fb5409dSJan Kara * but cannot extend i_size. Bail out and pretend 30567fb5409dSJan Kara * the write failed... */ 30577fb5409dSJan Kara ret = PTR_ERR(handle); 30587fb5409dSJan Kara goto out; 30597fb5409dSJan Kara } 30607fb5409dSJan Kara if (inode->i_nlink) 3061617ba13bSMingming Cao ext4_orphan_del(handle, inode); 30627fb5409dSJan Kara if (ret > 0) { 3063ac27a0ecSDave Kleikamp loff_t end = offset + ret; 3064ac27a0ecSDave Kleikamp if (end > inode->i_size) { 3065ac27a0ecSDave Kleikamp ei->i_disksize = end; 3066ac27a0ecSDave Kleikamp i_size_write(inode, end); 3067ac27a0ecSDave Kleikamp /* 3068ac27a0ecSDave Kleikamp * We're going to return a positive `ret' 3069ac27a0ecSDave Kleikamp * here due to non-zero-length I/O, so there's 3070ac27a0ecSDave Kleikamp * no way of reporting error returns from 3071617ba13bSMingming Cao * ext4_mark_inode_dirty() to userspace. So 3072ac27a0ecSDave Kleikamp * ignore it. 
3073ac27a0ecSDave Kleikamp */ 3074617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3075ac27a0ecSDave Kleikamp } 3076ac27a0ecSDave Kleikamp } 3077617ba13bSMingming Cao err = ext4_journal_stop(handle); 3078ac27a0ecSDave Kleikamp if (ret == 0) 3079ac27a0ecSDave Kleikamp ret = err; 3080ac27a0ecSDave Kleikamp } 3081ac27a0ecSDave Kleikamp out: 3082ac27a0ecSDave Kleikamp return ret; 3083ac27a0ecSDave Kleikamp } 3084ac27a0ecSDave Kleikamp 3085ac27a0ecSDave Kleikamp /* 3086617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling 3087ac27a0ecSDave Kleikamp * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3088ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is 3089ac27a0ecSDave Kleikamp * not necessarily locked. 3090ac27a0ecSDave Kleikamp * 3091ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the 3092ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3093ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 3094ac27a0ecSDave Kleikamp * 3095ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 3096ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 3097ac27a0ecSDave Kleikamp */ 3098617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 3099ac27a0ecSDave Kleikamp { 3100ac27a0ecSDave Kleikamp SetPageChecked(page); 3101ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 3102ac27a0ecSDave Kleikamp } 3103ac27a0ecSDave Kleikamp 3104617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = { 3105617ba13bSMingming Cao .readpage = ext4_readpage, 3106617ba13bSMingming Cao .readpages = ext4_readpages, 3107678aaf48SJan Kara .writepage = ext4_normal_writepage, 3108ac27a0ecSDave Kleikamp .sync_page = block_sync_page, 3109bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3110bfc1af65SNick Piggin .write_end = ext4_ordered_write_end, 3111617ba13bSMingming Cao .bmap = ext4_bmap, 3112617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3113617ba13bSMingming Cao .releasepage = ext4_releasepage, 3114617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3115ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 31168ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3117ac27a0ecSDave Kleikamp }; 3118ac27a0ecSDave Kleikamp 3119617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = { 3120617ba13bSMingming Cao .readpage = ext4_readpage, 3121617ba13bSMingming Cao .readpages = ext4_readpages, 3122678aaf48SJan Kara .writepage = ext4_normal_writepage, 3123ac27a0ecSDave Kleikamp .sync_page = block_sync_page, 3124bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3125bfc1af65SNick Piggin .write_end = ext4_writeback_write_end, 3126617ba13bSMingming Cao .bmap = ext4_bmap, 3127617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3128617ba13bSMingming Cao .releasepage = ext4_releasepage, 3129617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3130ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 31318ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3132ac27a0ecSDave Kleikamp }; 3133ac27a0ecSDave Kleikamp 3134617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = { 3135617ba13bSMingming Cao 
.readpage = ext4_readpage,
3136617ba13bSMingming Cao .readpages = ext4_readpages,
3137617ba13bSMingming Cao .writepage = ext4_journalled_writepage,
3138ac27a0ecSDave Kleikamp .sync_page = block_sync_page,
3139bfc1af65SNick Piggin .write_begin = ext4_write_begin,
3140bfc1af65SNick Piggin .write_end = ext4_journalled_write_end,
3141617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty,
3142617ba13bSMingming Cao .bmap = ext4_bmap,
3143617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage,
3144617ba13bSMingming Cao .releasepage = ext4_releasepage,
31458ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate,
3146ac27a0ecSDave Kleikamp };
3147ac27a0ecSDave Kleikamp
314864769240SAlex Tomas static const struct address_space_operations ext4_da_aops = {
314964769240SAlex Tomas .readpage = ext4_readpage,
315064769240SAlex Tomas .readpages = ext4_readpages,
315164769240SAlex Tomas .writepage = ext4_da_writepage,
315264769240SAlex Tomas .writepages = ext4_da_writepages,
315364769240SAlex Tomas .sync_page = block_sync_page,
315464769240SAlex Tomas .write_begin = ext4_da_write_begin,
315564769240SAlex Tomas .write_end = ext4_da_write_end,
315664769240SAlex Tomas .bmap = ext4_bmap,
315764769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage,
315864769240SAlex Tomas .releasepage = ext4_releasepage,
315964769240SAlex Tomas .direct_IO = ext4_direct_IO,
316064769240SAlex Tomas .migratepage = buffer_migrate_page,
31618ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate,
316264769240SAlex Tomas };
316364769240SAlex Tomas
3164617ba13bSMingming Cao void ext4_set_aops(struct inode *inode)
3165ac27a0ecSDave Kleikamp {
3166cd1aac32SAneesh Kumar K.V if (ext4_should_order_data(inode) &&
3167cd1aac32SAneesh Kumar K.V test_opt(inode->i_sb, DELALLOC))
3168cd1aac32SAneesh Kumar K.V inode->i_mapping->a_ops = &ext4_da_aops;
3169cd1aac32SAneesh Kumar K.V else if (ext4_should_order_data(inode))
3170617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_ordered_aops;
317164769240SAlex Tomas else if (ext4_should_writeback_data(inode) &&
317264769240SAlex Tomas test_opt(inode->i_sb, DELALLOC))
317364769240SAlex Tomas inode->i_mapping->a_ops = &ext4_da_aops;
3174617ba13bSMingming Cao else if (ext4_should_writeback_data(inode))
3175617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_writeback_aops;
3176ac27a0ecSDave Kleikamp else
3177617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops;
3178ac27a0ecSDave Kleikamp }
3179ac27a0ecSDave Kleikamp
3180ac27a0ecSDave Kleikamp /*
3181617ba13bSMingming Cao * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3182ac27a0ecSDave Kleikamp * up to the end of the block which corresponds to `from'.
3183ac27a0ecSDave Kleikamp * This is required during truncate. We need to physically zero the tail end
3184ac27a0ecSDave Kleikamp * of that block so it doesn't yield old data if the file is later grown.
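*
* (Worked example, illustrative: with a 4K block size and
* from = 6000, the block covering bytes 4096..8191 stays allocated;
* offset = 6000 & 4095 = 1904 and length = 4096 - 1904 = 2192, so
* bytes 6000..8191 are zeroed and a later extension of the file
* reads zeroes there.)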
3185ac27a0ecSDave Kleikamp */ 3186cf108bcaSJan Kara int ext4_block_truncate_page(handle_t *handle, 3187ac27a0ecSDave Kleikamp struct address_space *mapping, loff_t from) 3188ac27a0ecSDave Kleikamp { 3189617ba13bSMingming Cao ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3190ac27a0ecSDave Kleikamp unsigned offset = from & (PAGE_CACHE_SIZE-1); 3191725d26d3SAneesh Kumar K.V unsigned blocksize, length, pos; 3192725d26d3SAneesh Kumar K.V ext4_lblk_t iblock; 3193ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 3194ac27a0ecSDave Kleikamp struct buffer_head *bh; 3195cf108bcaSJan Kara struct page *page; 3196ac27a0ecSDave Kleikamp int err = 0; 3197ac27a0ecSDave Kleikamp 3198cf108bcaSJan Kara page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT); 3199cf108bcaSJan Kara if (!page) 3200cf108bcaSJan Kara return -EINVAL; 3201cf108bcaSJan Kara 3202ac27a0ecSDave Kleikamp blocksize = inode->i_sb->s_blocksize; 3203ac27a0ecSDave Kleikamp length = blocksize - (offset & (blocksize - 1)); 3204ac27a0ecSDave Kleikamp iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3205ac27a0ecSDave Kleikamp 3206ac27a0ecSDave Kleikamp /* 3207ac27a0ecSDave Kleikamp * For "nobh" option, we can only work if we don't need to 3208ac27a0ecSDave Kleikamp * read-in the page - otherwise we create buffers to do the IO. 3209ac27a0ecSDave Kleikamp */ 3210ac27a0ecSDave Kleikamp if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) && 3211617ba13bSMingming Cao ext4_should_writeback_data(inode) && PageUptodate(page)) { 3212eebd2aa3SChristoph Lameter zero_user(page, offset, length); 3213ac27a0ecSDave Kleikamp set_page_dirty(page); 3214ac27a0ecSDave Kleikamp goto unlock; 3215ac27a0ecSDave Kleikamp } 3216ac27a0ecSDave Kleikamp 3217ac27a0ecSDave Kleikamp if (!page_has_buffers(page)) 3218ac27a0ecSDave Kleikamp create_empty_buffers(page, blocksize, 0); 3219ac27a0ecSDave Kleikamp 3220ac27a0ecSDave Kleikamp /* Find the buffer that contains "offset" */ 3221ac27a0ecSDave Kleikamp bh = page_buffers(page); 3222ac27a0ecSDave Kleikamp pos = blocksize; 3223ac27a0ecSDave Kleikamp while (offset >= pos) { 3224ac27a0ecSDave Kleikamp bh = bh->b_this_page; 3225ac27a0ecSDave Kleikamp iblock++; 3226ac27a0ecSDave Kleikamp pos += blocksize; 3227ac27a0ecSDave Kleikamp } 3228ac27a0ecSDave Kleikamp 3229ac27a0ecSDave Kleikamp err = 0; 3230ac27a0ecSDave Kleikamp if (buffer_freed(bh)) { 3231ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "freed: skip"); 3232ac27a0ecSDave Kleikamp goto unlock; 3233ac27a0ecSDave Kleikamp } 3234ac27a0ecSDave Kleikamp 3235ac27a0ecSDave Kleikamp if (!buffer_mapped(bh)) { 3236ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "unmapped"); 3237617ba13bSMingming Cao ext4_get_block(inode, iblock, bh, 0); 3238ac27a0ecSDave Kleikamp /* unmapped? It's a hole - nothing to do */ 3239ac27a0ecSDave Kleikamp if (!buffer_mapped(bh)) { 3240ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "still unmapped"); 3241ac27a0ecSDave Kleikamp goto unlock; 3242ac27a0ecSDave Kleikamp } 3243ac27a0ecSDave Kleikamp } 3244ac27a0ecSDave Kleikamp 3245ac27a0ecSDave Kleikamp /* Ok, it's mapped. Make sure it's up-to-date */ 3246ac27a0ecSDave Kleikamp if (PageUptodate(page)) 3247ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3248ac27a0ecSDave Kleikamp 3249ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3250ac27a0ecSDave Kleikamp err = -EIO; 3251ac27a0ecSDave Kleikamp ll_rw_block(READ, 1, &bh); 3252ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3253ac27a0ecSDave Kleikamp /* Uhhuh. Read error. Complain and punt. 
*/
3254ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh))
3255ac27a0ecSDave Kleikamp goto unlock;
3256ac27a0ecSDave Kleikamp }
3257ac27a0ecSDave Kleikamp
3258617ba13bSMingming Cao if (ext4_should_journal_data(inode)) {
3259ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "get write access");
3260617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, bh);
3261ac27a0ecSDave Kleikamp if (err)
3262ac27a0ecSDave Kleikamp goto unlock;
3263ac27a0ecSDave Kleikamp }
3264ac27a0ecSDave Kleikamp
3265eebd2aa3SChristoph Lameter zero_user(page, offset, length);
3266ac27a0ecSDave Kleikamp
3267ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "zeroed end of block");
3268ac27a0ecSDave Kleikamp
3269ac27a0ecSDave Kleikamp err = 0;
3270617ba13bSMingming Cao if (ext4_should_journal_data(inode)) {
3271617ba13bSMingming Cao err = ext4_journal_dirty_metadata(handle, bh);
3272ac27a0ecSDave Kleikamp } else {
3273617ba13bSMingming Cao if (ext4_should_order_data(inode))
3274678aaf48SJan Kara err = ext4_jbd2_file_inode(handle, inode);
3275ac27a0ecSDave Kleikamp mark_buffer_dirty(bh);
3276ac27a0ecSDave Kleikamp }
3277ac27a0ecSDave Kleikamp
3278ac27a0ecSDave Kleikamp unlock:
3279ac27a0ecSDave Kleikamp unlock_page(page);
3280ac27a0ecSDave Kleikamp page_cache_release(page);
3281ac27a0ecSDave Kleikamp return err;
3282ac27a0ecSDave Kleikamp }
3283ac27a0ecSDave Kleikamp
3284ac27a0ecSDave Kleikamp /*
3285ac27a0ecSDave Kleikamp * Probably it should be a library function... search for first non-zero word
3286ac27a0ecSDave Kleikamp * or memcmp with zero_page, whatever is better for particular architecture.
3287ac27a0ecSDave Kleikamp * Linus?
3288ac27a0ecSDave Kleikamp */
3289ac27a0ecSDave Kleikamp static inline int all_zeroes(__le32 *p, __le32 *q)
3290ac27a0ecSDave Kleikamp {
3291ac27a0ecSDave Kleikamp while (p < q)
3292ac27a0ecSDave Kleikamp if (*p++)
3293ac27a0ecSDave Kleikamp return 0;
3294ac27a0ecSDave Kleikamp return 1;
3295ac27a0ecSDave Kleikamp }
3296ac27a0ecSDave Kleikamp
3297ac27a0ecSDave Kleikamp /**
3298617ba13bSMingming Cao * ext4_find_shared - find the indirect blocks for partial truncation.
3299ac27a0ecSDave Kleikamp * @inode: inode in question
3300ac27a0ecSDave Kleikamp * @depth: depth of the affected branch
3301617ba13bSMingming Cao * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
3302ac27a0ecSDave Kleikamp * @chain: place to store the pointers to partial indirect blocks
3303ac27a0ecSDave Kleikamp * @top: place to the (detached) top of branch
3304ac27a0ecSDave Kleikamp *
3305617ba13bSMingming Cao * This is a helper function used by ext4_truncate().
3306ac27a0ecSDave Kleikamp *
3307ac27a0ecSDave Kleikamp * When we do truncate() we may have to clean the ends of several
3308ac27a0ecSDave Kleikamp * indirect blocks but leave the blocks themselves alive. A block is
3309ac27a0ecSDave Kleikamp * partially truncated if some data below the new i_size is referred to
3310ac27a0ecSDave Kleikamp * from it (and it is on the path to the first completely truncated
3311ac27a0ecSDave Kleikamp * data block, indeed). We have to free the top of that path along
3312ac27a0ecSDave Kleikamp * with everything to the right of the path. Since no allocation
3313617ba13bSMingming Cao * past the truncation point is possible until ext4_truncate()
3314ac27a0ecSDave Kleikamp * finishes, we may safely do the latter, but top of branch may
3315ac27a0ecSDave Kleikamp * require special attention - pageout below the truncation point
3316ac27a0ecSDave Kleikamp * might try to populate it.
3317ac27a0ecSDave Kleikamp * 3318ac27a0ecSDave Kleikamp * We atomically detach the top of branch from the tree, store the 3319ac27a0ecSDave Kleikamp * block number of its root in *@top, pointers to buffer_heads of 3320ac27a0ecSDave Kleikamp * partially truncated blocks - in @chain[].bh and pointers to 3321ac27a0ecSDave Kleikamp * their last elements that should not be removed - in 3322ac27a0ecSDave Kleikamp * @chain[].p. Return value is the pointer to last filled element 3323ac27a0ecSDave Kleikamp * of @chain. 3324ac27a0ecSDave Kleikamp * 3325ac27a0ecSDave Kleikamp * The work left to the caller is the actual freeing of subtrees: 3326ac27a0ecSDave Kleikamp * a) free the subtree starting from *@top 3327ac27a0ecSDave Kleikamp * b) free the subtrees whose roots are stored in 3328ac27a0ecSDave Kleikamp * (@chain[i].p+1 .. end of @chain[i].bh->b_data) 3329ac27a0ecSDave Kleikamp * c) free the subtrees growing from the inode past the @chain[0]. 3330ac27a0ecSDave Kleikamp * (no partially truncated stuff there). */ 3331ac27a0ecSDave Kleikamp 3332617ba13bSMingming Cao static Indirect *ext4_find_shared(struct inode *inode, int depth, 3333725d26d3SAneesh Kumar K.V ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top) 3334ac27a0ecSDave Kleikamp { 3335ac27a0ecSDave Kleikamp Indirect *partial, *p; 3336ac27a0ecSDave Kleikamp int k, err; 3337ac27a0ecSDave Kleikamp 3338ac27a0ecSDave Kleikamp *top = 0; 3339ac27a0ecSDave Kleikamp /* Make k index the deepest non-null offset + 1 */ 3340ac27a0ecSDave Kleikamp for (k = depth; k > 1 && !offsets[k-1]; k--) 3341ac27a0ecSDave Kleikamp ; 3342617ba13bSMingming Cao partial = ext4_get_branch(inode, k, offsets, chain, &err); 3343ac27a0ecSDave Kleikamp /* Writer: pointers */ 3344ac27a0ecSDave Kleikamp if (!partial) 3345ac27a0ecSDave Kleikamp partial = chain + k-1; 3346ac27a0ecSDave Kleikamp /* 3347ac27a0ecSDave Kleikamp * If the branch acquired continuation since we've looked at it - 3348ac27a0ecSDave Kleikamp * fine, it should all survive and (new) top doesn't belong to us. 3349ac27a0ecSDave Kleikamp */ 3350ac27a0ecSDave Kleikamp if (!partial->key && *partial->p) 3351ac27a0ecSDave Kleikamp /* Writer: end */ 3352ac27a0ecSDave Kleikamp goto no_top; 3353af5bc92dSTheodore Ts'o for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--) 3354ac27a0ecSDave Kleikamp ; 3355ac27a0ecSDave Kleikamp /* 3356ac27a0ecSDave Kleikamp * OK, we've found the last block that must survive. The rest of our 3357ac27a0ecSDave Kleikamp * branch should be detached before unlocking. However, if that rest 3358ac27a0ecSDave Kleikamp * of branch is all ours and does not grow immediately from the inode 3359ac27a0ecSDave Kleikamp * it's easier to cheat and just decrement partial->p. 3360ac27a0ecSDave Kleikamp */ 3361ac27a0ecSDave Kleikamp if (p == chain + k - 1 && p > chain) { 3362ac27a0ecSDave Kleikamp p->p--; 3363ac27a0ecSDave Kleikamp } else { 3364ac27a0ecSDave Kleikamp *top = *p->p; 3365617ba13bSMingming Cao /* Nope, don't do this in ext4.
Must leave the tree intact */ 3366ac27a0ecSDave Kleikamp #if 0 3367ac27a0ecSDave Kleikamp *p->p = 0; 3368ac27a0ecSDave Kleikamp #endif 3369ac27a0ecSDave Kleikamp } 3370ac27a0ecSDave Kleikamp /* Writer: end */ 3371ac27a0ecSDave Kleikamp 3372ac27a0ecSDave Kleikamp while (partial > p) { 3373ac27a0ecSDave Kleikamp brelse(partial->bh); 3374ac27a0ecSDave Kleikamp partial--; 3375ac27a0ecSDave Kleikamp } 3376ac27a0ecSDave Kleikamp no_top: 3377ac27a0ecSDave Kleikamp return partial; 3378ac27a0ecSDave Kleikamp } 3379ac27a0ecSDave Kleikamp 3380ac27a0ecSDave Kleikamp /* 3381ac27a0ecSDave Kleikamp * Zero a number of block pointers in either an inode or an indirect block. 3382ac27a0ecSDave Kleikamp * If we restart the transaction we must again get write access to the 3383ac27a0ecSDave Kleikamp * indirect block for further modification. 3384ac27a0ecSDave Kleikamp * 3385ac27a0ecSDave Kleikamp * We release `count' blocks on disk, but (last - first) may be greater 3386ac27a0ecSDave Kleikamp * than `count' because there can be holes in there. 3387ac27a0ecSDave Kleikamp */ 3388617ba13bSMingming Cao static void ext4_clear_blocks(handle_t *handle, struct inode *inode, 3389617ba13bSMingming Cao struct buffer_head *bh, ext4_fsblk_t block_to_free, 3390ac27a0ecSDave Kleikamp unsigned long count, __le32 *first, __le32 *last) 3391ac27a0ecSDave Kleikamp { 3392ac27a0ecSDave Kleikamp __le32 *p; 3393ac27a0ecSDave Kleikamp if (try_to_extend_transaction(handle, inode)) { 3394ac27a0ecSDave Kleikamp if (bh) { 3395617ba13bSMingming Cao BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 3396617ba13bSMingming Cao ext4_journal_dirty_metadata(handle, bh); 3397ac27a0ecSDave Kleikamp } 3398617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3399617ba13bSMingming Cao ext4_journal_test_restart(handle, inode); 3400ac27a0ecSDave Kleikamp if (bh) { 3401ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "retaking write access"); 3402617ba13bSMingming Cao ext4_journal_get_write_access(handle, bh); 3403ac27a0ecSDave Kleikamp } 3404ac27a0ecSDave Kleikamp } 3405ac27a0ecSDave Kleikamp 3406ac27a0ecSDave Kleikamp /* 3407ac27a0ecSDave Kleikamp * Any buffers which are on the journal will be in memory. We find 3408dab291afSMingming Cao * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget() 3409ac27a0ecSDave Kleikamp * on them. We've already detached each block from the file, so 3410dab291afSMingming Cao * bforget() in jbd2_journal_forget() should be safe. 3411ac27a0ecSDave Kleikamp * 3412dab291afSMingming Cao * AKPM: turn on bforget in jbd2_journal_forget()!!! 
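 *
 * (Clarifying note, inferred from the loop below: the in-memory
 * pointer is cleared (*p = 0) before ext4_forget() runs on the block
 * it named, so by the time the block is revoked nothing in this
 * indirect block still refers to it.)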
*/ 3414ac27a0ecSDave Kleikamp for (p = first; p < last; p++) { 3415ac27a0ecSDave Kleikamp u32 nr = le32_to_cpu(*p); 3416ac27a0ecSDave Kleikamp if (nr) { 34171d03ec98SAneesh Kumar K.V struct buffer_head *tbh; 3418ac27a0ecSDave Kleikamp 3419ac27a0ecSDave Kleikamp *p = 0; 34201d03ec98SAneesh Kumar K.V tbh = sb_find_get_block(inode->i_sb, nr); 34211d03ec98SAneesh Kumar K.V ext4_forget(handle, 0, inode, tbh, nr); 3422ac27a0ecSDave Kleikamp } 3423ac27a0ecSDave Kleikamp } 3424ac27a0ecSDave Kleikamp 3425c9de560dSAlex Tomas ext4_free_blocks(handle, inode, block_to_free, count, 0); 3426ac27a0ecSDave Kleikamp } 3427ac27a0ecSDave Kleikamp 3428ac27a0ecSDave Kleikamp /** 3429617ba13bSMingming Cao * ext4_free_data - free a list of data blocks 3430ac27a0ecSDave Kleikamp * @handle: handle for this transaction 3431ac27a0ecSDave Kleikamp * @inode: inode we are dealing with 3432ac27a0ecSDave Kleikamp * @this_bh: indirect buffer_head which contains *@first and *@last 3433ac27a0ecSDave Kleikamp * @first: array of block numbers 3434ac27a0ecSDave Kleikamp * @last: points immediately past the end of array 3435ac27a0ecSDave Kleikamp * 3436ac27a0ecSDave Kleikamp * We are freeing all blocks referred from that array (numbers are stored as 3437ac27a0ecSDave Kleikamp * little-endian 32-bit) and updating @inode->i_blocks appropriately. 3438ac27a0ecSDave Kleikamp * 3439ac27a0ecSDave Kleikamp * We accumulate contiguous runs of blocks to free. Conveniently, if these 3440ac27a0ecSDave Kleikamp * blocks are contiguous then releasing them at one time will only affect one 3441ac27a0ecSDave Kleikamp * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't 3442ac27a0ecSDave Kleikamp * actually use a lot of journal space. 3443ac27a0ecSDave Kleikamp * 3444ac27a0ecSDave Kleikamp * @this_bh will be %NULL if @first and @last point into the inode's direct 3445ac27a0ecSDave Kleikamp * block pointers. 3446ac27a0ecSDave Kleikamp */ 3447617ba13bSMingming Cao static void ext4_free_data(handle_t *handle, struct inode *inode, 3448ac27a0ecSDave Kleikamp struct buffer_head *this_bh, 3449ac27a0ecSDave Kleikamp __le32 *first, __le32 *last) 3450ac27a0ecSDave Kleikamp { 3451617ba13bSMingming Cao ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */ 3452ac27a0ecSDave Kleikamp unsigned long count = 0; /* Number of blocks in the run */ 3453ac27a0ecSDave Kleikamp __le32 *block_to_free_p = NULL; /* Pointer into inode/ind 3454ac27a0ecSDave Kleikamp corresponding to 3455ac27a0ecSDave Kleikamp block_to_free */ 3456617ba13bSMingming Cao ext4_fsblk_t nr; /* Current block # */ 3457ac27a0ecSDave Kleikamp __le32 *p; /* Pointer into inode/ind 3458ac27a0ecSDave Kleikamp for current block */ 3459ac27a0ecSDave Kleikamp int err; 3460ac27a0ecSDave Kleikamp 3461ac27a0ecSDave Kleikamp if (this_bh) { /* For indirect block */ 3462ac27a0ecSDave Kleikamp BUFFER_TRACE(this_bh, "get_write_access"); 3463617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, this_bh); 3464ac27a0ecSDave Kleikamp /* Important: if we can't update the indirect pointers 3465ac27a0ecSDave Kleikamp * to the blocks, we can't free them.
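 *
 * Run accumulation in the loop below, by example (made-up block
 * numbers, added as an illustration): pointers {100, 101, 102, 250,
 * 0, 251} produce two ext4_clear_blocks() calls - 100..102 is flushed
 * when 250 breaks the sequence, while the hole (*p == 0) is simply
 * skipped, so the run 250..251 survives it and is flushed by the
 * final call after the loop.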
*/ 3466ac27a0ecSDave Kleikamp if (err) 3467ac27a0ecSDave Kleikamp return; 3468ac27a0ecSDave Kleikamp } 3469ac27a0ecSDave Kleikamp 3470ac27a0ecSDave Kleikamp for (p = first; p < last; p++) { 3471ac27a0ecSDave Kleikamp nr = le32_to_cpu(*p); 3472ac27a0ecSDave Kleikamp if (nr) { 3473ac27a0ecSDave Kleikamp /* accumulate blocks to free if they're contiguous */ 3474ac27a0ecSDave Kleikamp if (count == 0) { 3475ac27a0ecSDave Kleikamp block_to_free = nr; 3476ac27a0ecSDave Kleikamp block_to_free_p = p; 3477ac27a0ecSDave Kleikamp count = 1; 3478ac27a0ecSDave Kleikamp } else if (nr == block_to_free + count) { 3479ac27a0ecSDave Kleikamp count++; 3480ac27a0ecSDave Kleikamp } else { 3481617ba13bSMingming Cao ext4_clear_blocks(handle, inode, this_bh, 3482ac27a0ecSDave Kleikamp block_to_free, 3483ac27a0ecSDave Kleikamp count, block_to_free_p, p); 3484ac27a0ecSDave Kleikamp block_to_free = nr; 3485ac27a0ecSDave Kleikamp block_to_free_p = p; 3486ac27a0ecSDave Kleikamp count = 1; 3487ac27a0ecSDave Kleikamp } 3488ac27a0ecSDave Kleikamp } 3489ac27a0ecSDave Kleikamp } 3490ac27a0ecSDave Kleikamp 3491ac27a0ecSDave Kleikamp if (count > 0) 3492617ba13bSMingming Cao ext4_clear_blocks(handle, inode, this_bh, block_to_free, 3493ac27a0ecSDave Kleikamp count, block_to_free_p, p); 3494ac27a0ecSDave Kleikamp 3495ac27a0ecSDave Kleikamp if (this_bh) { 3496617ba13bSMingming Cao BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata"); 349771dc8fbcSDuane Griffin 349871dc8fbcSDuane Griffin /* 349971dc8fbcSDuane Griffin * The buffer head should have an attached journal head at this 350071dc8fbcSDuane Griffin * point. However, if the data is corrupted and an indirect 350171dc8fbcSDuane Griffin * block pointed to itself, it would have been detached when 350271dc8fbcSDuane Griffin * the block was cleared. Check for this instead of OOPSing. 350371dc8fbcSDuane Griffin */ 350471dc8fbcSDuane Griffin if (bh2jh(this_bh)) 3505617ba13bSMingming Cao ext4_journal_dirty_metadata(handle, this_bh); 350671dc8fbcSDuane Griffin else 350771dc8fbcSDuane Griffin ext4_error(inode->i_sb, __func__, 350871dc8fbcSDuane Griffin "circular indirect block detected, " 350971dc8fbcSDuane Griffin "inode=%lu, block=%llu", 351071dc8fbcSDuane Griffin inode->i_ino, 351171dc8fbcSDuane Griffin (unsigned long long) this_bh->b_blocknr); 3512ac27a0ecSDave Kleikamp } 3513ac27a0ecSDave Kleikamp } 3514ac27a0ecSDave Kleikamp 3515ac27a0ecSDave Kleikamp /** 3516617ba13bSMingming Cao * ext4_free_branches - free an array of branches 3517ac27a0ecSDave Kleikamp * @handle: JBD handle for this transaction 3518ac27a0ecSDave Kleikamp * @inode: inode we are dealing with 3519ac27a0ecSDave Kleikamp * @parent_bh: the buffer_head which contains *@first and *@last 3520ac27a0ecSDave Kleikamp * @first: array of block numbers 3521ac27a0ecSDave Kleikamp * @last: pointer immediately past the end of array 3522ac27a0ecSDave Kleikamp * @depth: depth of the branches to free 3523ac27a0ecSDave Kleikamp * 3524ac27a0ecSDave Kleikamp * We are freeing all blocks referred from these branches (numbers are 3525ac27a0ecSDave Kleikamp * stored as little-endian 32-bit) and updating @inode->i_blocks 3526ac27a0ecSDave Kleikamp * appropriately.
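 *
 * (Shape of the recursion, sketched here for orientation: the caller
 * passes @depth 1, 2 or 3 for the IND, DIND and TIND roots; each level
 * reads the child block and recurses with @depth - 1 until the bottom
 * level, where the remaining pointers are plain data blocks handed to
 * ext4_free_data().)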
3527ac27a0ecSDave Kleikamp */ 3528617ba13bSMingming Cao static void ext4_free_branches(handle_t *handle, struct inode *inode, 3529ac27a0ecSDave Kleikamp struct buffer_head *parent_bh, 3530ac27a0ecSDave Kleikamp __le32 *first, __le32 *last, int depth) 3531ac27a0ecSDave Kleikamp { 3532617ba13bSMingming Cao ext4_fsblk_t nr; 3533ac27a0ecSDave Kleikamp __le32 *p; 3534ac27a0ecSDave Kleikamp 3535ac27a0ecSDave Kleikamp if (is_handle_aborted(handle)) 3536ac27a0ecSDave Kleikamp return; 3537ac27a0ecSDave Kleikamp 3538ac27a0ecSDave Kleikamp if (depth--) { 3539ac27a0ecSDave Kleikamp struct buffer_head *bh; 3540617ba13bSMingming Cao int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 3541ac27a0ecSDave Kleikamp p = last; 3542ac27a0ecSDave Kleikamp while (--p >= first) { 3543ac27a0ecSDave Kleikamp nr = le32_to_cpu(*p); 3544ac27a0ecSDave Kleikamp if (!nr) 3545ac27a0ecSDave Kleikamp continue; /* A hole */ 3546ac27a0ecSDave Kleikamp 3547ac27a0ecSDave Kleikamp /* Go read the buffer for the next level down */ 3548ac27a0ecSDave Kleikamp bh = sb_bread(inode->i_sb, nr); 3549ac27a0ecSDave Kleikamp 3550ac27a0ecSDave Kleikamp /* 3551ac27a0ecSDave Kleikamp * A read failure? Report error and clear slot 3552ac27a0ecSDave Kleikamp * (should be rare). 3553ac27a0ecSDave Kleikamp */ 3554ac27a0ecSDave Kleikamp if (!bh) { 3555617ba13bSMingming Cao ext4_error(inode->i_sb, "ext4_free_branches", 35562ae02107SMingming Cao "Read failure, inode=%lu, block=%llu", 3557ac27a0ecSDave Kleikamp inode->i_ino, nr); 3558ac27a0ecSDave Kleikamp continue; 3559ac27a0ecSDave Kleikamp } 3560ac27a0ecSDave Kleikamp 3561ac27a0ecSDave Kleikamp /* This zaps the entire block. Bottom up. */ 3562ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "free child branches"); 3563617ba13bSMingming Cao ext4_free_branches(handle, inode, bh, 3564ac27a0ecSDave Kleikamp (__le32 *) bh->b_data, 3565ac27a0ecSDave Kleikamp (__le32 *) bh->b_data + addr_per_block, 3566ac27a0ecSDave Kleikamp depth); 3567ac27a0ecSDave Kleikamp 3568ac27a0ecSDave Kleikamp /* 3569ac27a0ecSDave Kleikamp * We've probably journalled the indirect block several 3570ac27a0ecSDave Kleikamp * times during the truncate. But it's no longer 3571ac27a0ecSDave Kleikamp * needed and we now drop it from the transaction via 3572dab291afSMingming Cao * jbd2_journal_revoke(). 3573ac27a0ecSDave Kleikamp * 3574ac27a0ecSDave Kleikamp * That's easy if it's exclusively part of this 3575ac27a0ecSDave Kleikamp * transaction. But if it's part of the committing 3576dab291afSMingming Cao * transaction then jbd2_journal_forget() will simply 3577ac27a0ecSDave Kleikamp * brelse() it. That means that if the underlying 3578617ba13bSMingming Cao * block is reallocated in ext4_get_block(), 3579ac27a0ecSDave Kleikamp * unmap_underlying_metadata() will find this block 3580ac27a0ecSDave Kleikamp * and will try to get rid of it. damn, damn. 3581ac27a0ecSDave Kleikamp * 3582ac27a0ecSDave Kleikamp * If this block has already been committed to the 3583ac27a0ecSDave Kleikamp * journal, a revoke record will be written. And 3584ac27a0ecSDave Kleikamp * revoke records must be emitted *before* clearing 3585ac27a0ecSDave Kleikamp * this block's bit in the bitmaps. 3586ac27a0ecSDave Kleikamp */ 3587617ba13bSMingming Cao ext4_forget(handle, 1, inode, bh, bh->b_blocknr); 3588ac27a0ecSDave Kleikamp 3589ac27a0ecSDave Kleikamp /* 3590ac27a0ecSDave Kleikamp * Everything below this pointer has been 3591ac27a0ecSDave Kleikamp * released. Now let this top-of-subtree go.
3592ac27a0ecSDave Kleikamp * 3593ac27a0ecSDave Kleikamp * We want the freeing of this indirect block to be 3594ac27a0ecSDave Kleikamp * atomic in the journal with the updating of the 3595ac27a0ecSDave Kleikamp * bitmap block which owns it. So make some room in 3596ac27a0ecSDave Kleikamp * the journal. 3597ac27a0ecSDave Kleikamp * 3598ac27a0ecSDave Kleikamp * We zero the parent pointer *after* freeing its 3599ac27a0ecSDave Kleikamp * pointee in the bitmaps, so if extend_transaction() 3600ac27a0ecSDave Kleikamp * for some reason fails to put the bitmap changes and 3601ac27a0ecSDave Kleikamp * the release into the same transaction, recovery 3602ac27a0ecSDave Kleikamp * will merely complain about releasing a free block, 3603ac27a0ecSDave Kleikamp * rather than leaking blocks. 3604ac27a0ecSDave Kleikamp */ 3605ac27a0ecSDave Kleikamp if (is_handle_aborted(handle)) 3606ac27a0ecSDave Kleikamp return; 3607ac27a0ecSDave Kleikamp if (try_to_extend_transaction(handle, inode)) { 3608617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3609617ba13bSMingming Cao ext4_journal_test_restart(handle, inode); 3610ac27a0ecSDave Kleikamp } 3611ac27a0ecSDave Kleikamp 3612c9de560dSAlex Tomas ext4_free_blocks(handle, inode, nr, 1, 1); 3613ac27a0ecSDave Kleikamp 3614ac27a0ecSDave Kleikamp if (parent_bh) { 3615ac27a0ecSDave Kleikamp /* 3616ac27a0ecSDave Kleikamp * The block which we have just freed is 3617ac27a0ecSDave Kleikamp * pointed to by an indirect block: journal it 3618ac27a0ecSDave Kleikamp */ 3619ac27a0ecSDave Kleikamp BUFFER_TRACE(parent_bh, "get_write_access"); 3620617ba13bSMingming Cao if (!ext4_journal_get_write_access(handle, 3621ac27a0ecSDave Kleikamp parent_bh)){ 3622ac27a0ecSDave Kleikamp *p = 0; 3623ac27a0ecSDave Kleikamp BUFFER_TRACE(parent_bh, 3624617ba13bSMingming Cao "call ext4_journal_dirty_metadata"); 3625617ba13bSMingming Cao ext4_journal_dirty_metadata(handle, 3626ac27a0ecSDave Kleikamp parent_bh); 3627ac27a0ecSDave Kleikamp } 3628ac27a0ecSDave Kleikamp } 3629ac27a0ecSDave Kleikamp } 3630ac27a0ecSDave Kleikamp } else { 3631ac27a0ecSDave Kleikamp /* We have reached the bottom of the tree. */ 3632ac27a0ecSDave Kleikamp BUFFER_TRACE(parent_bh, "free data blocks"); 3633617ba13bSMingming Cao ext4_free_data(handle, inode, parent_bh, first, last); 3634ac27a0ecSDave Kleikamp } 3635ac27a0ecSDave Kleikamp } 3636ac27a0ecSDave Kleikamp 363791ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode) 363891ef4cafSDuane Griffin { 363991ef4cafSDuane Griffin if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 364091ef4cafSDuane Griffin return 0; 364191ef4cafSDuane Griffin if (S_ISREG(inode->i_mode)) 364291ef4cafSDuane Griffin return 1; 364391ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode)) 364491ef4cafSDuane Griffin return 1; 364591ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode)) 364691ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode); 364791ef4cafSDuane Griffin return 0; 364891ef4cafSDuane Griffin } 364991ef4cafSDuane Griffin 3650ac27a0ecSDave Kleikamp /* 3651617ba13bSMingming Cao * ext4_truncate() 3652ac27a0ecSDave Kleikamp * 3653617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3654617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3655ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 
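 *
 * ("Bottom up, right to left" below means, concretely: data blocks
 * are freed before the indirect blocks that point at them, and higher
 * file offsets before lower ones, so a crash mid-truncate can only
 * leave a tree that is longer than intended, never one with dangling
 * pointers. This restatement is editorial, not from the original.)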
3656ac27a0ecSDave Kleikamp * 3657ac27a0ecSDave Kleikamp * As we work through the truncate and commit bits of it to the journal there 3658ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3659ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3660ac27a0ecSDave Kleikamp * 3661ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3662ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3663ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3664ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3665ac27a0ecSDave Kleikamp * left-to-right works OK too). 3666ac27a0ecSDave Kleikamp * 3667ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3668ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3669ac27a0ecSDave Kleikamp * 3670ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3671617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3672ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3673617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3674617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3675ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3676617ba13bSMingming Cao * ext4_truncate() run will find them and release them. 3677ac27a0ecSDave Kleikamp */ 3678617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3679ac27a0ecSDave Kleikamp { 3680ac27a0ecSDave Kleikamp handle_t *handle; 3681617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 3682ac27a0ecSDave Kleikamp __le32 *i_data = ei->i_data; 3683617ba13bSMingming Cao int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); 3684ac27a0ecSDave Kleikamp struct address_space *mapping = inode->i_mapping; 3685725d26d3SAneesh Kumar K.V ext4_lblk_t offsets[4]; 3686ac27a0ecSDave Kleikamp Indirect chain[4]; 3687ac27a0ecSDave Kleikamp Indirect *partial; 3688ac27a0ecSDave Kleikamp __le32 nr = 0; 3689ac27a0ecSDave Kleikamp int n; 3690725d26d3SAneesh Kumar K.V ext4_lblk_t last_block; 3691ac27a0ecSDave Kleikamp unsigned blocksize = inode->i_sb->s_blocksize; 3692ac27a0ecSDave Kleikamp 369391ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3694ac27a0ecSDave Kleikamp return; 3695ac27a0ecSDave Kleikamp 36961d03ec98SAneesh Kumar K.V if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { 3697cf108bcaSJan Kara ext4_ext_truncate(inode); 36981d03ec98SAneesh Kumar K.V return; 36991d03ec98SAneesh Kumar K.V } 3700a86c6181SAlex Tomas 3701ac27a0ecSDave Kleikamp handle = start_transaction(inode); 3702cf108bcaSJan Kara if (IS_ERR(handle)) 3703ac27a0ecSDave Kleikamp return; /* AKPM: return what?
*/ 3704ac27a0ecSDave Kleikamp 3705ac27a0ecSDave Kleikamp last_block = (inode->i_size + blocksize-1) 3706617ba13bSMingming Cao >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); 3707ac27a0ecSDave Kleikamp 3708cf108bcaSJan Kara if (inode->i_size & (blocksize - 1)) 3709cf108bcaSJan Kara if (ext4_block_truncate_page(handle, mapping, inode->i_size)) 3710cf108bcaSJan Kara goto out_stop; 3711ac27a0ecSDave Kleikamp 3712617ba13bSMingming Cao n = ext4_block_to_path(inode, last_block, offsets, NULL); 3713ac27a0ecSDave Kleikamp if (n == 0) 3714ac27a0ecSDave Kleikamp goto out_stop; /* error */ 3715ac27a0ecSDave Kleikamp 3716ac27a0ecSDave Kleikamp /* 3717ac27a0ecSDave Kleikamp * OK. This truncate is going to happen. We add the inode to the 3718ac27a0ecSDave Kleikamp * orphan list, so that if this truncate spans multiple transactions, 3719ac27a0ecSDave Kleikamp * and we crash, we will resume the truncate when the filesystem 3720ac27a0ecSDave Kleikamp * recovers. It also marks the inode dirty, to catch the new size. 3721ac27a0ecSDave Kleikamp * 3722ac27a0ecSDave Kleikamp * Implication: the file must always be in a sane, consistent 3723ac27a0ecSDave Kleikamp * truncatable state while each transaction commits. 3724ac27a0ecSDave Kleikamp */ 3725617ba13bSMingming Cao if (ext4_orphan_add(handle, inode)) 3726ac27a0ecSDave Kleikamp goto out_stop; 3727ac27a0ecSDave Kleikamp 3728ac27a0ecSDave Kleikamp /* 3729632eaeabSMingming Cao * From here we block out all ext4_get_block() callers who want to 3730632eaeabSMingming Cao * modify the block allocation tree. 3731632eaeabSMingming Cao */ 3732632eaeabSMingming Cao down_write(&ei->i_data_sem); 3733b4df2030STheodore Ts'o 3734c2ea3fdeSTheodore Ts'o ext4_discard_preallocations(inode); 3735b4df2030STheodore Ts'o 3736632eaeabSMingming Cao /* 3737ac27a0ecSDave Kleikamp * The orphan list entry will now protect us from any crash which 3738ac27a0ecSDave Kleikamp * occurs before the truncate completes, so it is now safe to propagate 3739ac27a0ecSDave Kleikamp * the new, shorter inode size (held for now in i_size) into the 3740ac27a0ecSDave Kleikamp * on-disk inode. We do this via i_disksize, which is the value which 3741617ba13bSMingming Cao * ext4 *really* writes onto the disk inode. 3742ac27a0ecSDave Kleikamp */ 3743ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3744ac27a0ecSDave Kleikamp 3745ac27a0ecSDave Kleikamp if (n == 1) { /* direct blocks */ 3746617ba13bSMingming Cao ext4_free_data(handle, inode, NULL, i_data+offsets[0], 3747617ba13bSMingming Cao i_data + EXT4_NDIR_BLOCKS); 3748ac27a0ecSDave Kleikamp goto do_indirects; 3749ac27a0ecSDave Kleikamp } 3750ac27a0ecSDave Kleikamp 3751617ba13bSMingming Cao partial = ext4_find_shared(inode, n, offsets, chain, &nr); 3752ac27a0ecSDave Kleikamp /* Kill the top of shared branch (not detached) */ 3753ac27a0ecSDave Kleikamp if (nr) { 3754ac27a0ecSDave Kleikamp if (partial == chain) { 3755ac27a0ecSDave Kleikamp /* Shared branch grows from the inode */ 3756617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, 3757ac27a0ecSDave Kleikamp &nr, &nr+1, (chain+n-1) - partial); 3758ac27a0ecSDave Kleikamp *partial->p = 0; 3759ac27a0ecSDave Kleikamp /* 3760ac27a0ecSDave Kleikamp * We mark the inode dirty prior to restart, 3761ac27a0ecSDave Kleikamp * and prior to stop. No need for it here. 
3762ac27a0ecSDave Kleikamp */ 3763ac27a0ecSDave Kleikamp } else { 3764ac27a0ecSDave Kleikamp /* Shared branch grows from an indirect block */ 3765ac27a0ecSDave Kleikamp BUFFER_TRACE(partial->bh, "get_write_access"); 3766617ba13bSMingming Cao ext4_free_branches(handle, inode, partial->bh, 3767ac27a0ecSDave Kleikamp partial->p, 3768ac27a0ecSDave Kleikamp partial->p+1, (chain+n-1) - partial); 3769ac27a0ecSDave Kleikamp } 3770ac27a0ecSDave Kleikamp } 3771ac27a0ecSDave Kleikamp /* Clear the ends of indirect blocks on the shared branch */ 3772ac27a0ecSDave Kleikamp while (partial > chain) { 3773617ba13bSMingming Cao ext4_free_branches(handle, inode, partial->bh, partial->p + 1, 3774ac27a0ecSDave Kleikamp (__le32*)partial->bh->b_data+addr_per_block, 3775ac27a0ecSDave Kleikamp (chain+n-1) - partial); 3776ac27a0ecSDave Kleikamp BUFFER_TRACE(partial->bh, "call brelse"); 3777ac27a0ecSDave Kleikamp brelse (partial->bh); 3778ac27a0ecSDave Kleikamp partial--; 3779ac27a0ecSDave Kleikamp } 3780ac27a0ecSDave Kleikamp do_indirects: 3781ac27a0ecSDave Kleikamp /* Kill the remaining (whole) subtrees */ 3782ac27a0ecSDave Kleikamp switch (offsets[0]) { 3783ac27a0ecSDave Kleikamp default: 3784617ba13bSMingming Cao nr = i_data[EXT4_IND_BLOCK]; 3785ac27a0ecSDave Kleikamp if (nr) { 3786617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 3787617ba13bSMingming Cao i_data[EXT4_IND_BLOCK] = 0; 3788ac27a0ecSDave Kleikamp } 3789617ba13bSMingming Cao case EXT4_IND_BLOCK: 3790617ba13bSMingming Cao nr = i_data[EXT4_DIND_BLOCK]; 3791ac27a0ecSDave Kleikamp if (nr) { 3792617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 3793617ba13bSMingming Cao i_data[EXT4_DIND_BLOCK] = 0; 3794ac27a0ecSDave Kleikamp } 3795617ba13bSMingming Cao case EXT4_DIND_BLOCK: 3796617ba13bSMingming Cao nr = i_data[EXT4_TIND_BLOCK]; 3797ac27a0ecSDave Kleikamp if (nr) { 3798617ba13bSMingming Cao ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 3799617ba13bSMingming Cao i_data[EXT4_TIND_BLOCK] = 0; 3800ac27a0ecSDave Kleikamp } 3801617ba13bSMingming Cao case EXT4_TIND_BLOCK: 3802ac27a0ecSDave Kleikamp ; 3803ac27a0ecSDave Kleikamp } 3804ac27a0ecSDave Kleikamp 38050e855ac8SAneesh Kumar K.V up_write(&ei->i_data_sem); 3806ef7f3835SKalpak Shah inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 3807617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 3808ac27a0ecSDave Kleikamp 3809ac27a0ecSDave Kleikamp /* 3810ac27a0ecSDave Kleikamp * In a multi-transaction truncate, we only make the final transaction 3811ac27a0ecSDave Kleikamp * synchronous 3812ac27a0ecSDave Kleikamp */ 3813ac27a0ecSDave Kleikamp if (IS_SYNC(inode)) 3814ac27a0ecSDave Kleikamp handle->h_sync = 1; 3815ac27a0ecSDave Kleikamp out_stop: 3816ac27a0ecSDave Kleikamp /* 3817ac27a0ecSDave Kleikamp * If this was a simple ftruncate(), and the file will remain alive 3818ac27a0ecSDave Kleikamp * then we need to clear up the orphan record which we created above. 3819ac27a0ecSDave Kleikamp * However, if this was a real unlink then we were called by 3820617ba13bSMingming Cao * ext4_delete_inode(), and we allow that function to clean up the 3821ac27a0ecSDave Kleikamp * orphan info for us. 
3822ac27a0ecSDave Kleikamp */ 3823ac27a0ecSDave Kleikamp if (inode->i_nlink) 3824617ba13bSMingming Cao ext4_orphan_del(handle, inode); 3825ac27a0ecSDave Kleikamp 3826617ba13bSMingming Cao ext4_journal_stop(handle); 3827ac27a0ecSDave Kleikamp } 3828ac27a0ecSDave Kleikamp 3829ac27a0ecSDave Kleikamp /* 3830617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3831ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3832ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3833ac27a0ecSDave Kleikamp * inode. 3834ac27a0ecSDave Kleikamp */ 3835617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3836617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3837ac27a0ecSDave Kleikamp { 3838240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3839ac27a0ecSDave Kleikamp struct buffer_head *bh; 3840240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3841240799cdSTheodore Ts'o ext4_fsblk_t block; 3842240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3843ac27a0ecSDave Kleikamp 3844240799cdSTheodore Ts'o iloc->bh = 0; 3845240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3846ac27a0ecSDave Kleikamp return -EIO; 3847ac27a0ecSDave Kleikamp 3848240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3849240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3850240799cdSTheodore Ts'o if (!gdp) 3851240799cdSTheodore Ts'o return -EIO; 3852240799cdSTheodore Ts'o 3853240799cdSTheodore Ts'o /* 3854240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 3855240799cdSTheodore Ts'o */ 3856240799cdSTheodore Ts'o inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb)); 3857240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 3858240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 3859240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3860240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3861240799cdSTheodore Ts'o 3862240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 3863ac27a0ecSDave Kleikamp if (!bh) { 3864240799cdSTheodore Ts'o ext4_error(sb, "ext4_get_inode_loc", "unable to read " 3865240799cdSTheodore Ts'o "inode block - inode=%lu, block=%llu", 3866ac27a0ecSDave Kleikamp inode->i_ino, block); 3867ac27a0ecSDave Kleikamp return -EIO; 3868ac27a0ecSDave Kleikamp } 3869ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3870ac27a0ecSDave Kleikamp lock_buffer(bh); 38719c83a923SHidehiro Kawai 38729c83a923SHidehiro Kawai /* 38739c83a923SHidehiro Kawai * If the buffer has the write error flag, we have failed 38749c83a923SHidehiro Kawai * to write out another inode in the same block. In this 38759c83a923SHidehiro Kawai * case, we don't have to read the block because we may 38769c83a923SHidehiro Kawai * read the old inode data successfully. 
38779c83a923SHidehiro Kawai */ 38789c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 38799c83a923SHidehiro Kawai set_buffer_uptodate(bh); 38809c83a923SHidehiro Kawai 3881ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3882ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3883ac27a0ecSDave Kleikamp unlock_buffer(bh); 3884ac27a0ecSDave Kleikamp goto has_buffer; 3885ac27a0ecSDave Kleikamp } 3886ac27a0ecSDave Kleikamp 3887ac27a0ecSDave Kleikamp /* 3888ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3889ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3890ac27a0ecSDave Kleikamp * block. 3891ac27a0ecSDave Kleikamp */ 3892ac27a0ecSDave Kleikamp if (in_mem) { 3893ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3894240799cdSTheodore Ts'o int i, start; 3895ac27a0ecSDave Kleikamp 3896240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 3897ac27a0ecSDave Kleikamp 3898ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? */ 3899240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3900ac27a0ecSDave Kleikamp if (!bitmap_bh) 3901ac27a0ecSDave Kleikamp goto make_io; 3902ac27a0ecSDave Kleikamp 3903ac27a0ecSDave Kleikamp /* 3904ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3905ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3906ac27a0ecSDave Kleikamp * of one, so skip it. 3907ac27a0ecSDave Kleikamp */ 3908ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3909ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3910ac27a0ecSDave Kleikamp goto make_io; 3911ac27a0ecSDave Kleikamp } 3912240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 3913ac27a0ecSDave Kleikamp if (i == inode_offset) 3914ac27a0ecSDave Kleikamp continue; 3915617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3916ac27a0ecSDave Kleikamp break; 3917ac27a0ecSDave Kleikamp } 3918ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3919240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 3920ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3921ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3922ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3923ac27a0ecSDave Kleikamp unlock_buffer(bh); 3924ac27a0ecSDave Kleikamp goto has_buffer; 3925ac27a0ecSDave Kleikamp } 3926ac27a0ecSDave Kleikamp } 3927ac27a0ecSDave Kleikamp 3928ac27a0ecSDave Kleikamp make_io: 3929ac27a0ecSDave Kleikamp /* 3930240799cdSTheodore Ts'o * If we need to do any I/O, try to pre-readahead extra 3931240799cdSTheodore Ts'o * blocks from the inode table. 
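 *
 * (Worked example with assumed numbers: if s_inode_readahead_blks is
 * 32 and this inode's block is 1000, the window starts at
 * 1000 & ~31 == 992 and ends at 992 + 32, clamped below by the start
 * of the group's inode table and above by its last in-use table
 * block, as computed just after this comment.)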
3932240799cdSTheodore Ts'o */ 3933240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 3934240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 3935240799cdSTheodore Ts'o unsigned num; 3936240799cdSTheodore Ts'o 3937240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 3938240799cdSTheodore Ts'o /* Make sure s_inode_readahead_blks is a power of 2 */ 3939240799cdSTheodore Ts'o while (EXT4_SB(sb)->s_inode_readahead_blks & 3940240799cdSTheodore Ts'o (EXT4_SB(sb)->s_inode_readahead_blks-1)) 3941240799cdSTheodore Ts'o EXT4_SB(sb)->s_inode_readahead_blks = 3942240799cdSTheodore Ts'o (EXT4_SB(sb)->s_inode_readahead_blks & 3943240799cdSTheodore Ts'o (EXT4_SB(sb)->s_inode_readahead_blks-1)); 3944240799cdSTheodore Ts'o b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 3945240799cdSTheodore Ts'o if (table > b) 3946240799cdSTheodore Ts'o b = table; 3947240799cdSTheodore Ts'o end = b + EXT4_SB(sb)->s_inode_readahead_blks; 3948240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 3949240799cdSTheodore Ts'o if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3950240799cdSTheodore Ts'o EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 3951240799cdSTheodore Ts'o num -= le16_to_cpu(gdp->bg_itable_unused); 3952240799cdSTheodore Ts'o table += num / inodes_per_block; 3953240799cdSTheodore Ts'o if (end > table) 3954240799cdSTheodore Ts'o end = table; 3955240799cdSTheodore Ts'o while (b <= end) 3956240799cdSTheodore Ts'o sb_breadahead(sb, b++); 3957240799cdSTheodore Ts'o } 3958240799cdSTheodore Ts'o 3959240799cdSTheodore Ts'o /* 3960ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 3961ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 3962ac27a0ecSDave Kleikamp * Read the block from disk. 3963ac27a0ecSDave Kleikamp */ 3964ac27a0ecSDave Kleikamp get_bh(bh); 3965ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 3966ac27a0ecSDave Kleikamp submit_bh(READ_META, bh); 3967ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3968ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3969240799cdSTheodore Ts'o ext4_error(sb, __func__, 3970240799cdSTheodore Ts'o "unable to read inode block - inode=%lu, " 3971240799cdSTheodore Ts'o "block=%llu", inode->i_ino, block); 3972ac27a0ecSDave Kleikamp brelse(bh); 3973ac27a0ecSDave Kleikamp return -EIO; 3974ac27a0ecSDave Kleikamp } 3975ac27a0ecSDave Kleikamp } 3976ac27a0ecSDave Kleikamp has_buffer: 3977ac27a0ecSDave Kleikamp iloc->bh = bh; 3978ac27a0ecSDave Kleikamp return 0; 3979ac27a0ecSDave Kleikamp } 3980ac27a0ecSDave Kleikamp 3981617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 3982ac27a0ecSDave Kleikamp { 3983ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. 
*/ 3984617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 3985617ba13bSMingming Cao !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR)); 3986ac27a0ecSDave Kleikamp } 3987ac27a0ecSDave Kleikamp 3988617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 3989ac27a0ecSDave Kleikamp { 3990617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 3991ac27a0ecSDave Kleikamp 3992ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 3993617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 3994ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 3995617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 3996ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 3997617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 3998ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 3999617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 4000ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 4001617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 4002ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 4003ac27a0ecSDave Kleikamp } 4004ac27a0ecSDave Kleikamp 4005ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 4006ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 4007ff9ddf7eSJan Kara { 4008ff9ddf7eSJan Kara unsigned int flags = ei->vfs_inode.i_flags; 4009ff9ddf7eSJan Kara 4010ff9ddf7eSJan Kara ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 4011ff9ddf7eSJan Kara EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL); 4012ff9ddf7eSJan Kara if (flags & S_SYNC) 4013ff9ddf7eSJan Kara ei->i_flags |= EXT4_SYNC_FL; 4014ff9ddf7eSJan Kara if (flags & S_APPEND) 4015ff9ddf7eSJan Kara ei->i_flags |= EXT4_APPEND_FL; 4016ff9ddf7eSJan Kara if (flags & S_IMMUTABLE) 4017ff9ddf7eSJan Kara ei->i_flags |= EXT4_IMMUTABLE_FL; 4018ff9ddf7eSJan Kara if (flags & S_NOATIME) 4019ff9ddf7eSJan Kara ei->i_flags |= EXT4_NOATIME_FL; 4020ff9ddf7eSJan Kara if (flags & S_DIRSYNC) 4021ff9ddf7eSJan Kara ei->i_flags |= EXT4_DIRSYNC_FL; 4022ff9ddf7eSJan Kara } 40230fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 40240fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 40250fc1b451SAneesh Kumar K.V { 40260fc1b451SAneesh Kumar K.V blkcnt_t i_blocks ; 40278180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 40288180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 40290fc1b451SAneesh Kumar K.V 40300fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 40310fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 40320fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 40330fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 40340fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 40358180a562SAneesh Kumar K.V if (ei->i_flags & EXT4_HUGE_FILE_FL) { 40368180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 40378180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 40388180a562SAneesh Kumar K.V } else { 40390fc1b451SAneesh Kumar K.V return i_blocks; 40408180a562SAneesh Kumar K.V } 40410fc1b451SAneesh Kumar K.V } else { 40420fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 40430fc1b451SAneesh Kumar K.V } 40440fc1b451SAneesh Kumar K.V } 4045ff9ddf7eSJan Kara 40461d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 4047ac27a0ecSDave Kleikamp { 4048617ba13bSMingming Cao struct ext4_iloc iloc; 4049617ba13bSMingming Cao struct ext4_inode 
*raw_inode; 40501d1fe1eeSDavid Howells struct ext4_inode_info *ei; 4051ac27a0ecSDave Kleikamp struct buffer_head *bh; 40521d1fe1eeSDavid Howells struct inode *inode; 40531d1fe1eeSDavid Howells long ret; 4054ac27a0ecSDave Kleikamp int block; 4055ac27a0ecSDave Kleikamp 40561d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 40571d1fe1eeSDavid Howells if (!inode) 40581d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 40591d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 40601d1fe1eeSDavid Howells return inode; 40611d1fe1eeSDavid Howells 40621d1fe1eeSDavid Howells ei = EXT4_I(inode); 406303010a33STheodore Ts'o #ifdef CONFIG_EXT4_FS_POSIX_ACL 4064617ba13bSMingming Cao ei->i_acl = EXT4_ACL_NOT_CACHED; 4065617ba13bSMingming Cao ei->i_default_acl = EXT4_ACL_NOT_CACHED; 4066ac27a0ecSDave Kleikamp #endif 4067ac27a0ecSDave Kleikamp 40681d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 40691d1fe1eeSDavid Howells if (ret < 0) 4070ac27a0ecSDave Kleikamp goto bad_inode; 4071ac27a0ecSDave Kleikamp bh = iloc.bh; 4072617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 4073ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 4074ac27a0ecSDave Kleikamp inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 4075ac27a0ecSDave Kleikamp inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4076ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 4077ac27a0ecSDave Kleikamp inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 4078ac27a0ecSDave Kleikamp inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4079ac27a0ecSDave Kleikamp } 4080ac27a0ecSDave Kleikamp inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 4081ac27a0ecSDave Kleikamp 4082ac27a0ecSDave Kleikamp ei->i_state = 0; 4083ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 4084ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4085ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 4086ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes; 4087ac27a0ecSDave Kleikamp * the test is the same one that e2fsck uses. 4088ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 4089ac27a0ecSDave Kleikamp */ 4090ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 4091ac27a0ecSDave Kleikamp if (inode->i_mode == 0 || 4092617ba13bSMingming Cao !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 4093ac27a0ecSDave Kleikamp /* this inode is deleted */ 4094ac27a0ecSDave Kleikamp brelse(bh); 40951d1fe1eeSDavid Howells ret = -ESTALE; 4096ac27a0ecSDave Kleikamp goto bad_inode; 4097ac27a0ecSDave Kleikamp } 4098ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 4099ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 4100ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 4101ac27a0ecSDave Kleikamp * the process of deleting those.
*/ 4102ac27a0ecSDave Kleikamp } 4103ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 41040fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 41057973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 41069b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 4107a48380f7SAneesh Kumar K.V cpu_to_le32(EXT4_OS_HURD)) { 4108a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 4109a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4110ac27a0ecSDave Kleikamp } 4111a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 4112ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 4113ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4114ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 4115ac27a0ecSDave Kleikamp /* 4116ac27a0ecSDave Kleikamp * NOTE! The in-memory inode i_data array is in little-endian order 4117ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 4118ac27a0ecSDave Kleikamp */ 4119617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 4120ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 4121ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 4122ac27a0ecSDave Kleikamp 41230040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4124ac27a0ecSDave Kleikamp ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4125617ba13bSMingming Cao if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4126e5d2861fSKirill Korotaev EXT4_INODE_SIZE(inode->i_sb)) { 4127e5d2861fSKirill Korotaev brelse(bh); 41281d1fe1eeSDavid Howells ret = -EIO; 4129ac27a0ecSDave Kleikamp goto bad_inode; 4130e5d2861fSKirill Korotaev } 4131ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 4132ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. 
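 *
 * (Layout sketch, assuming the common 256-byte on-disk inode: the
 * first EXT4_GOOD_OLD_INODE_SIZE (128) bytes are the classic fields,
 * i_extra_isize counts the extended fields that follow, and any
 * in-inode xattrs - announced by EXT4_XATTR_MAGIC - begin at offset
 * 128 + i_extra_isize, which is exactly where the else-branch below
 * looks for the magic.)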
*/ 4133617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 4134617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 4135ac27a0ecSDave Kleikamp } else { 4136ac27a0ecSDave Kleikamp __le32 *magic = (void *)raw_inode + 4137617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE + 4138ac27a0ecSDave Kleikamp ei->i_extra_isize; 4139617ba13bSMingming Cao if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 4140617ba13bSMingming Cao ei->i_state |= EXT4_STATE_XATTR; 4141ac27a0ecSDave Kleikamp } 4142ac27a0ecSDave Kleikamp } else 4143ac27a0ecSDave Kleikamp ei->i_extra_isize = 0; 4144ac27a0ecSDave Kleikamp 4145ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4146ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4147ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4148ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4149ef7f3835SKalpak Shah 415025ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 415125ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 415225ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 415325ec56b5SJean Noel Cordenner inode->i_version |= 415425ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 415525ec56b5SJean Noel Cordenner } 415625ec56b5SJean Noel Cordenner 4157ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 4158617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 4159617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 4160617ba13bSMingming Cao ext4_set_aops(inode); 4161ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 4162617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 4163617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 4164ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 4165617ba13bSMingming Cao if (ext4_inode_is_fast_symlink(inode)) 4166617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 4167ac27a0ecSDave Kleikamp else { 4168617ba13bSMingming Cao inode->i_op = &ext4_symlink_inode_operations; 4169617ba13bSMingming Cao ext4_set_aops(inode); 4170ac27a0ecSDave Kleikamp } 4171ac27a0ecSDave Kleikamp } else { 4172617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 4173ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 4174ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4175ac27a0ecSDave Kleikamp old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4176ac27a0ecSDave Kleikamp else 4177ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4178ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4179ac27a0ecSDave Kleikamp } 4180ac27a0ecSDave Kleikamp brelse(iloc.bh); 4181617ba13bSMingming Cao ext4_set_inode_flags(inode); 41821d1fe1eeSDavid Howells unlock_new_inode(inode); 41831d1fe1eeSDavid Howells return inode; 4184ac27a0ecSDave Kleikamp 4185ac27a0ecSDave Kleikamp bad_inode: 41861d1fe1eeSDavid Howells iget_failed(inode); 41871d1fe1eeSDavid Howells return ERR_PTR(ret); 4188ac27a0ecSDave Kleikamp } 4189ac27a0ecSDave Kleikamp 41900fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 41910fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 41920fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 41930fc1b451SAneesh Kumar K.V { 41940fc1b451SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 41950fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks; 41960fc1b451SAneesh Kumar K.V struct 
super_block *sb = inode->i_sb; 41970fc1b451SAneesh Kumar K.V int err = 0; 41980fc1b451SAneesh Kumar K.V 41990fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) { 42000fc1b451SAneesh Kumar K.V /* 42010fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 32 bit variable 42020fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 42030fc1b451SAneesh Kumar K.V */ 42048180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 42050fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0; 42068180a562SAneesh Kumar K.V ei->i_flags &= ~EXT4_HUGE_FILE_FL; 42070fc1b451SAneesh Kumar K.V } else if (i_blocks <= 0xffffffffffffULL) { 42080fc1b451SAneesh Kumar K.V /* 42090fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable 42100fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 42110fc1b451SAneesh Kumar K.V */ 42120fc1b451SAneesh Kumar K.V err = ext4_update_rocompat_feature(handle, sb, 42130fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE); 42140fc1b451SAneesh Kumar K.V if (err) 42150fc1b451SAneesh Kumar K.V goto err_out; 42160fc1b451SAneesh Kumar K.V /* i_block is stored in the split 48 bit fields */ 42178180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 42180fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 42198180a562SAneesh Kumar K.V ei->i_flags &= ~EXT4_HUGE_FILE_FL; 42200fc1b451SAneesh Kumar K.V } else { 42218180a562SAneesh Kumar K.V /* 42228180a562SAneesh Kumar K.V * i_blocks should be represented in a 48 bit variable 42238180a562SAneesh Kumar K.V * as multiple of file system block size 42248180a562SAneesh Kumar K.V */ 42258180a562SAneesh Kumar K.V err = ext4_update_rocompat_feature(handle, sb, 42268180a562SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE); 42278180a562SAneesh Kumar K.V if (err) 42288180a562SAneesh Kumar K.V goto err_out; 42298180a562SAneesh Kumar K.V ei->i_flags |= EXT4_HUGE_FILE_FL; 42308180a562SAneesh Kumar K.V /* i_block is stored in file system block size */ 42318180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9); 42328180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 42338180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 42340fc1b451SAneesh Kumar K.V } 42350fc1b451SAneesh Kumar K.V err_out: 42360fc1b451SAneesh Kumar K.V return err; 42370fc1b451SAneesh Kumar K.V } 42380fc1b451SAneesh Kumar K.V 4239ac27a0ecSDave Kleikamp /* 4240ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the 4241ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the 4242ac27a0ecSDave Kleikamp * buffer_head in the inode location struct. 4243ac27a0ecSDave Kleikamp * 4244ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh. 4245ac27a0ecSDave Kleikamp */ 4246617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle, 4247ac27a0ecSDave Kleikamp struct inode *inode, 4248617ba13bSMingming Cao struct ext4_iloc *iloc) 4249ac27a0ecSDave Kleikamp { 4250617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4251617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 4252ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh; 4253ac27a0ecSDave Kleikamp int err = 0, rc, block; 4254ac27a0ecSDave Kleikamp 4255ac27a0ecSDave Kleikamp /* For fields not tracked in the in-memory inode, 4256ac27a0ecSDave Kleikamp * initialise them to zero for new inodes.
*/ 4257617ba13bSMingming Cao if (ei->i_state & EXT4_STATE_NEW) 4258617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4259ac27a0ecSDave Kleikamp 4260ff9ddf7eSJan Kara ext4_get_inode_flags(ei); 4261ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode); 4262ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 4263ac27a0ecSDave Kleikamp raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); 4264ac27a0ecSDave Kleikamp raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); 4265ac27a0ecSDave Kleikamp /* 4266ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels. Otherwise, old inodes get 4267ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact 4268ac27a0ecSDave Kleikamp */ 4269ac27a0ecSDave Kleikamp if (!ei->i_dtime) { 4270ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 4271ac27a0ecSDave Kleikamp cpu_to_le16(high_16_bits(inode->i_uid)); 4272ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 4273ac27a0ecSDave Kleikamp cpu_to_le16(high_16_bits(inode->i_gid)); 4274ac27a0ecSDave Kleikamp } else { 4275ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4276ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4277ac27a0ecSDave Kleikamp } 4278ac27a0ecSDave Kleikamp } else { 4279ac27a0ecSDave Kleikamp raw_inode->i_uid_low = 4280ac27a0ecSDave Kleikamp cpu_to_le16(fs_high2lowuid(inode->i_uid)); 4281ac27a0ecSDave Kleikamp raw_inode->i_gid_low = 4282ac27a0ecSDave Kleikamp cpu_to_le16(fs_high2lowgid(inode->i_gid)); 4283ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4284ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4285ac27a0ecSDave Kleikamp } 4286ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4287ef7f3835SKalpak Shah 4288ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4289ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4290ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4291ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4292ef7f3835SKalpak Shah 42930fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 42940fc1b451SAneesh Kumar K.V goto out_brelse; 4295ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4296267e4db9SAneesh Kumar K.V /* clear the migrate flag in the raw_inode */ 4297267e4db9SAneesh Kumar K.V raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE); 42989b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 42999b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 4300a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 4301a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 43027973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4303a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 4304ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 4305ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 4306617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4307617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4308617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 4309617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4310ac27a0ecSDave Kleikamp /* If this is the first large file 4311ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
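 *
 * (Why a flag: LARGE_FILE is an RO_COMPAT feature, so kernels that
 * predate it will still mount the filesystem read-only instead of
 * misreading the high bits of i_size - an editorial note on the
 * intent, not text from the original.)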
4312ac27a0ecSDave Kleikamp */ 4313617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, 4314617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 4315ac27a0ecSDave Kleikamp if (err) 4316ac27a0ecSDave Kleikamp goto out_brelse; 4317617ba13bSMingming Cao ext4_update_dynamic_rev(sb); 4318617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb, 4319617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 4320ac27a0ecSDave Kleikamp sb->s_dirt = 1; 4321ac27a0ecSDave Kleikamp handle->h_sync = 1; 4322617ba13bSMingming Cao err = ext4_journal_dirty_metadata(handle, 4323617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 4324ac27a0ecSDave Kleikamp } 4325ac27a0ecSDave Kleikamp } 4326ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4327ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 4328ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) { 4329ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 4330ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev)); 4331ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0; 4332ac27a0ecSDave Kleikamp } else { 4333ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0; 4334ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 4335ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev)); 4336ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0; 4337ac27a0ecSDave Kleikamp } 4338617ba13bSMingming Cao } else for (block = 0; block < EXT4_N_BLOCKS; block++) 4339ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block]; 4340ac27a0ecSDave Kleikamp 434125ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 434225ec56b5SJean Noel Cordenner if (ei->i_extra_isize) { 434325ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 434425ec56b5SJean Noel Cordenner raw_inode->i_version_hi = 434525ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32); 4346ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 434725ec56b5SJean Noel Cordenner } 434825ec56b5SJean Noel Cordenner 4349ac27a0ecSDave Kleikamp 4350617ba13bSMingming Cao BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata"); 4351617ba13bSMingming Cao rc = ext4_journal_dirty_metadata(handle, bh); 4352ac27a0ecSDave Kleikamp if (!err) 4353ac27a0ecSDave Kleikamp err = rc; 4354617ba13bSMingming Cao ei->i_state &= ~EXT4_STATE_NEW; 4355ac27a0ecSDave Kleikamp 4356ac27a0ecSDave Kleikamp out_brelse: 4357ac27a0ecSDave Kleikamp brelse(bh); 4358617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4359ac27a0ecSDave Kleikamp return err; 4360ac27a0ecSDave Kleikamp } 4361ac27a0ecSDave Kleikamp 4362ac27a0ecSDave Kleikamp /* 4363617ba13bSMingming Cao * ext4_write_inode() 4364ac27a0ecSDave Kleikamp * 4365ac27a0ecSDave Kleikamp * We are called from a few places: 4366ac27a0ecSDave Kleikamp * 4367ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files. 4368ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running 4369ac27a0ecSDave Kleikamp * transaction to commit. 4370ac27a0ecSDave Kleikamp * 4371ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such. 4372ac27a0ecSDave Kleikamp * We wait on commit, if told to. 4373ac27a0ecSDave Kleikamp * 4374ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true) 4375ac27a0ecSDave Kleikamp * Here we simply return. We can't afford to block kswapd on the 4376ac27a0ecSDave Kleikamp * journal commit.
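 *
 * (Inferred rationale: kswapd is reclaiming memory, and a journal
 * commit may itself need memory to make progress, so blocking here
 * could deadlock reclaim - hence the early PF_MEMALLOC return in the
 * function body.)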
4377ac27a0ecSDave Kleikamp *
4378ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything,
4379ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in
4380617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
4381ac27a0ecSDave Kleikamp * knfsd.
4382ac27a0ecSDave Kleikamp *
4383ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the
4384ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in
4385ac27a0ecSDave Kleikamp * which we are interested.
4386ac27a0ecSDave Kleikamp *
4387ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code:
4388ac27a0ecSDave Kleikamp *
4389ac27a0ecSDave Kleikamp * mark_inode_dirty(inode)
4390ac27a0ecSDave Kleikamp * stuff();
4391ac27a0ecSDave Kleikamp * inode->i_size = expr;
4392ac27a0ecSDave Kleikamp *
4393ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while
4394ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode
4395ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list.
4396ac27a0ecSDave Kleikamp */
4397617ba13bSMingming Cao int ext4_write_inode(struct inode *inode, int wait)
4398ac27a0ecSDave Kleikamp {
4399ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC)
4400ac27a0ecSDave Kleikamp return 0;
4401ac27a0ecSDave Kleikamp 
4402617ba13bSMingming Cao if (ext4_journal_current_handle()) {
4403b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4404ac27a0ecSDave Kleikamp dump_stack();
4405ac27a0ecSDave Kleikamp return -EIO;
4406ac27a0ecSDave Kleikamp }
4407ac27a0ecSDave Kleikamp 
4408ac27a0ecSDave Kleikamp if (!wait)
4409ac27a0ecSDave Kleikamp return 0;
4410ac27a0ecSDave Kleikamp 
4411617ba13bSMingming Cao return ext4_force_commit(inode->i_sb);
4412ac27a0ecSDave Kleikamp }
4413ac27a0ecSDave Kleikamp 
4414ac27a0ecSDave Kleikamp /*
4415617ba13bSMingming Cao * ext4_setattr()
4416ac27a0ecSDave Kleikamp *
4417ac27a0ecSDave Kleikamp * Called from notify_change.
4418ac27a0ecSDave Kleikamp *
4419ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as
4420ac27a0ecSDave Kleikamp * possible. In particular, we want to make sure that when the VFS
4421ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify
4422ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of
4423ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any
4424ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on
4425ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will
4426ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will
4427ac27a0ecSDave Kleikamp * leave these blocks visible to the user.)
4428ac27a0ecSDave Kleikamp *
4429678aaf48SJan Kara * Another thing we have to assure is that if we are in ordered mode
4430678aaf48SJan Kara * and the inode is still attached to the committing transaction, we
4431678aaf48SJan Kara * must start writeout of all the dirty pages which are being truncated.
4432678aaf48SJan Kara * This way we are sure that all the data written in the previous
4433678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under
4434678aaf48SJan Kara * writeback).
4435678aaf48SJan Kara * 4436678aaf48SJan Kara * Called with inode->i_mutex down. 4437ac27a0ecSDave Kleikamp */ 4438617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr) 4439ac27a0ecSDave Kleikamp { 4440ac27a0ecSDave Kleikamp struct inode *inode = dentry->d_inode; 4441ac27a0ecSDave Kleikamp int error, rc = 0; 4442ac27a0ecSDave Kleikamp const unsigned int ia_valid = attr->ia_valid; 4443ac27a0ecSDave Kleikamp 4444ac27a0ecSDave Kleikamp error = inode_change_ok(inode, attr); 4445ac27a0ecSDave Kleikamp if (error) 4446ac27a0ecSDave Kleikamp return error; 4447ac27a0ecSDave Kleikamp 4448ac27a0ecSDave Kleikamp if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 4449ac27a0ecSDave Kleikamp (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 4450ac27a0ecSDave Kleikamp handle_t *handle; 4451ac27a0ecSDave Kleikamp 4452ac27a0ecSDave Kleikamp /* (user+group)*(old+new) structure, inode write (sb, 4453ac27a0ecSDave Kleikamp * inode block, ? - but truncate inode update has it) */ 4454617ba13bSMingming Cao handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+ 4455617ba13bSMingming Cao EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3); 4456ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4457ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4458ac27a0ecSDave Kleikamp goto err_out; 4459ac27a0ecSDave Kleikamp } 4460ac27a0ecSDave Kleikamp error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0; 4461ac27a0ecSDave Kleikamp if (error) { 4462617ba13bSMingming Cao ext4_journal_stop(handle); 4463ac27a0ecSDave Kleikamp return error; 4464ac27a0ecSDave Kleikamp } 4465ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4466ac27a0ecSDave Kleikamp * one transaction */ 4467ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4468ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4469ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4470ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4471617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4472617ba13bSMingming Cao ext4_journal_stop(handle); 4473ac27a0ecSDave Kleikamp } 4474ac27a0ecSDave Kleikamp 4475e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4476e2b46574SEric Sandeen if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) { 4477e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4478e2b46574SEric Sandeen 4479e2b46574SEric Sandeen if (attr->ia_size > sbi->s_bitmap_maxbytes) { 4480e2b46574SEric Sandeen error = -EFBIG; 4481e2b46574SEric Sandeen goto err_out; 4482e2b46574SEric Sandeen } 4483e2b46574SEric Sandeen } 4484e2b46574SEric Sandeen } 4485e2b46574SEric Sandeen 4486ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4487ac27a0ecSDave Kleikamp attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { 4488ac27a0ecSDave Kleikamp handle_t *handle; 4489ac27a0ecSDave Kleikamp 4490617ba13bSMingming Cao handle = ext4_journal_start(inode, 3); 4491ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4492ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4493ac27a0ecSDave Kleikamp goto err_out; 4494ac27a0ecSDave Kleikamp } 4495ac27a0ecSDave Kleikamp 4496617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 4497617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 4498617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 4499ac27a0ecSDave Kleikamp if (!error) 4500ac27a0ecSDave Kleikamp error = rc; 4501617ba13bSMingming Cao ext4_journal_stop(handle); 4502678aaf48SJan Kara 4503678aaf48SJan Kara if (ext4_should_order_data(inode)) { 
4504678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode,
4505678aaf48SJan Kara attr->ia_size);
4506678aaf48SJan Kara if (error) {
4507678aaf48SJan Kara /* Do as much error cleanup as possible */
4508678aaf48SJan Kara handle = ext4_journal_start(inode, 3);
4509678aaf48SJan Kara if (IS_ERR(handle)) {
4510678aaf48SJan Kara ext4_orphan_del(NULL, inode);
4511678aaf48SJan Kara goto err_out;
4512678aaf48SJan Kara }
4513678aaf48SJan Kara ext4_orphan_del(handle, inode);
4514678aaf48SJan Kara ext4_journal_stop(handle);
4515678aaf48SJan Kara goto err_out;
4516678aaf48SJan Kara }
4517678aaf48SJan Kara }
4518ac27a0ecSDave Kleikamp }
4519ac27a0ecSDave Kleikamp 
4520ac27a0ecSDave Kleikamp rc = inode_setattr(inode, attr);
4521ac27a0ecSDave Kleikamp 
4522617ba13bSMingming Cao /* If inode_setattr's call to ext4_truncate failed to get a
4523ac27a0ecSDave Kleikamp * transaction handle at all, we need to clean up the in-core
4524ac27a0ecSDave Kleikamp * orphan list manually. */
4525ac27a0ecSDave Kleikamp if (inode->i_nlink)
4526617ba13bSMingming Cao ext4_orphan_del(NULL, inode);
4527ac27a0ecSDave Kleikamp 
4528ac27a0ecSDave Kleikamp if (!rc && (ia_valid & ATTR_MODE))
4529617ba13bSMingming Cao rc = ext4_acl_chmod(inode);
4530ac27a0ecSDave Kleikamp 
4531ac27a0ecSDave Kleikamp err_out:
4532617ba13bSMingming Cao ext4_std_error(inode->i_sb, error);
4533ac27a0ecSDave Kleikamp if (!error)
4534ac27a0ecSDave Kleikamp error = rc;
4535ac27a0ecSDave Kleikamp return error;
4536ac27a0ecSDave Kleikamp }
4537ac27a0ecSDave Kleikamp 
45383e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
45393e3398a0SMingming Cao struct kstat *stat)
45403e3398a0SMingming Cao {
45413e3398a0SMingming Cao struct inode *inode;
45423e3398a0SMingming Cao unsigned long delalloc_blocks;
45433e3398a0SMingming Cao 
45443e3398a0SMingming Cao inode = dentry->d_inode;
45453e3398a0SMingming Cao generic_fillattr(inode, stat);
45463e3398a0SMingming Cao 
45473e3398a0SMingming Cao /*
45483e3398a0SMingming Cao * We can't update i_blocks if the block allocation is delayed;
45493e3398a0SMingming Cao * otherwise, in the case of a system crash before the real block
45503e3398a0SMingming Cao * allocation is done, we would have i_blocks inconsistent with
45513e3398a0SMingming Cao * the on-disk file blocks.
45523e3398a0SMingming Cao * We always keep i_blocks updated together with the real
45533e3398a0SMingming Cao * allocation. But so as not to confuse userspace, stat
45543e3398a0SMingming Cao * will return the blocks that include the delayed allocation
45553e3398a0SMingming Cao * blocks for this file.
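 *
 * For example (illustration, worked from the conversion done below):
 * with a 4k block size s_blocksize_bits is 12, so each delayed
 * allocation block adds (1 << 12) >> 9 = 8 512-byte sectors to
 * stat->blocks.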
45563e3398a0SMingming Cao */ 45573e3398a0SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 45583e3398a0SMingming Cao delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; 45593e3398a0SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 45603e3398a0SMingming Cao 45613e3398a0SMingming Cao stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 45623e3398a0SMingming Cao return 0; 45633e3398a0SMingming Cao } 4564ac27a0ecSDave Kleikamp 4565a02908f1SMingming Cao static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, 4566a02908f1SMingming Cao int chunk) 4567ac27a0ecSDave Kleikamp { 4568a02908f1SMingming Cao int indirects; 4569ac27a0ecSDave Kleikamp 4570a02908f1SMingming Cao /* if nrblocks are contiguous */ 4571a02908f1SMingming Cao if (chunk) { 4572a02908f1SMingming Cao /* 4573a02908f1SMingming Cao * With N contiguous data blocks, it need at most 4574a02908f1SMingming Cao * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks 4575a02908f1SMingming Cao * 2 dindirect blocks 4576a02908f1SMingming Cao * 1 tindirect block 4577a02908f1SMingming Cao */ 4578a02908f1SMingming Cao indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); 4579a02908f1SMingming Cao return indirects + 3; 4580a02908f1SMingming Cao } 4581a02908f1SMingming Cao /* 4582a02908f1SMingming Cao * if nrblocks are not contiguous, worse case, each block touch 4583a02908f1SMingming Cao * a indirect block, and each indirect block touch a double indirect 4584a02908f1SMingming Cao * block, plus a triple indirect block 4585a02908f1SMingming Cao */ 4586a02908f1SMingming Cao indirects = nrblocks * 2 + 1; 4587a02908f1SMingming Cao return indirects; 4588a02908f1SMingming Cao } 4589a86c6181SAlex Tomas 4590a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4591a02908f1SMingming Cao { 4592a02908f1SMingming Cao if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) 4593a02908f1SMingming Cao return ext4_indirect_trans_blocks(inode, nrblocks, 0); 4594a02908f1SMingming Cao return ext4_ext_index_trans_blocks(inode, nrblocks, 0); 4595a02908f1SMingming Cao } 4596a02908f1SMingming Cao /* 4597a02908f1SMingming Cao * Account for index blocks, block groups bitmaps and block group 4598a02908f1SMingming Cao * descriptor blocks if modify datablocks and index blocks 4599a02908f1SMingming Cao * worse case, the indexs blocks spread over different block groups 4600a02908f1SMingming Cao * 4601a02908f1SMingming Cao * If datablocks are discontiguous, they are possible to spread over 4602a02908f1SMingming Cao * different block groups too. If they are contiugous, with flexbg, 4603a02908f1SMingming Cao * they could still across block group boundary. 4604a02908f1SMingming Cao * 4605a02908f1SMingming Cao * Also account for superblock, inode, quota and xattr blocks 4606a02908f1SMingming Cao */ 4607a02908f1SMingming Cao int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4608a02908f1SMingming Cao { 4609a02908f1SMingming Cao int groups, gdpblocks; 4610a02908f1SMingming Cao int idxblocks; 4611a02908f1SMingming Cao int ret = 0; 4612a02908f1SMingming Cao 4613a02908f1SMingming Cao /* 4614a02908f1SMingming Cao * How many index blocks need to touch to modify nrblocks? 
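 * For a worked example (illustration only): on a 4k-block filesystem
 * without extents, EXT4_ADDR_PER_BLOCK is 1024, so a contiguous chunk
 * of 64 blocks needs at most 64/1024 + 3 = 3 index blocks, while 64
 * discontiguous blocks need up to 64 * 2 + 1 = 129 (see
 * ext4_indirect_trans_blocks() above).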
4615a02908f1SMingming Cao * The "Chunk" flag indicates whether the nrblocks are
4616a02908f1SMingming Cao * physically contiguous on disk.
4617a02908f1SMingming Cao *
4618a02908f1SMingming Cao * Direct IO and fallocate call get_block to allocate one
4619a02908f1SMingming Cao * single extent at a time, so they can set the "Chunk" flag
4620a02908f1SMingming Cao */
4621a02908f1SMingming Cao idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4622a02908f1SMingming Cao 
4623a02908f1SMingming Cao ret = idxblocks;
4624a02908f1SMingming Cao 
4625a02908f1SMingming Cao /*
4626a02908f1SMingming Cao * Now let's see how many group bitmaps and group descriptors need
4627a02908f1SMingming Cao * to be accounted for
4628a02908f1SMingming Cao */
4629a02908f1SMingming Cao groups = idxblocks;
4630a02908f1SMingming Cao if (chunk)
4631a02908f1SMingming Cao groups += 1;
4632ac27a0ecSDave Kleikamp else
4633a02908f1SMingming Cao groups += nrblocks;
4634ac27a0ecSDave Kleikamp 
4635a02908f1SMingming Cao gdpblocks = groups;
4636a02908f1SMingming Cao if (groups > EXT4_SB(inode->i_sb)->s_groups_count)
4637a02908f1SMingming Cao groups = EXT4_SB(inode->i_sb)->s_groups_count;
4638a02908f1SMingming Cao if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4639a02908f1SMingming Cao gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4640a02908f1SMingming Cao 
4641a02908f1SMingming Cao /* bitmaps and block group descriptor blocks */
4642a02908f1SMingming Cao ret += groups + gdpblocks;
4643a02908f1SMingming Cao 
4644a02908f1SMingming Cao /* Blocks for super block, inode, quota and xattr blocks */
4645a02908f1SMingming Cao ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4646ac27a0ecSDave Kleikamp 
4647ac27a0ecSDave Kleikamp return ret;
4648ac27a0ecSDave Kleikamp }
4649ac27a0ecSDave Kleikamp 
4650ac27a0ecSDave Kleikamp /*
4651a02908f1SMingming Cao * Calculate the total number of credits to reserve to fit
4652f3bd1f3fSMingming Cao * the modification of a single page into a single transaction,
4653f3bd1f3fSMingming Cao * which may include multiple chunks of block allocations.
4654a02908f1SMingming Cao *
4655525f4ed8SMingming Cao * This could be called via ext4_write_begin()
4656a02908f1SMingming Cao *
4657525f4ed8SMingming Cao * We need to consider the worst case, when
4658a02908f1SMingming Cao * one new block is allocated per extent.
4659a02908f1SMingming Cao */
4660a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode)
4661a02908f1SMingming Cao {
4662a02908f1SMingming Cao int bpp = ext4_journal_blocks_per_page(inode);
4663a02908f1SMingming Cao int ret;
4664a02908f1SMingming Cao 
4665a02908f1SMingming Cao ret = ext4_meta_trans_blocks(inode, bpp, 0);
4666a02908f1SMingming Cao 
4667a02908f1SMingming Cao /* Account for data blocks for journalled mode */
4668a02908f1SMingming Cao if (ext4_should_journal_data(inode))
4669a02908f1SMingming Cao ret += bpp;
4670a02908f1SMingming Cao return ret;
4671a02908f1SMingming Cao }
4672f3bd1f3fSMingming Cao 
4673f3bd1f3fSMingming Cao /*
4674f3bd1f3fSMingming Cao * Calculate the journal credits for a chunk of data modification.
4675f3bd1f3fSMingming Cao *
4676f3bd1f3fSMingming Cao * This is called from DIO, fallocate or whatever else calls
4677f3bd1f3fSMingming Cao * ext4_get_blocks_wrap() to map/allocate a chunk of contiguous disk blocks.
4678f3bd1f3fSMingming Cao *
4679f3bd1f3fSMingming Cao * journal buffers for data blocks are not included here, as DIO
4680f3bd1f3fSMingming Cao * and fallocate do not need to journal data buffers.
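 *
 * A caller about to map/allocate an nrblocks-sized chunk would reserve
 * credits roughly like this (illustrative sketch, not a quote of any
 * specific call site):
 *
 *	handle = ext4_journal_start(inode,
 *			ext4_chunk_trans_blocks(inode, nrblocks));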
4681f3bd1f3fSMingming Cao */
4682f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4683f3bd1f3fSMingming Cao {
4684f3bd1f3fSMingming Cao return ext4_meta_trans_blocks(inode, nrblocks, 1);
4685f3bd1f3fSMingming Cao }
4686f3bd1f3fSMingming Cao 
4687a02908f1SMingming Cao /*
4688617ba13bSMingming Cao * The caller must have previously called ext4_reserve_inode_write().
4689ac27a0ecSDave Kleikamp * Given this, we know that the caller already has write access to iloc->bh.
4690ac27a0ecSDave Kleikamp */
4691617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
4692617ba13bSMingming Cao struct inode *inode, struct ext4_iloc *iloc)
4693ac27a0ecSDave Kleikamp {
4694ac27a0ecSDave Kleikamp int err = 0;
4695ac27a0ecSDave Kleikamp 
469625ec56b5SJean Noel Cordenner if (test_opt(inode->i_sb, I_VERSION))
469725ec56b5SJean Noel Cordenner inode_inc_iversion(inode);
469825ec56b5SJean Noel Cordenner 
4699ac27a0ecSDave Kleikamp /* the do_update_inode consumes one bh->b_count */
4700ac27a0ecSDave Kleikamp get_bh(iloc->bh);
4701ac27a0ecSDave Kleikamp 
4702dab291afSMingming Cao /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4703617ba13bSMingming Cao err = ext4_do_update_inode(handle, inode, iloc);
4704ac27a0ecSDave Kleikamp put_bh(iloc->bh);
4705ac27a0ecSDave Kleikamp return err;
4706ac27a0ecSDave Kleikamp }
4707ac27a0ecSDave Kleikamp 
4708ac27a0ecSDave Kleikamp /*
4709ac27a0ecSDave Kleikamp * On success, we end up with an outstanding reference count against
4710ac27a0ecSDave Kleikamp * iloc->bh. This _must_ be cleaned up later.
4711ac27a0ecSDave Kleikamp */
4712ac27a0ecSDave Kleikamp 
4713ac27a0ecSDave Kleikamp int
4714617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4715617ba13bSMingming Cao struct ext4_iloc *iloc)
4716ac27a0ecSDave Kleikamp {
4717ac27a0ecSDave Kleikamp int err = 0;
4718ac27a0ecSDave Kleikamp if (handle) {
4719617ba13bSMingming Cao err = ext4_get_inode_loc(inode, iloc);
4720ac27a0ecSDave Kleikamp if (!err) {
4721ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc->bh, "get_write_access");
4722617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, iloc->bh);
4723ac27a0ecSDave Kleikamp if (err) {
4724ac27a0ecSDave Kleikamp brelse(iloc->bh);
4725ac27a0ecSDave Kleikamp iloc->bh = NULL;
4726ac27a0ecSDave Kleikamp }
4727ac27a0ecSDave Kleikamp }
4728ac27a0ecSDave Kleikamp }
4729617ba13bSMingming Cao ext4_std_error(inode->i_sb, err);
4730ac27a0ecSDave Kleikamp return err;
4731ac27a0ecSDave Kleikamp }
4732ac27a0ecSDave Kleikamp 
4733ac27a0ecSDave Kleikamp /*
47346dd4ee7cSKalpak Shah * Expand an inode by new_extra_isize bytes.
47356dd4ee7cSKalpak Shah * Returns 0 on success or negative error number on failure.
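 *
 * Note that the caller must already hold a handle with enough journal
 * credits to touch the EA block; ext4_mark_inode_dirty() below extends
 * its handle with jbd2_journal_extend() before attempting the expansion.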
47366dd4ee7cSKalpak Shah */ 47371d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode, 47381d03ec98SAneesh Kumar K.V unsigned int new_extra_isize, 47391d03ec98SAneesh Kumar K.V struct ext4_iloc iloc, 47401d03ec98SAneesh Kumar K.V handle_t *handle) 47416dd4ee7cSKalpak Shah { 47426dd4ee7cSKalpak Shah struct ext4_inode *raw_inode; 47436dd4ee7cSKalpak Shah struct ext4_xattr_ibody_header *header; 47446dd4ee7cSKalpak Shah struct ext4_xattr_entry *entry; 47456dd4ee7cSKalpak Shah 47466dd4ee7cSKalpak Shah if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 47476dd4ee7cSKalpak Shah return 0; 47486dd4ee7cSKalpak Shah 47496dd4ee7cSKalpak Shah raw_inode = ext4_raw_inode(&iloc); 47506dd4ee7cSKalpak Shah 47516dd4ee7cSKalpak Shah header = IHDR(inode, raw_inode); 47526dd4ee7cSKalpak Shah entry = IFIRST(header); 47536dd4ee7cSKalpak Shah 47546dd4ee7cSKalpak Shah /* No extended attributes present */ 47556dd4ee7cSKalpak Shah if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) || 47566dd4ee7cSKalpak Shah header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 47576dd4ee7cSKalpak Shah memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 47586dd4ee7cSKalpak Shah new_extra_isize); 47596dd4ee7cSKalpak Shah EXT4_I(inode)->i_extra_isize = new_extra_isize; 47606dd4ee7cSKalpak Shah return 0; 47616dd4ee7cSKalpak Shah } 47626dd4ee7cSKalpak Shah 47636dd4ee7cSKalpak Shah /* try to expand with EAs present */ 47646dd4ee7cSKalpak Shah return ext4_expand_extra_isize_ea(inode, new_extra_isize, 47656dd4ee7cSKalpak Shah raw_inode, handle); 47666dd4ee7cSKalpak Shah } 47676dd4ee7cSKalpak Shah 47686dd4ee7cSKalpak Shah /* 4769ac27a0ecSDave Kleikamp * What we do here is to mark the in-core inode as clean with respect to inode 4770ac27a0ecSDave Kleikamp * dirtiness (it may still be data-dirty). 4771ac27a0ecSDave Kleikamp * This means that the in-core inode may be reaped by prune_icache 4772ac27a0ecSDave Kleikamp * without having to perform any I/O. This is a very good thing, 4773ac27a0ecSDave Kleikamp * because *any* task may call prune_icache - even ones which 4774ac27a0ecSDave Kleikamp * have a transaction open against a different journal. 4775ac27a0ecSDave Kleikamp * 4776ac27a0ecSDave Kleikamp * Is this cheating? Not really. Sure, we haven't written the 4777ac27a0ecSDave Kleikamp * inode out, but prune_icache isn't a user-visible syncing function. 4778ac27a0ecSDave Kleikamp * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 4779ac27a0ecSDave Kleikamp * we start and wait on commits. 4780ac27a0ecSDave Kleikamp * 4781ac27a0ecSDave Kleikamp * Is this efficient/effective? Well, we're being nice to the system 4782ac27a0ecSDave Kleikamp * by cleaning up our inodes proactively so they can be reaped 4783ac27a0ecSDave Kleikamp * without I/O. But we are potentially leaving up to five seconds' 4784ac27a0ecSDave Kleikamp * worth of inodes floating about which prune_icache wants us to 4785ac27a0ecSDave Kleikamp * write out. One way to fix that would be to get prune_icache() 4786ac27a0ecSDave Kleikamp * to do a write_super() to free up some memory. It has the desired 4787ac27a0ecSDave Kleikamp * effect. 
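 *
 * The usual calling pattern, as seen elsewhere in this file (e.g. in
 * ext4_setattr()), is roughly:
 *
 *	handle = ext4_journal_start(inode, credits);
 *	...update the in-core inode...
 *	err = ext4_mark_inode_dirty(handle, inode);
 *	ext4_journal_stop(handle);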
4788ac27a0ecSDave Kleikamp */ 4789617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 4790ac27a0ecSDave Kleikamp { 4791617ba13bSMingming Cao struct ext4_iloc iloc; 47926dd4ee7cSKalpak Shah struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 47936dd4ee7cSKalpak Shah static unsigned int mnt_count; 47946dd4ee7cSKalpak Shah int err, ret; 4795ac27a0ecSDave Kleikamp 4796ac27a0ecSDave Kleikamp might_sleep(); 4797617ba13bSMingming Cao err = ext4_reserve_inode_write(handle, inode, &iloc); 47986dd4ee7cSKalpak Shah if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 47996dd4ee7cSKalpak Shah !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) { 48006dd4ee7cSKalpak Shah /* 48016dd4ee7cSKalpak Shah * We need extra buffer credits since we may write into EA block 48026dd4ee7cSKalpak Shah * with this same handle. If journal_extend fails, then it will 48036dd4ee7cSKalpak Shah * only result in a minor loss of functionality for that inode. 48046dd4ee7cSKalpak Shah * If this is felt to be critical, then e2fsck should be run to 48056dd4ee7cSKalpak Shah * force a large enough s_min_extra_isize. 48066dd4ee7cSKalpak Shah */ 48076dd4ee7cSKalpak Shah if ((jbd2_journal_extend(handle, 48086dd4ee7cSKalpak Shah EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 48096dd4ee7cSKalpak Shah ret = ext4_expand_extra_isize(inode, 48106dd4ee7cSKalpak Shah sbi->s_want_extra_isize, 48116dd4ee7cSKalpak Shah iloc, handle); 48126dd4ee7cSKalpak Shah if (ret) { 48136dd4ee7cSKalpak Shah EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND; 4814c1bddad9SAneesh Kumar K.V if (mnt_count != 4815c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count)) { 481646e665e9SHarvey Harrison ext4_warning(inode->i_sb, __func__, 48176dd4ee7cSKalpak Shah "Unable to expand inode %lu. Delete" 48186dd4ee7cSKalpak Shah " some EAs or run e2fsck.", 48196dd4ee7cSKalpak Shah inode->i_ino); 4820c1bddad9SAneesh Kumar K.V mnt_count = 4821c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count); 48226dd4ee7cSKalpak Shah } 48236dd4ee7cSKalpak Shah } 48246dd4ee7cSKalpak Shah } 48256dd4ee7cSKalpak Shah } 4826ac27a0ecSDave Kleikamp if (!err) 4827617ba13bSMingming Cao err = ext4_mark_iloc_dirty(handle, inode, &iloc); 4828ac27a0ecSDave Kleikamp return err; 4829ac27a0ecSDave Kleikamp } 4830ac27a0ecSDave Kleikamp 4831ac27a0ecSDave Kleikamp /* 4832617ba13bSMingming Cao * ext4_dirty_inode() is called from __mark_inode_dirty() 4833ac27a0ecSDave Kleikamp * 4834ac27a0ecSDave Kleikamp * We're really interested in the case where a file is being extended. 4835ac27a0ecSDave Kleikamp * i_size has been changed by generic_commit_write() and we thus need 4836ac27a0ecSDave Kleikamp * to include the updated inode in the current transaction. 4837ac27a0ecSDave Kleikamp * 4838ac27a0ecSDave Kleikamp * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks 4839ac27a0ecSDave Kleikamp * are allocated to the file. 4840ac27a0ecSDave Kleikamp * 4841ac27a0ecSDave Kleikamp * If the inode is marked synchronous, we don't honour that here - doing 4842ac27a0ecSDave Kleikamp * so would cause a commit on atime updates, which we don't bother doing. 4843ac27a0ecSDave Kleikamp * We handle synchronous inodes at the highest possible level. 
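 *
 * (ext4_dirty_inode() is installed as the ->dirty_inode hook of ext4's
 * super_operations in super.c, which is how __mark_inode_dirty()
 * reaches it.)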
4844ac27a0ecSDave Kleikamp */ 4845617ba13bSMingming Cao void ext4_dirty_inode(struct inode *inode) 4846ac27a0ecSDave Kleikamp { 4847617ba13bSMingming Cao handle_t *current_handle = ext4_journal_current_handle(); 4848ac27a0ecSDave Kleikamp handle_t *handle; 4849ac27a0ecSDave Kleikamp 4850617ba13bSMingming Cao handle = ext4_journal_start(inode, 2); 4851ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4852ac27a0ecSDave Kleikamp goto out; 4853ac27a0ecSDave Kleikamp if (current_handle && 4854ac27a0ecSDave Kleikamp current_handle->h_transaction != handle->h_transaction) { 4855ac27a0ecSDave Kleikamp /* This task has a transaction open against a different fs */ 4856ac27a0ecSDave Kleikamp printk(KERN_EMERG "%s: transactions do not match!\n", 485746e665e9SHarvey Harrison __func__); 4858ac27a0ecSDave Kleikamp } else { 4859ac27a0ecSDave Kleikamp jbd_debug(5, "marking dirty. outer handle=%p\n", 4860ac27a0ecSDave Kleikamp current_handle); 4861617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 4862ac27a0ecSDave Kleikamp } 4863617ba13bSMingming Cao ext4_journal_stop(handle); 4864ac27a0ecSDave Kleikamp out: 4865ac27a0ecSDave Kleikamp return; 4866ac27a0ecSDave Kleikamp } 4867ac27a0ecSDave Kleikamp 4868ac27a0ecSDave Kleikamp #if 0 4869ac27a0ecSDave Kleikamp /* 4870ac27a0ecSDave Kleikamp * Bind an inode's backing buffer_head into this transaction, to prevent 4871ac27a0ecSDave Kleikamp * it from being flushed to disk early. Unlike 4872617ba13bSMingming Cao * ext4_reserve_inode_write, this leaves behind no bh reference and 4873ac27a0ecSDave Kleikamp * returns no iloc structure, so the caller needs to repeat the iloc 4874ac27a0ecSDave Kleikamp * lookup to mark the inode dirty later. 4875ac27a0ecSDave Kleikamp */ 4876617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode) 4877ac27a0ecSDave Kleikamp { 4878617ba13bSMingming Cao struct ext4_iloc iloc; 4879ac27a0ecSDave Kleikamp 4880ac27a0ecSDave Kleikamp int err = 0; 4881ac27a0ecSDave Kleikamp if (handle) { 4882617ba13bSMingming Cao err = ext4_get_inode_loc(inode, &iloc); 4883ac27a0ecSDave Kleikamp if (!err) { 4884ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc.bh, "get_write_access"); 4885dab291afSMingming Cao err = jbd2_journal_get_write_access(handle, iloc.bh); 4886ac27a0ecSDave Kleikamp if (!err) 4887617ba13bSMingming Cao err = ext4_journal_dirty_metadata(handle, 4888ac27a0ecSDave Kleikamp iloc.bh); 4889ac27a0ecSDave Kleikamp brelse(iloc.bh); 4890ac27a0ecSDave Kleikamp } 4891ac27a0ecSDave Kleikamp } 4892617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4893ac27a0ecSDave Kleikamp return err; 4894ac27a0ecSDave Kleikamp } 4895ac27a0ecSDave Kleikamp #endif 4896ac27a0ecSDave Kleikamp 4897617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val) 4898ac27a0ecSDave Kleikamp { 4899ac27a0ecSDave Kleikamp journal_t *journal; 4900ac27a0ecSDave Kleikamp handle_t *handle; 4901ac27a0ecSDave Kleikamp int err; 4902ac27a0ecSDave Kleikamp 4903ac27a0ecSDave Kleikamp /* 4904ac27a0ecSDave Kleikamp * We have to be very careful here: changing a data block's 4905ac27a0ecSDave Kleikamp * journaling status dynamically is dangerous. If we write a 4906ac27a0ecSDave Kleikamp * data block to the journal, change the status and then delete 4907ac27a0ecSDave Kleikamp * that block, we risk forgetting to revoke the old log record 4908ac27a0ecSDave Kleikamp * from the journal and so a subsequent replay can corrupt data. 
4909ac27a0ecSDave Kleikamp * So, first we make sure that the journal is empty and that 4910ac27a0ecSDave Kleikamp * nobody is changing anything. 4911ac27a0ecSDave Kleikamp */ 4912ac27a0ecSDave Kleikamp 4913617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 4914d699594dSDave Hansen if (is_journal_aborted(journal)) 4915ac27a0ecSDave Kleikamp return -EROFS; 4916ac27a0ecSDave Kleikamp 4917dab291afSMingming Cao jbd2_journal_lock_updates(journal); 4918dab291afSMingming Cao jbd2_journal_flush(journal); 4919ac27a0ecSDave Kleikamp 4920ac27a0ecSDave Kleikamp /* 4921ac27a0ecSDave Kleikamp * OK, there are no updates running now, and all cached data is 4922ac27a0ecSDave Kleikamp * synced to disk. We are now in a completely consistent state 4923ac27a0ecSDave Kleikamp * which doesn't have anything in the journal, and we know that 4924ac27a0ecSDave Kleikamp * no filesystem updates are running, so it is safe to modify 4925ac27a0ecSDave Kleikamp * the inode's in-core data-journaling state flag now. 4926ac27a0ecSDave Kleikamp */ 4927ac27a0ecSDave Kleikamp 4928ac27a0ecSDave Kleikamp if (val) 4929617ba13bSMingming Cao EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL; 4930ac27a0ecSDave Kleikamp else 4931617ba13bSMingming Cao EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL; 4932617ba13bSMingming Cao ext4_set_aops(inode); 4933ac27a0ecSDave Kleikamp 4934dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 4935ac27a0ecSDave Kleikamp 4936ac27a0ecSDave Kleikamp /* Finally we can mark the inode as dirty. */ 4937ac27a0ecSDave Kleikamp 4938617ba13bSMingming Cao handle = ext4_journal_start(inode, 1); 4939ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4940ac27a0ecSDave Kleikamp return PTR_ERR(handle); 4941ac27a0ecSDave Kleikamp 4942617ba13bSMingming Cao err = ext4_mark_inode_dirty(handle, inode); 4943ac27a0ecSDave Kleikamp handle->h_sync = 1; 4944617ba13bSMingming Cao ext4_journal_stop(handle); 4945617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4946ac27a0ecSDave Kleikamp 4947ac27a0ecSDave Kleikamp return err; 4948ac27a0ecSDave Kleikamp } 49492e9ee850SAneesh Kumar K.V 49502e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 49512e9ee850SAneesh Kumar K.V { 49522e9ee850SAneesh Kumar K.V return !buffer_mapped(bh); 49532e9ee850SAneesh Kumar K.V } 49542e9ee850SAneesh Kumar K.V 49552e9ee850SAneesh Kumar K.V int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page) 49562e9ee850SAneesh Kumar K.V { 49572e9ee850SAneesh Kumar K.V loff_t size; 49582e9ee850SAneesh Kumar K.V unsigned long len; 49592e9ee850SAneesh Kumar K.V int ret = -EINVAL; 496079f0be8dSAneesh Kumar K.V void *fsdata; 49612e9ee850SAneesh Kumar K.V struct file *file = vma->vm_file; 49622e9ee850SAneesh Kumar K.V struct inode *inode = file->f_path.dentry->d_inode; 49632e9ee850SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 49642e9ee850SAneesh Kumar K.V 49652e9ee850SAneesh Kumar K.V /* 49662e9ee850SAneesh Kumar K.V * Get i_alloc_sem to stop truncates messing with the inode. We cannot 49672e9ee850SAneesh Kumar K.V * get i_mutex because we are already holding mmap_sem. 49682e9ee850SAneesh Kumar K.V */ 49692e9ee850SAneesh Kumar K.V down_read(&inode->i_alloc_sem); 49702e9ee850SAneesh Kumar K.V size = i_size_read(inode); 49712e9ee850SAneesh Kumar K.V if (page->mapping != mapping || size <= page_offset(page) 49722e9ee850SAneesh Kumar K.V || !PageUptodate(page)) { 49732e9ee850SAneesh Kumar K.V /* page got truncated from under us? 
*/
49742e9ee850SAneesh Kumar K.V goto out_unlock;
49752e9ee850SAneesh Kumar K.V }
49762e9ee850SAneesh Kumar K.V ret = 0;
49772e9ee850SAneesh Kumar K.V if (PageMappedToDisk(page))
49782e9ee850SAneesh Kumar K.V goto out_unlock;
49792e9ee850SAneesh Kumar K.V 
49802e9ee850SAneesh Kumar K.V if (page->index == size >> PAGE_CACHE_SHIFT)
49812e9ee850SAneesh Kumar K.V len = size & ~PAGE_CACHE_MASK;
49822e9ee850SAneesh Kumar K.V else
49832e9ee850SAneesh Kumar K.V len = PAGE_CACHE_SIZE;
49842e9ee850SAneesh Kumar K.V 
49852e9ee850SAneesh Kumar K.V if (page_has_buffers(page)) {
49862e9ee850SAneesh Kumar K.V /* return if we have all the buffers mapped */
49872e9ee850SAneesh Kumar K.V if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
49882e9ee850SAneesh Kumar K.V ext4_bh_unmapped))
49892e9ee850SAneesh Kumar K.V goto out_unlock;
49902e9ee850SAneesh Kumar K.V }
49912e9ee850SAneesh Kumar K.V /*
49922e9ee850SAneesh Kumar K.V * OK, we need to fill the hole... Use write_begin/write_end
49932e9ee850SAneesh Kumar K.V * to do the block allocation/reservation. We are not holding
49942e9ee850SAneesh Kumar K.V * inode->i_mutex here. That allows parallel write_begin and
49952e9ee850SAneesh Kumar K.V * write_end calls. lock_page prevents this from happening
49962e9ee850SAneesh Kumar K.V * on the same page, though
49972e9ee850SAneesh Kumar K.V */
49982e9ee850SAneesh Kumar K.V ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
499979f0be8dSAneesh Kumar K.V len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
50002e9ee850SAneesh Kumar K.V if (ret < 0)
50012e9ee850SAneesh Kumar K.V goto out_unlock;
50022e9ee850SAneesh Kumar K.V ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
500379f0be8dSAneesh Kumar K.V len, len, page, fsdata);
50042e9ee850SAneesh Kumar K.V if (ret < 0)
50052e9ee850SAneesh Kumar K.V goto out_unlock;
50062e9ee850SAneesh Kumar K.V ret = 0;
50072e9ee850SAneesh Kumar K.V out_unlock:
50082e9ee850SAneesh Kumar K.V up_read(&inode->i_alloc_sem);
50092e9ee850SAneesh Kumar K.V return ret;
50102e9ee850SAneesh Kumar K.V }
5011
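/*
 * Sketch of how ext4_page_mkwrite() above is hooked up when a file is
 * mmapped (the real structure lives in fs/ext4/file.c; shown here for
 * illustration only):
 *
 *	static struct vm_operations_struct ext4_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= ext4_page_mkwrite,
 *	};
 */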