/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/aio.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01
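/*
 * Inode checksumming helpers for the metadata_csum feature.  The
 * checksum is computed with ext4_chksum(), seeded by ei->i_csum_seed,
 * over the entire on-disk inode: the checksum fields themselves are
 * temporarily zeroed before the computation (so the stored value does
 * not feed back into it) and restored afterwards.  The low 16 bits
 * live in i_checksum_lo; the high 16 bits are stored and verified only
 * when the inode is large enough to carry i_checksum_hi.
 */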
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = le16_to_cpu(raw->i_checksum_lo);
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = le16_to_cpu(raw->i_checksum_hi);
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = cpu_to_le16(csum_lo);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum_hi);

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
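/*
 * In data=ordered mode a truncate must not be committed before the
 * ordered data it cuts off has been dealt with; roughly speaking,
 * jbd2_journal_begin_ordered_truncate() makes sure any such data
 * reaches disk first, so a crash cannot expose stale blocks past the
 * new EOF.  Only inodes that were opened for writing (and thus have a
 * jinode attached) can have outstanding ordered data.
 */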
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also, even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped, and thus a user could see stale data if they try
		 * to read them before the transaction is checkpointed.  So be
		 * careful and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
		    inode->i_ino != EXT4_JOURNAL_INO) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);

		WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
	if (is_bad_inode(inode))
		goto no_delete;

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				    ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear.
		 */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a block located at @lblock
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
			"with only %d reserved metadata blocks "
			"(releasing %d blocks with reserved %d data blocks)",
			inode->i_ino, ei->i_allocated_meta_blocks,
			ei->i_reserved_meta_blocks, used,
			ei->i_reserved_data_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
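/*
 * Sanity check a mapping handed back by the block allocator: every
 * physical block in the mapped range must be a valid data block for
 * this filesystem, otherwise the inode is flagged as corrupt.
 */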
static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
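/*
 * Debug-only (ES_AGGRESSIVE_TEST): any lookup that was satisfied from
 * the extent status tree is redone against the on-disk extent or
 * indirect maps, and the two answers are cross-checked.
 */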
#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result is not the same,
	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so in the meantime the unwritten
	 * extent could be converted.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));
	/*
	 * Clear the EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flags
	 * because they shouldn't be marked in es_map->m_flags.
	 */
	map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);

	/*
	 * We don't check m_len because extents can be collapsed in the
	 * status tree, so the lengths might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates
 * blocks, stores the allocated blocks in the result buffer head and
 * marks it mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create==0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create==1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
		ext4_es_lru_add(inode);
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			retval = 0;
		} else {
			BUG_ON(1);
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation; let the
	 * underlying get_block() function know, to avoid double
	 * accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non extent files.  So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
			if (ext4_es_is_written(&es))
				goto has_zeroout;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}

has_zeroout:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
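/*
 * _ext4_get_block() is the get_block_t workhorse behind the buffered
 * and direct I/O paths.  When called for a direct I/O write without a
 * running handle, it starts its own handle, with the mapping capped at
 * DIO_MAX_BLOCKS blocks so the transaction credits stay bounded.
 */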
static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
					    dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}
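/*
 * Standard get_block_t wrapper around _ext4_get_block(); the 'create'
 * argument is simply translated into EXT4_GET_BLOCKS_CREATE.
 */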
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	/* ensure we send some value back into *errp */
	*errp = 0;

	if (create && err == 0)
		err = -ENOSPC;	/* should never happen */
	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh)) {
		*errp = -ENOMEM;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}
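/*
 * ext4_bread() works like ext4_getblk(), but additionally reads the
 * block from disk if it is not already uptodate, waiting for the I/O
 * to complete before returning.
 */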
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
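/*
 * Walk the buffers attached to a page, calling fn() on each buffer
 * that overlaps the byte range [from, to).  *partial is set if any
 * buffer outside that range is not uptodate.  The walk stops at the
 * first error from fn(), which is then returned.
 */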
int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
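/*
 * ext4_write_begin() is the .write_begin address_space operation for
 * the buffered write path: it pins the target page, starts a handle
 * with enough credits for the block allocation (plus one extra block
 * for the orphan-list insertion used on failure), and instantiates
 * buffers via __block_write_begin().  Inline-data inodes are handled
 * separately up front.
 */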
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		page_cache_release(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		page_cache_release(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	wait_on_page_writeback(page);

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		page_cache_release(page);
		return ret;
	}
	*pagep = page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us;
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int i_size_changed = 0;

	trace_ext4_write_end(inode, pos, len, copied);
	if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
		ret = ext4_jbd2_file_inode(handle, inode);
		if (ret) {
			unlock_page(page);
			page_cache_release(page);
			goto errout;
		}
	}

	if (ext4_has_inline_data(inode)) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0)
			goto errout;
		copied = ret;
	} else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
1074f8514083SAneesh Kumar K.V * 1075f8514083SAneesh Kumar K.V * But it's important to update i_size while still holding page lock: 1076f8514083SAneesh Kumar K.V * page writeout could otherwise come in and zero beyond i_size. 1077f8514083SAneesh Kumar K.V */ 1078f8514083SAneesh Kumar K.V if (pos + copied > inode->i_size) { 1079f8514083SAneesh Kumar K.V i_size_write(inode, pos + copied); 1080f8514083SAneesh Kumar K.V i_size_changed = 1; 1081f8514083SAneesh Kumar K.V } 1082f8514083SAneesh Kumar K.V 1083f8514083SAneesh Kumar K.V if (pos + copied > EXT4_I(inode)->i_disksize) { 1084f8514083SAneesh Kumar K.V /* We need to mark inode dirty even if 1085f8514083SAneesh Kumar K.V * new_i_size is less than inode->i_size 1086eed4333fSZheng Liu * but greater than i_disksize. (hint: delalloc) 1087f8514083SAneesh Kumar K.V */ 1088f8514083SAneesh Kumar K.V ext4_update_i_disksize(inode, (pos + copied)); 1089f8514083SAneesh Kumar K.V i_size_changed = 1; 1090f8514083SAneesh Kumar K.V } 1091f8514083SAneesh Kumar K.V unlock_page(page); 1092f8514083SAneesh Kumar K.V page_cache_release(page); 1093f8514083SAneesh Kumar K.V 1094f8514083SAneesh Kumar K.V /* 1095f8514083SAneesh Kumar K.V * Don't mark the inode dirty under page lock. First, it unnecessarily 1096f8514083SAneesh Kumar K.V * makes the holding time of page lock longer. Second, it forces lock 1097f8514083SAneesh Kumar K.V * ordering of page lock and transaction start for journaling 1098f8514083SAneesh Kumar K.V * filesystems. 1099f8514083SAneesh Kumar K.V */ 1100f8514083SAneesh Kumar K.V if (i_size_changed) 1101f8514083SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 1102f8514083SAneesh Kumar K.V 1103ffacfa7aSJan Kara if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1104f8514083SAneesh Kumar K.V /* If we have allocated more blocks than we 1105f8514083SAneesh Kumar K.V * copied, we will have blocks allocated outside 1106f8514083SAneesh Kumar K.V * inode->i_size, so truncate them. 1107f8514083SAneesh Kumar K.V */ 1108f8514083SAneesh Kumar K.V ext4_orphan_add(handle, inode); 110974d553aaSTheodore Ts'o errout: 1110617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1111ac27a0ecSDave Kleikamp if (!ret) 1112ac27a0ecSDave Kleikamp ret = ret2; 1113bfc1af65SNick Piggin 1114f8514083SAneesh Kumar K.V if (pos + len > inode->i_size) { 1115b9a4207dSJan Kara ext4_truncate_failed_write(inode); 1116f8514083SAneesh Kumar K.V /* 1117ffacfa7aSJan Kara * If truncate failed early the inode might still be 1118f8514083SAneesh Kumar K.V * on the orphan list; we need to make sure the inode 1119f8514083SAneesh Kumar K.V * is removed from the orphan list in that case. 1120f8514083SAneesh Kumar K.V */ 1121f8514083SAneesh Kumar K.V if (inode->i_nlink) 1122f8514083SAneesh Kumar K.V ext4_orphan_del(NULL, inode); 1123f8514083SAneesh Kumar K.V } 1124f8514083SAneesh Kumar K.V 1125bfc1af65SNick Piggin return ret ?
ret : copied; 1126ac27a0ecSDave Kleikamp } 1127ac27a0ecSDave Kleikamp 1128bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file, 1129bfc1af65SNick Piggin struct address_space *mapping, 1130bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1131bfc1af65SNick Piggin struct page *page, void *fsdata) 1132ac27a0ecSDave Kleikamp { 1133617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1134bfc1af65SNick Piggin struct inode *inode = mapping->host; 1135ac27a0ecSDave Kleikamp int ret = 0, ret2; 1136ac27a0ecSDave Kleikamp int partial = 0; 1137bfc1af65SNick Piggin unsigned from, to; 1138cf17fea6SAneesh Kumar K.V loff_t new_i_size; 1139ac27a0ecSDave Kleikamp 11409bffad1eSTheodore Ts'o trace_ext4_journalled_write_end(inode, pos, len, copied); 1141bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1142bfc1af65SNick Piggin to = from + len; 1143bfc1af65SNick Piggin 1144441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1145441c8508SCurt Wohlgemuth 11463fdcfb66STao Ma if (ext4_has_inline_data(inode)) 11473fdcfb66STao Ma copied = ext4_write_inline_data_end(inode, pos, len, 11483fdcfb66STao Ma copied, page); 11493fdcfb66STao Ma else { 1150bfc1af65SNick Piggin if (copied < len) { 1151bfc1af65SNick Piggin if (!PageUptodate(page)) 1152bfc1af65SNick Piggin copied = 0; 1153bfc1af65SNick Piggin page_zero_new_buffers(page, from+copied, to); 1154bfc1af65SNick Piggin } 1155ac27a0ecSDave Kleikamp 1156f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_buffers(page), from, 1157bfc1af65SNick Piggin to, &partial, write_end_fn); 1158ac27a0ecSDave Kleikamp if (!partial) 1159ac27a0ecSDave Kleikamp SetPageUptodate(page); 11603fdcfb66STao Ma } 1161cf17fea6SAneesh Kumar K.V new_i_size = pos + copied; 1162cf17fea6SAneesh Kumar K.V if (new_i_size > inode->i_size) 1163bfc1af65SNick Piggin i_size_write(inode, pos+copied); 116419f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 11652d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1166cf17fea6SAneesh Kumar K.V if (new_i_size > EXT4_I(inode)->i_disksize) { 1167cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_i_size); 1168617ba13bSMingming Cao ret2 = ext4_mark_inode_dirty(handle, inode); 1169ac27a0ecSDave Kleikamp if (!ret) 1170ac27a0ecSDave Kleikamp ret = ret2; 1171ac27a0ecSDave Kleikamp } 1172bfc1af65SNick Piggin 1173cf108bcaSJan Kara unlock_page(page); 1174f8514083SAneesh Kumar K.V page_cache_release(page); 1175ffacfa7aSJan Kara if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1176f8514083SAneesh Kumar K.V /* If we have allocated more blocks than we 1177f8514083SAneesh Kumar K.V * copied, we will have blocks allocated outside 1178f8514083SAneesh Kumar K.V * inode->i_size, so truncate them. 1179f8514083SAneesh Kumar K.V */ 1180f8514083SAneesh Kumar K.V ext4_orphan_add(handle, inode); 1181f8514083SAneesh Kumar K.V 1182617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1183ac27a0ecSDave Kleikamp if (!ret) 1184ac27a0ecSDave Kleikamp ret = ret2; 1185f8514083SAneesh Kumar K.V if (pos + len > inode->i_size) { 1186b9a4207dSJan Kara ext4_truncate_failed_write(inode); 1187f8514083SAneesh Kumar K.V /* 1188ffacfa7aSJan Kara * If truncate failed early the inode might still be 1189f8514083SAneesh Kumar K.V * on the orphan list; we need to make sure the inode 1190f8514083SAneesh Kumar K.V * is removed from the orphan list in that case.
1191f8514083SAneesh Kumar K.V */ 1192f8514083SAneesh Kumar K.V if (inode->i_nlink) 1193f8514083SAneesh Kumar K.V ext4_orphan_del(NULL, inode); 1194f8514083SAneesh Kumar K.V } 1195bfc1af65SNick Piggin 1196bfc1af65SNick Piggin return ret ? ret : copied; 1197ac27a0ecSDave Kleikamp } 1198d2a17637SMingming Cao 11999d0be502STheodore Ts'o /* 1200386ad67cSLukas Czerner * Reserve metadata for a single block located at lblock 1201386ad67cSLukas Czerner */ 1202386ad67cSLukas Czerner static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) 1203386ad67cSLukas Czerner { 1204386ad67cSLukas Czerner int retries = 0; 1205386ad67cSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1206386ad67cSLukas Czerner struct ext4_inode_info *ei = EXT4_I(inode); 1207386ad67cSLukas Czerner unsigned int md_needed; 1208386ad67cSLukas Czerner ext4_lblk_t save_last_lblock; 1209386ad67cSLukas Czerner int save_len; 1210386ad67cSLukas Czerner 1211386ad67cSLukas Czerner /* 1212386ad67cSLukas Czerner * Recalculate the number of metadata blocks to reserve 1213386ad67cSLukas Czerner * in order to allocate nrblocks; the 1214386ad67cSLukas Czerner * worst case is one extent per block 1215386ad67cSLukas Czerner */ 1216386ad67cSLukas Czerner repeat: 1217386ad67cSLukas Czerner spin_lock(&ei->i_block_reservation_lock); 1218386ad67cSLukas Czerner /* 1219386ad67cSLukas Czerner * ext4_calc_metadata_amount() has side effects, which we have 1220386ad67cSLukas Czerner * to be prepared to undo if we fail to claim space. 1221386ad67cSLukas Czerner */ 1222386ad67cSLukas Czerner save_len = ei->i_da_metadata_calc_len; 1223386ad67cSLukas Czerner save_last_lblock = ei->i_da_metadata_calc_last_lblock; 1224386ad67cSLukas Czerner md_needed = EXT4_NUM_B2C(sbi, 1225386ad67cSLukas Czerner ext4_calc_metadata_amount(inode, lblock)); 1226386ad67cSLukas Czerner trace_ext4_da_reserve_space(inode, md_needed); 1227386ad67cSLukas Czerner 1228386ad67cSLukas Czerner /* 1229386ad67cSLukas Czerner * We do still charge estimated metadata to the sb though; 1230386ad67cSLukas Czerner * we cannot afford to run out of free blocks.
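 *
 * A rough worked example (illustrative numbers only): with 4k blocks
 * and a bigalloc cluster ratio of 16, ext4_calc_metadata_amount()
 * may report that one more extent tree block is needed, so md_needed
 * becomes EXT4_NUM_B2C(sbi, 1) == 1 cluster, claimed against the
 * free space below via ext4_claim_free_clusters().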
1231386ad67cSLukas Czerner */ 1232386ad67cSLukas Czerner if (ext4_claim_free_clusters(sbi, md_needed, 0)) { 1233386ad67cSLukas Czerner ei->i_da_metadata_calc_len = save_len; 1234386ad67cSLukas Czerner ei->i_da_metadata_calc_last_lblock = save_last_lblock; 1235386ad67cSLukas Czerner spin_unlock(&ei->i_block_reservation_lock); 1236386ad67cSLukas Czerner if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1237386ad67cSLukas Czerner cond_resched(); 1238386ad67cSLukas Czerner goto repeat; 1239386ad67cSLukas Czerner } 1240386ad67cSLukas Czerner return -ENOSPC; 1241386ad67cSLukas Czerner } 1242386ad67cSLukas Czerner ei->i_reserved_meta_blocks += md_needed; 1243386ad67cSLukas Czerner spin_unlock(&ei->i_block_reservation_lock); 1244386ad67cSLukas Czerner 1245386ad67cSLukas Czerner return 0; /* success */ 1246386ad67cSLukas Czerner } 1247386ad67cSLukas Czerner 1248386ad67cSLukas Czerner /* 12497b415bf6SAditya Kali * Reserve a single cluster located at lblock 12509d0be502STheodore Ts'o */ 125101f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) 1252d2a17637SMingming Cao { 1253030ba6bcSAneesh Kumar K.V int retries = 0; 1254d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 12550637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 12567b415bf6SAditya Kali unsigned int md_needed; 12575dd4056dSChristoph Hellwig int ret; 125803179fe9STheodore Ts'o ext4_lblk_t save_last_lblock; 125903179fe9STheodore Ts'o int save_len; 1260d2a17637SMingming Cao 126160e58e0fSMingming Cao /* 126272b8ab9dSEric Sandeen * We will charge metadata quota at writeout time; this saves 126372b8ab9dSEric Sandeen * us from metadata over-estimation, though we may go over by 126472b8ab9dSEric Sandeen * a small amount in the end. Here we just reserve for data. 126560e58e0fSMingming Cao */ 12667b415bf6SAditya Kali ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); 12675dd4056dSChristoph Hellwig if (ret) 12685dd4056dSChristoph Hellwig return ret; 126903179fe9STheodore Ts'o 127003179fe9STheodore Ts'o /* 127103179fe9STheodore Ts'o * Recalculate the number of metadata blocks to reserve 127203179fe9STheodore Ts'o * in order to allocate nrblocks; the 127303179fe9STheodore Ts'o * worst case is one extent per block 127403179fe9STheodore Ts'o */ 127503179fe9STheodore Ts'o repeat: 127603179fe9STheodore Ts'o spin_lock(&ei->i_block_reservation_lock); 127703179fe9STheodore Ts'o /* 127803179fe9STheodore Ts'o * ext4_calc_metadata_amount() has side effects, which we have 127903179fe9STheodore Ts'o * to be prepared to undo if we fail to claim space. 128003179fe9STheodore Ts'o */ 128103179fe9STheodore Ts'o save_len = ei->i_da_metadata_calc_len; 128203179fe9STheodore Ts'o save_last_lblock = ei->i_da_metadata_calc_last_lblock; 128303179fe9STheodore Ts'o md_needed = EXT4_NUM_B2C(sbi, 128403179fe9STheodore Ts'o ext4_calc_metadata_amount(inode, lblock)); 128503179fe9STheodore Ts'o trace_ext4_da_reserve_space(inode, md_needed); 128603179fe9STheodore Ts'o 128772b8ab9dSEric Sandeen /* 128872b8ab9dSEric Sandeen * We do still charge estimated metadata to the sb though; 128972b8ab9dSEric Sandeen * we cannot afford to run out of free blocks.
129072b8ab9dSEric Sandeen */ 1291e7d5f315STheodore Ts'o if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) { 129203179fe9STheodore Ts'o ei->i_da_metadata_calc_len = save_len; 129303179fe9STheodore Ts'o ei->i_da_metadata_calc_last_lblock = save_last_lblock; 129403179fe9STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 1295030ba6bcSAneesh Kumar K.V if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1296bb8b20edSLukas Czerner cond_resched(); 1297030ba6bcSAneesh Kumar K.V goto repeat; 1298030ba6bcSAneesh Kumar K.V } 129903179fe9STheodore Ts'o dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1300d2a17637SMingming Cao return -ENOSPC; 1301d2a17637SMingming Cao } 13029d0be502STheodore Ts'o ei->i_reserved_data_blocks++; 13030637c6f4STheodore Ts'o ei->i_reserved_meta_blocks += md_needed; 13040637c6f4STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 130539bc680aSDmitry Monakhov 1306d2a17637SMingming Cao return 0; /* success */ 1307d2a17637SMingming Cao } 1308d2a17637SMingming Cao 130912219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1310d2a17637SMingming Cao { 1311d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 13120637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 1313d2a17637SMingming Cao 1314cd213226SMingming Cao if (!to_free) 1315cd213226SMingming Cao return; /* Nothing to release, exit */ 1316cd213226SMingming Cao 1317d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1318cd213226SMingming Cao 13195a58ec87SLi Zefan trace_ext4_da_release_space(inode, to_free); 13200637c6f4STheodore Ts'o if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1321cd213226SMingming Cao /* 13220637c6f4STheodore Ts'o * if there aren't enough reserved blocks, then the 13230637c6f4STheodore Ts'o * counter is messed up somewhere. Since this 13240637c6f4STheodore Ts'o * function is called from invalidate page, it's 13250637c6f4STheodore Ts'o * harmless to return without any action. 1326cd213226SMingming Cao */ 13278de5c325STheodore Ts'o ext4_warning(inode->i_sb, "ext4_da_release_space: " 13280637c6f4STheodore Ts'o "ino %lu, to_free %d with only %d reserved " 13291084f252STheodore Ts'o "data blocks", inode->i_ino, to_free, 13300637c6f4STheodore Ts'o ei->i_reserved_data_blocks); 13310637c6f4STheodore Ts'o WARN_ON(1); 13320637c6f4STheodore Ts'o to_free = ei->i_reserved_data_blocks; 13330637c6f4STheodore Ts'o } 13340637c6f4STheodore Ts'o ei->i_reserved_data_blocks -= to_free; 13350637c6f4STheodore Ts'o 13360637c6f4STheodore Ts'o if (ei->i_reserved_data_blocks == 0) { 13370637c6f4STheodore Ts'o /* 13380637c6f4STheodore Ts'o * We can release all of the reserved metadata blocks 13390637c6f4STheodore Ts'o * only when we have written all of the delayed 13400637c6f4STheodore Ts'o * allocation blocks. 13417b415bf6SAditya Kali * Note that in case of bigalloc, i_reserved_meta_blocks, 13427b415bf6SAditya Kali * i_reserved_data_blocks, etc. refer to number of clusters. 
13430637c6f4STheodore Ts'o */ 134457042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 134572b8ab9dSEric Sandeen ei->i_reserved_meta_blocks); 1346ee5f4d9cSTheodore Ts'o ei->i_reserved_meta_blocks = 0; 13479d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 1348cd213226SMingming Cao } 1349cd213226SMingming Cao 135072b8ab9dSEric Sandeen /* update fs dirty data blocks counter */ 135157042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); 1352d2a17637SMingming Cao 1353d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 135460e58e0fSMingming Cao 13557b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); 1356d2a17637SMingming Cao } 1357d2a17637SMingming Cao 1358d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1359ca99fdd2SLukas Czerner unsigned int offset, 1360ca99fdd2SLukas Czerner unsigned int length) 1361d2a17637SMingming Cao { 1362d2a17637SMingming Cao int to_release = 0; 1363d2a17637SMingming Cao struct buffer_head *head, *bh; 1364d2a17637SMingming Cao unsigned int curr_off = 0; 13657b415bf6SAditya Kali struct inode *inode = page->mapping->host; 13667b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1367ca99fdd2SLukas Czerner unsigned int stop = offset + length; 13687b415bf6SAditya Kali int num_clusters; 136951865fdaSZheng Liu ext4_fsblk_t lblk; 1370d2a17637SMingming Cao 1371ca99fdd2SLukas Czerner BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); 1372ca99fdd2SLukas Czerner 1373d2a17637SMingming Cao head = page_buffers(page); 1374d2a17637SMingming Cao bh = head; 1375d2a17637SMingming Cao do { 1376d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1377d2a17637SMingming Cao 1378ca99fdd2SLukas Czerner if (next_off > stop) 1379ca99fdd2SLukas Czerner break; 1380ca99fdd2SLukas Czerner 1381d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1382d2a17637SMingming Cao to_release++; 1383d2a17637SMingming Cao clear_buffer_delay(bh); 1384d2a17637SMingming Cao } 1385d2a17637SMingming Cao curr_off = next_off; 1386d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 13877b415bf6SAditya Kali 138851865fdaSZheng Liu if (to_release) { 138951865fdaSZheng Liu lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 139051865fdaSZheng Liu ext4_es_remove_extent(inode, lblk, to_release); 139151865fdaSZheng Liu } 139251865fdaSZheng Liu 13937b415bf6SAditya Kali /* If we have released all the blocks belonging to a cluster, then we 13947b415bf6SAditya Kali * need to release the reserved space for that cluster. 
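 *
 * For example (bigalloc, s_cluster_ratio == 16): invalidating 16
 * delayed blocks that straddle two clusters releases a cluster's
 * reservation only where ext4_find_delalloc_cluster() confirms that
 * no other delayed block still lives in that cluster.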
*/ 13957b415bf6SAditya Kali num_clusters = EXT4_NUM_B2C(sbi, to_release); 13967b415bf6SAditya Kali while (num_clusters > 0) { 13977b415bf6SAditya Kali lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + 13987b415bf6SAditya Kali ((num_clusters - 1) << sbi->s_cluster_bits); 13997b415bf6SAditya Kali if (sbi->s_cluster_ratio == 1 || 14007d1b1fbcSZheng Liu !ext4_find_delalloc_cluster(inode, lblk)) 14017b415bf6SAditya Kali ext4_da_release_space(inode, 1); 14027b415bf6SAditya Kali 14037b415bf6SAditya Kali num_clusters--; 14047b415bf6SAditya Kali } 1405d2a17637SMingming Cao } 1406ac27a0ecSDave Kleikamp 1407ac27a0ecSDave Kleikamp /* 140864769240SAlex Tomas * Delayed allocation stuff 140964769240SAlex Tomas */ 141064769240SAlex Tomas 14114e7ea81dSJan Kara struct mpage_da_data { 14124e7ea81dSJan Kara struct inode *inode; 14134e7ea81dSJan Kara struct writeback_control *wbc; 14146b523df4SJan Kara 14154e7ea81dSJan Kara pgoff_t first_page; /* The first page to write */ 14164e7ea81dSJan Kara pgoff_t next_page; /* Current page to examine */ 14174e7ea81dSJan Kara pgoff_t last_page; /* Last page to examine */ 141864769240SAlex Tomas /* 14194e7ea81dSJan Kara * Extent to map - this can be after first_page because that can be 14204e7ea81dSJan Kara * fully mapped. We somewhat abuse m_flags to store whether the extent 14214e7ea81dSJan Kara * is delalloc or unwritten. 142264769240SAlex Tomas */ 14234e7ea81dSJan Kara struct ext4_map_blocks map; 14244e7ea81dSJan Kara struct ext4_io_submit io_submit; /* IO submission data */ 14254e7ea81dSJan Kara }; 142664769240SAlex Tomas 14274e7ea81dSJan Kara static void mpage_release_unused_pages(struct mpage_da_data *mpd, 14284e7ea81dSJan Kara bool invalidate) 1429c4a0c46eSAneesh Kumar K.V { 1430c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1431c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1432c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1433c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1434c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 14354e7ea81dSJan Kara 14364e7ea81dSJan Kara /* This is necessary when next_page == 0. 
*/ 14374e7ea81dSJan Kara if (mpd->first_page >= mpd->next_page) 14384e7ea81dSJan Kara return; 1439c4a0c46eSAneesh Kumar K.V 1440c7f5938aSCurt Wohlgemuth index = mpd->first_page; 1441c7f5938aSCurt Wohlgemuth end = mpd->next_page - 1; 14424e7ea81dSJan Kara if (invalidate) { 14434e7ea81dSJan Kara ext4_lblk_t start, last; 144451865fdaSZheng Liu start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 144551865fdaSZheng Liu last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); 144651865fdaSZheng Liu ext4_es_remove_extent(inode, start, last - start + 1); 14474e7ea81dSJan Kara } 144851865fdaSZheng Liu 144966bea92cSEric Sandeen pagevec_init(&pvec, 0); 1450c4a0c46eSAneesh Kumar K.V while (index <= end) { 1451c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1452c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1453c4a0c46eSAneesh Kumar K.V break; 1454c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1455c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 14569b1d0998SJan Kara if (page->index > end) 1457c4a0c46eSAneesh Kumar K.V break; 1458c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1459c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 14604e7ea81dSJan Kara if (invalidate) { 1461d47992f8SLukas Czerner block_invalidatepage(page, 0, PAGE_CACHE_SIZE); 1462c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 14634e7ea81dSJan Kara } 1464c4a0c46eSAneesh Kumar K.V unlock_page(page); 1465c4a0c46eSAneesh Kumar K.V } 14669b1d0998SJan Kara index = pvec.pages[nr_pages - 1]->index + 1; 14679b1d0998SJan Kara pagevec_release(&pvec); 1468c4a0c46eSAneesh Kumar K.V } 1469c4a0c46eSAneesh Kumar K.V } 1470c4a0c46eSAneesh Kumar K.V 1471df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1472df22291fSAneesh Kumar K.V { 1473df22291fSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 147492b97816STheodore Ts'o struct super_block *sb = inode->i_sb; 1475f78ee70dSLukas Czerner struct ext4_inode_info *ei = EXT4_I(inode); 147692b97816STheodore Ts'o 147792b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", 14785dee5437STheodore Ts'o EXT4_C2B(EXT4_SB(inode->i_sb), 1479f78ee70dSLukas Czerner ext4_count_free_clusters(sb))); 148092b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Free/Dirty block details"); 148192b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "free_blocks=%lld", 1482f78ee70dSLukas Czerner (long long) EXT4_C2B(EXT4_SB(sb), 148357042651STheodore Ts'o percpu_counter_sum(&sbi->s_freeclusters_counter))); 148492b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld", 1485f78ee70dSLukas Czerner (long long) EXT4_C2B(EXT4_SB(sb), 14867b415bf6SAditya Kali percpu_counter_sum(&sbi->s_dirtyclusters_counter))); 148792b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Block reservation details"); 148892b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u", 1489f78ee70dSLukas Czerner ei->i_reserved_data_blocks); 149092b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u", 1491f78ee70dSLukas Czerner ei->i_reserved_meta_blocks); 1492f78ee70dSLukas Czerner ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u", 1493f78ee70dSLukas Czerner ei->i_allocated_meta_blocks); 1494df22291fSAneesh Kumar K.V return; 1495df22291fSAneesh Kumar K.V } 1496df22291fSAneesh Kumar K.V 1497c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 149829fa89d0SAneesh Kumar K.V { 1499c364b22cSAneesh Kumar K.V return (buffer_delay(bh) || 
buffer_unwritten(bh)) && buffer_dirty(bh); 150029fa89d0SAneesh Kumar K.V } 150129fa89d0SAneesh Kumar K.V 150264769240SAlex Tomas /* 15035356f261SAditya Kali * This function grabs code from the very beginning of 15045356f261SAditya Kali * ext4_map_blocks, but assumes that the caller is in the delayed write 15055356f261SAditya Kali * path. This function looks up the requested blocks and sets the 15065356f261SAditya Kali * buffer delay bit under the protection of i_data_sem. 15075356f261SAditya Kali */ 15085356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, 15095356f261SAditya Kali struct ext4_map_blocks *map, 15105356f261SAditya Kali struct buffer_head *bh) 15115356f261SAditya Kali { 1512d100eef2SZheng Liu struct extent_status es; 15135356f261SAditya Kali int retval; 15145356f261SAditya Kali sector_t invalid_block = ~((sector_t) 0xffff); 1515921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 1516921f266bSDmitry Monakhov struct ext4_map_blocks orig_map; 1517921f266bSDmitry Monakhov 1518921f266bSDmitry Monakhov memcpy(&orig_map, map, sizeof(*map)); 1519921f266bSDmitry Monakhov #endif 15205356f261SAditya Kali 15215356f261SAditya Kali if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 15225356f261SAditya Kali invalid_block = ~0; 15235356f261SAditya Kali 15245356f261SAditya Kali map->m_flags = 0; 15255356f261SAditya Kali ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," 15265356f261SAditya Kali "logical block %lu\n", inode->i_ino, map->m_len, 15275356f261SAditya Kali (unsigned long) map->m_lblk); 1528d100eef2SZheng Liu 1529d100eef2SZheng Liu /* Look up the extent status tree first */ 1530d100eef2SZheng Liu if (ext4_es_lookup_extent(inode, iblock, &es)) { 153163b99968STheodore Ts'o ext4_es_lru_add(inode); 1532d100eef2SZheng Liu if (ext4_es_is_hole(&es)) { 1533d100eef2SZheng Liu retval = 0; 1534d100eef2SZheng Liu down_read((&EXT4_I(inode)->i_data_sem)); 1535d100eef2SZheng Liu goto add_delayed; 1536d100eef2SZheng Liu } 1537d100eef2SZheng Liu 1538d100eef2SZheng Liu /* 1539d100eef2SZheng Liu * Delayed extent could be allocated by fallocate. 1540d100eef2SZheng Liu * So we need to check it. 1541d100eef2SZheng Liu */ 1542d100eef2SZheng Liu if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) { 1543d100eef2SZheng Liu map_bh(bh, inode->i_sb, invalid_block); 1544d100eef2SZheng Liu set_buffer_new(bh); 1545d100eef2SZheng Liu set_buffer_delay(bh); 1546d100eef2SZheng Liu return 0; 1547d100eef2SZheng Liu } 1548d100eef2SZheng Liu 1549d100eef2SZheng Liu map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk; 1550d100eef2SZheng Liu retval = es.es_len - (iblock - es.es_lblk); 1551d100eef2SZheng Liu if (retval > map->m_len) 1552d100eef2SZheng Liu retval = map->m_len; 1553d100eef2SZheng Liu map->m_len = retval; 1554d100eef2SZheng Liu if (ext4_es_is_written(&es)) 1555d100eef2SZheng Liu map->m_flags |= EXT4_MAP_MAPPED; 1556d100eef2SZheng Liu else if (ext4_es_is_unwritten(&es)) 1557d100eef2SZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 1558d100eef2SZheng Liu else 1559d100eef2SZheng Liu BUG_ON(1); 1560d100eef2SZheng Liu 1561921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 1562921f266bSDmitry Monakhov ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); 1563921f266bSDmitry Monakhov #endif 1564d100eef2SZheng Liu return retval; 1565d100eef2SZheng Liu } 1566d100eef2SZheng Liu 15675356f261SAditya Kali /* 15685356f261SAditya Kali * Try to see if we can get the block without requesting a new 15695356f261SAditya Kali * file system block.
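 *
 * (Roughly: under i_data_sem we first check inline data and then ask
 * ext4_ext_map_blocks()/ext4_ind_map_blocks() with
 * EXT4_GET_BLOCKS_NO_PUT_HOLE, so a hole found during this lookup is
 * not cached in the extent status tree - we are about to record the
 * range as delayed instead.)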
15705356f261SAditya Kali */ 15715356f261SAditya Kali down_read((&EXT4_I(inode)->i_data_sem)); 15729c3569b5STao Ma if (ext4_has_inline_data(inode)) { 15739c3569b5STao Ma /* 15749c3569b5STao Ma * We will soon create blocks for this page, and let 15759c3569b5STao Ma * us pretend as if the blocks aren't allocated yet. 15769c3569b5STao Ma * In case of clusters, we have to handle the work 15779c3569b5STao Ma * of mapping from cluster so that the reserved space 15789c3569b5STao Ma * is calculated properly. 15799c3569b5STao Ma */ 15809c3569b5STao Ma if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) && 15819c3569b5STao Ma ext4_find_delalloc_cluster(inode, map->m_lblk)) 15829c3569b5STao Ma map->m_flags |= EXT4_MAP_FROM_CLUSTER; 15839c3569b5STao Ma retval = 0; 15849c3569b5STao Ma } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 1585d100eef2SZheng Liu retval = ext4_ext_map_blocks(NULL, inode, map, 1586d100eef2SZheng Liu EXT4_GET_BLOCKS_NO_PUT_HOLE); 15875356f261SAditya Kali else 1588d100eef2SZheng Liu retval = ext4_ind_map_blocks(NULL, inode, map, 1589d100eef2SZheng Liu EXT4_GET_BLOCKS_NO_PUT_HOLE); 15905356f261SAditya Kali 1591d100eef2SZheng Liu add_delayed: 15925356f261SAditya Kali if (retval == 0) { 1593f7fec032SZheng Liu int ret; 15945356f261SAditya Kali /* 15955356f261SAditya Kali * XXX: __block_prepare_write() unmaps passed block, 15965356f261SAditya Kali * is it OK? 15975356f261SAditya Kali */ 1598386ad67cSLukas Czerner /* 1599386ad67cSLukas Czerner * If the block was allocated from previously allocated cluster, 1600386ad67cSLukas Czerner * then we don't need to reserve it again. However we still need 1601386ad67cSLukas Czerner * to reserve metadata for every block we're going to write. 1602386ad67cSLukas Czerner */ 16035356f261SAditya Kali if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { 1604f7fec032SZheng Liu ret = ext4_da_reserve_space(inode, iblock); 1605f7fec032SZheng Liu if (ret) { 16065356f261SAditya Kali /* not enough space to reserve */ 1607f7fec032SZheng Liu retval = ret; 16085356f261SAditya Kali goto out_unlock; 16095356f261SAditya Kali } 1610386ad67cSLukas Czerner } else { 1611386ad67cSLukas Czerner ret = ext4_da_reserve_metadata(inode, iblock); 1612386ad67cSLukas Czerner if (ret) { 1613386ad67cSLukas Czerner /* not enough space to reserve */ 1614386ad67cSLukas Czerner retval = ret; 1615386ad67cSLukas Czerner goto out_unlock; 1616386ad67cSLukas Czerner } 1617f7fec032SZheng Liu } 16185356f261SAditya Kali 1619f7fec032SZheng Liu ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 1620fdc0212eSZheng Liu ~0, EXTENT_STATUS_DELAYED); 1621f7fec032SZheng Liu if (ret) { 1622f7fec032SZheng Liu retval = ret; 162351865fdaSZheng Liu goto out_unlock; 1624f7fec032SZheng Liu } 162551865fdaSZheng Liu 16265356f261SAditya Kali /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served 16275356f261SAditya Kali * and it should not appear on the bh->b_state. 
16285356f261SAditya Kali */ 16295356f261SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 16305356f261SAditya Kali 16315356f261SAditya Kali map_bh(bh, inode->i_sb, invalid_block); 16325356f261SAditya Kali set_buffer_new(bh); 16335356f261SAditya Kali set_buffer_delay(bh); 1634f7fec032SZheng Liu } else if (retval > 0) { 1635f7fec032SZheng Liu int ret; 1636*3be78c73STheodore Ts'o unsigned int status; 1637f7fec032SZheng Liu 163844fb851dSZheng Liu if (unlikely(retval != map->m_len)) { 163944fb851dSZheng Liu ext4_warning(inode->i_sb, 164044fb851dSZheng Liu "ES len assertion failed for inode " 164144fb851dSZheng Liu "%lu: retval %d != map->m_len %d", 164244fb851dSZheng Liu inode->i_ino, retval, map->m_len); 164344fb851dSZheng Liu WARN_ON(1); 1644921f266bSDmitry Monakhov } 1645921f266bSDmitry Monakhov 1646f7fec032SZheng Liu status = map->m_flags & EXT4_MAP_UNWRITTEN ? 1647f7fec032SZheng Liu EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 1648f7fec032SZheng Liu ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 1649f7fec032SZheng Liu map->m_pblk, status); 1650f7fec032SZheng Liu if (ret != 0) 1651f7fec032SZheng Liu retval = ret; 16525356f261SAditya Kali } 16535356f261SAditya Kali 16545356f261SAditya Kali out_unlock: 16555356f261SAditya Kali up_read((&EXT4_I(inode)->i_data_sem)); 16565356f261SAditya Kali 16575356f261SAditya Kali return retval; 16585356f261SAditya Kali } 16595356f261SAditya Kali 16605356f261SAditya Kali /* 1661b920c755STheodore Ts'o * This is a special get_blocks_t callback which is used by 1662b920c755STheodore Ts'o * ext4_da_write_begin(). It will either return a mapped block or 1663b920c755STheodore Ts'o * reserve space for a single block. 166429fa89d0SAneesh Kumar K.V * 166529fa89d0SAneesh Kumar K.V * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 166629fa89d0SAneesh Kumar K.V * We also have b_blocknr = -1 and b_bdev initialized properly 166729fa89d0SAneesh Kumar K.V * 166829fa89d0SAneesh Kumar K.V * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 166929fa89d0SAneesh Kumar K.V * We also have b_blocknr = the physical block mapping the unwritten extent 167029fa89d0SAneesh Kumar K.V * and b_bdev initialized properly. 167164769240SAlex Tomas */ 16729c3569b5STao Ma int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 16732ed88685STheodore Ts'o struct buffer_head *bh, int create) 167464769240SAlex Tomas { 16752ed88685STheodore Ts'o struct ext4_map_blocks map; 167664769240SAlex Tomas int ret = 0; 167764769240SAlex Tomas 167864769240SAlex Tomas BUG_ON(create == 0); 16792ed88685STheodore Ts'o BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 16802ed88685STheodore Ts'o 16812ed88685STheodore Ts'o map.m_lblk = iblock; 16822ed88685STheodore Ts'o map.m_len = 1; 168364769240SAlex Tomas 168464769240SAlex Tomas /* 168564769240SAlex Tomas * First, we need to know whether the block is already allocated; 168664769240SAlex Tomas * preallocated blocks are unmapped but should be treated 168764769240SAlex Tomas * the same as allocated blocks.
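 *
 * (Illustration: for a write into a fallocated range the lookup
 * returns an unwritten mapping, so the buffer ends up BH_Mapped |
 * BH_Unwritten | BH_New rather than BH_Delay, and no new data
 * reservation is taken - just as for an ordinary mapped block.)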
168864769240SAlex Tomas */ 16895356f261SAditya Kali ret = ext4_da_map_blocks(inode, iblock, &map, bh); 16905356f261SAditya Kali if (ret <= 0) 16912ed88685STheodore Ts'o return ret; 169264769240SAlex Tomas 16932ed88685STheodore Ts'o map_bh(bh, inode->i_sb, map.m_pblk); 16942ed88685STheodore Ts'o bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 16952ed88685STheodore Ts'o 16962ed88685STheodore Ts'o if (buffer_unwritten(bh)) { 16972ed88685STheodore Ts'o /* A delayed write to unwritten bh should be marked 16982ed88685STheodore Ts'o * new and mapped. Mapped ensures that we don't do 16992ed88685STheodore Ts'o * get_block multiple times when we write to the same 17002ed88685STheodore Ts'o * offset and new ensures that we do proper zero out 17012ed88685STheodore Ts'o * for partial write. 17022ed88685STheodore Ts'o */ 17032ed88685STheodore Ts'o set_buffer_new(bh); 1704c8205636STheodore Ts'o set_buffer_mapped(bh); 17052ed88685STheodore Ts'o } 17062ed88685STheodore Ts'o return 0; 170764769240SAlex Tomas } 170861628a3fSMingming Cao 170962e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh) 171062e086beSAneesh Kumar K.V { 171162e086beSAneesh Kumar K.V get_bh(bh); 171262e086beSAneesh Kumar K.V return 0; 171362e086beSAneesh Kumar K.V } 171462e086beSAneesh Kumar K.V 171562e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh) 171662e086beSAneesh Kumar K.V { 171762e086beSAneesh Kumar K.V put_bh(bh); 171862e086beSAneesh Kumar K.V return 0; 171962e086beSAneesh Kumar K.V } 172062e086beSAneesh Kumar K.V 172162e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page, 172262e086beSAneesh Kumar K.V unsigned int len) 172362e086beSAneesh Kumar K.V { 172462e086beSAneesh Kumar K.V struct address_space *mapping = page->mapping; 172562e086beSAneesh Kumar K.V struct inode *inode = mapping->host; 17263fdcfb66STao Ma struct buffer_head *page_bufs = NULL; 172762e086beSAneesh Kumar K.V handle_t *handle = NULL; 17283fdcfb66STao Ma int ret = 0, err = 0; 17293fdcfb66STao Ma int inline_data = ext4_has_inline_data(inode); 17303fdcfb66STao Ma struct buffer_head *inode_bh = NULL; 173162e086beSAneesh Kumar K.V 1732cb20d518STheodore Ts'o ClearPageChecked(page); 17333fdcfb66STao Ma 17343fdcfb66STao Ma if (inline_data) { 17353fdcfb66STao Ma BUG_ON(page->index != 0); 17363fdcfb66STao Ma BUG_ON(len > ext4_get_max_inline_size(inode)); 17373fdcfb66STao Ma inode_bh = ext4_journalled_write_inline_data(inode, len, page); 17383fdcfb66STao Ma if (inode_bh == NULL) 17393fdcfb66STao Ma goto out; 17403fdcfb66STao Ma } else { 174162e086beSAneesh Kumar K.V page_bufs = page_buffers(page); 17423fdcfb66STao Ma if (!page_bufs) { 17433fdcfb66STao Ma BUG(); 17443fdcfb66STao Ma goto out; 17453fdcfb66STao Ma } 17463fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 17473fdcfb66STao Ma NULL, bget_one); 17483fdcfb66STao Ma } 174962e086beSAneesh Kumar K.V /* As soon as we unlock the page, it can go away, but we have 175062e086beSAneesh Kumar K.V * references to buffers so we are safe */ 175162e086beSAneesh Kumar K.V unlock_page(page); 175262e086beSAneesh Kumar K.V 17539924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 17549924a92aSTheodore Ts'o ext4_writepage_trans_blocks(inode)); 175562e086beSAneesh Kumar K.V if (IS_ERR(handle)) { 175662e086beSAneesh Kumar K.V ret = PTR_ERR(handle); 175762e086beSAneesh Kumar K.V goto out; 175862e086beSAneesh Kumar K.V } 175962e086beSAneesh Kumar K.V 1760441c8508SCurt Wohlgemuth 
BUG_ON(!ext4_handle_valid(handle)); 1761441c8508SCurt Wohlgemuth 17623fdcfb66STao Ma if (inline_data) { 17633fdcfb66STao Ma ret = ext4_journal_get_write_access(handle, inode_bh); 17643fdcfb66STao Ma 17653fdcfb66STao Ma err = ext4_handle_dirty_metadata(handle, inode, inode_bh); 17663fdcfb66STao Ma 17673fdcfb66STao Ma } else { 1768f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 176962e086beSAneesh Kumar K.V do_journal_get_write_access); 177062e086beSAneesh Kumar K.V 1771f19d5870STao Ma err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 177262e086beSAneesh Kumar K.V write_end_fn); 17733fdcfb66STao Ma } 177462e086beSAneesh Kumar K.V if (ret == 0) 177562e086beSAneesh Kumar K.V ret = err; 17762d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 177762e086beSAneesh Kumar K.V err = ext4_journal_stop(handle); 177862e086beSAneesh Kumar K.V if (!ret) 177962e086beSAneesh Kumar K.V ret = err; 178062e086beSAneesh Kumar K.V 17813fdcfb66STao Ma if (!ext4_has_inline_data(inode)) 17823fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 17833fdcfb66STao Ma NULL, bput_one); 178419f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 178562e086beSAneesh Kumar K.V out: 17863fdcfb66STao Ma brelse(inode_bh); 178762e086beSAneesh Kumar K.V return ret; 178862e086beSAneesh Kumar K.V } 178962e086beSAneesh Kumar K.V 179061628a3fSMingming Cao /* 179143ce1d23SAneesh Kumar K.V * Note that we don't need to start a transaction unless we're journaling data 179243ce1d23SAneesh Kumar K.V * because we should have holes filled from ext4_page_mkwrite(). We don't even 179343ce1d23SAneesh Kumar K.V * need to file the inode on the transaction's list in ordered mode because if 179443ce1d23SAneesh Kumar K.V * we are writing back data added by write(), the inode is already there and if 179543ce1d23SAneesh Kumar K.V * we are writing back data modified via mmap(), no one guarantees in which 179643ce1d23SAneesh Kumar K.V * transaction the data will hit the disk. In case we are journaling data, we 179743ce1d23SAneesh Kumar K.V * cannot start a transaction directly because transaction start ranks above page 179843ce1d23SAneesh Kumar K.V * lock so we have to do some magic. 179943ce1d23SAneesh Kumar K.V * 1800b920c755STheodore Ts'o * This function can get called via... 180120970ba6STheodore Ts'o * - ext4_writepages after taking page lock (have journal handle) 1802b920c755STheodore Ts'o * - journal_submit_inode_data_buffers (no journal handle) 1803f6463b0dSArtem Bityutskiy * - shrink_page_list via the kswapd/direct reclaim (no journal handle) 1804b920c755STheodore Ts'o * - grab_page_cache when doing write_begin (have journal handle) 180543ce1d23SAneesh Kumar K.V * 180643ce1d23SAneesh Kumar K.V * We don't do any block allocation in this function. If we have a page with 180743ce1d23SAneesh Kumar K.V * multiple blocks we need to write those buffer_heads that are mapped. This 180843ce1d23SAneesh Kumar K.V * is important for mmap-based writes. So if, with blocksize 1K, we do 180943ce1d23SAneesh Kumar K.V * truncate(f, 1024); 181043ce1d23SAneesh Kumar K.V * a = mmap(f, 0, 4096); 181143ce1d23SAneesh Kumar K.V * a[0] = 'a'; 181243ce1d23SAneesh Kumar K.V * truncate(f, 4096); 181343ce1d23SAneesh Kumar K.V * we have the first buffer_head in the page mapped via the page_mkwrite callback 181490802ed9SPaul Bolle * but the other buffer_heads would be unmapped but dirty (dirtying done via 181543ce1d23SAneesh Kumar K.V * do_wp_page). So writepage should write the first block.
If we modify 181643ce1d23SAneesh Kumar K.V * the mmap area beyond 1024 we will again get a page_fault and the 181743ce1d23SAneesh Kumar K.V * page_mkwrite callback will do the block allocation and mark the 181843ce1d23SAneesh Kumar K.V * buffer_heads mapped. 181943ce1d23SAneesh Kumar K.V * 182043ce1d23SAneesh Kumar K.V * We redirty the page if we have any buffer_heads that are either delayed or 182143ce1d23SAneesh Kumar K.V * unwritten in the page. 182243ce1d23SAneesh Kumar K.V * 182343ce1d23SAneesh Kumar K.V * We can get recursively called as shown below. 182443ce1d23SAneesh Kumar K.V * 182543ce1d23SAneesh Kumar K.V * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 182643ce1d23SAneesh Kumar K.V * ext4_writepage() 182743ce1d23SAneesh Kumar K.V * 182843ce1d23SAneesh Kumar K.V * But since we don't do any block allocation we should not deadlock. 182943ce1d23SAneesh Kumar K.V * The page also has the dirty flag cleared so we don't get a recursive page_lock. 183061628a3fSMingming Cao */ 183143ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page, 183264769240SAlex Tomas struct writeback_control *wbc) 183364769240SAlex Tomas { 1834f8bec370SJan Kara int ret = 0; 183561628a3fSMingming Cao loff_t size; 1836498e5f24STheodore Ts'o unsigned int len; 1837744692dcSJiaying Zhang struct buffer_head *page_bufs = NULL; 183861628a3fSMingming Cao struct inode *inode = page->mapping->host; 183936ade451SJan Kara struct ext4_io_submit io_submit; 184064769240SAlex Tomas 1841a9c667f8SLukas Czerner trace_ext4_writepage(page); 184261628a3fSMingming Cao size = i_size_read(inode); 184361628a3fSMingming Cao if (page->index == size >> PAGE_CACHE_SHIFT) 184461628a3fSMingming Cao len = size & ~PAGE_CACHE_MASK; 184561628a3fSMingming Cao else 184661628a3fSMingming Cao len = PAGE_CACHE_SIZE; 184761628a3fSMingming Cao 1848f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page); 184964769240SAlex Tomas /* 1850fe386132SJan Kara * We cannot do block allocation or other extent handling in this 1851fe386132SJan Kara * function. If there are buffers needing that, we have to redirty 1852fe386132SJan Kara * the page. But we may reach here when we do a journal commit via 1853fe386132SJan Kara * journal_submit_inode_data_buffers() and in that case we must write 1854fe386132SJan Kara * allocated buffers to achieve data=ordered mode guarantees. 185564769240SAlex Tomas */ 1856f19d5870STao Ma if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, 1857c364b22cSAneesh Kumar K.V ext4_bh_delay_or_unwritten)) { 185861628a3fSMingming Cao redirty_page_for_writepage(wbc, page); 1859fe386132SJan Kara if (current->flags & PF_MEMALLOC) { 1860fe386132SJan Kara /* 1861fe386132SJan Kara * For memory cleaning there's no point in writing only 1862fe386132SJan Kara * some buffers. So just bail out. Warn if we came here 1863fe386132SJan Kara * from direct reclaim. 1864fe386132SJan Kara */ 1865fe386132SJan Kara WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) 1866fe386132SJan Kara == PF_MEMALLOC); 186761628a3fSMingming Cao unlock_page(page); 186861628a3fSMingming Cao return 0; 186961628a3fSMingming Cao } 1870f0e6c985SAneesh Kumar K.V } 187164769240SAlex Tomas 1872cb20d518STheodore Ts'o if (PageChecked(page) && ext4_should_journal_data(inode)) 187343ce1d23SAneesh Kumar K.V /* 187443ce1d23SAneesh Kumar K.V * It's mmapped pagecache. Add buffers and journal it. There 187543ce1d23SAneesh Kumar K.V * doesn't seem much point in redirtying the page here.
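 *
 * (PageChecked is set by ext4_journalled_set_page_dirty() when the
 * page was dirtied, e.g. through a shared mmap; seeing it here in
 * data=journal mode means the buffers still have to go through the
 * journal rather than straight to the block device.)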
187643ce1d23SAneesh Kumar K.V */ 18773f0ca309SWu Fengguang return __ext4_journalled_writepage(page, len); 187843ce1d23SAneesh Kumar K.V 187997a851edSJan Kara ext4_io_submit_init(&io_submit, wbc); 188097a851edSJan Kara io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS); 188197a851edSJan Kara if (!io_submit.io_end) { 188297a851edSJan Kara redirty_page_for_writepage(wbc, page); 188397a851edSJan Kara unlock_page(page); 188497a851edSJan Kara return -ENOMEM; 188597a851edSJan Kara } 188636ade451SJan Kara ret = ext4_bio_write_page(&io_submit, page, len, wbc); 188736ade451SJan Kara ext4_io_submit(&io_submit); 188897a851edSJan Kara /* Drop io_end reference we got from init */ 188997a851edSJan Kara ext4_put_io_end_defer(io_submit.io_end); 189064769240SAlex Tomas return ret; 189164769240SAlex Tomas } 189264769240SAlex Tomas 18934e7ea81dSJan Kara #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay)) 18944e7ea81dSJan Kara 189561628a3fSMingming Cao /* 1896fffb2739SJan Kara * mballoc gives us at most this number of blocks... 1897fffb2739SJan Kara * XXX: That seems to be only a limitation of ext4_mb_normalize_request(). 1898fffb2739SJan Kara * The rest of mballoc seems to handle chunks up to the full group size. 189961628a3fSMingming Cao */ 1900fffb2739SJan Kara #define MAX_WRITEPAGES_EXTENT_LEN 2048 1901525f4ed8SMingming Cao 1902525f4ed8SMingming Cao /* 19034e7ea81dSJan Kara * mpage_add_bh_to_extent - try to add bh to extent of blocks to map 19044e7ea81dSJan Kara * 19054e7ea81dSJan Kara * @mpd - extent of blocks 19064e7ea81dSJan Kara * @lblk - logical number of the block in the file 19074e7ea81dSJan Kara * @b_state - b_state of the buffer head added 19084e7ea81dSJan Kara * 19094e7ea81dSJan Kara * The function is used to collect contiguous blocks in the same state 19104e7ea81dSJan Kara */ 19114e7ea81dSJan Kara static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk, 19124e7ea81dSJan Kara unsigned long b_state) 19134e7ea81dSJan Kara { 19144e7ea81dSJan Kara struct ext4_map_blocks *map = &mpd->map; 19154e7ea81dSJan Kara 19164e7ea81dSJan Kara /* Don't go larger than mballoc is willing to allocate */ 19174e7ea81dSJan Kara if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) 19184e7ea81dSJan Kara return 0; 19194e7ea81dSJan Kara 19204e7ea81dSJan Kara /* First block in the extent? */ 19214e7ea81dSJan Kara if (map->m_len == 0) { 19224e7ea81dSJan Kara map->m_lblk = lblk; 19234e7ea81dSJan Kara map->m_len = 1; 19244e7ea81dSJan Kara map->m_flags = b_state & BH_FLAGS; 19254e7ea81dSJan Kara return 1; 19264e7ea81dSJan Kara } 19274e7ea81dSJan Kara 19284e7ea81dSJan Kara /* Can we merge the block into our big extent?
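 *
 * For instance (illustrative numbers): an extent of {m_lblk = 10,
 * m_len = 2} plus a delayed block at lblk 12 whose b_state carries
 * the same BH_FLAGS grows to m_len = 3, while block 13, or block 12
 * in a different delay/unwritten state, must start a new extent.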
*/ 19294e7ea81dSJan Kara if (lblk == map->m_lblk + map->m_len && 19304e7ea81dSJan Kara (b_state & BH_FLAGS) == map->m_flags) { 19314e7ea81dSJan Kara map->m_len++; 19324e7ea81dSJan Kara return 1; 19334e7ea81dSJan Kara } 19344e7ea81dSJan Kara return 0; 19354e7ea81dSJan Kara } 19364e7ea81dSJan Kara 19374e7ea81dSJan Kara static bool add_page_bufs_to_extent(struct mpage_da_data *mpd, 19384e7ea81dSJan Kara struct buffer_head *head, 19394e7ea81dSJan Kara struct buffer_head *bh, 19404e7ea81dSJan Kara ext4_lblk_t lblk) 19414e7ea81dSJan Kara { 19424e7ea81dSJan Kara struct inode *inode = mpd->inode; 19434e7ea81dSJan Kara ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) 19444e7ea81dSJan Kara >> inode->i_blkbits; 19454e7ea81dSJan Kara 19464e7ea81dSJan Kara do { 19474e7ea81dSJan Kara BUG_ON(buffer_locked(bh)); 19484e7ea81dSJan Kara 19494e7ea81dSJan Kara if (!buffer_dirty(bh) || !buffer_mapped(bh) || 19504e7ea81dSJan Kara (!buffer_delay(bh) && !buffer_unwritten(bh)) || 19514e7ea81dSJan Kara lblk >= blocks) { 19524e7ea81dSJan Kara /* Found extent to map? */ 19534e7ea81dSJan Kara if (mpd->map.m_len) 19544e7ea81dSJan Kara return false; 19554e7ea81dSJan Kara if (lblk >= blocks) 19564e7ea81dSJan Kara return true; 19574e7ea81dSJan Kara continue; 19584e7ea81dSJan Kara } 19594e7ea81dSJan Kara if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state)) 19604e7ea81dSJan Kara return false; 19614e7ea81dSJan Kara } while (lblk++, (bh = bh->b_this_page) != head); 19624e7ea81dSJan Kara return true; 19634e7ea81dSJan Kara } 19644e7ea81dSJan Kara 19654e7ea81dSJan Kara static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) 19664e7ea81dSJan Kara { 19674e7ea81dSJan Kara int len; 19684e7ea81dSJan Kara loff_t size = i_size_read(mpd->inode); 19694e7ea81dSJan Kara int err; 19704e7ea81dSJan Kara 19714e7ea81dSJan Kara BUG_ON(page->index != mpd->first_page); 19724e7ea81dSJan Kara if (page->index == size >> PAGE_CACHE_SHIFT) 19734e7ea81dSJan Kara len = size & ~PAGE_CACHE_MASK; 19744e7ea81dSJan Kara else 19754e7ea81dSJan Kara len = PAGE_CACHE_SIZE; 19764e7ea81dSJan Kara clear_page_dirty_for_io(page); 19774e7ea81dSJan Kara err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc); 19784e7ea81dSJan Kara if (!err) 19794e7ea81dSJan Kara mpd->wbc->nr_to_write--; 19804e7ea81dSJan Kara mpd->first_page++; 19814e7ea81dSJan Kara 19824e7ea81dSJan Kara return err; 19834e7ea81dSJan Kara } 19844e7ea81dSJan Kara 19854e7ea81dSJan Kara /* 19864e7ea81dSJan Kara * mpage_map_and_submit_buffers - update buffers corresponding to changed extent and 19874e7ea81dSJan Kara * submit fully mapped pages for IO 19884e7ea81dSJan Kara * 19894e7ea81dSJan Kara * @mpd - description of extent to map, on return next extent to map 19904e7ea81dSJan Kara * 19914e7ea81dSJan Kara * Scan buffers corresponding to changed extent (we expect corresponding pages 19924e7ea81dSJan Kara * to be already locked) and update buffer state according to new extent state. 19934e7ea81dSJan Kara * We map delalloc buffers to their physical location, clear unwritten bits, 19944e7ea81dSJan Kara * and mark buffers as uninit when we perform writes to uninitialized extents 19954e7ea81dSJan Kara * and do extent conversion after IO is finished. If the last page is not fully 19964e7ea81dSJan Kara * mapped, we update @map to the next extent in the last page that needs 19974e7ea81dSJan Kara * mapping. Otherwise we submit the page for IO.
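 *
 * A rough sketch of the walk performed below (illustrative only):
 *
 *	for each page covering map->m_lblk .. m_lblk + m_len - 1:
 *		for each buffer of the page inside the extent:
 *			if delayed: clear BH_Delay, set b_blocknr
 *			clear BH_Unwritten
 *		if a buffer past the extent still needs mapping:
 *			record it in @map and return
 *		submit the page for IO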
19984e7ea81dSJan Kara */ 19994e7ea81dSJan Kara static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) 20004e7ea81dSJan Kara { 20014e7ea81dSJan Kara struct pagevec pvec; 20024e7ea81dSJan Kara int nr_pages, i; 20034e7ea81dSJan Kara struct inode *inode = mpd->inode; 20044e7ea81dSJan Kara struct buffer_head *head, *bh; 20054e7ea81dSJan Kara int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; 20064e7ea81dSJan Kara ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) 20074e7ea81dSJan Kara >> inode->i_blkbits; 20084e7ea81dSJan Kara pgoff_t start, end; 20094e7ea81dSJan Kara ext4_lblk_t lblk; 20104e7ea81dSJan Kara sector_t pblock; 20114e7ea81dSJan Kara int err; 20124e7ea81dSJan Kara 20134e7ea81dSJan Kara start = mpd->map.m_lblk >> bpp_bits; 20144e7ea81dSJan Kara end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; 20154e7ea81dSJan Kara lblk = start << bpp_bits; 20164e7ea81dSJan Kara pblock = mpd->map.m_pblk; 20174e7ea81dSJan Kara 20184e7ea81dSJan Kara pagevec_init(&pvec, 0); 20194e7ea81dSJan Kara while (start <= end) { 20204e7ea81dSJan Kara nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start, 20214e7ea81dSJan Kara PAGEVEC_SIZE); 20224e7ea81dSJan Kara if (nr_pages == 0) 20234e7ea81dSJan Kara break; 20244e7ea81dSJan Kara for (i = 0; i < nr_pages; i++) { 20254e7ea81dSJan Kara struct page *page = pvec.pages[i]; 20264e7ea81dSJan Kara 20274e7ea81dSJan Kara if (page->index > end) 20284e7ea81dSJan Kara break; 20294e7ea81dSJan Kara /* Up to 'end' pages must be contiguous */ 20304e7ea81dSJan Kara BUG_ON(page->index != start); 20314e7ea81dSJan Kara bh = head = page_buffers(page); 20324e7ea81dSJan Kara do { 20334e7ea81dSJan Kara if (lblk < mpd->map.m_lblk) 20344e7ea81dSJan Kara continue; 20354e7ea81dSJan Kara if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { 20364e7ea81dSJan Kara /* 20374e7ea81dSJan Kara * Buffer after end of mapped extent. 20384e7ea81dSJan Kara * Find next buffer in the page to map. 20394e7ea81dSJan Kara */ 20404e7ea81dSJan Kara mpd->map.m_len = 0; 20414e7ea81dSJan Kara mpd->map.m_flags = 0; 20424e7ea81dSJan Kara add_page_bufs_to_extent(mpd, head, bh, 20434e7ea81dSJan Kara lblk); 20444e7ea81dSJan Kara pagevec_release(&pvec); 20454e7ea81dSJan Kara return 0; 20464e7ea81dSJan Kara } 20474e7ea81dSJan Kara if (buffer_delay(bh)) { 20484e7ea81dSJan Kara clear_buffer_delay(bh); 20494e7ea81dSJan Kara bh->b_blocknr = pblock++; 20504e7ea81dSJan Kara } 20514e7ea81dSJan Kara clear_buffer_unwritten(bh); 20524e7ea81dSJan Kara } while (++lblk < blocks && 20534e7ea81dSJan Kara (bh = bh->b_this_page) != head); 20544e7ea81dSJan Kara 20554e7ea81dSJan Kara /* 20564e7ea81dSJan Kara * FIXME: This is going to break if dioread_nolock 20574e7ea81dSJan Kara * supports blocksize < pagesize as we will try to 20584e7ea81dSJan Kara * convert potentially unmapped parts of the inode. 20594e7ea81dSJan Kara */ 20604e7ea81dSJan Kara mpd->io_submit.io_end->size += PAGE_CACHE_SIZE; 20614e7ea81dSJan Kara /* Page fully mapped - let IO run! */ 20624e7ea81dSJan Kara err = mpage_submit_page(mpd, page); 20634e7ea81dSJan Kara if (err < 0) { 20644e7ea81dSJan Kara pagevec_release(&pvec); 20654e7ea81dSJan Kara return err; 20664e7ea81dSJan Kara } 20674e7ea81dSJan Kara start++; 20684e7ea81dSJan Kara } 20694e7ea81dSJan Kara pagevec_release(&pvec); 20704e7ea81dSJan Kara } 20714e7ea81dSJan Kara /* Extent fully mapped and matches the page boundary. We are done.
*/ 20724e7ea81dSJan Kara mpd->map.m_len = 0; 20734e7ea81dSJan Kara mpd->map.m_flags = 0; 20744e7ea81dSJan Kara return 0; 20754e7ea81dSJan Kara } 20764e7ea81dSJan Kara 20774e7ea81dSJan Kara static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd) 20784e7ea81dSJan Kara { 20794e7ea81dSJan Kara struct inode *inode = mpd->inode; 20804e7ea81dSJan Kara struct ext4_map_blocks *map = &mpd->map; 20814e7ea81dSJan Kara int get_blocks_flags; 20824e7ea81dSJan Kara int err; 20834e7ea81dSJan Kara 20844e7ea81dSJan Kara trace_ext4_da_write_pages_extent(inode, map); 20854e7ea81dSJan Kara /* 20864e7ea81dSJan Kara * Call ext4_map_blocks() to allocate any delayed allocation blocks, or 20874e7ea81dSJan Kara * to convert an uninitialized extent to be initialized (in the case 20884e7ea81dSJan Kara * where we have written into one or more preallocated blocks). It is 20894e7ea81dSJan Kara * possible that we're going to need more metadata blocks than 20904e7ea81dSJan Kara * previously reserved. However we must not fail because we're in 20914e7ea81dSJan Kara * writeback and there is nothing we can do about it so it might result 20924e7ea81dSJan Kara * in data loss. So use reserved blocks to allocate metadata if 20934e7ea81dSJan Kara * possible. 20944e7ea81dSJan Kara * 20954e7ea81dSJan Kara * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks 20964e7ea81dSJan Kara * in question are delalloc blocks. This affects functions in many 20974e7ea81dSJan Kara * different parts of the allocation call path. This flag exists 20984e7ea81dSJan Kara * primarily because we don't want to change *many* call functions, so 20994e7ea81dSJan Kara * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag 21004e7ea81dSJan Kara * once the inode's allocation semaphore is taken. 
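 *
 * To illustrate how the flags below compose: a delayed-allocation
 * page on a dioread_nolock mount is mapped with
 * EXT4_GET_BLOCKS_CREATE | EXT4_GET_BLOCKS_METADATA_NOFAIL |
 * EXT4_GET_BLOCKS_IO_CREATE_EXT | EXT4_GET_BLOCKS_DELALLOC_RESERVE.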
21014e7ea81dSJan Kara */ 21024e7ea81dSJan Kara get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 21034e7ea81dSJan Kara EXT4_GET_BLOCKS_METADATA_NOFAIL; 21044e7ea81dSJan Kara if (ext4_should_dioread_nolock(inode)) 21054e7ea81dSJan Kara get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 21064e7ea81dSJan Kara if (map->m_flags & (1 << BH_Delay)) 21074e7ea81dSJan Kara get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 21084e7ea81dSJan Kara 21094e7ea81dSJan Kara err = ext4_map_blocks(handle, inode, map, get_blocks_flags); 21104e7ea81dSJan Kara if (err < 0) 21114e7ea81dSJan Kara return err; 21126b523df4SJan Kara if (map->m_flags & EXT4_MAP_UNINIT) { 21136b523df4SJan Kara if (!mpd->io_submit.io_end->handle && 21146b523df4SJan Kara ext4_handle_valid(handle)) { 21156b523df4SJan Kara mpd->io_submit.io_end->handle = handle->h_rsv_handle; 21166b523df4SJan Kara handle->h_rsv_handle = NULL; 21176b523df4SJan Kara } 21183613d228SJan Kara ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end); 21196b523df4SJan Kara } 21204e7ea81dSJan Kara 21214e7ea81dSJan Kara BUG_ON(map->m_len == 0); 21224e7ea81dSJan Kara if (map->m_flags & EXT4_MAP_NEW) { 21234e7ea81dSJan Kara struct block_device *bdev = inode->i_sb->s_bdev; 21244e7ea81dSJan Kara int i; 21254e7ea81dSJan Kara 21264e7ea81dSJan Kara for (i = 0; i < map->m_len; i++) 21274e7ea81dSJan Kara unmap_underlying_metadata(bdev, map->m_pblk + i); 21284e7ea81dSJan Kara } 21294e7ea81dSJan Kara return 0; 21304e7ea81dSJan Kara } 21314e7ea81dSJan Kara 21324e7ea81dSJan Kara /* 21334e7ea81dSJan Kara * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length 21344e7ea81dSJan Kara * mpd->len and submit pages underlying it for IO 21354e7ea81dSJan Kara * 21364e7ea81dSJan Kara * @handle - handle for journal operations 21374e7ea81dSJan Kara * @mpd - extent to map 21384e7ea81dSJan Kara * 21394e7ea81dSJan Kara * The function maps the extent starting at mpd->lblk of length mpd->len. If it is 21404e7ea81dSJan Kara * delayed, blocks are allocated, if it is unwritten, we may need to convert 21414e7ea81dSJan Kara * them to initialized or split the described range from a larger unwritten 21424e7ea81dSJan Kara * extent. Note that we need not map all the described range since allocation 21434e7ea81dSJan Kara * can return fewer blocks or the range is covered by more unwritten extents. We 21444e7ea81dSJan Kara * cannot map more because we are limited by reserved transaction credits. On 21454e7ea81dSJan Kara * the other hand we always make sure that the last touched page is fully 21464e7ea81dSJan Kara * mapped so that it can be written out (and thus forward progress is 21474e7ea81dSJan Kara * guaranteed). After mapping we submit all mapped pages for IO.
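 *
 * (Concretely, the do-while loop below alternates
 * mpage_map_one_extent() and mpage_map_and_submit_buffers() until
 * map->m_len drops to zero, i.e. until the whole range described by
 * @mpd has been mapped and every fully mapped page submitted.)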
21484e7ea81dSJan Kara */ 21494e7ea81dSJan Kara static int mpage_map_and_submit_extent(handle_t *handle, 2150cb530541STheodore Ts'o struct mpage_da_data *mpd, 2151cb530541STheodore Ts'o bool *give_up_on_write) 21524e7ea81dSJan Kara { 21534e7ea81dSJan Kara struct inode *inode = mpd->inode; 21544e7ea81dSJan Kara struct ext4_map_blocks *map = &mpd->map; 21554e7ea81dSJan Kara int err; 21564e7ea81dSJan Kara loff_t disksize; 21574e7ea81dSJan Kara 21584e7ea81dSJan Kara mpd->io_submit.io_end->offset = 21594e7ea81dSJan Kara ((loff_t)map->m_lblk) << inode->i_blkbits; 216027d7c4edSJan Kara do { 21614e7ea81dSJan Kara err = mpage_map_one_extent(handle, mpd); 21624e7ea81dSJan Kara if (err < 0) { 21634e7ea81dSJan Kara struct super_block *sb = inode->i_sb; 21644e7ea81dSJan Kara 2165cb530541STheodore Ts'o if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) 2166cb530541STheodore Ts'o goto invalidate_dirty_pages; 21674e7ea81dSJan Kara /* 2168cb530541STheodore Ts'o * Let the upper layers retry transient errors. 2169cb530541STheodore Ts'o * In the case of ENOSPC, if ext4_count_free_clusters() 2170cb530541STheodore Ts'o * is non-zero, a commit should free up blocks. 21714e7ea81dSJan Kara */ 2172cb530541STheodore Ts'o if ((err == -ENOMEM) || 2173cb530541STheodore Ts'o (err == -ENOSPC && ext4_count_free_clusters(sb))) 2174cb530541STheodore Ts'o return err; 21754e7ea81dSJan Kara ext4_msg(sb, KERN_CRIT, 21764e7ea81dSJan Kara "Delayed block allocation failed for " 21774e7ea81dSJan Kara "inode %lu at logical offset %llu with" 21784e7ea81dSJan Kara " max blocks %u with error %d", 21794e7ea81dSJan Kara inode->i_ino, 21804e7ea81dSJan Kara (unsigned long long)map->m_lblk, 2181cb530541STheodore Ts'o (unsigned)map->m_len, -err); 21824e7ea81dSJan Kara ext4_msg(sb, KERN_CRIT, 21834e7ea81dSJan Kara "This should not happen!! Data will " 21844e7ea81dSJan Kara "be lost\n"); 21854e7ea81dSJan Kara if (err == -ENOSPC) 21864e7ea81dSJan Kara ext4_print_free_blocks(inode); 2187cb530541STheodore Ts'o invalidate_dirty_pages: 2188cb530541STheodore Ts'o *give_up_on_write = true; 21894e7ea81dSJan Kara return err; 21904e7ea81dSJan Kara } 21914e7ea81dSJan Kara /* 21924e7ea81dSJan Kara * Update buffer state, submit mapped pages, and get us a new 21934e7ea81dSJan Kara * extent to map 21944e7ea81dSJan Kara */ 21954e7ea81dSJan Kara err = mpage_map_and_submit_buffers(mpd); 21964e7ea81dSJan Kara if (err < 0) 21974e7ea81dSJan Kara return err; 219827d7c4edSJan Kara } while (map->m_len); 21994e7ea81dSJan Kara 22004e7ea81dSJan Kara /* Update on-disk size after IO is submitted */ 22014e7ea81dSJan Kara disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT; 22024e7ea81dSJan Kara if (disksize > i_size_read(inode)) 22034e7ea81dSJan Kara disksize = i_size_read(inode); 22044e7ea81dSJan Kara if (disksize > EXT4_I(inode)->i_disksize) { 22054e7ea81dSJan Kara int err2; 22064e7ea81dSJan Kara 22074e7ea81dSJan Kara ext4_update_i_disksize(inode, disksize); 22084e7ea81dSJan Kara err2 = ext4_mark_inode_dirty(handle, inode); 22094e7ea81dSJan Kara if (err2) 22104e7ea81dSJan Kara ext4_error(inode->i_sb, 22114e7ea81dSJan Kara "Failed to mark inode %lu dirty", 22124e7ea81dSJan Kara inode->i_ino); 22134e7ea81dSJan Kara if (!err) 22144e7ea81dSJan Kara err = err2; 22154e7ea81dSJan Kara } 22164e7ea81dSJan Kara return err; 22174e7ea81dSJan Kara } 22184e7ea81dSJan Kara 22194e7ea81dSJan Kara /* 2220fffb2739SJan Kara * Calculate the total number of credits to reserve for one writepages 222120970ba6STheodore Ts'o * iteration. This is called from ext4_writepages().
We map an extent of 2222fffb2739SJan Kara * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping 2223fffb2739SJan Kara * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN + 2224fffb2739SJan Kara * bpp - 1 blocks in bpp different extents. 2225525f4ed8SMingming Cao */ 2226fffb2739SJan Kara static int ext4_da_writepages_trans_blocks(struct inode *inode) 2227fffb2739SJan Kara { 2228fffb2739SJan Kara int bpp = ext4_journal_blocks_per_page(inode); 2229525f4ed8SMingming Cao 2230fffb2739SJan Kara return ext4_meta_trans_blocks(inode, 2231fffb2739SJan Kara MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp); 2232525f4ed8SMingming Cao } 223361628a3fSMingming Cao 22348e48dcfbSTheodore Ts'o /* 22354e7ea81dSJan Kara * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages 22364e7ea81dSJan Kara * and underlying extent to map 22374e7ea81dSJan Kara * 22384e7ea81dSJan Kara * @mpd - where to look for pages 22394e7ea81dSJan Kara * 22404e7ea81dSJan Kara * Walk dirty pages in the mapping. If they are fully mapped, submit them for 22414e7ea81dSJan Kara * IO immediately. When we find a page which isn't mapped we start accumulating 22424e7ea81dSJan Kara * an extent of buffers underlying these pages that need mapping (formed by 22434e7ea81dSJan Kara * either delayed or unwritten buffers). We also lock the pages containing 22444e7ea81dSJan Kara * these buffers. The extent found is returned in the @mpd structure (starting at 22454e7ea81dSJan Kara * mpd->lblk with length mpd->len blocks). 22464e7ea81dSJan Kara * 22474e7ea81dSJan Kara * Note that this function can attach bios to one io_end structure which are 22484e7ea81dSJan Kara * neither logically nor physically contiguous. Although it may seem like an 22494e7ea81dSJan Kara * unnecessary complication, it is actually inevitable in the blocksize < pagesize 22504e7ea81dSJan Kara * case as we need to track IO to all buffers underlying a page in one io_end.
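 *
 * For illustration, assuming 4KiB pages and a 1KiB block size
 * (blkbits == 10), the first logical block of the page at index 3 is
 *
 *	lblk = (ext4_lblk_t)page->index << (PAGE_CACHE_SHIFT - blkbits)
 *	     = 3 << (12 - 10) = 12
 *
 * so that page covers blocks 12..15, each of which may independently
 * be delayed, unwritten, or already mapped.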
22518e48dcfbSTheodore Ts'o */ 22524e7ea81dSJan Kara static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) 22538e48dcfbSTheodore Ts'o { 22544e7ea81dSJan Kara struct address_space *mapping = mpd->inode->i_mapping; 22558e48dcfbSTheodore Ts'o struct pagevec pvec; 22564f01b02cSTheodore Ts'o unsigned int nr_pages; 22574e7ea81dSJan Kara pgoff_t index = mpd->first_page; 22584e7ea81dSJan Kara pgoff_t end = mpd->last_page; 22594e7ea81dSJan Kara int tag; 22604e7ea81dSJan Kara int i, err = 0; 22614e7ea81dSJan Kara int blkbits = mpd->inode->i_blkbits; 22624e7ea81dSJan Kara ext4_lblk_t lblk; 22634e7ea81dSJan Kara struct buffer_head *head; 22648e48dcfbSTheodore Ts'o 22654e7ea81dSJan Kara if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages) 22665b41d924SEric Sandeen tag = PAGECACHE_TAG_TOWRITE; 22675b41d924SEric Sandeen else 22685b41d924SEric Sandeen tag = PAGECACHE_TAG_DIRTY; 22695b41d924SEric Sandeen 22704e7ea81dSJan Kara pagevec_init(&pvec, 0); 22714e7ea81dSJan Kara mpd->map.m_len = 0; 22724e7ea81dSJan Kara mpd->next_page = index; 22734f01b02cSTheodore Ts'o while (index <= end) { 22745b41d924SEric Sandeen nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 22758e48dcfbSTheodore Ts'o min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 22768e48dcfbSTheodore Ts'o if (nr_pages == 0) 22774e7ea81dSJan Kara goto out; 22788e48dcfbSTheodore Ts'o 22798e48dcfbSTheodore Ts'o for (i = 0; i < nr_pages; i++) { 22808e48dcfbSTheodore Ts'o struct page *page = pvec.pages[i]; 22818e48dcfbSTheodore Ts'o 22828e48dcfbSTheodore Ts'o /* 22838e48dcfbSTheodore Ts'o * At this point, the page may be truncated or 22848e48dcfbSTheodore Ts'o * invalidated (changing page->mapping to NULL), or 22858e48dcfbSTheodore Ts'o * even swizzled back from swapper_space to tmpfs file 22868e48dcfbSTheodore Ts'o * mapping. However, page->index will not change 22878e48dcfbSTheodore Ts'o * because we have a reference on the page. 22888e48dcfbSTheodore Ts'o */ 22894f01b02cSTheodore Ts'o if (page->index > end) 22904f01b02cSTheodore Ts'o goto out; 22918e48dcfbSTheodore Ts'o 22924e7ea81dSJan Kara /* If we can't merge this page, we are done. 
*/ 22934e7ea81dSJan Kara if (mpd->map.m_len > 0 && mpd->next_page != page->index) 22944e7ea81dSJan Kara goto out; 229578aaced3STheodore Ts'o 22968e48dcfbSTheodore Ts'o lock_page(page); 22978e48dcfbSTheodore Ts'o /* 22984e7ea81dSJan Kara * If the page is no longer dirty, or its mapping no 22994e7ea81dSJan Kara * longer corresponds to inode we are writing (which 23004e7ea81dSJan Kara * means it has been truncated or invalidated), or the 23014e7ea81dSJan Kara * page is already under writeback and we are not doing 23024e7ea81dSJan Kara * a data integrity writeback, skip the page 23038e48dcfbSTheodore Ts'o */ 23044f01b02cSTheodore Ts'o if (!PageDirty(page) || 23054f01b02cSTheodore Ts'o (PageWriteback(page) && 23064e7ea81dSJan Kara (mpd->wbc->sync_mode == WB_SYNC_NONE)) || 23074f01b02cSTheodore Ts'o unlikely(page->mapping != mapping)) { 23088e48dcfbSTheodore Ts'o unlock_page(page); 23098e48dcfbSTheodore Ts'o continue; 23108e48dcfbSTheodore Ts'o } 23118e48dcfbSTheodore Ts'o 23128e48dcfbSTheodore Ts'o wait_on_page_writeback(page); 23138e48dcfbSTheodore Ts'o BUG_ON(PageWriteback(page)); 23148e48dcfbSTheodore Ts'o 23154e7ea81dSJan Kara if (mpd->map.m_len == 0) 23168eb9e5ceSTheodore Ts'o mpd->first_page = page->index; 23178eb9e5ceSTheodore Ts'o mpd->next_page = page->index + 1; 2318f8bec370SJan Kara /* Add all dirty buffers to mpd */ 23194e7ea81dSJan Kara lblk = ((ext4_lblk_t)page->index) << 23204e7ea81dSJan Kara (PAGE_CACHE_SHIFT - blkbits); 23218eb9e5ceSTheodore Ts'o head = page_buffers(page); 23224e7ea81dSJan Kara if (!add_page_bufs_to_extent(mpd, head, head, lblk)) 23234e7ea81dSJan Kara goto out; 23244e7ea81dSJan Kara /* So far everything mapped? Submit the page for IO. */ 23254e7ea81dSJan Kara if (mpd->map.m_len == 0) { 23264e7ea81dSJan Kara err = mpage_submit_page(mpd, page); 23274e7ea81dSJan Kara if (err < 0) 23284f01b02cSTheodore Ts'o goto out; 23298e48dcfbSTheodore Ts'o } 23304e7ea81dSJan Kara 23314e7ea81dSJan Kara /* 23324e7ea81dSJan Kara * Accumulated enough dirty pages? This doesn't apply 23334e7ea81dSJan Kara * to WB_SYNC_ALL mode. For integrity sync we have to 23344e7ea81dSJan Kara * keep going because someone may be concurrently 23354e7ea81dSJan Kara * dirtying pages, and we might have synced a lot of 23364e7ea81dSJan Kara * newly appeared dirty pages, but have not synced all 23374e7ea81dSJan Kara * of the old dirty pages. 
23384e7ea81dSJan Kara */ 23394e7ea81dSJan Kara if (mpd->wbc->sync_mode == WB_SYNC_NONE && 23404e7ea81dSJan Kara mpd->next_page - mpd->first_page >= 23414e7ea81dSJan Kara mpd->wbc->nr_to_write) 23424e7ea81dSJan Kara goto out; 23438e48dcfbSTheodore Ts'o } 23448e48dcfbSTheodore Ts'o pagevec_release(&pvec); 23458e48dcfbSTheodore Ts'o cond_resched(); 23468e48dcfbSTheodore Ts'o } 23474f01b02cSTheodore Ts'o return 0; 23488eb9e5ceSTheodore Ts'o out: 23498eb9e5ceSTheodore Ts'o pagevec_release(&pvec); 23504e7ea81dSJan Kara return err; 23518e48dcfbSTheodore Ts'o } 23528e48dcfbSTheodore Ts'o 235320970ba6STheodore Ts'o static int __writepage(struct page *page, struct writeback_control *wbc, 235420970ba6STheodore Ts'o void *data) 235520970ba6STheodore Ts'o { 235620970ba6STheodore Ts'o struct address_space *mapping = data; 235720970ba6STheodore Ts'o int ret = ext4_writepage(page, wbc); 235820970ba6STheodore Ts'o mapping_set_error(mapping, ret); 235920970ba6STheodore Ts'o return ret; 236020970ba6STheodore Ts'o } 236120970ba6STheodore Ts'o 236220970ba6STheodore Ts'o static int ext4_writepages(struct address_space *mapping, 236364769240SAlex Tomas struct writeback_control *wbc) 236464769240SAlex Tomas { 23654e7ea81dSJan Kara pgoff_t writeback_index = 0; 23664e7ea81dSJan Kara long nr_to_write = wbc->nr_to_write; 236722208dedSAneesh Kumar K.V int range_whole = 0; 23684e7ea81dSJan Kara int cycled = 1; 236961628a3fSMingming Cao handle_t *handle = NULL; 2370df22291fSAneesh Kumar K.V struct mpage_da_data mpd; 23715e745b04SAneesh Kumar K.V struct inode *inode = mapping->host; 23726b523df4SJan Kara int needed_blocks, rsv_blocks = 0, ret = 0; 23735e745b04SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 23744e7ea81dSJan Kara bool done; 23751bce63d1SShaohua Li struct blk_plug plug; 2376cb530541STheodore Ts'o bool give_up_on_write = false; 237761628a3fSMingming Cao 237820970ba6STheodore Ts'o trace_ext4_writepages(inode, wbc); 2379ba80b101STheodore Ts'o 238061628a3fSMingming Cao /* 238161628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting 238261628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput() 238361628a3fSMingming Cao * because that could violate lock ordering on umount 238461628a3fSMingming Cao */ 2385a1d6cc56SAneesh Kumar K.V if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 238661628a3fSMingming Cao return 0; 23872a21e37eSTheodore Ts'o 238820970ba6STheodore Ts'o if (ext4_should_journal_data(inode)) { 238920970ba6STheodore Ts'o struct blk_plug plug; 239020970ba6STheodore Ts'o int ret; 239120970ba6STheodore Ts'o 239220970ba6STheodore Ts'o blk_start_plug(&plug); 239320970ba6STheodore Ts'o ret = write_cache_pages(mapping, wbc, __writepage, mapping); 239420970ba6STheodore Ts'o blk_finish_plug(&plug); 239520970ba6STheodore Ts'o return ret; 239620970ba6STheodore Ts'o } 239720970ba6STheodore Ts'o 23982a21e37eSTheodore Ts'o /* 23992a21e37eSTheodore Ts'o * If the filesystem has aborted, it is read-only, so return 24002a21e37eSTheodore Ts'o * right away instead of dumping stack traces later on that 24012a21e37eSTheodore Ts'o * will obscure the real source of the problem. 
We test 24024ab2f15bSTheodore Ts'o * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 24032a21e37eSTheodore Ts'o * the latter could be true if the filesystem is mounted 240420970ba6STheodore Ts'o * read-only, and in that case, ext4_writepages should 24052a21e37eSTheodore Ts'o * *never* be called, so if that ever happens, we would want 24062a21e37eSTheodore Ts'o * the stack trace. 24072a21e37eSTheodore Ts'o */ 24084ab2f15bSTheodore Ts'o if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 24092a21e37eSTheodore Ts'o return -EROFS; 24102a21e37eSTheodore Ts'o 24116b523df4SJan Kara if (ext4_should_dioread_nolock(inode)) { 24126b523df4SJan Kara /* 24136b523df4SJan Kara * We may need to convert up to one extent per block in 24146b523df4SJan Kara * the page and we may dirty the inode. 24156b523df4SJan Kara */ 24166b523df4SJan Kara rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits); 24176b523df4SJan Kara } 24186b523df4SJan Kara 24194e7ea81dSJan Kara /* 24204e7ea81dSJan Kara * If we have inline data and arrive here, it means that 24214e7ea81dSJan Kara * we will soon create the block for the 1st page, so 24224e7ea81dSJan Kara * we'd better clear the inline data here. 24234e7ea81dSJan Kara */ 24244e7ea81dSJan Kara if (ext4_has_inline_data(inode)) { 24254e7ea81dSJan Kara /* Just inode will be modified... */ 24264e7ea81dSJan Kara handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 24274e7ea81dSJan Kara if (IS_ERR(handle)) { 24284e7ea81dSJan Kara ret = PTR_ERR(handle); 24294e7ea81dSJan Kara goto out_writepages; 24304e7ea81dSJan Kara } 24314e7ea81dSJan Kara BUG_ON(ext4_test_inode_state(inode, 24324e7ea81dSJan Kara EXT4_STATE_MAY_INLINE_DATA)); 24334e7ea81dSJan Kara ext4_destroy_inline_data(handle, inode); 24344e7ea81dSJan Kara ext4_journal_stop(handle); 24354e7ea81dSJan Kara } 24364e7ea81dSJan Kara 243722208dedSAneesh Kumar K.V if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 243822208dedSAneesh Kumar K.V range_whole = 1; 243961628a3fSMingming Cao 24402acf2c26SAneesh Kumar K.V if (wbc->range_cyclic) { 24414e7ea81dSJan Kara writeback_index = mapping->writeback_index; 24424e7ea81dSJan Kara if (writeback_index) 24432acf2c26SAneesh Kumar K.V cycled = 0; 24444e7ea81dSJan Kara mpd.first_page = writeback_index; 24454e7ea81dSJan Kara mpd.last_page = -1; 24465b41d924SEric Sandeen } else { 24474e7ea81dSJan Kara mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT; 24484e7ea81dSJan Kara mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT; 24495b41d924SEric Sandeen } 2450a1d6cc56SAneesh Kumar K.V 24514e7ea81dSJan Kara mpd.inode = inode; 24524e7ea81dSJan Kara mpd.wbc = wbc; 24534e7ea81dSJan Kara ext4_io_submit_init(&mpd.io_submit, wbc); 24542acf2c26SAneesh Kumar K.V retry: 24556e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 24564e7ea81dSJan Kara tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); 24574e7ea81dSJan Kara done = false; 24581bce63d1SShaohua Li blk_start_plug(&plug); 24594e7ea81dSJan Kara while (!done && mpd.first_page <= mpd.last_page) { 24604e7ea81dSJan Kara /* For each extent of pages we use a new io_end */ 24614e7ea81dSJan Kara mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL); 24624e7ea81dSJan Kara if (!mpd.io_submit.io_end) { 24634e7ea81dSJan Kara ret = -ENOMEM; 24644e7ea81dSJan Kara break; 24654e7ea81dSJan Kara } 2466a1d6cc56SAneesh Kumar K.V 2467a1d6cc56SAneesh Kumar K.V /* 24684e7ea81dSJan Kara * We have two constraints: We find one extent to map and we 24694e7ea81dSJan Kara * must always write out the whole page (makes
a difference when 24704e7ea81dSJan Kara * blocksize < pagesize) so that we don't block on IO when we 24714e7ea81dSJan Kara * try to write out the rest of the page. Journalled mode is 24724e7ea81dSJan Kara * not supported by delalloc. 2473a1d6cc56SAneesh Kumar K.V */ 2474a1d6cc56SAneesh Kumar K.V BUG_ON(ext4_should_journal_data(inode)); 2475525f4ed8SMingming Cao needed_blocks = ext4_da_writepages_trans_blocks(inode); 2476a1d6cc56SAneesh Kumar K.V 247761628a3fSMingming Cao /* start a new transaction */ 24786b523df4SJan Kara handle = ext4_journal_start_with_reserve(inode, 24796b523df4SJan Kara EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks); 248061628a3fSMingming Cao if (IS_ERR(handle)) { 248161628a3fSMingming Cao ret = PTR_ERR(handle); 24821693918eSTheodore Ts'o ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2483fbe845ddSCurt Wohlgemuth "%ld pages, ino %lu; err %d", __func__, 2484a1d6cc56SAneesh Kumar K.V wbc->nr_to_write, inode->i_ino, ret); 24854e7ea81dSJan Kara /* Release allocated io_end */ 24864e7ea81dSJan Kara ext4_put_io_end(mpd.io_submit.io_end); 24874e7ea81dSJan Kara break; 248861628a3fSMingming Cao } 2489f63e6005STheodore Ts'o 24904e7ea81dSJan Kara trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc); 24914e7ea81dSJan Kara ret = mpage_prepare_extent_to_map(&mpd); 24924e7ea81dSJan Kara if (!ret) { 24934e7ea81dSJan Kara if (mpd.map.m_len) 2494cb530541STheodore Ts'o ret = mpage_map_and_submit_extent(handle, &mpd, 2495cb530541STheodore Ts'o &give_up_on_write); 24964e7ea81dSJan Kara else { 2497f63e6005STheodore Ts'o /* 24984e7ea81dSJan Kara * We scanned the whole range (or exhausted 24994e7ea81dSJan Kara * nr_to_write), submitted what was mapped and 25004e7ea81dSJan Kara * didn't find anything needing mapping. We are 25014e7ea81dSJan Kara * done. 2502f63e6005STheodore Ts'o */ 25034e7ea81dSJan Kara done = true; 2504f63e6005STheodore Ts'o } 25054e7ea81dSJan Kara } 250661628a3fSMingming Cao ext4_journal_stop(handle); 25074e7ea81dSJan Kara /* Submit prepared bio */ 25084e7ea81dSJan Kara ext4_io_submit(&mpd.io_submit); 25094e7ea81dSJan Kara /* Unlock pages we didn't use */ 2510cb530541STheodore Ts'o mpage_release_unused_pages(&mpd, give_up_on_write); 25114e7ea81dSJan Kara /* Drop our io_end reference we got from init */ 25124e7ea81dSJan Kara ext4_put_io_end(mpd.io_submit.io_end); 2513df22291fSAneesh Kumar K.V 25144e7ea81dSJan Kara if (ret == -ENOSPC && sbi->s_journal) { 25154e7ea81dSJan Kara /* 25164e7ea81dSJan Kara * Commit the transaction which would 251722208dedSAneesh Kumar K.V * free blocks released in the transaction 251822208dedSAneesh Kumar K.V * and try again 251922208dedSAneesh Kumar K.V */ 2520df22291fSAneesh Kumar K.V jbd2_journal_force_commit_nested(sbi->s_journal); 252122208dedSAneesh Kumar K.V ret = 0; 25224e7ea81dSJan Kara continue; 25234e7ea81dSJan Kara } 25244e7ea81dSJan Kara /* Fatal error - ENOMEM, EIO... 
*/ 25254e7ea81dSJan Kara if (ret) 252561628a3fSMingming Cao break; 252761628a3fSMingming Cao } 25281bce63d1SShaohua Li blk_finish_plug(&plug); 25294e7ea81dSJan Kara if (!ret && !cycled) { 25302acf2c26SAneesh Kumar K.V cycled = 1; 25314e7ea81dSJan Kara mpd.last_page = writeback_index - 1; 25324e7ea81dSJan Kara mpd.first_page = 0; 25332acf2c26SAneesh Kumar K.V goto retry; 25342acf2c26SAneesh Kumar K.V } 253561628a3fSMingming Cao 253622208dedSAneesh Kumar K.V /* Update index */ 253722208dedSAneesh Kumar K.V if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 253822208dedSAneesh Kumar K.V /* 25394e7ea81dSJan Kara * Set the writeback_index so that range_cyclic 254022208dedSAneesh Kumar K.V * mode will write it back later 254122208dedSAneesh Kumar K.V */ 25424e7ea81dSJan Kara mapping->writeback_index = mpd.first_page; 2543a1d6cc56SAneesh Kumar K.V 254461628a3fSMingming Cao out_writepages: 254520970ba6STheodore Ts'o trace_ext4_writepages_result(inode, wbc, ret, 25464e7ea81dSJan Kara nr_to_write - wbc->nr_to_write); 254761628a3fSMingming Cao return ret; 254864769240SAlex Tomas } 254964769240SAlex Tomas 255079f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb) 255179f0be8dSAneesh Kumar K.V { 25525c1ff336SEric Whitney s64 free_clusters, dirty_clusters; 255379f0be8dSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 255479f0be8dSAneesh Kumar K.V 255579f0be8dSAneesh Kumar K.V /* 255679f0be8dSAneesh Kumar K.V * switch to non delalloc mode if we are running low 255779f0be8dSAneesh Kumar K.V * on free blocks. The free block accounting via percpu 2558179f7ebfSEric Dumazet * counters can get slightly wrong with percpu_counter_batch getting 255979f0be8dSAneesh Kumar K.V * accumulated on each CPU without updating global counters. 256079f0be8dSAneesh Kumar K.V * Delalloc needs accurate free block accounting. So switch 256179f0be8dSAneesh Kumar K.V * to non delalloc when we are near the error range. 256279f0be8dSAneesh Kumar K.V */ 25635c1ff336SEric Whitney free_clusters = 25645c1ff336SEric Whitney percpu_counter_read_positive(&sbi->s_freeclusters_counter); 25655c1ff336SEric Whitney dirty_clusters = 25665c1ff336SEric Whitney percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 256700d4e736STheodore Ts'o /* 256800d4e736STheodore Ts'o * Start pushing delalloc when 1/2 of free blocks are dirty.
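 *
 * Worked example with illustrative numbers: if free_clusters == 1000,
 * the writeback below is kicked once dirty_clusters exceeds 500
 * (free < 2 * dirty), and the switch to nondelalloc happens once
 * 2 * free < 3 * dirty (dirty_clusters above ~666), or once
 * free_clusters falls under dirty_clusters + EXT4_FREECLUSTERS_WATERMARK.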
256900d4e736STheodore Ts'o */ 25705c1ff336SEric Whitney if (dirty_clusters && (free_clusters < 2 * dirty_clusters)) 257110ee27a0SMiao Xie try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 257200d4e736STheodore Ts'o 25735c1ff336SEric Whitney if (2 * free_clusters < 3 * dirty_clusters || 25745c1ff336SEric Whitney free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) { 257579f0be8dSAneesh Kumar K.V /* 2576c8afb446SEric Sandeen * free block count is less than 150% of dirty blocks 2577c8afb446SEric Sandeen * or free blocks are less than the watermark 257879f0be8dSAneesh Kumar K.V */ 257979f0be8dSAneesh Kumar K.V return 1; 258079f0be8dSAneesh Kumar K.V } 258179f0be8dSAneesh Kumar K.V return 0; 258279f0be8dSAneesh Kumar K.V } 258379f0be8dSAneesh Kumar K.V 258464769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 258564769240SAlex Tomas loff_t pos, unsigned len, unsigned flags, 258664769240SAlex Tomas struct page **pagep, void **fsdata) 258764769240SAlex Tomas { 258872b8ab9dSEric Sandeen int ret, retries = 0; 258964769240SAlex Tomas struct page *page; 259064769240SAlex Tomas pgoff_t index; 259164769240SAlex Tomas struct inode *inode = mapping->host; 259264769240SAlex Tomas handle_t *handle; 259364769240SAlex Tomas 259464769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT; 259579f0be8dSAneesh Kumar K.V 259679f0be8dSAneesh Kumar K.V if (ext4_nonda_switch(inode->i_sb)) { 259779f0be8dSAneesh Kumar K.V *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 259879f0be8dSAneesh Kumar K.V return ext4_write_begin(file, mapping, pos, 259979f0be8dSAneesh Kumar K.V len, flags, pagep, fsdata); 260079f0be8dSAneesh Kumar K.V } 260179f0be8dSAneesh Kumar K.V *fsdata = (void *)0; 26029bffad1eSTheodore Ts'o trace_ext4_da_write_begin(inode, pos, len, flags); 26039c3569b5STao Ma 26049c3569b5STao Ma if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 26059c3569b5STao Ma ret = ext4_da_write_inline_data_begin(mapping, inode, 26069c3569b5STao Ma pos, len, flags, 26079c3569b5STao Ma pagep, fsdata); 26089c3569b5STao Ma if (ret < 0) 260947564bfbSTheodore Ts'o return ret; 261047564bfbSTheodore Ts'o if (ret == 1) 261147564bfbSTheodore Ts'o return 0; 26129c3569b5STao Ma } 26139c3569b5STao Ma 261447564bfbSTheodore Ts'o /* 261547564bfbSTheodore Ts'o * grab_cache_page_write_begin() can take a long time if the 261647564bfbSTheodore Ts'o * system is thrashing due to memory pressure, or if the page 261747564bfbSTheodore Ts'o * is being written back. So grab it first before we start 261847564bfbSTheodore Ts'o * the transaction handle. This also allows us to allocate 261947564bfbSTheodore Ts'o * the page (if needed) without using GFP_NOFS. 262047564bfbSTheodore Ts'o */ 262147564bfbSTheodore Ts'o retry_grab: 262247564bfbSTheodore Ts'o page = grab_cache_page_write_begin(mapping, index, flags); 262347564bfbSTheodore Ts'o if (!page) 262447564bfbSTheodore Ts'o return -ENOMEM; 262547564bfbSTheodore Ts'o unlock_page(page); 262647564bfbSTheodore Ts'o 262764769240SAlex Tomas /* 262864769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update 262964769240SAlex Tomas * if there is delayed block allocation. But we still need 263064769240SAlex Tomas * to journal the i_disksize update if we write to the end 263164769240SAlex Tomas * of a file which has an already mapped buffer.
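 *
 * For illustration, assuming a 4KiB block size: if i_size is 2048 and
 * the block backing bytes 0..4095 is already mapped, an append at
 * offset 2048 grows i_size without allocating anything, so the
 * i_disksize update is the only change that must be journalled by the
 * handle started below.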
263264769240SAlex Tomas */ 263347564bfbSTheodore Ts'o retry_journal: 26349924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1); 263564769240SAlex Tomas if (IS_ERR(handle)) { 263647564bfbSTheodore Ts'o page_cache_release(page); 263747564bfbSTheodore Ts'o return PTR_ERR(handle); 263864769240SAlex Tomas } 263964769240SAlex Tomas 264047564bfbSTheodore Ts'o lock_page(page); 264147564bfbSTheodore Ts'o if (page->mapping != mapping) { 264247564bfbSTheodore Ts'o /* The page got truncated from under us */ 264347564bfbSTheodore Ts'o unlock_page(page); 264447564bfbSTheodore Ts'o page_cache_release(page); 2645d5a0d4f7SEric Sandeen ext4_journal_stop(handle); 264647564bfbSTheodore Ts'o goto retry_grab; 2647d5a0d4f7SEric Sandeen } 264847564bfbSTheodore Ts'o /* In case writeback began while the page was unlocked */ 264947564bfbSTheodore Ts'o wait_on_page_writeback(page); 265064769240SAlex Tomas 26516e1db88dSChristoph Hellwig ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 265264769240SAlex Tomas if (ret < 0) { 265364769240SAlex Tomas unlock_page(page); 265464769240SAlex Tomas ext4_journal_stop(handle); 2655ae4d5372SAneesh Kumar K.V /* 2656ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks 2657ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need 2658ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex. 2659ae4d5372SAneesh Kumar K.V */ 2660ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size) 2661b9a4207dSJan Kara ext4_truncate_failed_write(inode); 266247564bfbSTheodore Ts'o 266347564bfbSTheodore Ts'o if (ret == -ENOSPC && 266447564bfbSTheodore Ts'o ext4_should_retry_alloc(inode->i_sb, &retries)) 266547564bfbSTheodore Ts'o goto retry_journal; 266647564bfbSTheodore Ts'o 266747564bfbSTheodore Ts'o page_cache_release(page); 266847564bfbSTheodore Ts'o return ret; 266964769240SAlex Tomas } 267064769240SAlex Tomas 267147564bfbSTheodore Ts'o *pagep = page; 267264769240SAlex Tomas return ret; 267364769240SAlex Tomas } 267464769240SAlex Tomas 2675632eaeabSMingming Cao /* 2676632eaeabSMingming Cao * Check if we should update i_disksize 2677632eaeabSMingming Cao * when write to the end of file but not require block allocation 2678632eaeabSMingming Cao */ 2679632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page, 2680632eaeabSMingming Cao unsigned long offset) 2681632eaeabSMingming Cao { 2682632eaeabSMingming Cao struct buffer_head *bh; 2683632eaeabSMingming Cao struct inode *inode = page->mapping->host; 2684632eaeabSMingming Cao unsigned int idx; 2685632eaeabSMingming Cao int i; 2686632eaeabSMingming Cao 2687632eaeabSMingming Cao bh = page_buffers(page); 2688632eaeabSMingming Cao idx = offset >> inode->i_blkbits; 2689632eaeabSMingming Cao 2690632eaeabSMingming Cao for (i = 0; i < idx; i++) 2691632eaeabSMingming Cao bh = bh->b_this_page; 2692632eaeabSMingming Cao 269329fa89d0SAneesh Kumar K.V if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 2694632eaeabSMingming Cao return 0; 2695632eaeabSMingming Cao return 1; 2696632eaeabSMingming Cao } 2697632eaeabSMingming Cao 269864769240SAlex Tomas static int ext4_da_write_end(struct file *file, 269964769240SAlex Tomas struct address_space *mapping, 270064769240SAlex Tomas loff_t pos, unsigned len, unsigned copied, 270164769240SAlex Tomas struct page *page, void *fsdata) 270264769240SAlex Tomas { 270364769240SAlex Tomas struct inode *inode = mapping->host; 270464769240SAlex Tomas int ret = 0, ret2; 270564769240SAlex 
Tomas handle_t *handle = ext4_journal_current_handle(); 270664769240SAlex Tomas loff_t new_i_size; 2707632eaeabSMingming Cao unsigned long start, end; 270879f0be8dSAneesh Kumar K.V int write_mode = (int)(unsigned long)fsdata; 270979f0be8dSAneesh Kumar K.V 271074d553aaSTheodore Ts'o if (write_mode == FALL_BACK_TO_NONDELALLOC) 271174d553aaSTheodore Ts'o return ext4_write_end(file, mapping, pos, 271279f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 2713632eaeabSMingming Cao 27149bffad1eSTheodore Ts'o trace_ext4_da_write_end(inode, pos, len, copied); 2715632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1); 2716632eaeabSMingming Cao end = start + copied - 1; 271764769240SAlex Tomas 271864769240SAlex Tomas /* 271964769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size 272064769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty 272164769240SAlex Tomas * into that. 272264769240SAlex Tomas */ 272364769240SAlex Tomas new_i_size = pos + copied; 2724ea51d132SAndrea Arcangeli if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 27259c3569b5STao Ma if (ext4_has_inline_data(inode) || 27269c3569b5STao Ma ext4_da_should_update_i_disksize(page, end)) { 2727632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem); 2728f3b59291STheodore Ts'o if (new_i_size > EXT4_I(inode)->i_disksize) 272964769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size; 2730632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem); 2731cf17fea6SAneesh Kumar K.V /* We need to mark the inode dirty even if 2732cf17fea6SAneesh Kumar K.V * new_i_size is less than inode->i_size 2733cf17fea6SAneesh Kumar K.V * but greater than i_disksize. (hint: delalloc) 2734cf17fea6SAneesh Kumar K.V */ 2735cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 2736632eaeabSMingming Cao } 2737632eaeabSMingming Cao } 27389c3569b5STao Ma 27399c3569b5STao Ma if (write_mode != CONVERT_INLINE_DATA && 27409c3569b5STao Ma ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 27419c3569b5STao Ma ext4_has_inline_data(inode)) 27429c3569b5STao Ma ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, 27439c3569b5STao Ma page); 27449c3569b5STao Ma else 274564769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied, 274664769240SAlex Tomas page, fsdata); 27479c3569b5STao Ma 274864769240SAlex Tomas copied = ret2; 274964769240SAlex Tomas if (ret2 < 0) 275064769240SAlex Tomas ret = ret2; 275164769240SAlex Tomas ret2 = ext4_journal_stop(handle); 275264769240SAlex Tomas if (!ret) 275364769240SAlex Tomas ret = ret2; 275464769240SAlex Tomas 275564769240SAlex Tomas return ret ?
ret : copied; 275664769240SAlex Tomas } 275764769240SAlex Tomas 2758d47992f8SLukas Czerner static void ext4_da_invalidatepage(struct page *page, unsigned int offset, 2759d47992f8SLukas Czerner unsigned int length) 276064769240SAlex Tomas { 276164769240SAlex Tomas /* 276264769240SAlex Tomas * Drop reserved blocks 276364769240SAlex Tomas */ 276464769240SAlex Tomas BUG_ON(!PageLocked(page)); 276564769240SAlex Tomas if (!page_has_buffers(page)) 276664769240SAlex Tomas goto out; 276764769240SAlex Tomas 2768ca99fdd2SLukas Czerner ext4_da_page_release_reservation(page, offset, length); 276964769240SAlex Tomas 277064769240SAlex Tomas out: 2771d47992f8SLukas Czerner ext4_invalidatepage(page, offset, length); 277264769240SAlex Tomas 277364769240SAlex Tomas return; 277464769240SAlex Tomas } 277564769240SAlex Tomas 2776ccd2506bSTheodore Ts'o /* 2777ccd2506bSTheodore Ts'o * Force all delayed allocation blocks to be allocated for a given inode. 2778ccd2506bSTheodore Ts'o */ 2779ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode) 2780ccd2506bSTheodore Ts'o { 2781fb40ba0dSTheodore Ts'o trace_ext4_alloc_da_blocks(inode); 2782fb40ba0dSTheodore Ts'o 2783ccd2506bSTheodore Ts'o if (!EXT4_I(inode)->i_reserved_data_blocks && 2784ccd2506bSTheodore Ts'o !EXT4_I(inode)->i_reserved_meta_blocks) 2785ccd2506bSTheodore Ts'o return 0; 2786ccd2506bSTheodore Ts'o 2787ccd2506bSTheodore Ts'o /* 2788ccd2506bSTheodore Ts'o * We do something simple for now. The filemap_flush() will 2789ccd2506bSTheodore Ts'o * also start triggering a write of the data blocks, which is 2790ccd2506bSTheodore Ts'o * not strictly speaking necessary (and for users of 2791ccd2506bSTheodore Ts'o * laptop_mode, not even desirable). However, to do otherwise 2792ccd2506bSTheodore Ts'o * would require replicating code paths in: 2793ccd2506bSTheodore Ts'o * 279420970ba6STheodore Ts'o * ext4_writepages() -> 2795ccd2506bSTheodore Ts'o * write_cache_pages() ---> (via passed in callback function) 2796ccd2506bSTheodore Ts'o * __mpage_da_writepage() --> 2797ccd2506bSTheodore Ts'o * mpage_add_bh_to_extent() 2798ccd2506bSTheodore Ts'o * mpage_da_map_blocks() 2799ccd2506bSTheodore Ts'o * 2800ccd2506bSTheodore Ts'o * The problem is that write_cache_pages(), located in 2801ccd2506bSTheodore Ts'o * mm/page-writeback.c, marks pages clean in preparation for 2802ccd2506bSTheodore Ts'o * doing I/O, which is not desirable if we're not planning on 2803ccd2506bSTheodore Ts'o * doing I/O at all. 2804ccd2506bSTheodore Ts'o * 2805ccd2506bSTheodore Ts'o * We could call write_cache_pages(), and then redirty all of 2806380cf090SWu Fengguang * the pages by calling redirty_page_for_writepage() but that 2807ccd2506bSTheodore Ts'o * would be ugly in the extreme. So instead we would need to 2808ccd2506bSTheodore Ts'o * replicate parts of the code in the above functions, 280925985edcSLucas De Marchi * simplifying them because we wouldn't actually intend to 2810ccd2506bSTheodore Ts'o * write out the pages, but rather only collect contiguous 2811ccd2506bSTheodore Ts'o * logical block extents, call the multi-block allocator, and 2812ccd2506bSTheodore Ts'o * then update the buffer heads with the block allocations. 2813ccd2506bSTheodore Ts'o * 2814ccd2506bSTheodore Ts'o * For now, though, we'll cheat by calling filemap_flush(), 2815ccd2506bSTheodore Ts'o * which will map the blocks, and start the I/O, but not 2816ccd2506bSTheodore Ts'o * actually wait for the I/O to complete. 
2817ccd2506bSTheodore Ts'o */ 2818ccd2506bSTheodore Ts'o return filemap_flush(inode->i_mapping); 2819ccd2506bSTheodore Ts'o } 282064769240SAlex Tomas 282164769240SAlex Tomas /* 2822ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by 2823ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data. 2824ac27a0ecSDave Kleikamp * 2825ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the 2826617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling 2827ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the 2828ac27a0ecSDave Kleikamp * data getting swapped to that swapfile suddenly gets overwritten by 2829ac27a0ecSDave Kleikamp * the original zeros written out previously to the journal and 2830ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache. 2831ac27a0ecSDave Kleikamp * 2832ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file, 2833ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache. 2834ac27a0ecSDave Kleikamp */ 2835617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2836ac27a0ecSDave Kleikamp { 2837ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 2838ac27a0ecSDave Kleikamp journal_t *journal; 2839ac27a0ecSDave Kleikamp int err; 2840ac27a0ecSDave Kleikamp 284146c7f254STao Ma /* 284246c7f254STao Ma * We can get here for an inline file via the FIBMAP ioctl 284346c7f254STao Ma */ 284446c7f254STao Ma if (ext4_has_inline_data(inode)) 284546c7f254STao Ma return 0; 284646c7f254STao Ma 284764769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 284864769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) { 284964769240SAlex Tomas /* 285064769240SAlex Tomas * With delalloc we want to sync the file 285164769240SAlex Tomas * so that we can make sure we allocate 285264769240SAlex Tomas * blocks for the file 285364769240SAlex Tomas */ 285464769240SAlex Tomas filemap_write_and_wait(mapping); 285564769240SAlex Tomas } 285664769240SAlex Tomas 285719f5fb7aSTheodore Ts'o if (EXT4_JOURNAL(inode) && 285819f5fb7aSTheodore Ts'o ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 2859ac27a0ecSDave Kleikamp /* 2860ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of 2861ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare: 2862ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file 2863ac27a0ecSDave Kleikamp * do we expect this to happen. 2864ac27a0ecSDave Kleikamp * 2865ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not 2866ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be 2867ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at 2868ac27a0ecSDave Kleikamp * will.) 2869ac27a0ecSDave Kleikamp * 2870617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than 2871ac27a0ecSDave Kleikamp * regular files. If somebody wants to bmap a directory 2872ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer 2873ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve 2874ac27a0ecSDave Kleikamp * everything they get.
2875ac27a0ecSDave Kleikamp */ 2876ac27a0ecSDave Kleikamp 287719f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 2878617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2879dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2880dab291afSMingming Cao err = jbd2_journal_flush(journal); 2881dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2882ac27a0ecSDave Kleikamp 2883ac27a0ecSDave Kleikamp if (err) 2884ac27a0ecSDave Kleikamp return 0; 2885ac27a0ecSDave Kleikamp } 2886ac27a0ecSDave Kleikamp 2887617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block); 2888ac27a0ecSDave Kleikamp } 2889ac27a0ecSDave Kleikamp 2890617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page) 2891ac27a0ecSDave Kleikamp { 289246c7f254STao Ma int ret = -EAGAIN; 289346c7f254STao Ma struct inode *inode = page->mapping->host; 289446c7f254STao Ma 28950562e0baSJiaying Zhang trace_ext4_readpage(page); 289646c7f254STao Ma 289746c7f254STao Ma if (ext4_has_inline_data(inode)) 289846c7f254STao Ma ret = ext4_readpage_inline(inode, page); 289946c7f254STao Ma 290046c7f254STao Ma if (ret == -EAGAIN) 2901617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block); 290246c7f254STao Ma 290346c7f254STao Ma return ret; 2904ac27a0ecSDave Kleikamp } 2905ac27a0ecSDave Kleikamp 2906ac27a0ecSDave Kleikamp static int 2907617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping, 2908ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages) 2909ac27a0ecSDave Kleikamp { 291046c7f254STao Ma struct inode *inode = mapping->host; 291146c7f254STao Ma 291246c7f254STao Ma /* If the file has inline data, no need to do readpages. */ 291346c7f254STao Ma if (ext4_has_inline_data(inode)) 291446c7f254STao Ma return 0; 291546c7f254STao Ma 2916617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2917ac27a0ecSDave Kleikamp } 2918ac27a0ecSDave Kleikamp 2919d47992f8SLukas Czerner static void ext4_invalidatepage(struct page *page, unsigned int offset, 2920d47992f8SLukas Czerner unsigned int length) 2921ac27a0ecSDave Kleikamp { 2922ca99fdd2SLukas Czerner trace_ext4_invalidatepage(page, offset, length); 29230562e0baSJiaying Zhang 29244520fb3cSJan Kara /* No journalling happens on data buffers when this function is used */ 29254520fb3cSJan Kara WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 29264520fb3cSJan Kara 2927ca99fdd2SLukas Czerner block_invalidatepage(page, offset, length); 29284520fb3cSJan Kara } 29294520fb3cSJan Kara 293053e87268SJan Kara static int __ext4_journalled_invalidatepage(struct page *page, 2931ca99fdd2SLukas Czerner unsigned int offset, 2932ca99fdd2SLukas Czerner unsigned int length) 29334520fb3cSJan Kara { 29344520fb3cSJan Kara journal_t *journal = EXT4_JOURNAL(page->mapping->host); 29354520fb3cSJan Kara 2936ca99fdd2SLukas Czerner trace_ext4_journalled_invalidatepage(page, offset, length); 29374520fb3cSJan Kara 2938744692dcSJiaying Zhang /* 2939ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying 2940ac27a0ecSDave Kleikamp */ 2941ca99fdd2SLukas Czerner if (offset == 0 && length == PAGE_CACHE_SIZE) 2942ac27a0ecSDave Kleikamp ClearPageChecked(page); 2943ac27a0ecSDave Kleikamp 2944ca99fdd2SLukas Czerner return jbd2_journal_invalidatepage(journal, page, offset, length); 294553e87268SJan Kara } 294653e87268SJan Kara 294753e87268SJan Kara /* Wrapper for aops... 
*/ 294853e87268SJan Kara static void ext4_journalled_invalidatepage(struct page *page, 2949d47992f8SLukas Czerner unsigned int offset, 2950d47992f8SLukas Czerner unsigned int length) 295153e87268SJan Kara { 2952ca99fdd2SLukas Czerner WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0); 2953ac27a0ecSDave Kleikamp } 2954ac27a0ecSDave Kleikamp 2955617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait) 2956ac27a0ecSDave Kleikamp { 2957617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2958ac27a0ecSDave Kleikamp 29590562e0baSJiaying Zhang trace_ext4_releasepage(page); 29600562e0baSJiaying Zhang 2961e1c36595SJan Kara /* Page has dirty journalled data -> cannot release */ 2962e1c36595SJan Kara if (PageChecked(page)) 2963ac27a0ecSDave Kleikamp return 0; 29640390131bSFrank Mayhar if (journal) 2965dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait); 29660390131bSFrank Mayhar else 29670390131bSFrank Mayhar return try_to_free_buffers(page); 2968ac27a0ecSDave Kleikamp } 2969ac27a0ecSDave Kleikamp 2970ac27a0ecSDave Kleikamp /* 29712ed88685STheodore Ts'o * ext4_get_block used when preparing for a DIO write or buffer write. 29722ed88685STheodore Ts'o * We allocate an uninitialized extent if blocks haven't been allocated. 29732ed88685STheodore Ts'o * The extent will be converted to initialized after the IO is complete. 29742ed88685STheodore Ts'o */ 2975f19d5870STao Ma int ext4_get_block_write(struct inode *inode, sector_t iblock, 29764c0425ffSMingming Cao struct buffer_head *bh_result, int create) 29774c0425ffSMingming Cao { 2978c7064ef1SJiaying Zhang ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", 29798d5d02e6SMingming Cao inode->i_ino, create); 29802ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result, 29812ed88685STheodore Ts'o EXT4_GET_BLOCKS_IO_CREATE_EXT); 29824c0425ffSMingming Cao } 29834c0425ffSMingming Cao 2984729f52c6SZheng Liu static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock, 29858b0f165fSAnatol Pomozov struct buffer_head *bh_result, int create) 2986729f52c6SZheng Liu { 29878b0f165fSAnatol Pomozov ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n", 29888b0f165fSAnatol Pomozov inode->i_ino, create); 29898b0f165fSAnatol Pomozov return _ext4_get_block(inode, iblock, bh_result, 29908b0f165fSAnatol Pomozov EXT4_GET_BLOCKS_NO_LOCK); 2991729f52c6SZheng Liu } 2992729f52c6SZheng Liu 29934c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 2994552ef802SChristoph Hellwig ssize_t size, void *private, int ret, 2995552ef802SChristoph Hellwig bool is_async) 29964c0425ffSMingming Cao { 2997496ad9aaSAl Viro struct inode *inode = file_inode(iocb->ki_filp); 29984c0425ffSMingming Cao ext4_io_end_t *io_end = iocb->private; 29994c0425ffSMingming Cao 300097a851edSJan Kara /* if not async direct IO just return */ 300197a851edSJan Kara if (!io_end) { 300297a851edSJan Kara inode_dio_done(inode); 300397a851edSJan Kara if (is_async) 300497a851edSJan Kara aio_complete(iocb, ret, 0); 300597a851edSJan Kara return; 300697a851edSJan Kara } 30074b70df18SMingming 30088d5d02e6SMingming Cao ext_debug("ext4_end_io_dio(): io_end 0x%p " 3009ace36ad4SJoe Perches "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 30108d5d02e6SMingming Cao iocb->private, io_end->inode->i_ino, iocb, offset, 30118d5d02e6SMingming Cao size); 30128d5d02e6SMingming Cao 3013b5a7e970STheodore Ts'o iocb->private = NULL; 30144c0425ffSMingming Cao
io_end->offset = offset; 30154c0425ffSMingming Cao io_end->size = size; 30165b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) { 30175b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->iocb = iocb; 30185b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->result = ret; 30195b3ff237Sjiayingz@google.com (Jiaying Zhang) } 302097a851edSJan Kara ext4_put_io_end_defer(io_end); 30214c0425ffSMingming Cao } 3022c7064ef1SJiaying Zhang 30234c0425ffSMingming Cao /* 30244c0425ffSMingming Cao * For ext4 extent files, ext4 will do direct-io writes to holes, 30254c0425ffSMingming Cao * preallocated extents, and writes that extend the file, with no need to 30264c0425ffSMingming Cao * fall back to buffered IO. 30274c0425ffSMingming Cao * 3028b595076aSUwe Kleine-König * For holes, we fallocate those blocks and mark them as uninitialized. 302969c499d1STheodore Ts'o * If those blocks were preallocated, we make sure they are split, but 3030b595076aSUwe Kleine-König * still keep the range to write as uninitialized. 30314c0425ffSMingming Cao * 303269c499d1STheodore Ts'o * The unwritten extents will be converted to written when DIO is completed. 30338d5d02e6SMingming Cao * For async direct IO, since the IO may still be pending when we return, we 303425985edcSLucas De Marchi * set up an end_io callback function, which will do the conversion 30358d5d02e6SMingming Cao * when the async direct IO is completed. 30364c0425ffSMingming Cao * 30374c0425ffSMingming Cao * If the O_DIRECT write will extend the file then add this inode to the 30384c0425ffSMingming Cao * orphan list. So recovery will truncate it back to the original size 30394c0425ffSMingming Cao * if the machine crashes during the write. 30404c0425ffSMingming Cao * 30414c0425ffSMingming Cao */ 30424c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, 30434c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 30444c0425ffSMingming Cao unsigned long nr_segs) 30454c0425ffSMingming Cao { 30464c0425ffSMingming Cao struct file *file = iocb->ki_filp; 30474c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 30484c0425ffSMingming Cao ssize_t ret; 30494c0425ffSMingming Cao size_t count = iov_length(iov, nr_segs); 3050729f52c6SZheng Liu int overwrite = 0; 30518b0f165fSAnatol Pomozov get_block_t *get_block_func = NULL; 30528b0f165fSAnatol Pomozov int dio_flags = 0; 305369c499d1STheodore Ts'o loff_t final_size = offset + count; 305497a851edSJan Kara ext4_io_end_t *io_end = NULL; 305569c499d1STheodore Ts'o 305669c499d1STheodore Ts'o /* Use the old path for reads and writes beyond i_size. */ 305769c499d1STheodore Ts'o if (rw != WRITE || final_size > inode->i_size) 305869c499d1STheodore Ts'o return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3059729f52c6SZheng Liu 30604bd809dbSZheng Liu BUG_ON(iocb->private == NULL); 30614bd809dbSZheng Liu 3062e8340395SJan Kara /* 3063e8340395SJan Kara * Make all waiters for direct IO properly wait also for extent 3064e8340395SJan Kara * conversion. This also disallows race between truncate() and 3065e8340395SJan Kara * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
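 *
 * For illustration, a truncate() racing with this write blocks in
 * inode_dio_wait() until the matching inode_dio_done() on the
 * retake_lock: path below has run, so it cannot observe the file
 * while this DIO and its extent conversion are still in flight.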
3066e8340395SJan Kara */ 3067e8340395SJan Kara if (rw == WRITE) 3068e8340395SJan Kara atomic_inc(&inode->i_dio_count); 3069e8340395SJan Kara 30704bd809dbSZheng Liu /* If we do an overwrite dio, i_mutex locking can be released */ 30714bd809dbSZheng Liu overwrite = *((int *)iocb->private); 30724bd809dbSZheng Liu 30734bd809dbSZheng Liu if (overwrite) { 30744bd809dbSZheng Liu down_read(&EXT4_I(inode)->i_data_sem); 30754bd809dbSZheng Liu mutex_unlock(&inode->i_mutex); 30764bd809dbSZheng Liu } 30774bd809dbSZheng Liu 30784c0425ffSMingming Cao /* 30798d5d02e6SMingming Cao * We can write directly to holes and fallocated extents. 30808d5d02e6SMingming Cao * 308169c499d1STheodore Ts'o * Allocated blocks to fill the hole are marked as 308269c499d1STheodore Ts'o * uninitialized to prevent a parallel buffered read from exposing 308369c499d1STheodore Ts'o * the stale data before DIO completes the data IO. 30848d5d02e6SMingming Cao * 308569c499d1STheodore Ts'o * As to previously fallocated extents, ext4 get_block will 308669c499d1STheodore Ts'o * simply mark the buffer mapped but still keep the 308769c499d1STheodore Ts'o * extents uninitialized. 30884c0425ffSMingming Cao * 308969c499d1STheodore Ts'o * For the non-AIO case, we will convert those unwritten extents 30908d5d02e6SMingming Cao * to written after returning from blockdev_direct_IO. 30914c0425ffSMingming Cao * 309269c499d1STheodore Ts'o * For async DIO, the conversion needs to be deferred when the 309369c499d1STheodore Ts'o * IO is completed. The ext4 end_io callback function will be 309469c499d1STheodore Ts'o * called to take care of the conversion work. Here for the async 309569c499d1STheodore Ts'o * case, we allocate an io_end structure to hook to the iocb. 30964c0425ffSMingming Cao */ 30978d5d02e6SMingming Cao iocb->private = NULL; 3098f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, NULL); 30998d5d02e6SMingming Cao if (!is_sync_kiocb(iocb)) { 310097a851edSJan Kara io_end = ext4_init_io_end(inode, GFP_NOFS); 31014bd809dbSZheng Liu if (!io_end) { 31024bd809dbSZheng Liu ret = -ENOMEM; 31034bd809dbSZheng Liu goto retake_lock; 31044bd809dbSZheng Liu } 3105266991b1SJeff Moyer io_end->flag |= EXT4_IO_END_DIRECT; 310697a851edSJan Kara /* 310797a851edSJan Kara * Grab reference for DIO. Will be dropped in ext4_end_io_dio() 310897a851edSJan Kara */ 310997a851edSJan Kara iocb->private = ext4_get_io_end(io_end); 31108d5d02e6SMingming Cao /* 311169c499d1STheodore Ts'o * we save the io structure for the current async direct 311269c499d1STheodore Ts'o * IO, so that later ext4_map_blocks() can flag in the 311369c499d1STheodore Ts'o * io structure whether there are unwritten extents that 311469c499d1STheodore Ts'o * need to be converted when the IO is completed.
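 *
 * Illustrative flow (a sketch of the mechanism, not original
 * commentary): with the io_end hooked up here, a later
 * ext4_map_blocks() that allocates an uninitialized extent can
 * call ext4_set_io_unwritten_flag(inode, io_end), and
 * ext4_end_io_dio() then defers the conversion to the io_end
 * machinery instead of doing it inline.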
31158d5d02e6SMingming Cao */ 3116f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, io_end); 31178d5d02e6SMingming Cao } 31188d5d02e6SMingming Cao 31198b0f165fSAnatol Pomozov if (overwrite) { 31208b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write_nolock; 31218b0f165fSAnatol Pomozov } else { 31228b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write; 31238b0f165fSAnatol Pomozov dio_flags = DIO_LOCKING; 31248b0f165fSAnatol Pomozov } 3125729f52c6SZheng Liu ret = __blockdev_direct_IO(rw, iocb, inode, 3126729f52c6SZheng Liu inode->i_sb->s_bdev, iov, 3127729f52c6SZheng Liu offset, nr_segs, 31288b0f165fSAnatol Pomozov get_block_func, 3129729f52c6SZheng Liu ext4_end_io_dio, 3130729f52c6SZheng Liu NULL, 31318b0f165fSAnatol Pomozov dio_flags); 31328b0f165fSAnatol Pomozov 31334eec708dSJan Kara /* 313497a851edSJan Kara * Put our reference to io_end. This can free the io_end structure e.g. 313597a851edSJan Kara * in the sync IO case or in case of error. It can even perform extent 313697a851edSJan Kara * conversion if all bios we submitted finished before we got here. 313797a851edSJan Kara * Note that in that case iocb->private can already be set to NULL 313897a851edSJan Kara * here. 31394eec708dSJan Kara */ 314097a851edSJan Kara if (io_end) { 314197a851edSJan Kara ext4_inode_aio_set(inode, NULL); 314297a851edSJan Kara ext4_put_io_end(io_end); 314397a851edSJan Kara /* 314497a851edSJan Kara * When no IO was submitted, ext4_end_io_dio() was not 314597a851edSJan Kara * called, so we have to put the iocb's reference. 314697a851edSJan Kara */ 314797a851edSJan Kara if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) { 314897a851edSJan Kara WARN_ON(iocb->private != io_end); 314997a851edSJan Kara WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); 315097a851edSJan Kara WARN_ON(io_end->iocb); 315197a851edSJan Kara /* 315297a851edSJan Kara * Generic code already did inode_dio_done() so we 315397a851edSJan Kara * have to clear EXT4_IO_END_DIRECT to avoid doing it a 315497a851edSJan Kara * second time.
315597a851edSJan Kara */ 315697a851edSJan Kara io_end->flag = 0; 315797a851edSJan Kara ext4_put_io_end(io_end); 31588d5d02e6SMingming Cao iocb->private = NULL; 315997a851edSJan Kara } 316097a851edSJan Kara } 316197a851edSJan Kara if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 31625f524950SMingming EXT4_STATE_DIO_UNWRITTEN)) { 3163109f5565SMingming int err; 31648d5d02e6SMingming Cao /* 31658d5d02e6SMingming Cao * for the non-AIO case, since the IO is already 316625985edcSLucas De Marchi * completed, we can do the conversion right here 31678d5d02e6SMingming Cao */ 31686b523df4SJan Kara err = ext4_convert_unwritten_extents(NULL, inode, 31698d5d02e6SMingming Cao offset, ret); 3170109f5565SMingming if (err < 0) 3171109f5565SMingming ret = err; 317219f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3173109f5565SMingming } 31744bd809dbSZheng Liu 31754bd809dbSZheng Liu retake_lock: 3176e8340395SJan Kara if (rw == WRITE) 3177e8340395SJan Kara inode_dio_done(inode); 31784bd809dbSZheng Liu /* take i_mutex locking again if we did an overwrite dio */ 31794bd809dbSZheng Liu if (overwrite) { 31804bd809dbSZheng Liu up_read(&EXT4_I(inode)->i_data_sem); 31814bd809dbSZheng Liu mutex_lock(&inode->i_mutex); 31824bd809dbSZheng Liu } 31834bd809dbSZheng Liu 31844c0425ffSMingming Cao return ret; 31854c0425ffSMingming Cao } 31868d5d02e6SMingming Cao 31874c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 31884c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 31894c0425ffSMingming Cao unsigned long nr_segs) 31904c0425ffSMingming Cao { 31914c0425ffSMingming Cao struct file *file = iocb->ki_filp; 31924c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 31930562e0baSJiaying Zhang ssize_t ret; 31944c0425ffSMingming Cao 319584ebd795STheodore Ts'o /* 319684ebd795STheodore Ts'o * If we are doing data journalling we don't support O_DIRECT 319784ebd795STheodore Ts'o */ 319884ebd795STheodore Ts'o if (ext4_should_journal_data(inode)) 319984ebd795STheodore Ts'o return 0; 320084ebd795STheodore Ts'o 320146c7f254STao Ma /* Let buffer I/O handle the inline data case. */ 320246c7f254STao Ma if (ext4_has_inline_data(inode)) 320346c7f254STao Ma return 0; 320446c7f254STao Ma 32050562e0baSJiaying Zhang trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); 320612e9b892SDmitry Monakhov if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 32070562e0baSJiaying Zhang ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 32080562e0baSJiaying Zhang else 32090562e0baSJiaying Zhang ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 32100562e0baSJiaying Zhang trace_ext4_direct_IO_exit(inode, offset, 32110562e0baSJiaying Zhang iov_length(iov, nr_segs), rw, ret); 32120562e0baSJiaying Zhang return ret; 32134c0425ffSMingming Cao } 32144c0425ffSMingming Cao 3215ac27a0ecSDave Kleikamp /* 3216617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling 3217ac27a0ecSDave Kleikamp * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3218ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is 3219ac27a0ecSDave Kleikamp * not necessarily locked. 3220ac27a0ecSDave Kleikamp * 3221ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the 3222ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive".
We cannot just set the buffers dirty 3223ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 3224ac27a0ecSDave Kleikamp * 3225ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 3226ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 3227ac27a0ecSDave Kleikamp */ 3228617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 3229ac27a0ecSDave Kleikamp { 3230ac27a0ecSDave Kleikamp SetPageChecked(page); 3231ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 3232ac27a0ecSDave Kleikamp } 3233ac27a0ecSDave Kleikamp 323474d553aaSTheodore Ts'o static const struct address_space_operations ext4_aops = { 3235617ba13bSMingming Cao .readpage = ext4_readpage, 3236617ba13bSMingming Cao .readpages = ext4_readpages, 323743ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 323820970ba6STheodore Ts'o .writepages = ext4_writepages, 3239bfc1af65SNick Piggin .write_begin = ext4_write_begin, 324074d553aaSTheodore Ts'o .write_end = ext4_write_end, 3241617ba13bSMingming Cao .bmap = ext4_bmap, 3242617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3243617ba13bSMingming Cao .releasepage = ext4_releasepage, 3244617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3245ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 32468ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3247aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3248ac27a0ecSDave Kleikamp }; 3249ac27a0ecSDave Kleikamp 3250617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = { 3251617ba13bSMingming Cao .readpage = ext4_readpage, 3252617ba13bSMingming Cao .readpages = ext4_readpages, 325343ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 325420970ba6STheodore Ts'o .writepages = ext4_writepages, 3255bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3256bfc1af65SNick Piggin .write_end = ext4_journalled_write_end, 3257617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty, 3258617ba13bSMingming Cao .bmap = ext4_bmap, 32594520fb3cSJan Kara .invalidatepage = ext4_journalled_invalidatepage, 3260617ba13bSMingming Cao .releasepage = ext4_releasepage, 326184ebd795STheodore Ts'o .direct_IO = ext4_direct_IO, 32628ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3263aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3264ac27a0ecSDave Kleikamp }; 3265ac27a0ecSDave Kleikamp 326664769240SAlex Tomas static const struct address_space_operations ext4_da_aops = { 326764769240SAlex Tomas .readpage = ext4_readpage, 326864769240SAlex Tomas .readpages = ext4_readpages, 326943ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 327020970ba6STheodore Ts'o .writepages = ext4_writepages, 327164769240SAlex Tomas .write_begin = ext4_da_write_begin, 327264769240SAlex Tomas .write_end = ext4_da_write_end, 327364769240SAlex Tomas .bmap = ext4_bmap, 327464769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage, 327564769240SAlex Tomas .releasepage = ext4_releasepage, 327664769240SAlex Tomas .direct_IO = ext4_direct_IO, 327764769240SAlex Tomas .migratepage = buffer_migrate_page, 32788ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3279aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 328064769240SAlex Tomas }; 328164769240SAlex Tomas 3282617ba13bSMingming Cao void ext4_set_aops(struct inode *inode) 3283ac27a0ecSDave Kleikamp { 
32843d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) { 32853d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE: 328674d553aaSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE); 32873d2b1582SLukas Czerner break; 32883d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE: 328974d553aaSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE); 32903d2b1582SLukas Czerner break; 32913d2b1582SLukas Czerner case EXT4_INODE_JOURNAL_DATA_MODE: 3292617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops; 329374d553aaSTheodore Ts'o return; 32943d2b1582SLukas Czerner default: 32953d2b1582SLukas Czerner BUG(); 32963d2b1582SLukas Czerner } 329774d553aaSTheodore Ts'o if (test_opt(inode->i_sb, DELALLOC)) 329874d553aaSTheodore Ts'o inode->i_mapping->a_ops = &ext4_da_aops; 329974d553aaSTheodore Ts'o else 330074d553aaSTheodore Ts'o inode->i_mapping->a_ops = &ext4_aops; 3301ac27a0ecSDave Kleikamp } 3302ac27a0ecSDave Kleikamp 3303d863dc36SLukas Czerner /* 3304d863dc36SLukas Czerner * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3305d863dc36SLukas Czerner * up to the end of the block which corresponds to `from'. 3306d863dc36SLukas Czerner * This is required during truncate. We need to physically zero the tail end 3307d863dc36SLukas Czerner * of that block so it doesn't yield old data if the file is later grown. 3308d863dc36SLukas Czerner */ 3309d863dc36SLukas Czerner int ext4_block_truncate_page(handle_t *handle, 3310d863dc36SLukas Czerner struct address_space *mapping, loff_t from) 3311d863dc36SLukas Czerner { 3312d863dc36SLukas Czerner unsigned offset = from & (PAGE_CACHE_SIZE-1); 3313d863dc36SLukas Czerner unsigned length; 3314d863dc36SLukas Czerner unsigned blocksize; 3315d863dc36SLukas Czerner struct inode *inode = mapping->host; 3316d863dc36SLukas Czerner 3317d863dc36SLukas Czerner blocksize = inode->i_sb->s_blocksize; 3318d863dc36SLukas Czerner length = blocksize - (offset & (blocksize - 1)); 3319d863dc36SLukas Czerner 3320d863dc36SLukas Czerner return ext4_block_zero_page_range(handle, mapping, from, length); 3321d863dc36SLukas Czerner } 3322d863dc36SLukas Czerner 3323d863dc36SLukas Czerner /* 3324d863dc36SLukas Czerner * ext4_block_zero_page_range() zeros out a mapping of length 'length' 3325d863dc36SLukas Czerner * starting from file offset 'from'. The range to be zero'd must 3326d863dc36SLukas Czerner * be contained within one block.
If the specified range exceeds 3327d863dc36SLukas Czerner * the end of the block it will be shortened to the end of the block 3328d863dc36SLukas Czerner * that corresponds to 'from' 3329d863dc36SLukas Czerner */ 3330d863dc36SLukas Czerner int ext4_block_zero_page_range(handle_t *handle, 3331d863dc36SLukas Czerner struct address_space *mapping, loff_t from, loff_t length) 3332d863dc36SLukas Czerner { 3333d863dc36SLukas Czerner ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3334d863dc36SLukas Czerner unsigned offset = from & (PAGE_CACHE_SIZE-1); 3335d863dc36SLukas Czerner unsigned blocksize, max, pos; 3336d863dc36SLukas Czerner ext4_lblk_t iblock; 3337d863dc36SLukas Czerner struct inode *inode = mapping->host; 3338d863dc36SLukas Czerner struct buffer_head *bh; 3339d863dc36SLukas Czerner struct page *page; 3340d863dc36SLukas Czerner int err = 0; 3341d863dc36SLukas Czerner 3342d863dc36SLukas Czerner page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 3343d863dc36SLukas Czerner mapping_gfp_mask(mapping) & ~__GFP_FS); 3344d863dc36SLukas Czerner if (!page) 3345d863dc36SLukas Czerner return -ENOMEM; 3346d863dc36SLukas Czerner 3347d863dc36SLukas Czerner blocksize = inode->i_sb->s_blocksize; 3348d863dc36SLukas Czerner max = blocksize - (offset & (blocksize - 1)); 3349d863dc36SLukas Czerner 3350d863dc36SLukas Czerner /* 3351d863dc36SLukas Czerner * correct length if it does not fall between 3352d863dc36SLukas Czerner * 'from' and the end of the block 3353d863dc36SLukas Czerner */ 3354d863dc36SLukas Czerner if (length > max || length < 0) 3355d863dc36SLukas Czerner length = max; 3356d863dc36SLukas Czerner 3357d863dc36SLukas Czerner iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3358d863dc36SLukas Czerner 3359d863dc36SLukas Czerner if (!page_has_buffers(page)) 3360d863dc36SLukas Czerner create_empty_buffers(page, blocksize, 0); 3361d863dc36SLukas Czerner 3362d863dc36SLukas Czerner /* Find the buffer that contains "offset" */ 3363d863dc36SLukas Czerner bh = page_buffers(page); 3364d863dc36SLukas Czerner pos = blocksize; 3365d863dc36SLukas Czerner while (offset >= pos) { 3366d863dc36SLukas Czerner bh = bh->b_this_page; 3367d863dc36SLukas Czerner iblock++; 3368d863dc36SLukas Czerner pos += blocksize; 3369d863dc36SLukas Czerner } 3370d863dc36SLukas Czerner if (buffer_freed(bh)) { 3371d863dc36SLukas Czerner BUFFER_TRACE(bh, "freed: skip"); 3372d863dc36SLukas Czerner goto unlock; 3373d863dc36SLukas Czerner } 3374d863dc36SLukas Czerner if (!buffer_mapped(bh)) { 3375d863dc36SLukas Czerner BUFFER_TRACE(bh, "unmapped"); 3376d863dc36SLukas Czerner ext4_get_block(inode, iblock, bh, 0); 3377d863dc36SLukas Czerner /* unmapped? It's a hole - nothing to do */ 3378d863dc36SLukas Czerner if (!buffer_mapped(bh)) { 3379d863dc36SLukas Czerner BUFFER_TRACE(bh, "still unmapped"); 3380d863dc36SLukas Czerner goto unlock; 3381d863dc36SLukas Czerner } 3382d863dc36SLukas Czerner } 3383d863dc36SLukas Czerner 3384d863dc36SLukas Czerner /* Ok, it's mapped. Make sure it's up-to-date */ 3385d863dc36SLukas Czerner if (PageUptodate(page)) 3386d863dc36SLukas Czerner set_buffer_uptodate(bh); 3387d863dc36SLukas Czerner 3388d863dc36SLukas Czerner if (!buffer_uptodate(bh)) { 3389d863dc36SLukas Czerner err = -EIO; 3390d863dc36SLukas Czerner ll_rw_block(READ, 1, &bh); 3391d863dc36SLukas Czerner wait_on_buffer(bh); 3392d863dc36SLukas Czerner /* Uhhuh. Read error. Complain and punt.
*/ 3393d863dc36SLukas Czerner if (!buffer_uptodate(bh)) 3394d863dc36SLukas Czerner goto unlock; 3395d863dc36SLukas Czerner } 3396d863dc36SLukas Czerner if (ext4_should_journal_data(inode)) { 3397d863dc36SLukas Czerner BUFFER_TRACE(bh, "get write access"); 3398d863dc36SLukas Czerner err = ext4_journal_get_write_access(handle, bh); 3399d863dc36SLukas Czerner if (err) 3400d863dc36SLukas Czerner goto unlock; 3401d863dc36SLukas Czerner } 3402d863dc36SLukas Czerner zero_user(page, offset, length); 3403d863dc36SLukas Czerner BUFFER_TRACE(bh, "zeroed end of block"); 3404d863dc36SLukas Czerner 3405d863dc36SLukas Czerner if (ext4_should_journal_data(inode)) { 3406d863dc36SLukas Czerner err = ext4_handle_dirty_metadata(handle, inode, bh); 34070713ed0cSLukas Czerner } else { 3408353eefd3Sjon ernst err = 0; 3409d863dc36SLukas Czerner mark_buffer_dirty(bh); 34100713ed0cSLukas Czerner if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) 34110713ed0cSLukas Czerner err = ext4_jbd2_file_inode(handle, inode); 34120713ed0cSLukas Czerner } 3413d863dc36SLukas Czerner 3414d863dc36SLukas Czerner unlock: 3415d863dc36SLukas Czerner unlock_page(page); 3416d863dc36SLukas Czerner page_cache_release(page); 3417d863dc36SLukas Czerner return err; 3418d863dc36SLukas Czerner } 3419d863dc36SLukas Czerner 3420a87dd18cSLukas Czerner int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode, 3421a87dd18cSLukas Czerner loff_t lstart, loff_t length) 3422a87dd18cSLukas Czerner { 3423a87dd18cSLukas Czerner struct super_block *sb = inode->i_sb; 3424a87dd18cSLukas Czerner struct address_space *mapping = inode->i_mapping; 3425e1be3a92SLukas Czerner unsigned partial_start, partial_end; 3426a87dd18cSLukas Czerner ext4_fsblk_t start, end; 3427a87dd18cSLukas Czerner loff_t byte_end = (lstart + length - 1); 3428a87dd18cSLukas Czerner int err = 0; 3429a87dd18cSLukas Czerner 3430e1be3a92SLukas Czerner partial_start = lstart & (sb->s_blocksize - 1); 3431e1be3a92SLukas Czerner partial_end = byte_end & (sb->s_blocksize - 1); 3432e1be3a92SLukas Czerner 3433a87dd18cSLukas Czerner start = lstart >> sb->s_blocksize_bits; 3434a87dd18cSLukas Czerner end = byte_end >> sb->s_blocksize_bits; 3435a87dd18cSLukas Czerner 3436a87dd18cSLukas Czerner /* Handle partial zero within the single block */ 3437e1be3a92SLukas Czerner if (start == end && 3438e1be3a92SLukas Czerner (partial_start || (partial_end != sb->s_blocksize - 1))) { 3439a87dd18cSLukas Czerner err = ext4_block_zero_page_range(handle, mapping, 3440a87dd18cSLukas Czerner lstart, length); 3441a87dd18cSLukas Czerner return err; 3442a87dd18cSLukas Czerner } 3443a87dd18cSLukas Czerner /* Handle partial zero out on the start of the range */ 3444e1be3a92SLukas Czerner if (partial_start) { 3445a87dd18cSLukas Czerner err = ext4_block_zero_page_range(handle, mapping, 3446a87dd18cSLukas Czerner lstart, sb->s_blocksize); 3447a87dd18cSLukas Czerner if (err) 3448a87dd18cSLukas Czerner return err; 3449a87dd18cSLukas Czerner } 3450a87dd18cSLukas Czerner /* Handle partial zero out on the end of the range */ 3451e1be3a92SLukas Czerner if (partial_end != sb->s_blocksize - 1) 3452a87dd18cSLukas Czerner err = ext4_block_zero_page_range(handle, mapping, 3453e1be3a92SLukas Czerner byte_end - partial_end, 3454e1be3a92SLukas Czerner partial_end + 1); 3455a87dd18cSLukas Czerner return err; 3456a87dd18cSLukas Czerner } 3457a87dd18cSLukas Czerner
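/*
 * Editorial sketch, not part of the original source: the edge arithmetic
 * of ext4_zero_partial_blocks() above in standalone form. Given a byte
 * range and a block size it reports which partial blocks would need
 * zeroing; the example range in main() is made up.
 */
#include <stdio.h>

static void partial_edges(unsigned long long lstart, unsigned long long length,
			  unsigned int blocksize)
{
	unsigned long long byte_end = lstart + length - 1;
	unsigned long long start = lstart / blocksize;
	unsigned long long end = byte_end / blocksize;
	unsigned int partial_start = lstart & (blocksize - 1);
	unsigned int partial_end = byte_end & (blocksize - 1);

	/* Whole range inside a single block: one partial zero-out */
	if (start == end && (partial_start || partial_end != blocksize - 1)) {
		printf("zero %llu..%llu (single block)\n", lstart, byte_end);
		return;
	}
	if (partial_start)			/* unaligned head */
		printf("zero head %llu..%llu\n", lstart,
		       (start + 1) * blocksize - 1);
	if (partial_end != blocksize - 1)	/* unaligned tail */
		printf("zero tail %llu..%llu\n", end * blocksize, byte_end);
}

int main(void)
{
	partial_edges(5000, 8000, 4096);	/* head and tail both partial */
	return 0;
}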
345891ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode) 345991ef4cafSDuane Griffin { 346091ef4cafSDuane Griffin if (S_ISREG(inode->i_mode)) 346191ef4cafSDuane Griffin return 1; 346291ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode)) 346391ef4cafSDuane Griffin return 1; 346491ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode)) 346591ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode); 346691ef4cafSDuane Griffin return 0; 346791ef4cafSDuane Griffin } 346891ef4cafSDuane Griffin 3469ac27a0ecSDave Kleikamp /* 3470a4bb6b64SAllison Henderson * ext4_punch_hole: punches a hole in a file by releasing the blocks 3471a4bb6b64SAllison Henderson * associated with the given offset and length 3472a4bb6b64SAllison Henderson * 3473a4bb6b64SAllison Henderson * @inode: File inode 3474a4bb6b64SAllison Henderson * @offset: The offset where the hole will begin 3475a4bb6b64SAllison Henderson * @len: The length of the hole 3476a4bb6b64SAllison Henderson * 34774907cb7bSAnatol Pomozov * Returns: 0 on success or negative on failure 3478a4bb6b64SAllison Henderson */ 3479a4bb6b64SAllison Henderson 3480aeb2817aSAshish Sangwan int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) 3481a4bb6b64SAllison Henderson { 348226a4c0c6STheodore Ts'o struct super_block *sb = inode->i_sb; 348326a4c0c6STheodore Ts'o ext4_lblk_t first_block, stop_block; 348426a4c0c6STheodore Ts'o struct address_space *mapping = inode->i_mapping; 3485a87dd18cSLukas Czerner loff_t first_block_offset, last_block_offset; 348626a4c0c6STheodore Ts'o handle_t *handle; 348726a4c0c6STheodore Ts'o unsigned int credits; 348826a4c0c6STheodore Ts'o int ret = 0; 348926a4c0c6STheodore Ts'o 3490a4bb6b64SAllison Henderson if (!S_ISREG(inode->i_mode)) 349173355192SAllison Henderson return -EOPNOTSUPP; 3492a4bb6b64SAllison Henderson 349326a4c0c6STheodore Ts'o if (EXT4_SB(sb)->s_cluster_ratio > 1) { 3494bab08ab9STheodore Ts'o /* TODO: Add support for bigalloc file systems */ 349573355192SAllison Henderson return -EOPNOTSUPP; 3496bab08ab9STheodore Ts'o } 3497bab08ab9STheodore Ts'o 3498aaddea81SZheng Liu trace_ext4_punch_hole(inode, offset, length); 3499aaddea81SZheng Liu 350026a4c0c6STheodore Ts'o /* 350126a4c0c6STheodore Ts'o * Write out all dirty pages to avoid race conditions 350226a4c0c6STheodore Ts'o * Then release them.
350326a4c0c6STheodore Ts'o */ 350426a4c0c6STheodore Ts'o if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 350526a4c0c6STheodore Ts'o ret = filemap_write_and_wait_range(mapping, offset, 350626a4c0c6STheodore Ts'o offset + length - 1); 350726a4c0c6STheodore Ts'o if (ret) 350826a4c0c6STheodore Ts'o return ret; 350926a4c0c6STheodore Ts'o } 351026a4c0c6STheodore Ts'o 351126a4c0c6STheodore Ts'o mutex_lock(&inode->i_mutex); 351226a4c0c6STheodore Ts'o /* It is not possible to punch a hole in an append-only file */ 351326a4c0c6STheodore Ts'o if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { 351426a4c0c6STheodore Ts'o ret = -EPERM; 351526a4c0c6STheodore Ts'o goto out_mutex; 351626a4c0c6STheodore Ts'o } 351726a4c0c6STheodore Ts'o if (IS_SWAPFILE(inode)) { 351826a4c0c6STheodore Ts'o ret = -ETXTBSY; 351926a4c0c6STheodore Ts'o goto out_mutex; 352026a4c0c6STheodore Ts'o } 352126a4c0c6STheodore Ts'o 352226a4c0c6STheodore Ts'o /* No need to punch hole beyond i_size */ 352326a4c0c6STheodore Ts'o if (offset >= inode->i_size) 352426a4c0c6STheodore Ts'o goto out_mutex; 352526a4c0c6STheodore Ts'o 352626a4c0c6STheodore Ts'o /* 352726a4c0c6STheodore Ts'o * If the hole extends beyond i_size, set the hole 352826a4c0c6STheodore Ts'o * to end after the page that contains i_size 352926a4c0c6STheodore Ts'o */ 353026a4c0c6STheodore Ts'o if (offset + length > inode->i_size) { 353126a4c0c6STheodore Ts'o length = inode->i_size + 353226a4c0c6STheodore Ts'o PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - 353326a4c0c6STheodore Ts'o offset; 353426a4c0c6STheodore Ts'o } 353526a4c0c6STheodore Ts'o 3536a361293fSJan Kara if (offset & (sb->s_blocksize - 1) || 3537a361293fSJan Kara (offset + length) & (sb->s_blocksize - 1)) { 3538a361293fSJan Kara /* 3539a361293fSJan Kara * Attach jinode to inode for jbd2 if we do any zeroing of 3540a361293fSJan Kara * partial block 3541a361293fSJan Kara */ 3542a361293fSJan Kara ret = ext4_inode_attach_jinode(inode); 3543a361293fSJan Kara if (ret < 0) 3544a361293fSJan Kara goto out_mutex; 3545a361293fSJan Kara 3546a361293fSJan Kara } 3547a361293fSJan Kara 3548a87dd18cSLukas Czerner first_block_offset = round_up(offset, sb->s_blocksize); 3549a87dd18cSLukas Czerner last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; 355026a4c0c6STheodore Ts'o 3551a87dd18cSLukas Czerner /* Now release the pages and zero the block-aligned part of the pages */ 3552a87dd18cSLukas Czerner if (last_block_offset > first_block_offset) 3553a87dd18cSLukas Czerner truncate_pagecache_range(inode, first_block_offset, 3554a87dd18cSLukas Czerner last_block_offset); 355526a4c0c6STheodore Ts'o 355626a4c0c6STheodore Ts'o /* Wait for all existing dio workers; newcomers will block on i_mutex */ 355726a4c0c6STheodore Ts'o ext4_inode_block_unlocked_dio(inode); 355826a4c0c6STheodore Ts'o inode_dio_wait(inode); 355926a4c0c6STheodore Ts'o 356026a4c0c6STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 356126a4c0c6STheodore Ts'o credits = ext4_writepage_trans_blocks(inode); 356226a4c0c6STheodore Ts'o else 356326a4c0c6STheodore Ts'o credits = ext4_blocks_for_truncate(inode); 356426a4c0c6STheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 356526a4c0c6STheodore Ts'o if (IS_ERR(handle)) { 356626a4c0c6STheodore Ts'o ret = PTR_ERR(handle); 356726a4c0c6STheodore Ts'o ext4_std_error(sb, ret); 356826a4c0c6STheodore Ts'o goto out_dio; 356926a4c0c6STheodore Ts'o } 357026a4c0c6STheodore Ts'o 3571a87dd18cSLukas Czerner ret = ext4_zero_partial_blocks(handle, inode, offset,
3572a87dd18cSLukas Czerner length); 357326a4c0c6STheodore Ts'o if (ret) 357426a4c0c6STheodore Ts'o goto out_stop; 357526a4c0c6STheodore Ts'o 357626a4c0c6STheodore Ts'o first_block = (offset + sb->s_blocksize - 1) >> 357726a4c0c6STheodore Ts'o EXT4_BLOCK_SIZE_BITS(sb); 357826a4c0c6STheodore Ts'o stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); 357926a4c0c6STheodore Ts'o 358026a4c0c6STheodore Ts'o /* If there are no blocks to remove, return now */ 358126a4c0c6STheodore Ts'o if (first_block >= stop_block) 358226a4c0c6STheodore Ts'o goto out_stop; 358326a4c0c6STheodore Ts'o 358426a4c0c6STheodore Ts'o down_write(&EXT4_I(inode)->i_data_sem); 358526a4c0c6STheodore Ts'o ext4_discard_preallocations(inode); 358626a4c0c6STheodore Ts'o 358726a4c0c6STheodore Ts'o ret = ext4_es_remove_extent(inode, first_block, 358826a4c0c6STheodore Ts'o stop_block - first_block); 358926a4c0c6STheodore Ts'o if (ret) { 359026a4c0c6STheodore Ts'o up_write(&EXT4_I(inode)->i_data_sem); 359126a4c0c6STheodore Ts'o goto out_stop; 359226a4c0c6STheodore Ts'o } 359326a4c0c6STheodore Ts'o 359426a4c0c6STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 359526a4c0c6STheodore Ts'o ret = ext4_ext_remove_space(inode, first_block, 359626a4c0c6STheodore Ts'o stop_block - 1); 359726a4c0c6STheodore Ts'o else 359826a4c0c6STheodore Ts'o ret = ext4_free_hole_blocks(handle, inode, first_block, 359926a4c0c6STheodore Ts'o stop_block); 360026a4c0c6STheodore Ts'o 360126a4c0c6STheodore Ts'o ext4_discard_preallocations(inode); 3602819c4920STheodore Ts'o up_write(&EXT4_I(inode)->i_data_sem); 360326a4c0c6STheodore Ts'o if (IS_SYNC(inode)) 360426a4c0c6STheodore Ts'o ext4_handle_sync(handle); 360526a4c0c6STheodore Ts'o inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 360626a4c0c6STheodore Ts'o ext4_mark_inode_dirty(handle, inode); 360726a4c0c6STheodore Ts'o out_stop: 360826a4c0c6STheodore Ts'o ext4_journal_stop(handle); 360926a4c0c6STheodore Ts'o out_dio: 361026a4c0c6STheodore Ts'o ext4_inode_resume_unlocked_dio(inode); 361126a4c0c6STheodore Ts'o out_mutex: 361226a4c0c6STheodore Ts'o mutex_unlock(&inode->i_mutex); 361326a4c0c6STheodore Ts'o return ret; 3614a4bb6b64SAllison Henderson } 3615a4bb6b64SAllison Henderson 3616a361293fSJan Kara int ext4_inode_attach_jinode(struct inode *inode) 3617a361293fSJan Kara { 3618a361293fSJan Kara struct ext4_inode_info *ei = EXT4_I(inode); 3619a361293fSJan Kara struct jbd2_inode *jinode; 3620a361293fSJan Kara 3621a361293fSJan Kara if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) 3622a361293fSJan Kara return 0; 3623a361293fSJan Kara 3624a361293fSJan Kara jinode = jbd2_alloc_inode(GFP_KERNEL); 3625a361293fSJan Kara spin_lock(&inode->i_lock); 3626a361293fSJan Kara if (!ei->jinode) { 3627a361293fSJan Kara if (!jinode) { 3628a361293fSJan Kara spin_unlock(&inode->i_lock); 3629a361293fSJan Kara return -ENOMEM; 3630a361293fSJan Kara } 3631a361293fSJan Kara ei->jinode = jinode; 3632a361293fSJan Kara jbd2_journal_init_jbd_inode(ei->jinode, inode); 3633a361293fSJan Kara jinode = NULL; 3634a361293fSJan Kara } 3635a361293fSJan Kara spin_unlock(&inode->i_lock); 3636a361293fSJan Kara if (unlikely(jinode != NULL)) 3637a361293fSJan Kara jbd2_free_inode(jinode); 3638a361293fSJan Kara return 0; 3639a361293fSJan Kara } 3640a361293fSJan Kara
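/*
 * Editorial sketch, not part of the original source: the
 * allocate-outside-the-lock pattern of ext4_inode_attach_jinode() above,
 * reduced to a generic pthread example. The allocation may sleep, so it
 * is done before entering the (spinlock-like) critical section; if
 * another thread installed its object first, the loser frees the spare.
 * All names here (struct ctx, attach_ctx) are invented for illustration.
 */
#include <pthread.h>
#include <stdlib.h>

struct ctx { int data; };

static struct ctx *shared;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int attach_ctx(void)
{
	struct ctx *c;

	if (shared)		/* unlocked fast path, like the ei->jinode check */
		return 0;
	c = malloc(sizeof(*c));	/* may block: do it before taking the lock */
	pthread_mutex_lock(&lock);
	if (!shared) {
		if (!c) {
			pthread_mutex_unlock(&lock);
			return -1;	/* -ENOMEM in the kernel version */
		}
		shared = c;
		c = NULL;	/* ownership transferred */
	}
	pthread_mutex_unlock(&lock);
	free(c);		/* lost the race: drop the spare copy */
	return 0;
}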
3641a4bb6b64SAllison Henderson /* 3642617ba13bSMingming Cao * ext4_truncate() 3643ac27a0ecSDave Kleikamp * 3644617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3645617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3646ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 3647ac27a0ecSDave Kleikamp * 364842b2aa86SJustin P. Mattock * As we work through the truncate and commit bits of it to the journal there 3649ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3650ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3651ac27a0ecSDave Kleikamp * 3652ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3653ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3654ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3655ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3656ac27a0ecSDave Kleikamp * left-to-right works OK too). 3657ac27a0ecSDave Kleikamp * 3658ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3659ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3660ac27a0ecSDave Kleikamp * 3661ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3662617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3663ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3664617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3665617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3666ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3667617ba13bSMingming Cao * ext4_truncate() run will find them and release them. 3668ac27a0ecSDave Kleikamp */ 3669617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3670ac27a0ecSDave Kleikamp { 3671819c4920STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 3672819c4920STheodore Ts'o unsigned int credits; 3673819c4920STheodore Ts'o handle_t *handle; 3674819c4920STheodore Ts'o struct address_space *mapping = inode->i_mapping; 3675819c4920STheodore Ts'o 367619b5ef61STheodore Ts'o /* 367719b5ef61STheodore Ts'o * There is a possibility that we're either freeing the inode 367819b5ef61STheodore Ts'o * or it is a completely new inode. In those cases we might not 367919b5ef61STheodore Ts'o * have i_mutex locked because it's not necessary.
368019b5ef61STheodore Ts'o */ 368119b5ef61STheodore Ts'o if (!(inode->i_state & (I_NEW|I_FREEING))) 368219b5ef61STheodore Ts'o WARN_ON(!mutex_is_locked(&inode->i_mutex)); 36830562e0baSJiaying Zhang trace_ext4_truncate_enter(inode); 36840562e0baSJiaying Zhang 368591ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3686ac27a0ecSDave Kleikamp return; 3687ac27a0ecSDave Kleikamp 368812e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3689c8d46e41SJiaying Zhang 36905534fb5bSTheodore Ts'o if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 369119f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 36927d8f9f7dSTheodore Ts'o 3693aef1c851STao Ma if (ext4_has_inline_data(inode)) { 3694aef1c851STao Ma int has_inline = 1; 3695aef1c851STao Ma 3696aef1c851STao Ma ext4_inline_data_truncate(inode, &has_inline); 3697aef1c851STao Ma if (has_inline) 3698aef1c851STao Ma return; 3699aef1c851STao Ma } 3700aef1c851STao Ma 3701a361293fSJan Kara /* If we zero-out tail of the page, we have to create jinode for jbd2 */ 3702a361293fSJan Kara if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { 3703a361293fSJan Kara if (ext4_inode_attach_jinode(inode) < 0) 3704a361293fSJan Kara return; 3705a361293fSJan Kara } 3706a361293fSJan Kara 3707ff9893dcSAmir Goldstein if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3708819c4920STheodore Ts'o credits = ext4_writepage_trans_blocks(inode); 3709ff9893dcSAmir Goldstein else 3710819c4920STheodore Ts'o credits = ext4_blocks_for_truncate(inode); 3711819c4920STheodore Ts'o 3712819c4920STheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 3713819c4920STheodore Ts'o if (IS_ERR(handle)) { 3714819c4920STheodore Ts'o ext4_std_error(inode->i_sb, PTR_ERR(handle)); 3715819c4920STheodore Ts'o return; 3716819c4920STheodore Ts'o } 3717819c4920STheodore Ts'o 3718eb3544c6SLukas Czerner if (inode->i_size & (inode->i_sb->s_blocksize - 1)) 3719eb3544c6SLukas Czerner ext4_block_truncate_page(handle, mapping, inode->i_size); 3720819c4920STheodore Ts'o 3721819c4920STheodore Ts'o /* 3722819c4920STheodore Ts'o * We add the inode to the orphan list, so that if this 3723819c4920STheodore Ts'o * truncate spans multiple transactions, and we crash, we will 3724819c4920STheodore Ts'o * resume the truncate when the filesystem recovers. It also 3725819c4920STheodore Ts'o * marks the inode dirty, to catch the new size. 3726819c4920STheodore Ts'o * 3727819c4920STheodore Ts'o * Implication: the file must always be in a sane, consistent 3728819c4920STheodore Ts'o * truncatable state while each transaction commits. 
3729819c4920STheodore Ts'o */ 3730819c4920STheodore Ts'o if (ext4_orphan_add(handle, inode)) 3731819c4920STheodore Ts'o goto out_stop; 3732819c4920STheodore Ts'o 3733819c4920STheodore Ts'o down_write(&EXT4_I(inode)->i_data_sem); 3734819c4920STheodore Ts'o 3735819c4920STheodore Ts'o ext4_discard_preallocations(inode); 3736819c4920STheodore Ts'o 3737819c4920STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3738819c4920STheodore Ts'o ext4_ext_truncate(handle, inode); 3739819c4920STheodore Ts'o else 3740819c4920STheodore Ts'o ext4_ind_truncate(handle, inode); 3741819c4920STheodore Ts'o 3742819c4920STheodore Ts'o up_write(&ei->i_data_sem); 3743819c4920STheodore Ts'o 3744819c4920STheodore Ts'o if (IS_SYNC(inode)) 3745819c4920STheodore Ts'o ext4_handle_sync(handle); 3746819c4920STheodore Ts'o 3747819c4920STheodore Ts'o out_stop: 3748819c4920STheodore Ts'o /* 3749819c4920STheodore Ts'o * If this was a simple ftruncate() and the file will remain alive, 3750819c4920STheodore Ts'o * then we need to clear up the orphan record which we created above. 3751819c4920STheodore Ts'o * However, if this was a real unlink then we were called by 3752819c4920STheodore Ts'o * ext4_delete_inode(), and we allow that function to clean up the 3753819c4920STheodore Ts'o * orphan info for us. 3754819c4920STheodore Ts'o */ 3755819c4920STheodore Ts'o if (inode->i_nlink) 3756819c4920STheodore Ts'o ext4_orphan_del(handle, inode); 3757819c4920STheodore Ts'o 3758819c4920STheodore Ts'o inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 3759819c4920STheodore Ts'o ext4_mark_inode_dirty(handle, inode); 3760819c4920STheodore Ts'o ext4_journal_stop(handle); 3761a86c6181SAlex Tomas 37620562e0baSJiaying Zhang trace_ext4_truncate_exit(inode); 3763ac27a0ecSDave Kleikamp } 3764ac27a0ecSDave Kleikamp 3765ac27a0ecSDave Kleikamp /* 3766617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3767ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3768ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3769ac27a0ecSDave Kleikamp * inode. 
3770ac27a0ecSDave Kleikamp */ 3771617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3772617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3773ac27a0ecSDave Kleikamp { 3774240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3775ac27a0ecSDave Kleikamp struct buffer_head *bh; 3776240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3777240799cdSTheodore Ts'o ext4_fsblk_t block; 3778240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3779ac27a0ecSDave Kleikamp 37803a06d778SAneesh Kumar K.V iloc->bh = NULL; 3781240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3782ac27a0ecSDave Kleikamp return -EIO; 3783ac27a0ecSDave Kleikamp 3784240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3785240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3786240799cdSTheodore Ts'o if (!gdp) 3787240799cdSTheodore Ts'o return -EIO; 3788240799cdSTheodore Ts'o 3789240799cdSTheodore Ts'o /* 3790240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 3791240799cdSTheodore Ts'o */ 379200d09882STao Ma inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 3793240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 3794240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 3795240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3796240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3797240799cdSTheodore Ts'o 3798240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 3799aebf0243SWang Shilong if (unlikely(!bh)) 3800860d21e2STheodore Ts'o return -ENOMEM; 3801ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3802ac27a0ecSDave Kleikamp lock_buffer(bh); 38039c83a923SHidehiro Kawai 38049c83a923SHidehiro Kawai /* 38059c83a923SHidehiro Kawai * If the buffer has the write error flag, we have failed 38069c83a923SHidehiro Kawai * to write out another inode in the same block. In this 38079c83a923SHidehiro Kawai * case, we don't have to read the block because we may 38089c83a923SHidehiro Kawai * read the old inode data successfully. 38099c83a923SHidehiro Kawai */ 38109c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 38119c83a923SHidehiro Kawai set_buffer_uptodate(bh); 38129c83a923SHidehiro Kawai 3813ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3814ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3815ac27a0ecSDave Kleikamp unlock_buffer(bh); 3816ac27a0ecSDave Kleikamp goto has_buffer; 3817ac27a0ecSDave Kleikamp } 3818ac27a0ecSDave Kleikamp 3819ac27a0ecSDave Kleikamp /* 3820ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3821ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3822ac27a0ecSDave Kleikamp * block. 3823ac27a0ecSDave Kleikamp */ 3824ac27a0ecSDave Kleikamp if (in_mem) { 3825ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3826240799cdSTheodore Ts'o int i, start; 3827ac27a0ecSDave Kleikamp 3828240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 3829ac27a0ecSDave Kleikamp 3830ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? 
*/ 3831240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3832aebf0243SWang Shilong if (unlikely(!bitmap_bh)) 3833ac27a0ecSDave Kleikamp goto make_io; 3834ac27a0ecSDave Kleikamp 3835ac27a0ecSDave Kleikamp /* 3836ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3837ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3838ac27a0ecSDave Kleikamp * of one, so skip it. 3839ac27a0ecSDave Kleikamp */ 3840ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3841ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3842ac27a0ecSDave Kleikamp goto make_io; 3843ac27a0ecSDave Kleikamp } 3844240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 3845ac27a0ecSDave Kleikamp if (i == inode_offset) 3846ac27a0ecSDave Kleikamp continue; 3847617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3848ac27a0ecSDave Kleikamp break; 3849ac27a0ecSDave Kleikamp } 3850ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3851240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 3852ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3853ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3854ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3855ac27a0ecSDave Kleikamp unlock_buffer(bh); 3856ac27a0ecSDave Kleikamp goto has_buffer; 3857ac27a0ecSDave Kleikamp } 3858ac27a0ecSDave Kleikamp } 3859ac27a0ecSDave Kleikamp 3860ac27a0ecSDave Kleikamp make_io: 3861ac27a0ecSDave Kleikamp /* 3862240799cdSTheodore Ts'o * If we need to do any I/O, try to pre-readahead extra 3863240799cdSTheodore Ts'o * blocks from the inode table. 3864240799cdSTheodore Ts'o */ 3865240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 3866240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 3867240799cdSTheodore Ts'o unsigned num; 38680d606e2cSTheodore Ts'o __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks; 3869240799cdSTheodore Ts'o 3870240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 3871b713a5ecSTheodore Ts'o /* s_inode_readahead_blks is always a power of 2 */ 38720d606e2cSTheodore Ts'o b = block & ~((ext4_fsblk_t) ra_blks - 1); 3873240799cdSTheodore Ts'o if (table > b) 3874240799cdSTheodore Ts'o b = table; 38750d606e2cSTheodore Ts'o end = b + ra_blks; 3876240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 3877feb0ab32SDarrick J. Wong if (ext4_has_group_desc_csum(sb)) 3878560671a0SAneesh Kumar K.V num -= ext4_itable_unused_count(sb, gdp); 3879240799cdSTheodore Ts'o table += num / inodes_per_block; 3880240799cdSTheodore Ts'o if (end > table) 3881240799cdSTheodore Ts'o end = table; 3882240799cdSTheodore Ts'o while (b <= end) 3883240799cdSTheodore Ts'o sb_breadahead(sb, b++); 3884240799cdSTheodore Ts'o } 3885240799cdSTheodore Ts'o 3886240799cdSTheodore Ts'o /* 3887ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 3888ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 3889ac27a0ecSDave Kleikamp * Read the block from disk. 
3890ac27a0ecSDave Kleikamp */ 38910562e0baSJiaying Zhang trace_ext4_load_inode(inode); 3892ac27a0ecSDave Kleikamp get_bh(bh); 3893ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 389465299a3bSChristoph Hellwig submit_bh(READ | REQ_META | REQ_PRIO, bh); 3895ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3896ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3897c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3898c398eda0STheodore Ts'o "unable to read itable block"); 3899ac27a0ecSDave Kleikamp brelse(bh); 3900ac27a0ecSDave Kleikamp return -EIO; 3901ac27a0ecSDave Kleikamp } 3902ac27a0ecSDave Kleikamp } 3903ac27a0ecSDave Kleikamp has_buffer: 3904ac27a0ecSDave Kleikamp iloc->bh = bh; 3905ac27a0ecSDave Kleikamp return 0; 3906ac27a0ecSDave Kleikamp } 3907ac27a0ecSDave Kleikamp 3908617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 3909ac27a0ecSDave Kleikamp { 3910ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. */ 3911617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 391219f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 3913ac27a0ecSDave Kleikamp } 3914ac27a0ecSDave Kleikamp 3915617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 3916ac27a0ecSDave Kleikamp { 3917617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 3918ac27a0ecSDave Kleikamp 3919ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 3920617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 3921ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 3922617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 3923ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 3924617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 3925ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 3926617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 3927ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 3928617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 3929ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 3930ac27a0ecSDave Kleikamp } 3931ac27a0ecSDave Kleikamp 3932ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 3933ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 3934ff9ddf7eSJan Kara { 393584a8dce2SDmitry Monakhov unsigned int vfs_fl; 393684a8dce2SDmitry Monakhov unsigned long old_fl, new_fl; 3937ff9ddf7eSJan Kara 393884a8dce2SDmitry Monakhov do { 393984a8dce2SDmitry Monakhov vfs_fl = ei->vfs_inode.i_flags; 394084a8dce2SDmitry Monakhov old_fl = ei->i_flags; 394184a8dce2SDmitry Monakhov new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 394284a8dce2SDmitry Monakhov EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| 394384a8dce2SDmitry Monakhov EXT4_DIRSYNC_FL); 394484a8dce2SDmitry Monakhov if (vfs_fl & S_SYNC) 394584a8dce2SDmitry Monakhov new_fl |= EXT4_SYNC_FL; 394684a8dce2SDmitry Monakhov if (vfs_fl & S_APPEND) 394784a8dce2SDmitry Monakhov new_fl |= EXT4_APPEND_FL; 394884a8dce2SDmitry Monakhov if (vfs_fl & S_IMMUTABLE) 394984a8dce2SDmitry Monakhov new_fl |= EXT4_IMMUTABLE_FL; 395084a8dce2SDmitry Monakhov if (vfs_fl & S_NOATIME) 395184a8dce2SDmitry Monakhov new_fl |= EXT4_NOATIME_FL; 395284a8dce2SDmitry Monakhov if (vfs_fl & S_DIRSYNC) 395384a8dce2SDmitry Monakhov new_fl |= EXT4_DIRSYNC_FL; 395484a8dce2SDmitry Monakhov } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); 3955ff9ddf7eSJan Kara } 3956de9a55b8STheodore Ts'o
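/*
 * Editorial sketch, not part of the original source: the inode-table
 * location arithmetic of __ext4_get_inode_loc() above as standalone math.
 * The geometry (8192 inodes per group, 256-byte inodes, 4096-byte blocks)
 * is an example, not read from a real superblock.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ino = 12345;
	unsigned long inodes_per_group = 8192;
	unsigned int inode_size = 256, block_size = 4096;
	unsigned int inodes_per_block = block_size / inode_size;

	unsigned long group = (ino - 1) / inodes_per_group;
	unsigned long index = (ino - 1) % inodes_per_group;
	unsigned long table_block = index / inodes_per_block;
	unsigned int offset = (index % inodes_per_block) * inode_size;

	/* group 1, block 259 of that group's inode table, byte offset 2048 */
	printf("inode %lu: group %lu, table block %lu, offset %u\n",
	       ino, group, table_block, offset);
	return 0;
}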
39570fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 39580fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 39590fc1b451SAneesh Kumar K.V { 39600fc1b451SAneesh Kumar K.V blkcnt_t i_blocks; 39618180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 39628180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 39630fc1b451SAneesh Kumar K.V 39640fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 39650fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 39660fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 39670fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 39680fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 396907a03824STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 39708180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 39718180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 39728180a562SAneesh Kumar K.V } else { 39730fc1b451SAneesh Kumar K.V return i_blocks; 39748180a562SAneesh Kumar K.V } 39750fc1b451SAneesh Kumar K.V } else { 39760fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 39770fc1b451SAneesh Kumar K.V } 39780fc1b451SAneesh Kumar K.V } 3979ff9ddf7eSJan Kara 3980152a7b0aSTao Ma static inline void ext4_iget_extra_inode(struct inode *inode, 3981152a7b0aSTao Ma struct ext4_inode *raw_inode, 3982152a7b0aSTao Ma struct ext4_inode_info *ei) 3983152a7b0aSTao Ma { 3984152a7b0aSTao Ma __le32 *magic = (void *)raw_inode + 3985152a7b0aSTao Ma EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 398667cf5b09STao Ma if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 3987152a7b0aSTao Ma ext4_set_inode_state(inode, EXT4_STATE_XATTR); 398867cf5b09STao Ma ext4_find_inline_data_nolock(inode); 3989f19d5870STao Ma } else 3990f19d5870STao Ma EXT4_I(inode)->i_inline_off = 0; 3991152a7b0aSTao Ma } 3992152a7b0aSTao Ma
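/*
 * Editorial sketch, not part of the original source: the 48-bit i_blocks
 * decode performed by ext4_inode_blocks() above, with made-up raw field
 * values. When both the huge_file feature and the per-inode HUGE_FILE
 * flag are set, the 48-bit count is in filesystem blocks and must be
 * scaled to 512-byte sectors.
 */
#include <stdio.h>

int main(void)
{
	unsigned short i_blocks_high = 0x0001;	/* example raw on-disk fields */
	unsigned int i_blocks_lo = 0x00000010;
	unsigned int blkbits = 12;		/* 4096-byte filesystem blocks */
	int huge_file_flag = 1;

	unsigned long long blocks =
		((unsigned long long)i_blocks_high << 32) | i_blocks_lo;

	if (huge_file_flag)		/* count is in fs blocks, not sectors */
		blocks <<= blkbits - 9;
	printf("i_blocks = %llu (512-byte units)\n", blocks);
	return 0;
}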
39931d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 3994ac27a0ecSDave Kleikamp { 3995617ba13bSMingming Cao struct ext4_iloc iloc; 3996617ba13bSMingming Cao struct ext4_inode *raw_inode; 39971d1fe1eeSDavid Howells struct ext4_inode_info *ei; 39981d1fe1eeSDavid Howells struct inode *inode; 3999b436b9beSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 40001d1fe1eeSDavid Howells long ret; 4001ac27a0ecSDave Kleikamp int block; 400208cefc7aSEric W. Biederman uid_t i_uid; 400308cefc7aSEric W. Biederman gid_t i_gid; 4004ac27a0ecSDave Kleikamp 40051d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 40061d1fe1eeSDavid Howells if (!inode) 40071d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 40081d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 40091d1fe1eeSDavid Howells return inode; 40101d1fe1eeSDavid Howells 40111d1fe1eeSDavid Howells ei = EXT4_I(inode); 40127dc57615SPeter Huewe iloc.bh = NULL; 4013ac27a0ecSDave Kleikamp 40141d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 40151d1fe1eeSDavid Howells if (ret < 0) 4016ac27a0ecSDave Kleikamp goto bad_inode; 4017617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 4018814525f4SDarrick J. Wong 4019814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4020814525f4SDarrick J. Wong ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4021814525f4SDarrick J. Wong if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4022814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)) { 4023814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)", 4024814525f4SDarrick J. Wong EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize, 4025814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)); 4026814525f4SDarrick J. Wong ret = -EIO; 4027814525f4SDarrick J. Wong goto bad_inode; 4028814525f4SDarrick J. Wong } 4029814525f4SDarrick J. Wong } else 4030814525f4SDarrick J. Wong ei->i_extra_isize = 0; 4031814525f4SDarrick J. Wong 4032814525f4SDarrick J. Wong /* Precompute checksum seed for inode metadata */ 4033814525f4SDarrick J. Wong if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4034814525f4SDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { 4035814525f4SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4036814525f4SDarrick J. Wong __u32 csum; 4037814525f4SDarrick J. Wong __le32 inum = cpu_to_le32(inode->i_ino); 4038814525f4SDarrick J. Wong __le32 gen = raw_inode->i_generation; 4039814525f4SDarrick J. Wong csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 4040814525f4SDarrick J. Wong sizeof(inum)); 4041814525f4SDarrick J. Wong ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 4042814525f4SDarrick J. Wong sizeof(gen)); 4043814525f4SDarrick J. Wong } 4044814525f4SDarrick J. Wong 4045814525f4SDarrick J. Wong if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 4046814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "checksum invalid"); 4047814525f4SDarrick J. Wong ret = -EIO; 4048814525f4SDarrick J. Wong goto bad_inode; 4049814525f4SDarrick J. Wong } 4050814525f4SDarrick J. Wong 4051ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 405208cefc7aSEric W. Biederman i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 405308cefc7aSEric W. Biederman i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4054ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 405508cefc7aSEric W. Biederman i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 405608cefc7aSEric W. Biederman i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4057ac27a0ecSDave Kleikamp } 405808cefc7aSEric W. Biederman i_uid_write(inode, i_uid); 405908cefc7aSEric W. Biederman i_gid_write(inode, i_gid); 4060bfe86848SMiklos Szeredi set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 4061ac27a0ecSDave Kleikamp 4062353eb83cSTheodore Ts'o ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 406367cf5b09STao Ma ei->i_inline_off = 0; 4064ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 4065ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4066ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 4067ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes 4068ac27a0ecSDave Kleikamp * the test is the same one that e2fsck uses 4069ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 4070ac27a0ecSDave Kleikamp */ 4071ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 4072393d1d1dSDr. Tilmann Bubeck if ((inode->i_mode == 0 || 4073393d1d1dSDr. Tilmann Bubeck !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && 4074393d1d1dSDr. Tilmann Bubeck ino != EXT4_BOOT_LOADER_INO) { 4075ac27a0ecSDave Kleikamp /* this inode is deleted */ 40761d1fe1eeSDavid Howells ret = -ESTALE; 4077ac27a0ecSDave Kleikamp goto bad_inode; 4078ac27a0ecSDave Kleikamp } 4079ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 4080ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 4081ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 4082393d1d1dSDr. Tilmann Bubeck * the process of deleting those. 4083393d1d1dSDr.
Tilmann Bubeck * OR it is the EXT4_BOOT_LOADER_INO which is 4084393d1d1dSDr. Tilmann Bubeck * not initialized on a new filesystem. */ 4085ac27a0ecSDave Kleikamp } 4086ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 40870fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 40887973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4089a9e81742STheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 4090a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 4091a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4092a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 4093ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 4094a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 4095a9e7f447SDmitry Monakhov ei->i_reserved_quota = 0; 4096a9e7f447SDmitry Monakhov #endif 4097ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4098ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 4099a4912123STheodore Ts'o ei->i_last_alloc_group = ~0; 4100ac27a0ecSDave Kleikamp /* 4101ac27a0ecSDave Kleikamp * NOTE! The in-memory inode i_data array is in little-endian order 4102ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 4103ac27a0ecSDave Kleikamp */ 4104617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 4105ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 4106ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 4107ac27a0ecSDave Kleikamp 4108b436b9beSJan Kara /* 4109b436b9beSJan Kara * Set transaction id's of transactions that have to be committed 4110b436b9beSJan Kara * to finish f[data]sync. We set them to currently running transaction 4111b436b9beSJan Kara * as we cannot be sure that the inode or some of its metadata isn't 4112b436b9beSJan Kara * part of the transaction - the inode could have been reclaimed and 4113b436b9beSJan Kara * now it is reread from disk. 4114b436b9beSJan Kara */ 4115b436b9beSJan Kara if (journal) { 4116b436b9beSJan Kara transaction_t *transaction; 4117b436b9beSJan Kara tid_t tid; 4118b436b9beSJan Kara 4119a931da6aSTheodore Ts'o read_lock(&journal->j_state_lock); 4120b436b9beSJan Kara if (journal->j_running_transaction) 4121b436b9beSJan Kara transaction = journal->j_running_transaction; 4122b436b9beSJan Kara else 4123b436b9beSJan Kara transaction = journal->j_committing_transaction; 4124b436b9beSJan Kara if (transaction) 4125b436b9beSJan Kara tid = transaction->t_tid; 4126b436b9beSJan Kara else 4127b436b9beSJan Kara tid = journal->j_commit_sequence; 4128a931da6aSTheodore Ts'o read_unlock(&journal->j_state_lock); 4129b436b9beSJan Kara ei->i_sync_tid = tid; 4130b436b9beSJan Kara ei->i_datasync_tid = tid; 4131b436b9beSJan Kara } 4132b436b9beSJan Kara 41330040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4134ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 4135ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. */ 4136617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 4137617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 4138ac27a0ecSDave Kleikamp } else { 4139152a7b0aSTao Ma ext4_iget_extra_inode(inode, raw_inode, ei); 4140ac27a0ecSDave Kleikamp } 4141814525f4SDarrick J. 
Wong } 4142ac27a0ecSDave Kleikamp 4143ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4144ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4145ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4146ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4147ef7f3835SKalpak Shah 414825ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 414925ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 415025ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 415125ec56b5SJean Noel Cordenner inode->i_version |= 415225ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 415325ec56b5SJean Noel Cordenner } 415425ec56b5SJean Noel Cordenner 4155c4b5a614STheodore Ts'o ret = 0; 4156485c26ecSTheodore Ts'o if (ei->i_file_acl && 41571032988cSTheodore Ts'o !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 415824676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 415924676da4STheodore Ts'o ei->i_file_acl); 4160485c26ecSTheodore Ts'o ret = -EIO; 4161485c26ecSTheodore Ts'o goto bad_inode; 4162f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 4163f19d5870STao Ma if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 4164f19d5870STao Ma if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4165c4b5a614STheodore Ts'o (S_ISLNK(inode->i_mode) && 4166f19d5870STao Ma !ext4_inode_is_fast_symlink(inode)))) 41677a262f7cSAneesh Kumar K.V /* Validate extent which is part of inode */ 41687a262f7cSAneesh Kumar K.V ret = ext4_ext_check_inode(inode); 4169fe2c8191SThiemo Nagel } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4170fe2c8191SThiemo Nagel (S_ISLNK(inode->i_mode) && 4171fe2c8191SThiemo Nagel !ext4_inode_is_fast_symlink(inode))) { 4172fe2c8191SThiemo Nagel /* Validate block references which are part of inode */ 41731f7d1e77STheodore Ts'o ret = ext4_ind_check_inode(inode); 4174fe2c8191SThiemo Nagel } 4175f19d5870STao Ma } 4176567f3e9aSTheodore Ts'o if (ret) 41777a262f7cSAneesh Kumar K.V goto bad_inode; 41787a262f7cSAneesh Kumar K.V 4179ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 4180617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 4181617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 4182617ba13bSMingming Cao ext4_set_aops(inode); 4183ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 4184617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 4185617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 4186ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 4187e83c1397SDuane Griffin if (ext4_inode_is_fast_symlink(inode)) { 4188617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 4189e83c1397SDuane Griffin nd_terminate_link(ei->i_data, inode->i_size, 4190e83c1397SDuane Griffin sizeof(ei->i_data) - 1); 4191e83c1397SDuane Griffin } else { 4192617ba13bSMingming Cao inode->i_op = &ext4_symlink_inode_operations; 4193617ba13bSMingming Cao ext4_set_aops(inode); 4194ac27a0ecSDave Kleikamp } 4195563bdd61STheodore Ts'o } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4196563bdd61STheodore Ts'o S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4197617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 4198ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 4199ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4200ac27a0ecSDave Kleikamp 
old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4201ac27a0ecSDave Kleikamp else 4202ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4203ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4204393d1d1dSDr. Tilmann Bubeck } else if (ino == EXT4_BOOT_LOADER_INO) { 4205393d1d1dSDr. Tilmann Bubeck make_bad_inode(inode); 4206563bdd61STheodore Ts'o } else { 4207563bdd61STheodore Ts'o ret = -EIO; 420824676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 4209563bdd61STheodore Ts'o goto bad_inode; 4210ac27a0ecSDave Kleikamp } 4211ac27a0ecSDave Kleikamp brelse(iloc.bh); 4212617ba13bSMingming Cao ext4_set_inode_flags(inode); 42131d1fe1eeSDavid Howells unlock_new_inode(inode); 42141d1fe1eeSDavid Howells return inode; 4215ac27a0ecSDave Kleikamp 4216ac27a0ecSDave Kleikamp bad_inode: 4217567f3e9aSTheodore Ts'o brelse(iloc.bh); 42181d1fe1eeSDavid Howells iget_failed(inode); 42191d1fe1eeSDavid Howells return ERR_PTR(ret); 4220ac27a0ecSDave Kleikamp } 4221ac27a0ecSDave Kleikamp 42220fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 42230fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 42240fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 42250fc1b451SAneesh Kumar K.V { 42260fc1b451SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 42270fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks; 42280fc1b451SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 42290fc1b451SAneesh Kumar K.V 42300fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) { 42310fc1b451SAneesh Kumar K.V /* 42324907cb7bSAnatol Pomozov * i_blocks can be represented in a 32 bit variable 42330fc1b451SAneesh Kumar K.V * as a multiple of 512 bytes 42340fc1b451SAneesh Kumar K.V */ 42358180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 42360fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0; 423784a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4238f287a1a5STheodore Ts'o return 0; 4239f287a1a5STheodore Ts'o } 4240f287a1a5STheodore Ts'o if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) 4241f287a1a5STheodore Ts'o return -EFBIG; 4242f287a1a5STheodore Ts'o 4243f287a1a5STheodore Ts'o if (i_blocks <= 0xffffffffffffULL) { 42440fc1b451SAneesh Kumar K.V /* 42450fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable 42460fc1b451SAneesh Kumar K.V * as a multiple of 512 bytes 42470fc1b451SAneesh Kumar K.V */ 42488180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 42490fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 425084a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 42510fc1b451SAneesh Kumar K.V } else { 425284a8dce2SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 42538180a562SAneesh Kumar K.V /* i_block is stored in file system block size */ 42548180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9); 42558180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 42568180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 42570fc1b451SAneesh Kumar K.V } 4258f287a1a5STheodore Ts'o return 0; 42590fc1b451SAneesh Kumar K.V } 42600fc1b451SAneesh Kumar K.V
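/*
 * Editorial sketch, not part of the original source: the 16-bit low/high
 * uid split that ext4_iget() above reassembles and ext4_do_update_inode()
 * below stores via low_16_bits()/high_16_bits(). Round-trips one example
 * uid; the value 100000 is arbitrary.
 */
#include <stdio.h>

int main(void)
{
	unsigned int uid = 100000;		/* needs more than 16 bits */
	unsigned int low = uid & 0xffff;	/* low_16_bits() */
	unsigned int high = uid >> 16;		/* high_16_bits() */
	unsigned int decoded = (high << 16) | low;

	printf("uid %u -> low %u, high %u -> decoded %u\n",
	       uid, low, high, decoded);
	return 0;
}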
4261ac27a0ecSDave Kleikamp /* 4262ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the 4263ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the 4264ac27a0ecSDave Kleikamp * buffer_head in the inode location struct. 4265ac27a0ecSDave Kleikamp * 4266ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh. 4267ac27a0ecSDave Kleikamp */ 4268617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle, 4269ac27a0ecSDave Kleikamp struct inode *inode, 4270830156c7SFrank Mayhar struct ext4_iloc *iloc) 4271ac27a0ecSDave Kleikamp { 4272617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4273617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 4274ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh; 4275ac27a0ecSDave Kleikamp int err = 0, rc, block; 4276b71fc079SJan Kara int need_datasync = 0; 427708cefc7aSEric W. Biederman uid_t i_uid; 427808cefc7aSEric W. Biederman gid_t i_gid; 4279ac27a0ecSDave Kleikamp 4280ac27a0ecSDave Kleikamp /* For fields not tracked in the in-memory inode, 4281ac27a0ecSDave Kleikamp * initialise them to zero for new inodes. */ 428219f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 4283617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4284ac27a0ecSDave Kleikamp 4285ff9ddf7eSJan Kara ext4_get_inode_flags(ei); 4286ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode); 428708cefc7aSEric W. Biederman i_uid = i_uid_read(inode); 428808cefc7aSEric W. Biederman i_gid = i_gid_read(inode); 4289ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 429008cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 429108cefc7aSEric W. Biederman raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 4292ac27a0ecSDave Kleikamp /* 4293ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels. Otherwise, old inodes get 4294ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact 4295ac27a0ecSDave Kleikamp */ 4296ac27a0ecSDave Kleikamp if (!ei->i_dtime) { 4297ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 429808cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_uid)); 4299ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 430008cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_gid)); 4301ac27a0ecSDave Kleikamp } else { 4302ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4303ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4304ac27a0ecSDave Kleikamp } 4305ac27a0ecSDave Kleikamp } else { 430608cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 430708cefc7aSEric W.
Biederman raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 4308ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4309ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4310ac27a0ecSDave Kleikamp } 4311ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4312ef7f3835SKalpak Shah 4313ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4314ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4315ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4316ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4317ef7f3835SKalpak Shah 43180fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 43190fc1b451SAneesh Kumar K.V goto out_brelse; 4320ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4321353eb83cSTheodore Ts'o raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 43229b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 43239b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 4324a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 4325a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 43267973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4327b71fc079SJan Kara if (ei->i_disksize != ext4_isize(raw_inode)) { 4328a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 4329b71fc079SJan Kara need_datasync = 1; 4330b71fc079SJan Kara } 4331ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 4332ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 4333617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4334617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4335617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 4336617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4337ac27a0ecSDave Kleikamp /* If this is the first large file 4338ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
4339ac27a0ecSDave Kleikamp */ 4340617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, 4341617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 4342ac27a0ecSDave Kleikamp if (err) 4343ac27a0ecSDave Kleikamp goto out_brelse; 4344617ba13bSMingming Cao ext4_update_dynamic_rev(sb); 4345617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb, 4346617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 43470390131bSFrank Mayhar ext4_handle_sync(handle); 4348b50924c2SArtem Bityutskiy err = ext4_handle_dirty_super(handle, sb); 4349ac27a0ecSDave Kleikamp } 4350ac27a0ecSDave Kleikamp } 4351ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4352ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 4353ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) { 4354ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 4355ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev)); 4356ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0; 4357ac27a0ecSDave Kleikamp } else { 4358ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0; 4359ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 4360ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev)); 4361ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0; 4362ac27a0ecSDave Kleikamp } 4363f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 4364de9a55b8STheodore Ts'o for (block = 0; block < EXT4_N_BLOCKS; block++) 4365ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block]; 4366f19d5870STao Ma } 4367ac27a0ecSDave Kleikamp 436825ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 436925ec56b5SJean Noel Cordenner if (ei->i_extra_isize) { 437025ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 437125ec56b5SJean Noel Cordenner raw_inode->i_version_hi = 437225ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32); 4373ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 437425ec56b5SJean Noel Cordenner } 437525ec56b5SJean Noel Cordenner 4376814525f4SDarrick J. Wong ext4_inode_csum_set(inode, raw_inode, ei); 4377814525f4SDarrick J. Wong 43780390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 437973b50c1cSCurt Wohlgemuth rc = ext4_handle_dirty_metadata(handle, NULL, bh); 4380ac27a0ecSDave Kleikamp if (!err) 4381ac27a0ecSDave Kleikamp err = rc; 438219f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_NEW); 4383ac27a0ecSDave Kleikamp 4384b71fc079SJan Kara ext4_update_inode_fsync_trans(handle, inode, need_datasync); 4385ac27a0ecSDave Kleikamp out_brelse: 4386ac27a0ecSDave Kleikamp brelse(bh); 4387617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4388ac27a0ecSDave Kleikamp return err; 4389ac27a0ecSDave Kleikamp } 4390ac27a0ecSDave Kleikamp 4391ac27a0ecSDave Kleikamp /* 4392617ba13bSMingming Cao * ext4_write_inode() 4393ac27a0ecSDave Kleikamp * 4394ac27a0ecSDave Kleikamp * We are called from a few places: 4395ac27a0ecSDave Kleikamp * 4396ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files. 4397ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running 43984907cb7bSAnatol Pomozov * transaction to commit. 4399ac27a0ecSDave Kleikamp * 4400ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such. 4401ac27a0ecSDave Kleikamp * We wait on commit, if told to. 4402ac27a0ecSDave Kleikamp * 4403ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true) 4404ac27a0ecSDave Kleikamp * Here we simply return.
We can't afford to block kswapd on the 4405ac27a0ecSDave Kleikamp * journal commit. 4406ac27a0ecSDave Kleikamp * 4407ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything, 4408ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in 4409617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4410ac27a0ecSDave Kleikamp * knfsd. 4411ac27a0ecSDave Kleikamp * 4412ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the 4413ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in 4414ac27a0ecSDave Kleikamp * which we are interested. 4415ac27a0ecSDave Kleikamp * 4416ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code: 4417ac27a0ecSDave Kleikamp * 4418ac27a0ecSDave Kleikamp * mark_inode_dirty(inode) 4419ac27a0ecSDave Kleikamp * stuff(); 4420ac27a0ecSDave Kleikamp * inode->i_size = expr; 4421ac27a0ecSDave Kleikamp * 4422ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while 4423ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode 4424ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list. 4425ac27a0ecSDave Kleikamp */ 4426a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 4427ac27a0ecSDave Kleikamp { 442891ac6f43SFrank Mayhar int err; 442991ac6f43SFrank Mayhar 4430ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC) 4431ac27a0ecSDave Kleikamp return 0; 4432ac27a0ecSDave Kleikamp 443391ac6f43SFrank Mayhar if (EXT4_SB(inode->i_sb)->s_journal) { 4434617ba13bSMingming Cao if (ext4_journal_current_handle()) { 4435b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4436ac27a0ecSDave Kleikamp dump_stack(); 4437ac27a0ecSDave Kleikamp return -EIO; 4438ac27a0ecSDave Kleikamp } 4439ac27a0ecSDave Kleikamp 4440a9185b41SChristoph Hellwig if (wbc->sync_mode != WB_SYNC_ALL) 4441ac27a0ecSDave Kleikamp return 0; 4442ac27a0ecSDave Kleikamp 444391ac6f43SFrank Mayhar err = ext4_force_commit(inode->i_sb); 444491ac6f43SFrank Mayhar } else { 444591ac6f43SFrank Mayhar struct ext4_iloc iloc; 444691ac6f43SFrank Mayhar 44478b472d73SCurt Wohlgemuth err = __ext4_get_inode_loc(inode, &iloc, 0); 444891ac6f43SFrank Mayhar if (err) 444991ac6f43SFrank Mayhar return err; 4450a9185b41SChristoph Hellwig if (wbc->sync_mode == WB_SYNC_ALL) 4451830156c7SFrank Mayhar sync_dirty_buffer(iloc.bh); 4452830156c7SFrank Mayhar if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 4453c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 4454c398eda0STheodore Ts'o "IO error syncing inode"); 4455830156c7SFrank Mayhar err = -EIO; 4456830156c7SFrank Mayhar } 4457fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 445891ac6f43SFrank Mayhar } 445991ac6f43SFrank Mayhar return err; 4460ac27a0ecSDave Kleikamp } 4461ac27a0ecSDave Kleikamp 4462ac27a0ecSDave Kleikamp /* 446353e87268SJan Kara * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate 446453e87268SJan Kara * buffers that are attached to a page straddling i_size and are undergoing 446553e87268SJan Kara * commit. In that case we have to wait for commit to finish and try again.
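* The loop below locks the page covering i_size, retries the
* invalidate, and on -EBUSY waits for the currently committing
* transaction, if there is one, before trying again.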
446653e87268SJan Kara */ 446753e87268SJan Kara static void ext4_wait_for_tail_page_commit(struct inode *inode) 446853e87268SJan Kara { 446953e87268SJan Kara struct page *page; 447053e87268SJan Kara unsigned offset; 447153e87268SJan Kara journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 447253e87268SJan Kara tid_t commit_tid = 0; 447353e87268SJan Kara int ret; 447453e87268SJan Kara 447553e87268SJan Kara offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 447653e87268SJan Kara /* 447753e87268SJan Kara * All buffers in the last page remain valid? Then there's nothing to 447853e87268SJan Kara * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == 447953e87268SJan Kara * blocksize case 448053e87268SJan Kara */ 448153e87268SJan Kara if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits)) 448253e87268SJan Kara return; 448353e87268SJan Kara while (1) { 448453e87268SJan Kara page = find_lock_page(inode->i_mapping, 448553e87268SJan Kara inode->i_size >> PAGE_CACHE_SHIFT); 448653e87268SJan Kara if (!page) 448753e87268SJan Kara return; 4488ca99fdd2SLukas Czerner ret = __ext4_journalled_invalidatepage(page, offset, 4489ca99fdd2SLukas Czerner PAGE_CACHE_SIZE - offset); 449053e87268SJan Kara unlock_page(page); 449153e87268SJan Kara page_cache_release(page); 449253e87268SJan Kara if (ret != -EBUSY) 449353e87268SJan Kara return; 449453e87268SJan Kara commit_tid = 0; 449553e87268SJan Kara read_lock(&journal->j_state_lock); 449653e87268SJan Kara if (journal->j_committing_transaction) 449753e87268SJan Kara commit_tid = journal->j_committing_transaction->t_tid; 449853e87268SJan Kara read_unlock(&journal->j_state_lock); 449953e87268SJan Kara if (commit_tid) 450053e87268SJan Kara jbd2_log_wait_commit(journal, commit_tid); 450153e87268SJan Kara } 450253e87268SJan Kara } 450353e87268SJan Kara 450453e87268SJan Kara /* 4505617ba13bSMingming Cao * ext4_setattr() 4506ac27a0ecSDave Kleikamp * 4507ac27a0ecSDave Kleikamp * Called from notify_change. 4508ac27a0ecSDave Kleikamp * 4509ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as 4510ac27a0ecSDave Kleikamp * possible. In particular, we want to make sure that when the VFS 4511ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify 4512ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of 4513ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any 4514ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on 4515ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will 4516ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will 4517ac27a0ecSDave Kleikamp * leave these blocks visible to the user.) 4518ac27a0ecSDave Kleikamp * 4519678aaf48SJan Kara * Another thing we have to ensure is that if we are in ordered mode 4520678aaf48SJan Kara * and inode is still attached to the committing transaction, we must 4521678aaf48SJan Kara * start writeout of all the dirty pages which are being truncated. 4522678aaf48SJan Kara * This way we are sure that all the data written in the previous 4523678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under 4524678aaf48SJan Kara * writeback). 4525678aaf48SJan Kara * 4526678aaf48SJan Kara * Called with inode->i_mutex down.
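*
* Roughly, a size-shrinking setattr below proceeds as: transfer quota
* if uid/gid change, put the inode on the orphan list, update
* i_disksize and mark the inode dirty, then truncate the pagecache
* and the blocks themselves.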
4527ac27a0ecSDave Kleikamp */ 4528617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr) 4529ac27a0ecSDave Kleikamp { 4530ac27a0ecSDave Kleikamp struct inode *inode = dentry->d_inode; 4531ac27a0ecSDave Kleikamp int error, rc = 0; 45323d287de3SDmitry Monakhov int orphan = 0; 4533ac27a0ecSDave Kleikamp const unsigned int ia_valid = attr->ia_valid; 4534ac27a0ecSDave Kleikamp 4535ac27a0ecSDave Kleikamp error = inode_change_ok(inode, attr); 4536ac27a0ecSDave Kleikamp if (error) 4537ac27a0ecSDave Kleikamp return error; 4538ac27a0ecSDave Kleikamp 453912755627SDmitry Monakhov if (is_quota_modification(inode, attr)) 4540871a2931SChristoph Hellwig dquot_initialize(inode); 454108cefc7aSEric W. Biederman if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 454208cefc7aSEric W. Biederman (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 4543ac27a0ecSDave Kleikamp handle_t *handle; 4544ac27a0ecSDave Kleikamp 4545ac27a0ecSDave Kleikamp /* (user+group)*(old+new) structure, inode write (sb, 4546ac27a0ecSDave Kleikamp * inode block, ? - but truncate inode update has it) */ 45479924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 45489924a92aSTheodore Ts'o (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + 4549194074acSDmitry Monakhov EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); 4550ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4551ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4552ac27a0ecSDave Kleikamp goto err_out; 4553ac27a0ecSDave Kleikamp } 4554b43fa828SChristoph Hellwig error = dquot_transfer(inode, attr); 4555ac27a0ecSDave Kleikamp if (error) { 4556617ba13bSMingming Cao ext4_journal_stop(handle); 4557ac27a0ecSDave Kleikamp return error; 4558ac27a0ecSDave Kleikamp } 4559ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4560ac27a0ecSDave Kleikamp * one transaction */ 4561ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4562ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4563ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4564ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4565617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4566617ba13bSMingming Cao ext4_journal_stop(handle); 4567ac27a0ecSDave Kleikamp } 4568ac27a0ecSDave Kleikamp 4569e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4570562c72aaSChristoph Hellwig 457112e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4572e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4573e2b46574SEric Sandeen 45740c095c7fSTheodore Ts'o if (attr->ia_size > sbi->s_bitmap_maxbytes) 45750c095c7fSTheodore Ts'o return -EFBIG; 4576e2b46574SEric Sandeen } 4577e2b46574SEric Sandeen } 4578e2b46574SEric Sandeen 4579ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4580c8d46e41SJiaying Zhang attr->ia_valid & ATTR_SIZE && 4581072bd7eaSTheodore Ts'o (attr->ia_size < inode->i_size)) { 4582ac27a0ecSDave Kleikamp handle_t *handle; 4583ac27a0ecSDave Kleikamp 45849924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); 4585ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4586ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4587ac27a0ecSDave Kleikamp goto err_out; 4588ac27a0ecSDave Kleikamp } 45893d287de3SDmitry Monakhov if (ext4_handle_valid(handle)) { 4590617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 45913d287de3SDmitry Monakhov orphan = 1; 45923d287de3SDmitry Monakhov } 4593617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 
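/*
 * Updating i_disksize in the same transaction as the orphan-list
 * insertion above means that, should we crash after this commit,
 * recovery will truncate away any blocks beyond the new size.
 */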
4594617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 4595ac27a0ecSDave Kleikamp if (!error) 4596ac27a0ecSDave Kleikamp error = rc; 4597617ba13bSMingming Cao ext4_journal_stop(handle); 4598678aaf48SJan Kara 4599678aaf48SJan Kara if (ext4_should_order_data(inode)) { 4600678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode, 4601678aaf48SJan Kara attr->ia_size); 4602678aaf48SJan Kara if (error) { 4603678aaf48SJan Kara /* Do as much error cleanup as possible */ 46049924a92aSTheodore Ts'o handle = ext4_journal_start(inode, 46059924a92aSTheodore Ts'o EXT4_HT_INODE, 3); 4606678aaf48SJan Kara if (IS_ERR(handle)) { 4607678aaf48SJan Kara ext4_orphan_del(NULL, inode); 4608678aaf48SJan Kara goto err_out; 4609678aaf48SJan Kara } 4610678aaf48SJan Kara ext4_orphan_del(handle, inode); 46113d287de3SDmitry Monakhov orphan = 0; 4612678aaf48SJan Kara ext4_journal_stop(handle); 4613678aaf48SJan Kara goto err_out; 4614678aaf48SJan Kara } 4615678aaf48SJan Kara } 4616ac27a0ecSDave Kleikamp } 4617ac27a0ecSDave Kleikamp 4618072bd7eaSTheodore Ts'o if (attr->ia_valid & ATTR_SIZE) { 461953e87268SJan Kara if (attr->ia_size != inode->i_size) { 462053e87268SJan Kara loff_t oldsize = inode->i_size; 462153e87268SJan Kara 462253e87268SJan Kara i_size_write(inode, attr->ia_size); 462353e87268SJan Kara /* 462453e87268SJan Kara * Blocks are going to be removed from the inode. Wait 462553e87268SJan Kara * for dio in flight. Temporarily disable 462653e87268SJan Kara * dioread_nolock to prevent livelock. 462753e87268SJan Kara */ 46281b65007eSDmitry Monakhov if (orphan) { 462953e87268SJan Kara if (!ext4_should_journal_data(inode)) { 46301b65007eSDmitry Monakhov ext4_inode_block_unlocked_dio(inode); 46311c9114f9SDmitry Monakhov inode_dio_wait(inode); 46321b65007eSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode); 463353e87268SJan Kara } else 463453e87268SJan Kara ext4_wait_for_tail_page_commit(inode); 46351b65007eSDmitry Monakhov } 463653e87268SJan Kara /* 463753e87268SJan Kara * Truncate pagecache after we've waited for commit 463853e87268SJan Kara * in data=journal mode to make pages freeable. 463953e87268SJan Kara */ 464053e87268SJan Kara truncate_pagecache(inode, oldsize, inode->i_size); 46411c9114f9SDmitry Monakhov } 4642072bd7eaSTheodore Ts'o ext4_truncate(inode); 4643072bd7eaSTheodore Ts'o } 4644ac27a0ecSDave Kleikamp 46451025774cSChristoph Hellwig if (!rc) { 46461025774cSChristoph Hellwig setattr_copy(inode, attr); 46471025774cSChristoph Hellwig mark_inode_dirty(inode); 46481025774cSChristoph Hellwig } 46491025774cSChristoph Hellwig 46501025774cSChristoph Hellwig /* 46511025774cSChristoph Hellwig * If the call to ext4_truncate failed to get a transaction handle at 46521025774cSChristoph Hellwig * all, we need to clean up the in-core orphan list manually. 
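* (But only while the inode still has links: an unlinked inode must
* stay on the orphan list until its final iput() deletes it.)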
46531025774cSChristoph Hellwig */ 46543d287de3SDmitry Monakhov if (orphan && inode->i_nlink) 4655617ba13bSMingming Cao ext4_orphan_del(NULL, inode); 4656ac27a0ecSDave Kleikamp 4657ac27a0ecSDave Kleikamp if (!rc && (ia_valid & ATTR_MODE)) 4658617ba13bSMingming Cao rc = ext4_acl_chmod(inode); 4659ac27a0ecSDave Kleikamp 4660ac27a0ecSDave Kleikamp err_out: 4661617ba13bSMingming Cao ext4_std_error(inode->i_sb, error); 4662ac27a0ecSDave Kleikamp if (!error) 4663ac27a0ecSDave Kleikamp error = rc; 4664ac27a0ecSDave Kleikamp return error; 4665ac27a0ecSDave Kleikamp } 4666ac27a0ecSDave Kleikamp 46673e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 46683e3398a0SMingming Cao struct kstat *stat) 46693e3398a0SMingming Cao { 46703e3398a0SMingming Cao struct inode *inode; 46718af8eeccSJan Kara unsigned long long delalloc_blocks; 46723e3398a0SMingming Cao 46733e3398a0SMingming Cao inode = dentry->d_inode; 46743e3398a0SMingming Cao generic_fillattr(inode, stat); 46753e3398a0SMingming Cao 46763e3398a0SMingming Cao /* 46773e3398a0SMingming Cao * We can't update i_blocks if the block allocation is delayed; 46783e3398a0SMingming Cao * otherwise, in the case of a system crash before the real block 46793e3398a0SMingming Cao * allocation is done, we will have i_blocks inconsistent with 46803e3398a0SMingming Cao * on-disk file blocks. 46813e3398a0SMingming Cao * We always keep i_blocks updated together with real 46823e3398a0SMingming Cao * allocation. But to avoid confusing userspace, stat 46833e3398a0SMingming Cao * will return the blocks that include the delayed allocation 46843e3398a0SMingming Cao * blocks for this file. 46853e3398a0SMingming Cao */ 468696607551STao Ma delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), 468796607551STao Ma EXT4_I(inode)->i_reserved_data_blocks); 46883e3398a0SMingming Cao 46898af8eeccSJan Kara stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9); 46903e3398a0SMingming Cao return 0; 46913e3398a0SMingming Cao } 4692ac27a0ecSDave Kleikamp 4693fffb2739SJan Kara static int ext4_index_trans_blocks(struct inode *inode, int lblocks, 4694fffb2739SJan Kara int pextents) 4695a02908f1SMingming Cao { 469612e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 4697fffb2739SJan Kara return ext4_ind_trans_blocks(inode, lblocks); 4698fffb2739SJan Kara return ext4_ext_index_trans_blocks(inode, pextents); 4699a02908f1SMingming Cao } 4700ac51d837STheodore Ts'o 4701a02908f1SMingming Cao /* 4702a02908f1SMingming Cao * Account for index blocks, block group bitmaps and block group 4703a02908f1SMingming Cao * descriptor blocks if we modify data blocks and index blocks; 4704a02908f1SMingming Cao * worst case, the index blocks spread over different block groups 4705a02908f1SMingming Cao * 4706a02908f1SMingming Cao * If data blocks are discontiguous, they may spread over 47074907cb7bSAnatol Pomozov * different block groups too. If they are contiguous, with flexbg, 4708a02908f1SMingming Cao * they could still cross a block group boundary.
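*
* For example (illustrative numbers only, assuming the filesystem has
* at least 6 block groups): mapping 4 discontiguous logical blocks as
* 4 extents with idxblocks = 2 gives groups = 6, so we add 6 bitmap
* blocks and min(6, s_gdb_count) descriptor blocks on top of the 2
* index blocks, plus EXT4_META_TRANS_BLOCKS() for the rest.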
4709a02908f1SMingming Cao * 4710a02908f1SMingming Cao * Also account for superblock, inode, quota and xattr blocks 4711a02908f1SMingming Cao */ 4712fffb2739SJan Kara static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, 4713fffb2739SJan Kara int pextents) 4714a02908f1SMingming Cao { 47158df9675fSTheodore Ts'o ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); 47168df9675fSTheodore Ts'o int gdpblocks; 4717a02908f1SMingming Cao int idxblocks; 4718a02908f1SMingming Cao int ret = 0; 4719a02908f1SMingming Cao 4720a02908f1SMingming Cao /* 4721fffb2739SJan Kara * How many index blocks do we need to touch to map @lblocks logical blocks 4722fffb2739SJan Kara * to @pextents physical extents? 4723a02908f1SMingming Cao */ 4724fffb2739SJan Kara idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents); 4725a02908f1SMingming Cao 4726a02908f1SMingming Cao ret = idxblocks; 4727a02908f1SMingming Cao 4728a02908f1SMingming Cao /* 4729a02908f1SMingming Cao * Now let's see how many group bitmaps and group descriptors need 4730a02908f1SMingming Cao * to be accounted 4731a02908f1SMingming Cao */ 4732fffb2739SJan Kara groups = idxblocks + pextents; 4733a02908f1SMingming Cao gdpblocks = groups; 47348df9675fSTheodore Ts'o if (groups > ngroups) 47358df9675fSTheodore Ts'o groups = ngroups; 4736a02908f1SMingming Cao if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) 4737a02908f1SMingming Cao gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; 4738a02908f1SMingming Cao 4739a02908f1SMingming Cao /* bitmaps and block group descriptor blocks */ 4740a02908f1SMingming Cao ret += groups + gdpblocks; 4741a02908f1SMingming Cao 4742a02908f1SMingming Cao /* Blocks for super block, inode, quota and xattr blocks */ 4743a02908f1SMingming Cao ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); 4744ac27a0ecSDave Kleikamp 4745ac27a0ecSDave Kleikamp return ret; 4746ac27a0ecSDave Kleikamp } 4747ac27a0ecSDave Kleikamp 4748ac27a0ecSDave Kleikamp /* 474925985edcSLucas De Marchi * Calculate the total number of credits to reserve to fit 4750f3bd1f3fSMingming Cao * the modification of a single page into a single transaction, 4751f3bd1f3fSMingming Cao * which may include multiple chunks of block allocations. 4752a02908f1SMingming Cao * 4753525f4ed8SMingming Cao * This could be called via ext4_write_begin() 4754a02908f1SMingming Cao * 4755525f4ed8SMingming Cao * We need to consider the worst case, when 4756a02908f1SMingming Cao * one new block is allocated per extent. 4757a02908f1SMingming Cao */ 4758a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode) 4759a02908f1SMingming Cao { 4760a02908f1SMingming Cao int bpp = ext4_journal_blocks_per_page(inode); 4761a02908f1SMingming Cao int ret; 4762a02908f1SMingming Cao 4763fffb2739SJan Kara ret = ext4_meta_trans_blocks(inode, bpp, bpp); 4764a02908f1SMingming Cao 4765a02908f1SMingming Cao /* Account for data blocks for journalled mode */ 4766a02908f1SMingming Cao if (ext4_should_journal_data(inode)) 4767a02908f1SMingming Cao ret += bpp; 4768a02908f1SMingming Cao return ret; 4769a02908f1SMingming Cao } 4770f3bd1f3fSMingming Cao 4771f3bd1f3fSMingming Cao /* 4772f3bd1f3fSMingming Cao * Calculate the journal credits for a chunk of data modification. 4773f3bd1f3fSMingming Cao * 4774f3bd1f3fSMingming Cao * This is called from DIO, fallocate or whoever calls 477579e83036SEric Sandeen * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
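*
* The chunk is treated as a single physical extent, which is why the
* body below passes pextents == 1 to ext4_meta_trans_blocks().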
4776f3bd1f3fSMingming Cao * 4777f3bd1f3fSMingming Cao * journal buffers for data blocks are not included here, as DIO 4778f3bd1f3fSMingming Cao * and fallocate do not need to journal data buffers. 4779f3bd1f3fSMingming Cao */ 4780f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) 4781f3bd1f3fSMingming Cao { 4782f3bd1f3fSMingming Cao return ext4_meta_trans_blocks(inode, nrblocks, 1); 4783f3bd1f3fSMingming Cao } 4784f3bd1f3fSMingming Cao 4785a02908f1SMingming Cao /* 4786617ba13bSMingming Cao * The caller must have previously called ext4_reserve_inode_write(). 4787ac27a0ecSDave Kleikamp * Given this, we know that the caller already has write access to iloc->bh. 4788ac27a0ecSDave Kleikamp */ 4789617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle, 4790617ba13bSMingming Cao struct inode *inode, struct ext4_iloc *iloc) 4791ac27a0ecSDave Kleikamp { 4792ac27a0ecSDave Kleikamp int err = 0; 4793ac27a0ecSDave Kleikamp 4794c64db50eSTheodore Ts'o if (IS_I_VERSION(inode)) 479525ec56b5SJean Noel Cordenner inode_inc_iversion(inode); 479625ec56b5SJean Noel Cordenner 4797ac27a0ecSDave Kleikamp /* the do_update_inode consumes one bh->b_count */ 4798ac27a0ecSDave Kleikamp get_bh(iloc->bh); 4799ac27a0ecSDave Kleikamp 4800dab291afSMingming Cao /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ 4801830156c7SFrank Mayhar err = ext4_do_update_inode(handle, inode, iloc); 4802ac27a0ecSDave Kleikamp put_bh(iloc->bh); 4803ac27a0ecSDave Kleikamp return err; 4804ac27a0ecSDave Kleikamp } 4805ac27a0ecSDave Kleikamp 4806ac27a0ecSDave Kleikamp /* 4807ac27a0ecSDave Kleikamp * On success, we end up with an outstanding reference count against 4808ac27a0ecSDave Kleikamp * iloc->bh. This _must_ be cleaned up later. 4809ac27a0ecSDave Kleikamp */ 4810ac27a0ecSDave Kleikamp 4811ac27a0ecSDave Kleikamp int 4812617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 4813617ba13bSMingming Cao struct ext4_iloc *iloc) 4814ac27a0ecSDave Kleikamp { 48150390131bSFrank Mayhar int err; 48160390131bSFrank Mayhar 4817617ba13bSMingming Cao err = ext4_get_inode_loc(inode, iloc); 4818ac27a0ecSDave Kleikamp if (!err) { 4819ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc->bh, "get_write_access"); 4820617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, iloc->bh); 4821ac27a0ecSDave Kleikamp if (err) { 4822ac27a0ecSDave Kleikamp brelse(iloc->bh); 4823ac27a0ecSDave Kleikamp iloc->bh = NULL; 4824ac27a0ecSDave Kleikamp } 4825ac27a0ecSDave Kleikamp } 4826617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4827ac27a0ecSDave Kleikamp return err; 4828ac27a0ecSDave Kleikamp } 4829ac27a0ecSDave Kleikamp 4830ac27a0ecSDave Kleikamp /* 48316dd4ee7cSKalpak Shah * Expand an inode by new_extra_isize bytes. 48326dd4ee7cSKalpak Shah * Returns 0 on success or negative error number on failure.
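*
* The extra space sits between the original 128-byte inode
* (EXT4_GOOD_OLD_INODE_SIZE) and any in-inode extended attributes, so
* when EAs are present they may need to be moved out of the way; that
* work is delegated to ext4_expand_extra_isize_ea() below.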
48336dd4ee7cSKalpak Shah */ 48341d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode, 48351d03ec98SAneesh Kumar K.V unsigned int new_extra_isize, 48361d03ec98SAneesh Kumar K.V struct ext4_iloc iloc, 48371d03ec98SAneesh Kumar K.V handle_t *handle) 48386dd4ee7cSKalpak Shah { 48396dd4ee7cSKalpak Shah struct ext4_inode *raw_inode; 48406dd4ee7cSKalpak Shah struct ext4_xattr_ibody_header *header; 48416dd4ee7cSKalpak Shah 48426dd4ee7cSKalpak Shah if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 48436dd4ee7cSKalpak Shah return 0; 48446dd4ee7cSKalpak Shah 48456dd4ee7cSKalpak Shah raw_inode = ext4_raw_inode(&iloc); 48466dd4ee7cSKalpak Shah 48476dd4ee7cSKalpak Shah header = IHDR(inode, raw_inode); 48486dd4ee7cSKalpak Shah 48496dd4ee7cSKalpak Shah /* No extended attributes present */ 485019f5fb7aSTheodore Ts'o if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 48516dd4ee7cSKalpak Shah header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 48526dd4ee7cSKalpak Shah memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 48536dd4ee7cSKalpak Shah new_extra_isize); 48546dd4ee7cSKalpak Shah EXT4_I(inode)->i_extra_isize = new_extra_isize; 48556dd4ee7cSKalpak Shah return 0; 48566dd4ee7cSKalpak Shah } 48576dd4ee7cSKalpak Shah 48586dd4ee7cSKalpak Shah /* try to expand with EAs present */ 48596dd4ee7cSKalpak Shah return ext4_expand_extra_isize_ea(inode, new_extra_isize, 48606dd4ee7cSKalpak Shah raw_inode, handle); 48616dd4ee7cSKalpak Shah } 48626dd4ee7cSKalpak Shah 48636dd4ee7cSKalpak Shah /* 4864ac27a0ecSDave Kleikamp * What we do here is to mark the in-core inode as clean with respect to inode 4865ac27a0ecSDave Kleikamp * dirtiness (it may still be data-dirty). 4866ac27a0ecSDave Kleikamp * This means that the in-core inode may be reaped by prune_icache 4867ac27a0ecSDave Kleikamp * without having to perform any I/O. This is a very good thing, 4868ac27a0ecSDave Kleikamp * because *any* task may call prune_icache - even ones which 4869ac27a0ecSDave Kleikamp * have a transaction open against a different journal. 4870ac27a0ecSDave Kleikamp * 4871ac27a0ecSDave Kleikamp * Is this cheating? Not really. Sure, we haven't written the 4872ac27a0ecSDave Kleikamp * inode out, but prune_icache isn't a user-visible syncing function. 4873ac27a0ecSDave Kleikamp * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 4874ac27a0ecSDave Kleikamp * we start and wait on commits. 4875ac27a0ecSDave Kleikamp */ 4876617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 4877ac27a0ecSDave Kleikamp { 4878617ba13bSMingming Cao struct ext4_iloc iloc; 48796dd4ee7cSKalpak Shah struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 48806dd4ee7cSKalpak Shah static unsigned int mnt_count; 48816dd4ee7cSKalpak Shah int err, ret; 4882ac27a0ecSDave Kleikamp 4883ac27a0ecSDave Kleikamp might_sleep(); 48847ff9c073STheodore Ts'o trace_ext4_mark_inode_dirty(inode, _RET_IP_); 4885617ba13bSMingming Cao err = ext4_reserve_inode_write(handle, inode, &iloc); 48860390131bSFrank Mayhar if (ext4_handle_valid(handle) && 48870390131bSFrank Mayhar EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 488819f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { 48896dd4ee7cSKalpak Shah /* 48906dd4ee7cSKalpak Shah * We need extra buffer credits since we may write into EA block 48916dd4ee7cSKalpak Shah * with this same handle. If journal_extend fails, then it will 48926dd4ee7cSKalpak Shah * only result in a minor loss of functionality for that inode. 
48936dd4ee7cSKalpak Shah * If this is felt to be critical, then e2fsck should be run to 48946dd4ee7cSKalpak Shah * force a large enough s_min_extra_isize. 48956dd4ee7cSKalpak Shah */ 48966dd4ee7cSKalpak Shah if ((jbd2_journal_extend(handle, 48976dd4ee7cSKalpak Shah EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 48986dd4ee7cSKalpak Shah ret = ext4_expand_extra_isize(inode, 48996dd4ee7cSKalpak Shah sbi->s_want_extra_isize, 49006dd4ee7cSKalpak Shah iloc, handle); 49016dd4ee7cSKalpak Shah if (ret) { 490219f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, 490319f5fb7aSTheodore Ts'o EXT4_STATE_NO_EXPAND); 4904c1bddad9SAneesh Kumar K.V if (mnt_count != 4905c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count)) { 490612062dddSEric Sandeen ext4_warning(inode->i_sb, 49076dd4ee7cSKalpak Shah "Unable to expand inode %lu. Delete" 49086dd4ee7cSKalpak Shah " some EAs or run e2fsck.", 49096dd4ee7cSKalpak Shah inode->i_ino); 4910c1bddad9SAneesh Kumar K.V mnt_count = 4911c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count); 49126dd4ee7cSKalpak Shah } 49136dd4ee7cSKalpak Shah } 49146dd4ee7cSKalpak Shah } 49156dd4ee7cSKalpak Shah } 4916ac27a0ecSDave Kleikamp if (!err) 4917617ba13bSMingming Cao err = ext4_mark_iloc_dirty(handle, inode, &iloc); 4918ac27a0ecSDave Kleikamp return err; 4919ac27a0ecSDave Kleikamp } 4920ac27a0ecSDave Kleikamp 4921ac27a0ecSDave Kleikamp /* 4922617ba13bSMingming Cao * ext4_dirty_inode() is called from __mark_inode_dirty() 4923ac27a0ecSDave Kleikamp * 4924ac27a0ecSDave Kleikamp * We're really interested in the case where a file is being extended. 4925ac27a0ecSDave Kleikamp * i_size has been changed by generic_commit_write() and we thus need 4926ac27a0ecSDave Kleikamp * to include the updated inode in the current transaction. 4927ac27a0ecSDave Kleikamp * 49285dd4056dSChristoph Hellwig * Also, dquot_alloc_block() will always dirty the inode when blocks 4929ac27a0ecSDave Kleikamp * are allocated to the file. 4930ac27a0ecSDave Kleikamp * 4931ac27a0ecSDave Kleikamp * If the inode is marked synchronous, we don't honour that here - doing 4932ac27a0ecSDave Kleikamp * so would cause a commit on atime updates, which we don't bother doing. 4933ac27a0ecSDave Kleikamp * We handle synchronous inodes at the highest possible level. 4934ac27a0ecSDave Kleikamp */ 4935aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags) 4936ac27a0ecSDave Kleikamp { 4937ac27a0ecSDave Kleikamp handle_t *handle; 4938ac27a0ecSDave Kleikamp 49399924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 4940ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4941ac27a0ecSDave Kleikamp goto out; 4942f3dc272fSCurt Wohlgemuth 4943617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 4944f3dc272fSCurt Wohlgemuth 4945617ba13bSMingming Cao ext4_journal_stop(handle); 4946ac27a0ecSDave Kleikamp out: 4947ac27a0ecSDave Kleikamp return; 4948ac27a0ecSDave Kleikamp } 4949ac27a0ecSDave Kleikamp 4950ac27a0ecSDave Kleikamp #if 0 4951ac27a0ecSDave Kleikamp /* 4952ac27a0ecSDave Kleikamp * Bind an inode's backing buffer_head into this transaction, to prevent 4953ac27a0ecSDave Kleikamp * it from being flushed to disk early. Unlike 4954617ba13bSMingming Cao * ext4_reserve_inode_write, this leaves behind no bh reference and 4955ac27a0ecSDave Kleikamp * returns no iloc structure, so the caller needs to repeat the iloc 4956ac27a0ecSDave Kleikamp * lookup to mark the inode dirty later. 
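* (Note that this helper is compiled out by the surrounding #if 0 and
* is kept only for reference.)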
4957ac27a0ecSDave Kleikamp */ 4958617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode) 4959ac27a0ecSDave Kleikamp { 4960617ba13bSMingming Cao struct ext4_iloc iloc; 4961ac27a0ecSDave Kleikamp 4962ac27a0ecSDave Kleikamp int err = 0; 4963ac27a0ecSDave Kleikamp if (handle) { 4964617ba13bSMingming Cao err = ext4_get_inode_loc(inode, &iloc); 4965ac27a0ecSDave Kleikamp if (!err) { 4966ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc.bh, "get_write_access"); 4967dab291afSMingming Cao err = jbd2_journal_get_write_access(handle, iloc.bh); 4968ac27a0ecSDave Kleikamp if (!err) 49690390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, 497073b50c1cSCurt Wohlgemuth NULL, 4971ac27a0ecSDave Kleikamp iloc.bh); 4972ac27a0ecSDave Kleikamp brelse(iloc.bh); 4973ac27a0ecSDave Kleikamp } 4974ac27a0ecSDave Kleikamp } 4975617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4976ac27a0ecSDave Kleikamp return err; 4977ac27a0ecSDave Kleikamp } 4978ac27a0ecSDave Kleikamp #endif 4979ac27a0ecSDave Kleikamp 4980617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val) 4981ac27a0ecSDave Kleikamp { 4982ac27a0ecSDave Kleikamp journal_t *journal; 4983ac27a0ecSDave Kleikamp handle_t *handle; 4984ac27a0ecSDave Kleikamp int err; 4985ac27a0ecSDave Kleikamp 4986ac27a0ecSDave Kleikamp /* 4987ac27a0ecSDave Kleikamp * We have to be very careful here: changing a data block's 4988ac27a0ecSDave Kleikamp * journaling status dynamically is dangerous. If we write a 4989ac27a0ecSDave Kleikamp * data block to the journal, change the status and then delete 4990ac27a0ecSDave Kleikamp * that block, we risk forgetting to revoke the old log record 4991ac27a0ecSDave Kleikamp * from the journal and so a subsequent replay can corrupt data. 4992ac27a0ecSDave Kleikamp * So, first we make sure that the journal is empty and that 4993ac27a0ecSDave Kleikamp * nobody is changing anything. 4994ac27a0ecSDave Kleikamp */ 4995ac27a0ecSDave Kleikamp 4996617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 49970390131bSFrank Mayhar if (!journal) 49980390131bSFrank Mayhar return 0; 4999d699594dSDave Hansen if (is_journal_aborted(journal)) 5000ac27a0ecSDave Kleikamp return -EROFS; 50012aff57b0SYongqiang Yang /* We have to allocate physical blocks for delalloc blocks 50022aff57b0SYongqiang Yang * before flushing the journal. Otherwise delalloc blocks cannot 50032aff57b0SYongqiang Yang * be allocated any more; moreover, truncate on delalloc blocks 50042aff57b0SYongqiang Yang * could trigger a BUG by flushing delalloc blocks in the journal. 50052aff57b0SYongqiang Yang * There are no delalloc blocks in non-journal data mode. 50062aff57b0SYongqiang Yang */ 50072aff57b0SYongqiang Yang if (val && test_opt(inode->i_sb, DELALLOC)) { 50082aff57b0SYongqiang Yang err = ext4_alloc_da_blocks(inode); 50092aff57b0SYongqiang Yang if (err < 0) 50102aff57b0SYongqiang Yang return err; 50112aff57b0SYongqiang Yang } 5012ac27a0ecSDave Kleikamp 501317335dccSDmitry Monakhov /* Wait for all existing dio workers */ 501417335dccSDmitry Monakhov ext4_inode_block_unlocked_dio(inode); 501517335dccSDmitry Monakhov inode_dio_wait(inode); 501617335dccSDmitry Monakhov 5017dab291afSMingming Cao jbd2_journal_lock_updates(journal); 5018ac27a0ecSDave Kleikamp 5019ac27a0ecSDave Kleikamp /* 5020ac27a0ecSDave Kleikamp * OK, there are no updates running now, and all cached data is 5021ac27a0ecSDave Kleikamp * synced to disk.
We are now in a completely consistent state 5022ac27a0ecSDave Kleikamp * which doesn't have anything in the journal, and we know that 5023ac27a0ecSDave Kleikamp * no filesystem updates are running, so it is safe to modify 5024ac27a0ecSDave Kleikamp * the inode's in-core data-journaling state flag now. 5025ac27a0ecSDave Kleikamp */ 5026ac27a0ecSDave Kleikamp 5027ac27a0ecSDave Kleikamp if (val) 502812e9b892SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 50295872ddaaSYongqiang Yang else { 50305872ddaaSYongqiang Yang jbd2_journal_flush(journal); 503112e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 50325872ddaaSYongqiang Yang } 5033617ba13bSMingming Cao ext4_set_aops(inode); 5034ac27a0ecSDave Kleikamp 5035dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 503617335dccSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode); 5037ac27a0ecSDave Kleikamp 5038ac27a0ecSDave Kleikamp /* Finally we can mark the inode as dirty. */ 5039ac27a0ecSDave Kleikamp 50409924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 5041ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 5042ac27a0ecSDave Kleikamp return PTR_ERR(handle); 5043ac27a0ecSDave Kleikamp 5044617ba13bSMingming Cao err = ext4_mark_inode_dirty(handle, inode); 50450390131bSFrank Mayhar ext4_handle_sync(handle); 5046617ba13bSMingming Cao ext4_journal_stop(handle); 5047617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 5048ac27a0ecSDave Kleikamp 5049ac27a0ecSDave Kleikamp return err; 5050ac27a0ecSDave Kleikamp } 50512e9ee850SAneesh Kumar K.V 50522e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 50532e9ee850SAneesh Kumar K.V { 50542e9ee850SAneesh Kumar K.V return !buffer_mapped(bh); 50552e9ee850SAneesh Kumar K.V } 50562e9ee850SAneesh Kumar K.V 5057c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 50582e9ee850SAneesh Kumar K.V { 5059c2ec175cSNick Piggin struct page *page = vmf->page; 50602e9ee850SAneesh Kumar K.V loff_t size; 50612e9ee850SAneesh Kumar K.V unsigned long len; 50629ea7df53SJan Kara int ret; 50632e9ee850SAneesh Kumar K.V struct file *file = vma->vm_file; 5064496ad9aaSAl Viro struct inode *inode = file_inode(file); 50652e9ee850SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 50669ea7df53SJan Kara handle_t *handle; 50679ea7df53SJan Kara get_block_t *get_block; 50689ea7df53SJan Kara int retries = 0; 50692e9ee850SAneesh Kumar K.V 50708e8ad8a5SJan Kara sb_start_pagefault(inode->i_sb); 5071041bbb6dSTheodore Ts'o file_update_time(vma->vm_file); 50729ea7df53SJan Kara /* Delalloc case is easy... */ 50739ea7df53SJan Kara if (test_opt(inode->i_sb, DELALLOC) && 50749ea7df53SJan Kara !ext4_should_journal_data(inode) && 50759ea7df53SJan Kara !ext4_nonda_switch(inode->i_sb)) { 50769ea7df53SJan Kara do { 50779ea7df53SJan Kara ret = __block_page_mkwrite(vma, vmf, 50789ea7df53SJan Kara ext4_da_get_block_prep); 50799ea7df53SJan Kara } while (ret == -ENOSPC && 50809ea7df53SJan Kara ext4_should_retry_alloc(inode->i_sb, &retries)); 50819ea7df53SJan Kara goto out_ret; 50822e9ee850SAneesh Kumar K.V } 50830e499890SDarrick J. Wong 50840e499890SDarrick J. Wong lock_page(page); 50859ea7df53SJan Kara size = i_size_read(inode); 50869ea7df53SJan Kara /* Page got truncated from under us? 
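* (i.e. the page's mapping changed, or the page now lies wholly
* beyond i_size)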
*/ 50879ea7df53SJan Kara if (page->mapping != mapping || page_offset(page) > size) { 50889ea7df53SJan Kara unlock_page(page); 50899ea7df53SJan Kara ret = VM_FAULT_NOPAGE; 50909ea7df53SJan Kara goto out; 50910e499890SDarrick J. Wong } 50922e9ee850SAneesh Kumar K.V 50932e9ee850SAneesh Kumar K.V if (page->index == size >> PAGE_CACHE_SHIFT) 50942e9ee850SAneesh Kumar K.V len = size & ~PAGE_CACHE_MASK; 50952e9ee850SAneesh Kumar K.V else 50962e9ee850SAneesh Kumar K.V len = PAGE_CACHE_SIZE; 5097a827eaffSAneesh Kumar K.V /* 50989ea7df53SJan Kara * Return if we have all the buffers mapped. This avoids the need to do 50999ea7df53SJan Kara * journal_start/journal_stop which can block and take a long time 5100a827eaffSAneesh Kumar K.V */ 51012e9ee850SAneesh Kumar K.V if (page_has_buffers(page)) { 5102f19d5870STao Ma if (!ext4_walk_page_buffers(NULL, page_buffers(page), 5103f19d5870STao Ma 0, len, NULL, 5104a827eaffSAneesh Kumar K.V ext4_bh_unmapped)) { 51059ea7df53SJan Kara /* Wait so that we don't change page under IO */ 51061d1d1a76SDarrick J. Wong wait_for_stable_page(page); 51079ea7df53SJan Kara ret = VM_FAULT_LOCKED; 51089ea7df53SJan Kara goto out; 51092e9ee850SAneesh Kumar K.V } 5110a827eaffSAneesh Kumar K.V } 5111a827eaffSAneesh Kumar K.V unlock_page(page); 51129ea7df53SJan Kara /* OK, we need to fill the hole... */ 51139ea7df53SJan Kara if (ext4_should_dioread_nolock(inode)) 51149ea7df53SJan Kara get_block = ext4_get_block_write; 51159ea7df53SJan Kara else 51169ea7df53SJan Kara get_block = ext4_get_block; 51179ea7df53SJan Kara retry_alloc: 51189924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 51199924a92aSTheodore Ts'o ext4_writepage_trans_blocks(inode)); 51209ea7df53SJan Kara if (IS_ERR(handle)) { 5121c2ec175cSNick Piggin ret = VM_FAULT_SIGBUS; 51229ea7df53SJan Kara goto out; 51239ea7df53SJan Kara } 51249ea7df53SJan Kara ret = __block_page_mkwrite(vma, vmf, get_block); 51259ea7df53SJan Kara if (!ret && ext4_should_journal_data(inode)) { 5126f19d5870STao Ma if (ext4_walk_page_buffers(handle, page_buffers(page), 0, 51279ea7df53SJan Kara PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { 51289ea7df53SJan Kara unlock_page(page); 51299ea7df53SJan Kara ret = VM_FAULT_SIGBUS; 5130fcbb5515SYongqiang Yang ext4_journal_stop(handle); 51319ea7df53SJan Kara goto out; 51329ea7df53SJan Kara } 51339ea7df53SJan Kara ext4_set_inode_state(inode, EXT4_STATE_JDATA); 51349ea7df53SJan Kara } 51359ea7df53SJan Kara ext4_journal_stop(handle); 51369ea7df53SJan Kara if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 51379ea7df53SJan Kara goto retry_alloc; 51389ea7df53SJan Kara out_ret: 51399ea7df53SJan Kara ret = block_page_mkwrite_return(ret); 51409ea7df53SJan Kara out: 51418e8ad8a5SJan Kara sb_end_pagefault(inode->i_sb); 51422e9ee850SAneesh Kumar K.V return ret; 51432e9ee850SAneesh Kumar K.V } 5144