1ac27a0ecSDave Kleikamp /* 2617ba13bSMingming Cao * linux/fs/ext4/inode.c 3ac27a0ecSDave Kleikamp * 4ac27a0ecSDave Kleikamp * Copyright (C) 1992, 1993, 1994, 1995 5ac27a0ecSDave Kleikamp * Remy Card (card@masi.ibp.fr) 6ac27a0ecSDave Kleikamp * Laboratoire MASI - Institut Blaise Pascal 7ac27a0ecSDave Kleikamp * Universite Pierre et Marie Curie (Paris VI) 8ac27a0ecSDave Kleikamp * 9ac27a0ecSDave Kleikamp * from 10ac27a0ecSDave Kleikamp * 11ac27a0ecSDave Kleikamp * linux/fs/minix/inode.c 12ac27a0ecSDave Kleikamp * 13ac27a0ecSDave Kleikamp * Copyright (C) 1991, 1992 Linus Torvalds 14ac27a0ecSDave Kleikamp * 15ac27a0ecSDave Kleikamp * 64-bit file support on 64-bit platforms by Jakub Jelinek 16ac27a0ecSDave Kleikamp * (jj@sunsite.ms.mff.cuni.cz) 17ac27a0ecSDave Kleikamp * 18617ba13bSMingming Cao * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000 19ac27a0ecSDave Kleikamp */ 20ac27a0ecSDave Kleikamp 21ac27a0ecSDave Kleikamp #include <linux/fs.h> 22ac27a0ecSDave Kleikamp #include <linux/time.h> 23dab291afSMingming Cao #include <linux/jbd2.h> 24ac27a0ecSDave Kleikamp #include <linux/highuid.h> 25ac27a0ecSDave Kleikamp #include <linux/pagemap.h> 26ac27a0ecSDave Kleikamp #include <linux/quotaops.h> 27ac27a0ecSDave Kleikamp #include <linux/string.h> 28ac27a0ecSDave Kleikamp #include <linux/buffer_head.h> 29ac27a0ecSDave Kleikamp #include <linux/writeback.h> 3064769240SAlex Tomas #include <linux/pagevec.h> 31ac27a0ecSDave Kleikamp #include <linux/mpage.h> 32e83c1397SDuane Griffin #include <linux/namei.h> 33ac27a0ecSDave Kleikamp #include <linux/uio.h> 34ac27a0ecSDave Kleikamp #include <linux/bio.h> 354c0425ffSMingming Cao #include <linux/workqueue.h> 36744692dcSJiaying Zhang #include <linux/kernel.h> 376db26ffcSAndrew Morton #include <linux/printk.h> 385a0e3ad6STejun Heo #include <linux/slab.h> 39a8901d34STheodore Ts'o #include <linux/ratelimit.h> 409bffad1eSTheodore Ts'o 413dcf5451SChristoph Hellwig #include "ext4_jbd2.h" 42ac27a0ecSDave Kleikamp #include "xattr.h" 43ac27a0ecSDave Kleikamp #include "acl.h" 449f125d64STheodore Ts'o #include "truncate.h" 45ac27a0ecSDave Kleikamp 469bffad1eSTheodore Ts'o #include <trace/events/ext4.h> 479bffad1eSTheodore Ts'o 48a1d6cc56SAneesh Kumar K.V #define MPAGE_DA_EXTENT_TAIL 0x01 49a1d6cc56SAneesh Kumar K.V 50814525f4SDarrick J. Wong static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw, 51814525f4SDarrick J. Wong struct ext4_inode_info *ei) 52814525f4SDarrick J. Wong { 53814525f4SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 54814525f4SDarrick J. Wong __u16 csum_lo; 55814525f4SDarrick J. Wong __u16 csum_hi = 0; 56814525f4SDarrick J. Wong __u32 csum; 57814525f4SDarrick J. Wong 58*171a7f21SDmitry Monakhov csum_lo = le16_to_cpu(raw->i_checksum_lo); 59814525f4SDarrick J. Wong raw->i_checksum_lo = 0; 60814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 61814525f4SDarrick J. Wong EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) { 62*171a7f21SDmitry Monakhov csum_hi = le16_to_cpu(raw->i_checksum_hi); 63814525f4SDarrick J. Wong raw->i_checksum_hi = 0; 64814525f4SDarrick J. Wong } 65814525f4SDarrick J. Wong 66814525f4SDarrick J. Wong csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, 67814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)); 68814525f4SDarrick J. Wong 69*171a7f21SDmitry Monakhov raw->i_checksum_lo = cpu_to_le16(csum_lo); 70814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 71814525f4SDarrick J. Wong EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) 72*171a7f21SDmitry Monakhov raw->i_checksum_hi = cpu_to_le16(csum_hi); 73814525f4SDarrick J. Wong 74814525f4SDarrick J. Wong return csum; 75814525f4SDarrick J. Wong } 76814525f4SDarrick J. Wong 77814525f4SDarrick J. Wong static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw, 78814525f4SDarrick J. Wong struct ext4_inode_info *ei) 79814525f4SDarrick J. Wong { 80814525f4SDarrick J. Wong __u32 provided, calculated; 81814525f4SDarrick J. Wong 82814525f4SDarrick J. Wong if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 83814525f4SDarrick J. Wong cpu_to_le32(EXT4_OS_LINUX) || 84814525f4SDarrick J. Wong !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 85814525f4SDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) 86814525f4SDarrick J. Wong return 1; 87814525f4SDarrick J. Wong 88814525f4SDarrick J. Wong provided = le16_to_cpu(raw->i_checksum_lo); 89814525f4SDarrick J. Wong calculated = ext4_inode_csum(inode, raw, ei); 90814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 91814525f4SDarrick J. Wong EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) 92814525f4SDarrick J. Wong provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16; 93814525f4SDarrick J. Wong else 94814525f4SDarrick J. Wong calculated &= 0xFFFF; 95814525f4SDarrick J. Wong 96814525f4SDarrick J. Wong return provided == calculated; 97814525f4SDarrick J. Wong } 98814525f4SDarrick J. Wong 99814525f4SDarrick J. Wong static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, 100814525f4SDarrick J. Wong struct ext4_inode_info *ei) 101814525f4SDarrick J. Wong { 102814525f4SDarrick J. Wong __u32 csum; 103814525f4SDarrick J. Wong 104814525f4SDarrick J. Wong if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 105814525f4SDarrick J. Wong cpu_to_le32(EXT4_OS_LINUX) || 106814525f4SDarrick J. Wong !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 107814525f4SDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) 108814525f4SDarrick J. Wong return; 109814525f4SDarrick J. Wong 110814525f4SDarrick J. Wong csum = ext4_inode_csum(inode, raw, ei); 111814525f4SDarrick J. Wong raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF); 112814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE && 113814525f4SDarrick J. Wong EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) 114814525f4SDarrick J. Wong raw->i_checksum_hi = cpu_to_le16(csum >> 16); 115814525f4SDarrick J. Wong } 116814525f4SDarrick J. Wong 117678aaf48SJan Kara static inline int ext4_begin_ordered_truncate(struct inode *inode, 118678aaf48SJan Kara loff_t new_size) 119678aaf48SJan Kara { 1207ff9c073STheodore Ts'o trace_ext4_begin_ordered_truncate(inode, new_size); 1218aefcd55STheodore Ts'o /* 1228aefcd55STheodore Ts'o * If jinode is zero, then we never opened the file for 1238aefcd55STheodore Ts'o * writing, so there's no need to call 1248aefcd55STheodore Ts'o * jbd2_journal_begin_ordered_truncate() since there's no 1258aefcd55STheodore Ts'o * outstanding writes we need to flush. 1268aefcd55STheodore Ts'o */ 1278aefcd55STheodore Ts'o if (!EXT4_I(inode)->jinode) 1288aefcd55STheodore Ts'o return 0; 1298aefcd55STheodore Ts'o return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode), 1308aefcd55STheodore Ts'o EXT4_I(inode)->jinode, 131678aaf48SJan Kara new_size); 132678aaf48SJan Kara } 133678aaf48SJan Kara 13464769240SAlex Tomas static void ext4_invalidatepage(struct page *page, unsigned long offset); 135cb20d518STheodore Ts'o static int __ext4_journalled_writepage(struct page *page, unsigned int len); 136cb20d518STheodore Ts'o static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh); 1375f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, 1385f163cc7SEric Sandeen struct inode *inode, struct page *page, loff_t from, 1395f163cc7SEric Sandeen loff_t length, int flags); 14064769240SAlex Tomas 141ac27a0ecSDave Kleikamp /* 142ac27a0ecSDave Kleikamp * Test whether an inode is a fast symlink. 143ac27a0ecSDave Kleikamp */ 144617ba13bSMingming Cao static int ext4_inode_is_fast_symlink(struct inode *inode) 145ac27a0ecSDave Kleikamp { 146617ba13bSMingming Cao int ea_blocks = EXT4_I(inode)->i_file_acl ? 147ac27a0ecSDave Kleikamp (inode->i_sb->s_blocksize >> 9) : 0; 148ac27a0ecSDave Kleikamp 149ac27a0ecSDave Kleikamp return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); 150ac27a0ecSDave Kleikamp } 151ac27a0ecSDave Kleikamp 152ac27a0ecSDave Kleikamp /* 153ac27a0ecSDave Kleikamp * Restart the transaction associated with *handle. This does a commit, 154ac27a0ecSDave Kleikamp * so before we call here everything must be consistently dirtied against 155ac27a0ecSDave Kleikamp * this transaction. 156ac27a0ecSDave Kleikamp */ 157487caeefSJan Kara int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, 158487caeefSJan Kara int nblocks) 159ac27a0ecSDave Kleikamp { 160487caeefSJan Kara int ret; 161487caeefSJan Kara 162487caeefSJan Kara /* 163e35fd660STheodore Ts'o * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this 164487caeefSJan Kara * moment, get_block can be called only for blocks inside i_size since 165487caeefSJan Kara * page cache has been already dropped and writes are blocked by 166487caeefSJan Kara * i_mutex. So we can safely drop the i_data_sem here. 167487caeefSJan Kara */ 1680390131bSFrank Mayhar BUG_ON(EXT4_JOURNAL(inode) == NULL); 169ac27a0ecSDave Kleikamp jbd_debug(2, "restarting handle %p\n", handle); 170487caeefSJan Kara up_write(&EXT4_I(inode)->i_data_sem); 1718e8eaabeSAmir Goldstein ret = ext4_journal_restart(handle, nblocks); 172487caeefSJan Kara down_write(&EXT4_I(inode)->i_data_sem); 173fa5d1113SAneesh Kumar K.V ext4_discard_preallocations(inode); 174487caeefSJan Kara 175487caeefSJan Kara return ret; 176ac27a0ecSDave Kleikamp } 177ac27a0ecSDave Kleikamp 178ac27a0ecSDave Kleikamp /* 179ac27a0ecSDave Kleikamp * Called at the last iput() if i_nlink is zero. 180ac27a0ecSDave Kleikamp */ 1810930fcc1SAl Viro void ext4_evict_inode(struct inode *inode) 182ac27a0ecSDave Kleikamp { 183ac27a0ecSDave Kleikamp handle_t *handle; 184bc965ab3STheodore Ts'o int err; 185ac27a0ecSDave Kleikamp 1867ff9c073STheodore Ts'o trace_ext4_evict_inode(inode); 1872581fdc8SJiaying Zhang 1880930fcc1SAl Viro if (inode->i_nlink) { 1892d859db3SJan Kara /* 1902d859db3SJan Kara * When journalling data dirty buffers are tracked only in the 1912d859db3SJan Kara * journal. So although mm thinks everything is clean and 1922d859db3SJan Kara * ready for reaping the inode might still have some pages to 1932d859db3SJan Kara * write in the running transaction or waiting to be 1942d859db3SJan Kara * checkpointed. Thus calling jbd2_journal_invalidatepage() 1952d859db3SJan Kara * (via truncate_inode_pages()) to discard these buffers can 1962d859db3SJan Kara * cause data loss. Also even if we did not discard these 1972d859db3SJan Kara * buffers, we would have no way to find them after the inode 1982d859db3SJan Kara * is reaped and thus user could see stale data if he tries to 1992d859db3SJan Kara * read them before the transaction is checkpointed. So be 2002d859db3SJan Kara * careful and force everything to disk here... We use 2012d859db3SJan Kara * ei->i_datasync_tid to store the newest transaction 2022d859db3SJan Kara * containing inode's data. 2032d859db3SJan Kara * 2042d859db3SJan Kara * Note that directories do not have this problem because they 2052d859db3SJan Kara * don't use page cache. 2062d859db3SJan Kara */ 2072d859db3SJan Kara if (ext4_should_journal_data(inode) && 2082b405bfaSTheodore Ts'o (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && 2092b405bfaSTheodore Ts'o inode->i_ino != EXT4_JOURNAL_INO) { 2102d859db3SJan Kara journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 2112d859db3SJan Kara tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; 2122d859db3SJan Kara 213d76a3a77STheodore Ts'o jbd2_complete_transaction(journal, commit_tid); 2142d859db3SJan Kara filemap_write_and_wait(&inode->i_data); 2152d859db3SJan Kara } 2160930fcc1SAl Viro truncate_inode_pages(&inode->i_data, 0); 2171ada47d9STheodore Ts'o ext4_ioend_shutdown(inode); 2180930fcc1SAl Viro goto no_delete; 2190930fcc1SAl Viro } 2200930fcc1SAl Viro 221907f4554SChristoph Hellwig if (!is_bad_inode(inode)) 222871a2931SChristoph Hellwig dquot_initialize(inode); 223907f4554SChristoph Hellwig 224678aaf48SJan Kara if (ext4_should_order_data(inode)) 225678aaf48SJan Kara ext4_begin_ordered_truncate(inode, 0); 226ac27a0ecSDave Kleikamp truncate_inode_pages(&inode->i_data, 0); 2271ada47d9STheodore Ts'o ext4_ioend_shutdown(inode); 228ac27a0ecSDave Kleikamp 229ac27a0ecSDave Kleikamp if (is_bad_inode(inode)) 230ac27a0ecSDave Kleikamp goto no_delete; 231ac27a0ecSDave Kleikamp 2328e8ad8a5SJan Kara /* 2338e8ad8a5SJan Kara * Protect us against freezing - iput() caller didn't have to have any 2348e8ad8a5SJan Kara * protection against it 2358e8ad8a5SJan Kara */ 2368e8ad8a5SJan Kara sb_start_intwrite(inode->i_sb); 2379924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, 2389924a92aSTheodore Ts'o ext4_blocks_for_truncate(inode)+3); 239ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 240bc965ab3STheodore Ts'o ext4_std_error(inode->i_sb, PTR_ERR(handle)); 241ac27a0ecSDave Kleikamp /* 242ac27a0ecSDave Kleikamp * If we're going to skip the normal cleanup, we still need to 243ac27a0ecSDave Kleikamp * make sure that the in-core orphan linked list is properly 244ac27a0ecSDave Kleikamp * cleaned up. 245ac27a0ecSDave Kleikamp */ 246617ba13bSMingming Cao ext4_orphan_del(NULL, inode); 2478e8ad8a5SJan Kara sb_end_intwrite(inode->i_sb); 248ac27a0ecSDave Kleikamp goto no_delete; 249ac27a0ecSDave Kleikamp } 250ac27a0ecSDave Kleikamp 251ac27a0ecSDave Kleikamp if (IS_SYNC(inode)) 2520390131bSFrank Mayhar ext4_handle_sync(handle); 253ac27a0ecSDave Kleikamp inode->i_size = 0; 254bc965ab3STheodore Ts'o err = ext4_mark_inode_dirty(handle, inode); 255bc965ab3STheodore Ts'o if (err) { 25612062dddSEric Sandeen ext4_warning(inode->i_sb, 257bc965ab3STheodore Ts'o "couldn't mark inode dirty (err %d)", err); 258bc965ab3STheodore Ts'o goto stop_handle; 259bc965ab3STheodore Ts'o } 260ac27a0ecSDave Kleikamp if (inode->i_blocks) 261617ba13bSMingming Cao ext4_truncate(inode); 262bc965ab3STheodore Ts'o 263bc965ab3STheodore Ts'o /* 264bc965ab3STheodore Ts'o * ext4_ext_truncate() doesn't reserve any slop when it 265bc965ab3STheodore Ts'o * restarts journal transactions; therefore there may not be 266bc965ab3STheodore Ts'o * enough credits left in the handle to remove the inode from 267bc965ab3STheodore Ts'o * the orphan list and set the dtime field. 268bc965ab3STheodore Ts'o */ 2690390131bSFrank Mayhar if (!ext4_handle_has_enough_credits(handle, 3)) { 270bc965ab3STheodore Ts'o err = ext4_journal_extend(handle, 3); 271bc965ab3STheodore Ts'o if (err > 0) 272bc965ab3STheodore Ts'o err = ext4_journal_restart(handle, 3); 273bc965ab3STheodore Ts'o if (err != 0) { 27412062dddSEric Sandeen ext4_warning(inode->i_sb, 275bc965ab3STheodore Ts'o "couldn't extend journal (err %d)", err); 276bc965ab3STheodore Ts'o stop_handle: 277bc965ab3STheodore Ts'o ext4_journal_stop(handle); 27845388219STheodore Ts'o ext4_orphan_del(NULL, inode); 2798e8ad8a5SJan Kara sb_end_intwrite(inode->i_sb); 280bc965ab3STheodore Ts'o goto no_delete; 281bc965ab3STheodore Ts'o } 282bc965ab3STheodore Ts'o } 283bc965ab3STheodore Ts'o 284ac27a0ecSDave Kleikamp /* 285617ba13bSMingming Cao * Kill off the orphan record which ext4_truncate created. 286ac27a0ecSDave Kleikamp * AKPM: I think this can be inside the above `if'. 287617ba13bSMingming Cao * Note that ext4_orphan_del() has to be able to cope with the 288ac27a0ecSDave Kleikamp * deletion of a non-existent orphan - this is because we don't 289617ba13bSMingming Cao * know if ext4_truncate() actually created an orphan record. 290ac27a0ecSDave Kleikamp * (Well, we could do this if we need to, but heck - it works) 291ac27a0ecSDave Kleikamp */ 292617ba13bSMingming Cao ext4_orphan_del(handle, inode); 293617ba13bSMingming Cao EXT4_I(inode)->i_dtime = get_seconds(); 294ac27a0ecSDave Kleikamp 295ac27a0ecSDave Kleikamp /* 296ac27a0ecSDave Kleikamp * One subtle ordering requirement: if anything has gone wrong 297ac27a0ecSDave Kleikamp * (transaction abort, IO errors, whatever), then we can still 298ac27a0ecSDave Kleikamp * do these next steps (the fs will already have been marked as 299ac27a0ecSDave Kleikamp * having errors), but we can't free the inode if the mark_dirty 300ac27a0ecSDave Kleikamp * fails. 301ac27a0ecSDave Kleikamp */ 302617ba13bSMingming Cao if (ext4_mark_inode_dirty(handle, inode)) 303ac27a0ecSDave Kleikamp /* If that failed, just do the required in-core inode clear. */ 3040930fcc1SAl Viro ext4_clear_inode(inode); 305ac27a0ecSDave Kleikamp else 306617ba13bSMingming Cao ext4_free_inode(handle, inode); 307617ba13bSMingming Cao ext4_journal_stop(handle); 3088e8ad8a5SJan Kara sb_end_intwrite(inode->i_sb); 309ac27a0ecSDave Kleikamp return; 310ac27a0ecSDave Kleikamp no_delete: 3110930fcc1SAl Viro ext4_clear_inode(inode); /* We must guarantee clearing of inode... */ 312ac27a0ecSDave Kleikamp } 313ac27a0ecSDave Kleikamp 314a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 315a9e7f447SDmitry Monakhov qsize_t *ext4_get_reserved_space(struct inode *inode) 31660e58e0fSMingming Cao { 317a9e7f447SDmitry Monakhov return &EXT4_I(inode)->i_reserved_quota; 31860e58e0fSMingming Cao } 319a9e7f447SDmitry Monakhov #endif 3209d0be502STheodore Ts'o 32112219aeaSAneesh Kumar K.V /* 32212219aeaSAneesh Kumar K.V * Calculate the number of metadata blocks need to reserve 3239d0be502STheodore Ts'o * to allocate a block located at @lblock 32412219aeaSAneesh Kumar K.V */ 32501f49d0bSTheodore Ts'o static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) 32612219aeaSAneesh Kumar K.V { 32712e9b892SDmitry Monakhov if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3289d0be502STheodore Ts'o return ext4_ext_calc_metadata_amount(inode, lblock); 32912219aeaSAneesh Kumar K.V 3308bb2b247SAmir Goldstein return ext4_ind_calc_metadata_amount(inode, lblock); 33112219aeaSAneesh Kumar K.V } 33212219aeaSAneesh Kumar K.V 3330637c6f4STheodore Ts'o /* 3340637c6f4STheodore Ts'o * Called with i_data_sem down, which is important since we can call 3350637c6f4STheodore Ts'o * ext4_discard_preallocations() from here. 3360637c6f4STheodore Ts'o */ 3375f634d06SAneesh Kumar K.V void ext4_da_update_reserve_space(struct inode *inode, 3385f634d06SAneesh Kumar K.V int used, int quota_claim) 33912219aeaSAneesh Kumar K.V { 34012219aeaSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3410637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 34212219aeaSAneesh Kumar K.V 3430637c6f4STheodore Ts'o spin_lock(&ei->i_block_reservation_lock); 344d8990240SAditya Kali trace_ext4_da_update_reserve_space(inode, used, quota_claim); 3450637c6f4STheodore Ts'o if (unlikely(used > ei->i_reserved_data_blocks)) { 3468de5c325STheodore Ts'o ext4_warning(inode->i_sb, "%s: ino %lu, used %d " 3471084f252STheodore Ts'o "with only %d reserved data blocks", 3480637c6f4STheodore Ts'o __func__, inode->i_ino, used, 3490637c6f4STheodore Ts'o ei->i_reserved_data_blocks); 3500637c6f4STheodore Ts'o WARN_ON(1); 3510637c6f4STheodore Ts'o used = ei->i_reserved_data_blocks; 3526bc6e63fSAneesh Kumar K.V } 35312219aeaSAneesh Kumar K.V 35497795d2aSBrian Foster if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) { 35501a523ebSTheodore Ts'o ext4_warning(inode->i_sb, "ino %lu, allocated %d " 35601a523ebSTheodore Ts'o "with only %d reserved metadata blocks " 35701a523ebSTheodore Ts'o "(releasing %d blocks with reserved %d data blocks)", 35897795d2aSBrian Foster inode->i_ino, ei->i_allocated_meta_blocks, 35901a523ebSTheodore Ts'o ei->i_reserved_meta_blocks, used, 36001a523ebSTheodore Ts'o ei->i_reserved_data_blocks); 36197795d2aSBrian Foster WARN_ON(1); 36297795d2aSBrian Foster ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks; 36397795d2aSBrian Foster } 36497795d2aSBrian Foster 3650637c6f4STheodore Ts'o /* Update per-inode reservations */ 3660637c6f4STheodore Ts'o ei->i_reserved_data_blocks -= used; 3670637c6f4STheodore Ts'o ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks; 36857042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 36972b8ab9dSEric Sandeen used + ei->i_allocated_meta_blocks); 3700637c6f4STheodore Ts'o ei->i_allocated_meta_blocks = 0; 3710637c6f4STheodore Ts'o 3720637c6f4STheodore Ts'o if (ei->i_reserved_data_blocks == 0) { 3730637c6f4STheodore Ts'o /* 3740637c6f4STheodore Ts'o * We can release all of the reserved metadata blocks 3750637c6f4STheodore Ts'o * only when we have written all of the delayed 3760637c6f4STheodore Ts'o * allocation blocks. 3770637c6f4STheodore Ts'o */ 37857042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 37972b8ab9dSEric Sandeen ei->i_reserved_meta_blocks); 380ee5f4d9cSTheodore Ts'o ei->i_reserved_meta_blocks = 0; 3819d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 3820637c6f4STheodore Ts'o } 38312219aeaSAneesh Kumar K.V spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 38460e58e0fSMingming Cao 38572b8ab9dSEric Sandeen /* Update quota subsystem for data blocks */ 38672b8ab9dSEric Sandeen if (quota_claim) 3877b415bf6SAditya Kali dquot_claim_block(inode, EXT4_C2B(sbi, used)); 38872b8ab9dSEric Sandeen else { 3895f634d06SAneesh Kumar K.V /* 3905f634d06SAneesh Kumar K.V * We did fallocate with an offset that is already delayed 3915f634d06SAneesh Kumar K.V * allocated. So on delayed allocated writeback we should 39272b8ab9dSEric Sandeen * not re-claim the quota for fallocated blocks. 3935f634d06SAneesh Kumar K.V */ 3947b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, used)); 3955f634d06SAneesh Kumar K.V } 396d6014301SAneesh Kumar K.V 397d6014301SAneesh Kumar K.V /* 398d6014301SAneesh Kumar K.V * If we have done all the pending block allocations and if 399d6014301SAneesh Kumar K.V * there aren't any writers on the inode, we can discard the 400d6014301SAneesh Kumar K.V * inode's preallocations. 401d6014301SAneesh Kumar K.V */ 4020637c6f4STheodore Ts'o if ((ei->i_reserved_data_blocks == 0) && 4030637c6f4STheodore Ts'o (atomic_read(&inode->i_writecount) == 0)) 404d6014301SAneesh Kumar K.V ext4_discard_preallocations(inode); 40512219aeaSAneesh Kumar K.V } 40612219aeaSAneesh Kumar K.V 407e29136f8STheodore Ts'o static int __check_block_validity(struct inode *inode, const char *func, 408c398eda0STheodore Ts'o unsigned int line, 40924676da4STheodore Ts'o struct ext4_map_blocks *map) 4106fd058f7STheodore Ts'o { 41124676da4STheodore Ts'o if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, 41224676da4STheodore Ts'o map->m_len)) { 413c398eda0STheodore Ts'o ext4_error_inode(inode, func, line, map->m_pblk, 414c398eda0STheodore Ts'o "lblock %lu mapped to illegal pblock " 41524676da4STheodore Ts'o "(length %d)", (unsigned long) map->m_lblk, 416c398eda0STheodore Ts'o map->m_len); 4176fd058f7STheodore Ts'o return -EIO; 4186fd058f7STheodore Ts'o } 4196fd058f7STheodore Ts'o return 0; 4206fd058f7STheodore Ts'o } 4216fd058f7STheodore Ts'o 422e29136f8STheodore Ts'o #define check_block_validity(inode, map) \ 423c398eda0STheodore Ts'o __check_block_validity((inode), __func__, __LINE__, (map)) 424e29136f8STheodore Ts'o 425f5ab0d1fSMingming Cao /* 4261f94533dSTheodore Ts'o * Return the number of contiguous dirty pages in a given inode 4271f94533dSTheodore Ts'o * starting at page frame idx. 42855138e0bSTheodore Ts'o */ 42955138e0bSTheodore Ts'o static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, 43055138e0bSTheodore Ts'o unsigned int max_pages) 43155138e0bSTheodore Ts'o { 43255138e0bSTheodore Ts'o struct address_space *mapping = inode->i_mapping; 43355138e0bSTheodore Ts'o pgoff_t index; 43455138e0bSTheodore Ts'o struct pagevec pvec; 43555138e0bSTheodore Ts'o pgoff_t num = 0; 43655138e0bSTheodore Ts'o int i, nr_pages, done = 0; 43755138e0bSTheodore Ts'o 43855138e0bSTheodore Ts'o if (max_pages == 0) 43955138e0bSTheodore Ts'o return 0; 44055138e0bSTheodore Ts'o pagevec_init(&pvec, 0); 44155138e0bSTheodore Ts'o while (!done) { 44255138e0bSTheodore Ts'o index = idx; 44355138e0bSTheodore Ts'o nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, 44455138e0bSTheodore Ts'o PAGECACHE_TAG_DIRTY, 44555138e0bSTheodore Ts'o (pgoff_t)PAGEVEC_SIZE); 44655138e0bSTheodore Ts'o if (nr_pages == 0) 44755138e0bSTheodore Ts'o break; 44855138e0bSTheodore Ts'o for (i = 0; i < nr_pages; i++) { 44955138e0bSTheodore Ts'o struct page *page = pvec.pages[i]; 45055138e0bSTheodore Ts'o struct buffer_head *bh, *head; 45155138e0bSTheodore Ts'o 45255138e0bSTheodore Ts'o lock_page(page); 45355138e0bSTheodore Ts'o if (unlikely(page->mapping != mapping) || 45455138e0bSTheodore Ts'o !PageDirty(page) || 45555138e0bSTheodore Ts'o PageWriteback(page) || 45655138e0bSTheodore Ts'o page->index != idx) { 45755138e0bSTheodore Ts'o done = 1; 45855138e0bSTheodore Ts'o unlock_page(page); 45955138e0bSTheodore Ts'o break; 46055138e0bSTheodore Ts'o } 4611f94533dSTheodore Ts'o if (page_has_buffers(page)) { 4621f94533dSTheodore Ts'o bh = head = page_buffers(page); 46355138e0bSTheodore Ts'o do { 46455138e0bSTheodore Ts'o if (!buffer_delay(bh) && 4651f94533dSTheodore Ts'o !buffer_unwritten(bh)) 46655138e0bSTheodore Ts'o done = 1; 4671f94533dSTheodore Ts'o bh = bh->b_this_page; 4681f94533dSTheodore Ts'o } while (!done && (bh != head)); 46955138e0bSTheodore Ts'o } 47055138e0bSTheodore Ts'o unlock_page(page); 47155138e0bSTheodore Ts'o if (done) 47255138e0bSTheodore Ts'o break; 47355138e0bSTheodore Ts'o idx++; 47455138e0bSTheodore Ts'o num++; 475659c6009SEric Sandeen if (num >= max_pages) { 476659c6009SEric Sandeen done = 1; 47755138e0bSTheodore Ts'o break; 47855138e0bSTheodore Ts'o } 479659c6009SEric Sandeen } 48055138e0bSTheodore Ts'o pagevec_release(&pvec); 48155138e0bSTheodore Ts'o } 48255138e0bSTheodore Ts'o return num; 48355138e0bSTheodore Ts'o } 48455138e0bSTheodore Ts'o 485921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 486921f266bSDmitry Monakhov static void ext4_map_blocks_es_recheck(handle_t *handle, 487921f266bSDmitry Monakhov struct inode *inode, 488921f266bSDmitry Monakhov struct ext4_map_blocks *es_map, 489921f266bSDmitry Monakhov struct ext4_map_blocks *map, 490921f266bSDmitry Monakhov int flags) 491921f266bSDmitry Monakhov { 492921f266bSDmitry Monakhov int retval; 493921f266bSDmitry Monakhov 494921f266bSDmitry Monakhov map->m_flags = 0; 495921f266bSDmitry Monakhov /* 496921f266bSDmitry Monakhov * There is a race window that the result is not the same. 497921f266bSDmitry Monakhov * e.g. xfstests #223 when dioread_nolock enables. The reason 498921f266bSDmitry Monakhov * is that we lookup a block mapping in extent status tree with 499921f266bSDmitry Monakhov * out taking i_data_sem. So at the time the unwritten extent 500921f266bSDmitry Monakhov * could be converted. 501921f266bSDmitry Monakhov */ 502921f266bSDmitry Monakhov if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) 503921f266bSDmitry Monakhov down_read((&EXT4_I(inode)->i_data_sem)); 504921f266bSDmitry Monakhov if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 505921f266bSDmitry Monakhov retval = ext4_ext_map_blocks(handle, inode, map, flags & 506921f266bSDmitry Monakhov EXT4_GET_BLOCKS_KEEP_SIZE); 507921f266bSDmitry Monakhov } else { 508921f266bSDmitry Monakhov retval = ext4_ind_map_blocks(handle, inode, map, flags & 509921f266bSDmitry Monakhov EXT4_GET_BLOCKS_KEEP_SIZE); 510921f266bSDmitry Monakhov } 511921f266bSDmitry Monakhov if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) 512921f266bSDmitry Monakhov up_read((&EXT4_I(inode)->i_data_sem)); 513921f266bSDmitry Monakhov /* 514921f266bSDmitry Monakhov * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag 515921f266bSDmitry Monakhov * because it shouldn't be marked in es_map->m_flags. 516921f266bSDmitry Monakhov */ 517921f266bSDmitry Monakhov map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY); 518921f266bSDmitry Monakhov 519921f266bSDmitry Monakhov /* 520921f266bSDmitry Monakhov * We don't check m_len because extent will be collpased in status 521921f266bSDmitry Monakhov * tree. So the m_len might not equal. 522921f266bSDmitry Monakhov */ 523921f266bSDmitry Monakhov if (es_map->m_lblk != map->m_lblk || 524921f266bSDmitry Monakhov es_map->m_flags != map->m_flags || 525921f266bSDmitry Monakhov es_map->m_pblk != map->m_pblk) { 526921f266bSDmitry Monakhov printk("ES cache assertation failed for inode: %lu " 527921f266bSDmitry Monakhov "es_cached ex [%d/%d/%llu/%x] != " 528921f266bSDmitry Monakhov "found ex [%d/%d/%llu/%x] retval %d flags %x\n", 529921f266bSDmitry Monakhov inode->i_ino, es_map->m_lblk, es_map->m_len, 530921f266bSDmitry Monakhov es_map->m_pblk, es_map->m_flags, map->m_lblk, 531921f266bSDmitry Monakhov map->m_len, map->m_pblk, map->m_flags, 532921f266bSDmitry Monakhov retval, flags); 533921f266bSDmitry Monakhov } 534921f266bSDmitry Monakhov } 535921f266bSDmitry Monakhov #endif /* ES_AGGRESSIVE_TEST */ 536921f266bSDmitry Monakhov 53755138e0bSTheodore Ts'o /* 538e35fd660STheodore Ts'o * The ext4_map_blocks() function tries to look up the requested blocks, 5392b2d6d01STheodore Ts'o * and returns if the blocks are already mapped. 540f5ab0d1fSMingming Cao * 541f5ab0d1fSMingming Cao * Otherwise it takes the write lock of the i_data_sem and allocate blocks 542f5ab0d1fSMingming Cao * and store the allocated blocks in the result buffer head and mark it 543f5ab0d1fSMingming Cao * mapped. 544f5ab0d1fSMingming Cao * 545e35fd660STheodore Ts'o * If file type is extents based, it will call ext4_ext_map_blocks(), 546e35fd660STheodore Ts'o * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping 547f5ab0d1fSMingming Cao * based files 548f5ab0d1fSMingming Cao * 549f5ab0d1fSMingming Cao * On success, it returns the number of blocks being mapped or allocate. 550f5ab0d1fSMingming Cao * if create==0 and the blocks are pre-allocated and uninitialized block, 551f5ab0d1fSMingming Cao * the result buffer head is unmapped. If the create ==1, it will make sure 552f5ab0d1fSMingming Cao * the buffer head is mapped. 553f5ab0d1fSMingming Cao * 554f5ab0d1fSMingming Cao * It returns 0 if plain look up failed (blocks have not been allocated), in 555df3ab170STao Ma * that case, buffer head is unmapped 556f5ab0d1fSMingming Cao * 557f5ab0d1fSMingming Cao * It returns the error in case of allocation failure. 558f5ab0d1fSMingming Cao */ 559e35fd660STheodore Ts'o int ext4_map_blocks(handle_t *handle, struct inode *inode, 560e35fd660STheodore Ts'o struct ext4_map_blocks *map, int flags) 5610e855ac8SAneesh Kumar K.V { 562d100eef2SZheng Liu struct extent_status es; 5630e855ac8SAneesh Kumar K.V int retval; 564921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 565921f266bSDmitry Monakhov struct ext4_map_blocks orig_map; 566921f266bSDmitry Monakhov 567921f266bSDmitry Monakhov memcpy(&orig_map, map, sizeof(*map)); 568921f266bSDmitry Monakhov #endif 569f5ab0d1fSMingming Cao 570e35fd660STheodore Ts'o map->m_flags = 0; 571e35fd660STheodore Ts'o ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," 572e35fd660STheodore Ts'o "logical block %lu\n", inode->i_ino, flags, map->m_len, 573e35fd660STheodore Ts'o (unsigned long) map->m_lblk); 574d100eef2SZheng Liu 575d100eef2SZheng Liu /* Lookup extent status tree firstly */ 576d100eef2SZheng Liu if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) { 577d100eef2SZheng Liu if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { 578d100eef2SZheng Liu map->m_pblk = ext4_es_pblock(&es) + 579d100eef2SZheng Liu map->m_lblk - es.es_lblk; 580d100eef2SZheng Liu map->m_flags |= ext4_es_is_written(&es) ? 581d100eef2SZheng Liu EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN; 582d100eef2SZheng Liu retval = es.es_len - (map->m_lblk - es.es_lblk); 583d100eef2SZheng Liu if (retval > map->m_len) 584d100eef2SZheng Liu retval = map->m_len; 585d100eef2SZheng Liu map->m_len = retval; 586d100eef2SZheng Liu } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) { 587d100eef2SZheng Liu retval = 0; 588d100eef2SZheng Liu } else { 589d100eef2SZheng Liu BUG_ON(1); 590d100eef2SZheng Liu } 591921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 592921f266bSDmitry Monakhov ext4_map_blocks_es_recheck(handle, inode, map, 593921f266bSDmitry Monakhov &orig_map, flags); 594921f266bSDmitry Monakhov #endif 595d100eef2SZheng Liu goto found; 596d100eef2SZheng Liu } 597d100eef2SZheng Liu 5984df3d265SAneesh Kumar K.V /* 599b920c755STheodore Ts'o * Try to see if we can get the block without requesting a new 600b920c755STheodore Ts'o * file system block. 6014df3d265SAneesh Kumar K.V */ 602729f52c6SZheng Liu if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) 6030e855ac8SAneesh Kumar K.V down_read((&EXT4_I(inode)->i_data_sem)); 60412e9b892SDmitry Monakhov if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 605a4e5d88bSDmitry Monakhov retval = ext4_ext_map_blocks(handle, inode, map, flags & 606a4e5d88bSDmitry Monakhov EXT4_GET_BLOCKS_KEEP_SIZE); 6074df3d265SAneesh Kumar K.V } else { 608a4e5d88bSDmitry Monakhov retval = ext4_ind_map_blocks(handle, inode, map, flags & 609a4e5d88bSDmitry Monakhov EXT4_GET_BLOCKS_KEEP_SIZE); 6100e855ac8SAneesh Kumar K.V } 611f7fec032SZheng Liu if (retval > 0) { 612f7fec032SZheng Liu int ret; 613f7fec032SZheng Liu unsigned long long status; 614f7fec032SZheng Liu 615921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 616921f266bSDmitry Monakhov if (retval != map->m_len) { 617921f266bSDmitry Monakhov printk("ES len assertation failed for inode: %lu " 618921f266bSDmitry Monakhov "retval %d != map->m_len %d " 619921f266bSDmitry Monakhov "in %s (lookup)\n", inode->i_ino, retval, 620921f266bSDmitry Monakhov map->m_len, __func__); 621921f266bSDmitry Monakhov } 622921f266bSDmitry Monakhov #endif 623921f266bSDmitry Monakhov 624f7fec032SZheng Liu status = map->m_flags & EXT4_MAP_UNWRITTEN ? 625f7fec032SZheng Liu EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 626f7fec032SZheng Liu if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 627f7fec032SZheng Liu ext4_find_delalloc_range(inode, map->m_lblk, 628f7fec032SZheng Liu map->m_lblk + map->m_len - 1)) 629f7fec032SZheng Liu status |= EXTENT_STATUS_DELAYED; 630f7fec032SZheng Liu ret = ext4_es_insert_extent(inode, map->m_lblk, 631f7fec032SZheng Liu map->m_len, map->m_pblk, status); 632f7fec032SZheng Liu if (ret < 0) 633f7fec032SZheng Liu retval = ret; 634f7fec032SZheng Liu } 635729f52c6SZheng Liu if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) 6364df3d265SAneesh Kumar K.V up_read((&EXT4_I(inode)->i_data_sem)); 637f5ab0d1fSMingming Cao 638d100eef2SZheng Liu found: 639e35fd660STheodore Ts'o if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { 640f7fec032SZheng Liu int ret = check_block_validity(inode, map); 6416fd058f7STheodore Ts'o if (ret != 0) 6426fd058f7STheodore Ts'o return ret; 6436fd058f7STheodore Ts'o } 6446fd058f7STheodore Ts'o 645f5ab0d1fSMingming Cao /* If it is only a block(s) look up */ 646c2177057STheodore Ts'o if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) 6474df3d265SAneesh Kumar K.V return retval; 6484df3d265SAneesh Kumar K.V 6494df3d265SAneesh Kumar K.V /* 650f5ab0d1fSMingming Cao * Returns if the blocks have already allocated 651f5ab0d1fSMingming Cao * 652f5ab0d1fSMingming Cao * Note that if blocks have been preallocated 653df3ab170STao Ma * ext4_ext_get_block() returns the create = 0 654f5ab0d1fSMingming Cao * with buffer head unmapped. 655f5ab0d1fSMingming Cao */ 656e35fd660STheodore Ts'o if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) 657f5ab0d1fSMingming Cao return retval; 658f5ab0d1fSMingming Cao 659f5ab0d1fSMingming Cao /* 660a25a4e1aSZheng Liu * Here we clear m_flags because after allocating an new extent, 661a25a4e1aSZheng Liu * it will be set again. 6622a8964d6SAneesh Kumar K.V */ 663a25a4e1aSZheng Liu map->m_flags &= ~EXT4_MAP_FLAGS; 6642a8964d6SAneesh Kumar K.V 6652a8964d6SAneesh Kumar K.V /* 666f5ab0d1fSMingming Cao * New blocks allocate and/or writing to uninitialized extent 667f5ab0d1fSMingming Cao * will possibly result in updating i_data, so we take 668f5ab0d1fSMingming Cao * the write lock of i_data_sem, and call get_blocks() 669f5ab0d1fSMingming Cao * with create == 1 flag. 6704df3d265SAneesh Kumar K.V */ 6714df3d265SAneesh Kumar K.V down_write((&EXT4_I(inode)->i_data_sem)); 672d2a17637SMingming Cao 673d2a17637SMingming Cao /* 674d2a17637SMingming Cao * if the caller is from delayed allocation writeout path 675d2a17637SMingming Cao * we have already reserved fs blocks for allocation 676d2a17637SMingming Cao * let the underlying get_block() function know to 677d2a17637SMingming Cao * avoid double accounting 678d2a17637SMingming Cao */ 679c2177057STheodore Ts'o if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 680f2321097STheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED); 6814df3d265SAneesh Kumar K.V /* 6824df3d265SAneesh Kumar K.V * We need to check for EXT4 here because migrate 6834df3d265SAneesh Kumar K.V * could have changed the inode type in between 6844df3d265SAneesh Kumar K.V */ 68512e9b892SDmitry Monakhov if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 686e35fd660STheodore Ts'o retval = ext4_ext_map_blocks(handle, inode, map, flags); 6870e855ac8SAneesh Kumar K.V } else { 688e35fd660STheodore Ts'o retval = ext4_ind_map_blocks(handle, inode, map, flags); 689267e4db9SAneesh Kumar K.V 690e35fd660STheodore Ts'o if (retval > 0 && map->m_flags & EXT4_MAP_NEW) { 691267e4db9SAneesh Kumar K.V /* 692267e4db9SAneesh Kumar K.V * We allocated new blocks which will result in 693267e4db9SAneesh Kumar K.V * i_data's format changing. Force the migrate 694267e4db9SAneesh Kumar K.V * to fail by clearing migrate flags 695267e4db9SAneesh Kumar K.V */ 69619f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); 697267e4db9SAneesh Kumar K.V } 6982ac3b6e0STheodore Ts'o 699d2a17637SMingming Cao /* 7002ac3b6e0STheodore Ts'o * Update reserved blocks/metadata blocks after successful 7015f634d06SAneesh Kumar K.V * block allocation which had been deferred till now. We don't 7025f634d06SAneesh Kumar K.V * support fallocate for non extent files. So we can update 7035f634d06SAneesh Kumar K.V * reserve space here. 704d2a17637SMingming Cao */ 7055f634d06SAneesh Kumar K.V if ((retval > 0) && 7061296cc85SAneesh Kumar K.V (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) 7075f634d06SAneesh Kumar K.V ext4_da_update_reserve_space(inode, retval, 1); 7085f634d06SAneesh Kumar K.V } 709f7fec032SZheng Liu if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) 710f2321097STheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED); 711d2a17637SMingming Cao 712f7fec032SZheng Liu if (retval > 0) { 71351865fdaSZheng Liu int ret; 714f7fec032SZheng Liu unsigned long long status; 715f7fec032SZheng Liu 716921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 717921f266bSDmitry Monakhov if (retval != map->m_len) { 718921f266bSDmitry Monakhov printk("ES len assertation failed for inode: %lu " 719921f266bSDmitry Monakhov "retval %d != map->m_len %d " 720921f266bSDmitry Monakhov "in %s (allocation)\n", inode->i_ino, retval, 721921f266bSDmitry Monakhov map->m_len, __func__); 722921f266bSDmitry Monakhov } 723921f266bSDmitry Monakhov #endif 724921f266bSDmitry Monakhov 725adb23551SZheng Liu /* 726adb23551SZheng Liu * If the extent has been zeroed out, we don't need to update 727adb23551SZheng Liu * extent status tree. 728adb23551SZheng Liu */ 729adb23551SZheng Liu if ((flags & EXT4_GET_BLOCKS_PRE_IO) && 730adb23551SZheng Liu ext4_es_lookup_extent(inode, map->m_lblk, &es)) { 731adb23551SZheng Liu if (ext4_es_is_written(&es)) 732adb23551SZheng Liu goto has_zeroout; 733adb23551SZheng Liu } 734f7fec032SZheng Liu status = map->m_flags & EXT4_MAP_UNWRITTEN ? 735f7fec032SZheng Liu EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 736f7fec032SZheng Liu if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && 737f7fec032SZheng Liu ext4_find_delalloc_range(inode, map->m_lblk, 738f7fec032SZheng Liu map->m_lblk + map->m_len - 1)) 739f7fec032SZheng Liu status |= EXTENT_STATUS_DELAYED; 740f7fec032SZheng Liu ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 741f7fec032SZheng Liu map->m_pblk, status); 74251865fdaSZheng Liu if (ret < 0) 74351865fdaSZheng Liu retval = ret; 74451865fdaSZheng Liu } 7455356f261SAditya Kali 746adb23551SZheng Liu has_zeroout: 7470e855ac8SAneesh Kumar K.V up_write((&EXT4_I(inode)->i_data_sem)); 748e35fd660STheodore Ts'o if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { 749e29136f8STheodore Ts'o int ret = check_block_validity(inode, map); 7506fd058f7STheodore Ts'o if (ret != 0) 7516fd058f7STheodore Ts'o return ret; 7526fd058f7STheodore Ts'o } 7530e855ac8SAneesh Kumar K.V return retval; 7540e855ac8SAneesh Kumar K.V } 7550e855ac8SAneesh Kumar K.V 756f3bd1f3fSMingming Cao /* Maximum number of blocks we map for direct IO at once. */ 757f3bd1f3fSMingming Cao #define DIO_MAX_BLOCKS 4096 758f3bd1f3fSMingming Cao 7592ed88685STheodore Ts'o static int _ext4_get_block(struct inode *inode, sector_t iblock, 7602ed88685STheodore Ts'o struct buffer_head *bh, int flags) 761ac27a0ecSDave Kleikamp { 7623e4fdaf8SDmitriy Monakhov handle_t *handle = ext4_journal_current_handle(); 7632ed88685STheodore Ts'o struct ext4_map_blocks map; 7647fb5409dSJan Kara int ret = 0, started = 0; 765f3bd1f3fSMingming Cao int dio_credits; 766ac27a0ecSDave Kleikamp 76746c7f254STao Ma if (ext4_has_inline_data(inode)) 76846c7f254STao Ma return -ERANGE; 76946c7f254STao Ma 7702ed88685STheodore Ts'o map.m_lblk = iblock; 7712ed88685STheodore Ts'o map.m_len = bh->b_size >> inode->i_blkbits; 7722ed88685STheodore Ts'o 7738b0f165fSAnatol Pomozov if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) { 7747fb5409dSJan Kara /* Direct IO write... */ 7752ed88685STheodore Ts'o if (map.m_len > DIO_MAX_BLOCKS) 7762ed88685STheodore Ts'o map.m_len = DIO_MAX_BLOCKS; 7772ed88685STheodore Ts'o dio_credits = ext4_chunk_trans_blocks(inode, map.m_len); 7789924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, 7799924a92aSTheodore Ts'o dio_credits); 7807fb5409dSJan Kara if (IS_ERR(handle)) { 781ac27a0ecSDave Kleikamp ret = PTR_ERR(handle); 7822ed88685STheodore Ts'o return ret; 7837fb5409dSJan Kara } 7847fb5409dSJan Kara started = 1; 785ac27a0ecSDave Kleikamp } 786ac27a0ecSDave Kleikamp 7872ed88685STheodore Ts'o ret = ext4_map_blocks(handle, inode, &map, flags); 788ac27a0ecSDave Kleikamp if (ret > 0) { 7892ed88685STheodore Ts'o map_bh(bh, inode->i_sb, map.m_pblk); 7902ed88685STheodore Ts'o bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 7912ed88685STheodore Ts'o bh->b_size = inode->i_sb->s_blocksize * map.m_len; 792ac27a0ecSDave Kleikamp ret = 0; 793ac27a0ecSDave Kleikamp } 7947fb5409dSJan Kara if (started) 7957fb5409dSJan Kara ext4_journal_stop(handle); 796ac27a0ecSDave Kleikamp return ret; 797ac27a0ecSDave Kleikamp } 798ac27a0ecSDave Kleikamp 7992ed88685STheodore Ts'o int ext4_get_block(struct inode *inode, sector_t iblock, 8002ed88685STheodore Ts'o struct buffer_head *bh, int create) 8012ed88685STheodore Ts'o { 8022ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh, 8032ed88685STheodore Ts'o create ? EXT4_GET_BLOCKS_CREATE : 0); 8042ed88685STheodore Ts'o } 8052ed88685STheodore Ts'o 806ac27a0ecSDave Kleikamp /* 807ac27a0ecSDave Kleikamp * `handle' can be NULL if create is zero 808ac27a0ecSDave Kleikamp */ 809617ba13bSMingming Cao struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, 810725d26d3SAneesh Kumar K.V ext4_lblk_t block, int create, int *errp) 811ac27a0ecSDave Kleikamp { 8122ed88685STheodore Ts'o struct ext4_map_blocks map; 8132ed88685STheodore Ts'o struct buffer_head *bh; 814ac27a0ecSDave Kleikamp int fatal = 0, err; 815ac27a0ecSDave Kleikamp 816ac27a0ecSDave Kleikamp J_ASSERT(handle != NULL || create == 0); 817ac27a0ecSDave Kleikamp 8182ed88685STheodore Ts'o map.m_lblk = block; 8192ed88685STheodore Ts'o map.m_len = 1; 8202ed88685STheodore Ts'o err = ext4_map_blocks(handle, inode, &map, 8212ed88685STheodore Ts'o create ? EXT4_GET_BLOCKS_CREATE : 0); 8222ed88685STheodore Ts'o 82390b0a973SCarlos Maiolino /* ensure we send some value back into *errp */ 82490b0a973SCarlos Maiolino *errp = 0; 82590b0a973SCarlos Maiolino 8260f70b406STheodore Ts'o if (create && err == 0) 8270f70b406STheodore Ts'o err = -ENOSPC; /* should never happen */ 8282ed88685STheodore Ts'o if (err < 0) 829ac27a0ecSDave Kleikamp *errp = err; 8302ed88685STheodore Ts'o if (err <= 0) 8312ed88685STheodore Ts'o return NULL; 8322ed88685STheodore Ts'o 8332ed88685STheodore Ts'o bh = sb_getblk(inode->i_sb, map.m_pblk); 834aebf0243SWang Shilong if (unlikely(!bh)) { 835860d21e2STheodore Ts'o *errp = -ENOMEM; 8362ed88685STheodore Ts'o return NULL; 837ac27a0ecSDave Kleikamp } 8382ed88685STheodore Ts'o if (map.m_flags & EXT4_MAP_NEW) { 839ac27a0ecSDave Kleikamp J_ASSERT(create != 0); 840ac39849dSAneesh Kumar K.V J_ASSERT(handle != NULL); 841ac27a0ecSDave Kleikamp 842ac27a0ecSDave Kleikamp /* 843ac27a0ecSDave Kleikamp * Now that we do not always journal data, we should 844ac27a0ecSDave Kleikamp * keep in mind whether this should always journal the 845ac27a0ecSDave Kleikamp * new buffer as metadata. For now, regular file 846617ba13bSMingming Cao * writes use ext4_get_block instead, so it's not a 847ac27a0ecSDave Kleikamp * problem. 848ac27a0ecSDave Kleikamp */ 849ac27a0ecSDave Kleikamp lock_buffer(bh); 850ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "call get_create_access"); 851617ba13bSMingming Cao fatal = ext4_journal_get_create_access(handle, bh); 852ac27a0ecSDave Kleikamp if (!fatal && !buffer_uptodate(bh)) { 853ac27a0ecSDave Kleikamp memset(bh->b_data, 0, inode->i_sb->s_blocksize); 854ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 855ac27a0ecSDave Kleikamp } 856ac27a0ecSDave Kleikamp unlock_buffer(bh); 8570390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 8580390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 859ac27a0ecSDave Kleikamp if (!fatal) 860ac27a0ecSDave Kleikamp fatal = err; 861ac27a0ecSDave Kleikamp } else { 862ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "not a new buffer"); 863ac27a0ecSDave Kleikamp } 864ac27a0ecSDave Kleikamp if (fatal) { 865ac27a0ecSDave Kleikamp *errp = fatal; 866ac27a0ecSDave Kleikamp brelse(bh); 867ac27a0ecSDave Kleikamp bh = NULL; 868ac27a0ecSDave Kleikamp } 869ac27a0ecSDave Kleikamp return bh; 870ac27a0ecSDave Kleikamp } 871ac27a0ecSDave Kleikamp 872617ba13bSMingming Cao struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, 873725d26d3SAneesh Kumar K.V ext4_lblk_t block, int create, int *err) 874ac27a0ecSDave Kleikamp { 875ac27a0ecSDave Kleikamp struct buffer_head *bh; 876ac27a0ecSDave Kleikamp 877617ba13bSMingming Cao bh = ext4_getblk(handle, inode, block, create, err); 878ac27a0ecSDave Kleikamp if (!bh) 879ac27a0ecSDave Kleikamp return bh; 880ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) 881ac27a0ecSDave Kleikamp return bh; 88265299a3bSChristoph Hellwig ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); 883ac27a0ecSDave Kleikamp wait_on_buffer(bh); 884ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) 885ac27a0ecSDave Kleikamp return bh; 886ac27a0ecSDave Kleikamp put_bh(bh); 887ac27a0ecSDave Kleikamp *err = -EIO; 888ac27a0ecSDave Kleikamp return NULL; 889ac27a0ecSDave Kleikamp } 890ac27a0ecSDave Kleikamp 891f19d5870STao Ma int ext4_walk_page_buffers(handle_t *handle, 892ac27a0ecSDave Kleikamp struct buffer_head *head, 893ac27a0ecSDave Kleikamp unsigned from, 894ac27a0ecSDave Kleikamp unsigned to, 895ac27a0ecSDave Kleikamp int *partial, 896ac27a0ecSDave Kleikamp int (*fn)(handle_t *handle, 897ac27a0ecSDave Kleikamp struct buffer_head *bh)) 898ac27a0ecSDave Kleikamp { 899ac27a0ecSDave Kleikamp struct buffer_head *bh; 900ac27a0ecSDave Kleikamp unsigned block_start, block_end; 901ac27a0ecSDave Kleikamp unsigned blocksize = head->b_size; 902ac27a0ecSDave Kleikamp int err, ret = 0; 903ac27a0ecSDave Kleikamp struct buffer_head *next; 904ac27a0ecSDave Kleikamp 905ac27a0ecSDave Kleikamp for (bh = head, block_start = 0; 906ac27a0ecSDave Kleikamp ret == 0 && (bh != head || !block_start); 907de9a55b8STheodore Ts'o block_start = block_end, bh = next) { 908ac27a0ecSDave Kleikamp next = bh->b_this_page; 909ac27a0ecSDave Kleikamp block_end = block_start + blocksize; 910ac27a0ecSDave Kleikamp if (block_end <= from || block_start >= to) { 911ac27a0ecSDave Kleikamp if (partial && !buffer_uptodate(bh)) 912ac27a0ecSDave Kleikamp *partial = 1; 913ac27a0ecSDave Kleikamp continue; 914ac27a0ecSDave Kleikamp } 915ac27a0ecSDave Kleikamp err = (*fn)(handle, bh); 916ac27a0ecSDave Kleikamp if (!ret) 917ac27a0ecSDave Kleikamp ret = err; 918ac27a0ecSDave Kleikamp } 919ac27a0ecSDave Kleikamp return ret; 920ac27a0ecSDave Kleikamp } 921ac27a0ecSDave Kleikamp 922ac27a0ecSDave Kleikamp /* 923ac27a0ecSDave Kleikamp * To preserve ordering, it is essential that the hole instantiation and 924ac27a0ecSDave Kleikamp * the data write be encapsulated in a single transaction. We cannot 925617ba13bSMingming Cao * close off a transaction and start a new one between the ext4_get_block() 926dab291afSMingming Cao * and the commit_write(). So doing the jbd2_journal_start at the start of 927ac27a0ecSDave Kleikamp * prepare_write() is the right place. 928ac27a0ecSDave Kleikamp * 92936ade451SJan Kara * Also, this function can nest inside ext4_writepage(). In that case, we 93036ade451SJan Kara * *know* that ext4_writepage() has generated enough buffer credits to do the 93136ade451SJan Kara * whole page. So we won't block on the journal in that case, which is good, 93236ade451SJan Kara * because the caller may be PF_MEMALLOC. 933ac27a0ecSDave Kleikamp * 934617ba13bSMingming Cao * By accident, ext4 can be reentered when a transaction is open via 935ac27a0ecSDave Kleikamp * quota file writes. If we were to commit the transaction while thus 936ac27a0ecSDave Kleikamp * reentered, there can be a deadlock - we would be holding a quota 937ac27a0ecSDave Kleikamp * lock, and the commit would never complete if another thread had a 938ac27a0ecSDave Kleikamp * transaction open and was blocking on the quota lock - a ranking 939ac27a0ecSDave Kleikamp * violation. 940ac27a0ecSDave Kleikamp * 941dab291afSMingming Cao * So what we do is to rely on the fact that jbd2_journal_stop/journal_start 942ac27a0ecSDave Kleikamp * will _not_ run commit under these circumstances because handle->h_ref 943ac27a0ecSDave Kleikamp * is elevated. We'll still have enough credits for the tiny quotafile 944ac27a0ecSDave Kleikamp * write. 945ac27a0ecSDave Kleikamp */ 946f19d5870STao Ma int do_journal_get_write_access(handle_t *handle, 947ac27a0ecSDave Kleikamp struct buffer_head *bh) 948ac27a0ecSDave Kleikamp { 94956d35a4cSJan Kara int dirty = buffer_dirty(bh); 95056d35a4cSJan Kara int ret; 95156d35a4cSJan Kara 952ac27a0ecSDave Kleikamp if (!buffer_mapped(bh) || buffer_freed(bh)) 953ac27a0ecSDave Kleikamp return 0; 95456d35a4cSJan Kara /* 955ebdec241SChristoph Hellwig * __block_write_begin() could have dirtied some buffers. Clean 95656d35a4cSJan Kara * the dirty bit as jbd2_journal_get_write_access() could complain 95756d35a4cSJan Kara * otherwise about fs integrity issues. Setting of the dirty bit 958ebdec241SChristoph Hellwig * by __block_write_begin() isn't a real problem here as we clear 95956d35a4cSJan Kara * the bit before releasing a page lock and thus writeback cannot 96056d35a4cSJan Kara * ever write the buffer. 96156d35a4cSJan Kara */ 96256d35a4cSJan Kara if (dirty) 96356d35a4cSJan Kara clear_buffer_dirty(bh); 96456d35a4cSJan Kara ret = ext4_journal_get_write_access(handle, bh); 96556d35a4cSJan Kara if (!ret && dirty) 96656d35a4cSJan Kara ret = ext4_handle_dirty_metadata(handle, NULL, bh); 96756d35a4cSJan Kara return ret; 968ac27a0ecSDave Kleikamp } 969ac27a0ecSDave Kleikamp 9708b0f165fSAnatol Pomozov static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock, 9718b0f165fSAnatol Pomozov struct buffer_head *bh_result, int create); 972bfc1af65SNick Piggin static int ext4_write_begin(struct file *file, struct address_space *mapping, 973bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned flags, 974bfc1af65SNick Piggin struct page **pagep, void **fsdata) 975ac27a0ecSDave Kleikamp { 976bfc1af65SNick Piggin struct inode *inode = mapping->host; 9771938a150SAneesh Kumar K.V int ret, needed_blocks; 978ac27a0ecSDave Kleikamp handle_t *handle; 979ac27a0ecSDave Kleikamp int retries = 0; 980bfc1af65SNick Piggin struct page *page; 981bfc1af65SNick Piggin pgoff_t index; 982bfc1af65SNick Piggin unsigned from, to; 983bfc1af65SNick Piggin 9849bffad1eSTheodore Ts'o trace_ext4_write_begin(inode, pos, len, flags); 9851938a150SAneesh Kumar K.V /* 9861938a150SAneesh Kumar K.V * Reserve one block more for addition to orphan list in case 9871938a150SAneesh Kumar K.V * we allocate blocks but write fails for some reason 9881938a150SAneesh Kumar K.V */ 9891938a150SAneesh Kumar K.V needed_blocks = ext4_writepage_trans_blocks(inode) + 1; 990bfc1af65SNick Piggin index = pos >> PAGE_CACHE_SHIFT; 991bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 992bfc1af65SNick Piggin to = from + len; 993ac27a0ecSDave Kleikamp 994f19d5870STao Ma if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 995f19d5870STao Ma ret = ext4_try_to_write_inline_data(mapping, inode, pos, len, 996f19d5870STao Ma flags, pagep); 997f19d5870STao Ma if (ret < 0) 99847564bfbSTheodore Ts'o return ret; 99947564bfbSTheodore Ts'o if (ret == 1) 100047564bfbSTheodore Ts'o return 0; 1001f19d5870STao Ma } 1002f19d5870STao Ma 100347564bfbSTheodore Ts'o /* 100447564bfbSTheodore Ts'o * grab_cache_page_write_begin() can take a long time if the 100547564bfbSTheodore Ts'o * system is thrashing due to memory pressure, or if the page 100647564bfbSTheodore Ts'o * is being written back. So grab it first before we start 100747564bfbSTheodore Ts'o * the transaction handle. This also allows us to allocate 100847564bfbSTheodore Ts'o * the page (if needed) without using GFP_NOFS. 100947564bfbSTheodore Ts'o */ 101047564bfbSTheodore Ts'o retry_grab: 101154566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 101247564bfbSTheodore Ts'o if (!page) 101347564bfbSTheodore Ts'o return -ENOMEM; 101447564bfbSTheodore Ts'o unlock_page(page); 101547564bfbSTheodore Ts'o 101647564bfbSTheodore Ts'o retry_journal: 10179924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks); 1018ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 101947564bfbSTheodore Ts'o page_cache_release(page); 102047564bfbSTheodore Ts'o return PTR_ERR(handle); 1021cf108bcaSJan Kara } 1022f19d5870STao Ma 102347564bfbSTheodore Ts'o lock_page(page); 102447564bfbSTheodore Ts'o if (page->mapping != mapping) { 102547564bfbSTheodore Ts'o /* The page got truncated from under us */ 102647564bfbSTheodore Ts'o unlock_page(page); 102747564bfbSTheodore Ts'o page_cache_release(page); 1028cf108bcaSJan Kara ext4_journal_stop(handle); 102947564bfbSTheodore Ts'o goto retry_grab; 1030cf108bcaSJan Kara } 103147564bfbSTheodore Ts'o wait_on_page_writeback(page); 1032cf108bcaSJan Kara 1033744692dcSJiaying Zhang if (ext4_should_dioread_nolock(inode)) 10346e1db88dSChristoph Hellwig ret = __block_write_begin(page, pos, len, ext4_get_block_write); 1035744692dcSJiaying Zhang else 10366e1db88dSChristoph Hellwig ret = __block_write_begin(page, pos, len, ext4_get_block); 1037bfc1af65SNick Piggin 1038bfc1af65SNick Piggin if (!ret && ext4_should_journal_data(inode)) { 1039f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_buffers(page), 1040f19d5870STao Ma from, to, NULL, 1041f19d5870STao Ma do_journal_get_write_access); 1042b46be050SAndrey Savochkin } 1043bfc1af65SNick Piggin 1044bfc1af65SNick Piggin if (ret) { 1045bfc1af65SNick Piggin unlock_page(page); 1046ae4d5372SAneesh Kumar K.V /* 10476e1db88dSChristoph Hellwig * __block_write_begin may have instantiated a few blocks 1048ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need 1049ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex. 10501938a150SAneesh Kumar K.V * 10511938a150SAneesh Kumar K.V * Add inode to orphan list in case we crash before 10521938a150SAneesh Kumar K.V * truncate finishes 1053ae4d5372SAneesh Kumar K.V */ 1054ffacfa7aSJan Kara if (pos + len > inode->i_size && ext4_can_truncate(inode)) 10551938a150SAneesh Kumar K.V ext4_orphan_add(handle, inode); 10561938a150SAneesh Kumar K.V 10571938a150SAneesh Kumar K.V ext4_journal_stop(handle); 10581938a150SAneesh Kumar K.V if (pos + len > inode->i_size) { 1059b9a4207dSJan Kara ext4_truncate_failed_write(inode); 10601938a150SAneesh Kumar K.V /* 1061ffacfa7aSJan Kara * If truncate failed early the inode might 10621938a150SAneesh Kumar K.V * still be on the orphan list; we need to 10631938a150SAneesh Kumar K.V * make sure the inode is removed from the 10641938a150SAneesh Kumar K.V * orphan list in that case. 10651938a150SAneesh Kumar K.V */ 10661938a150SAneesh Kumar K.V if (inode->i_nlink) 10671938a150SAneesh Kumar K.V ext4_orphan_del(NULL, inode); 10681938a150SAneesh Kumar K.V } 1069bfc1af65SNick Piggin 107047564bfbSTheodore Ts'o if (ret == -ENOSPC && 107147564bfbSTheodore Ts'o ext4_should_retry_alloc(inode->i_sb, &retries)) 107247564bfbSTheodore Ts'o goto retry_journal; 107347564bfbSTheodore Ts'o page_cache_release(page); 107447564bfbSTheodore Ts'o return ret; 107547564bfbSTheodore Ts'o } 107647564bfbSTheodore Ts'o *pagep = page; 1077ac27a0ecSDave Kleikamp return ret; 1078ac27a0ecSDave Kleikamp } 1079ac27a0ecSDave Kleikamp 1080bfc1af65SNick Piggin /* For write_end() in data=journal mode */ 1081bfc1af65SNick Piggin static int write_end_fn(handle_t *handle, struct buffer_head *bh) 1082ac27a0ecSDave Kleikamp { 1083ac27a0ecSDave Kleikamp if (!buffer_mapped(bh) || buffer_freed(bh)) 1084ac27a0ecSDave Kleikamp return 0; 1085ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 10860390131bSFrank Mayhar return ext4_handle_dirty_metadata(handle, NULL, bh); 1087ac27a0ecSDave Kleikamp } 1088ac27a0ecSDave Kleikamp 1089eed4333fSZheng Liu /* 1090eed4333fSZheng Liu * We need to pick up the new inode size which generic_commit_write gave us 1091eed4333fSZheng Liu * `file' can be NULL - eg, when called from page_symlink(). 1092eed4333fSZheng Liu * 1093eed4333fSZheng Liu * ext4 never places buffers on inode->i_mapping->private_list. metadata 1094eed4333fSZheng Liu * buffers are managed internally. 1095eed4333fSZheng Liu */ 1096eed4333fSZheng Liu static int ext4_write_end(struct file *file, 1097f8514083SAneesh Kumar K.V struct address_space *mapping, 1098f8514083SAneesh Kumar K.V loff_t pos, unsigned len, unsigned copied, 1099f8514083SAneesh Kumar K.V struct page *page, void *fsdata) 1100f8514083SAneesh Kumar K.V { 1101f8514083SAneesh Kumar K.V handle_t *handle = ext4_journal_current_handle(); 1102eed4333fSZheng Liu struct inode *inode = mapping->host; 1103eed4333fSZheng Liu int ret = 0, ret2; 1104eed4333fSZheng Liu int i_size_changed = 0; 1105eed4333fSZheng Liu 1106eed4333fSZheng Liu trace_ext4_write_end(inode, pos, len, copied); 1107eed4333fSZheng Liu if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) { 1108eed4333fSZheng Liu ret = ext4_jbd2_file_inode(handle, inode); 1109eed4333fSZheng Liu if (ret) { 1110eed4333fSZheng Liu unlock_page(page); 1111eed4333fSZheng Liu page_cache_release(page); 1112eed4333fSZheng Liu goto errout; 1113eed4333fSZheng Liu } 1114eed4333fSZheng Liu } 1115f8514083SAneesh Kumar K.V 1116f19d5870STao Ma if (ext4_has_inline_data(inode)) 1117f19d5870STao Ma copied = ext4_write_inline_data_end(inode, pos, len, 1118f19d5870STao Ma copied, page); 1119f19d5870STao Ma else 1120f19d5870STao Ma copied = block_write_end(file, mapping, pos, 1121f19d5870STao Ma len, copied, page, fsdata); 1122f8514083SAneesh Kumar K.V 1123f8514083SAneesh Kumar K.V /* 1124f8514083SAneesh Kumar K.V * No need to use i_size_read() here, the i_size 1125eed4333fSZheng Liu * cannot change under us because we hole i_mutex. 1126f8514083SAneesh Kumar K.V * 1127f8514083SAneesh Kumar K.V * But it's important to update i_size while still holding page lock: 1128f8514083SAneesh Kumar K.V * page writeout could otherwise come in and zero beyond i_size. 1129f8514083SAneesh Kumar K.V */ 1130f8514083SAneesh Kumar K.V if (pos + copied > inode->i_size) { 1131f8514083SAneesh Kumar K.V i_size_write(inode, pos + copied); 1132f8514083SAneesh Kumar K.V i_size_changed = 1; 1133f8514083SAneesh Kumar K.V } 1134f8514083SAneesh Kumar K.V 1135f8514083SAneesh Kumar K.V if (pos + copied > EXT4_I(inode)->i_disksize) { 1136f8514083SAneesh Kumar K.V /* We need to mark inode dirty even if 1137f8514083SAneesh Kumar K.V * new_i_size is less that inode->i_size 1138eed4333fSZheng Liu * but greater than i_disksize. (hint delalloc) 1139f8514083SAneesh Kumar K.V */ 1140f8514083SAneesh Kumar K.V ext4_update_i_disksize(inode, (pos + copied)); 1141f8514083SAneesh Kumar K.V i_size_changed = 1; 1142f8514083SAneesh Kumar K.V } 1143f8514083SAneesh Kumar K.V unlock_page(page); 1144f8514083SAneesh Kumar K.V page_cache_release(page); 1145f8514083SAneesh Kumar K.V 1146f8514083SAneesh Kumar K.V /* 1147f8514083SAneesh Kumar K.V * Don't mark the inode dirty under page lock. First, it unnecessarily 1148f8514083SAneesh Kumar K.V * makes the holding time of page lock longer. Second, it forces lock 1149f8514083SAneesh Kumar K.V * ordering of page lock and transaction start for journaling 1150f8514083SAneesh Kumar K.V * filesystems. 1151f8514083SAneesh Kumar K.V */ 1152f8514083SAneesh Kumar K.V if (i_size_changed) 1153f8514083SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 1154f8514083SAneesh Kumar K.V 115574d553aaSTheodore Ts'o if (copied < 0) 115674d553aaSTheodore Ts'o ret = copied; 1157ffacfa7aSJan Kara if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1158f8514083SAneesh Kumar K.V /* if we have allocated more blocks and copied 1159f8514083SAneesh Kumar K.V * less. We will have blocks allocated outside 1160f8514083SAneesh Kumar K.V * inode->i_size. So truncate them 1161f8514083SAneesh Kumar K.V */ 1162f8514083SAneesh Kumar K.V ext4_orphan_add(handle, inode); 116374d553aaSTheodore Ts'o errout: 1164617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1165ac27a0ecSDave Kleikamp if (!ret) 1166ac27a0ecSDave Kleikamp ret = ret2; 1167bfc1af65SNick Piggin 1168f8514083SAneesh Kumar K.V if (pos + len > inode->i_size) { 1169b9a4207dSJan Kara ext4_truncate_failed_write(inode); 1170f8514083SAneesh Kumar K.V /* 1171ffacfa7aSJan Kara * If truncate failed early the inode might still be 1172f8514083SAneesh Kumar K.V * on the orphan list; we need to make sure the inode 1173f8514083SAneesh Kumar K.V * is removed from the orphan list in that case. 1174f8514083SAneesh Kumar K.V */ 1175f8514083SAneesh Kumar K.V if (inode->i_nlink) 1176f8514083SAneesh Kumar K.V ext4_orphan_del(NULL, inode); 1177f8514083SAneesh Kumar K.V } 1178f8514083SAneesh Kumar K.V 1179bfc1af65SNick Piggin return ret ? ret : copied; 1180ac27a0ecSDave Kleikamp } 1181ac27a0ecSDave Kleikamp 1182bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file, 1183bfc1af65SNick Piggin struct address_space *mapping, 1184bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1185bfc1af65SNick Piggin struct page *page, void *fsdata) 1186ac27a0ecSDave Kleikamp { 1187617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1188bfc1af65SNick Piggin struct inode *inode = mapping->host; 1189ac27a0ecSDave Kleikamp int ret = 0, ret2; 1190ac27a0ecSDave Kleikamp int partial = 0; 1191bfc1af65SNick Piggin unsigned from, to; 1192cf17fea6SAneesh Kumar K.V loff_t new_i_size; 1193ac27a0ecSDave Kleikamp 11949bffad1eSTheodore Ts'o trace_ext4_journalled_write_end(inode, pos, len, copied); 1195bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1196bfc1af65SNick Piggin to = from + len; 1197bfc1af65SNick Piggin 1198441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1199441c8508SCurt Wohlgemuth 12003fdcfb66STao Ma if (ext4_has_inline_data(inode)) 12013fdcfb66STao Ma copied = ext4_write_inline_data_end(inode, pos, len, 12023fdcfb66STao Ma copied, page); 12033fdcfb66STao Ma else { 1204bfc1af65SNick Piggin if (copied < len) { 1205bfc1af65SNick Piggin if (!PageUptodate(page)) 1206bfc1af65SNick Piggin copied = 0; 1207bfc1af65SNick Piggin page_zero_new_buffers(page, from+copied, to); 1208bfc1af65SNick Piggin } 1209ac27a0ecSDave Kleikamp 1210f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_buffers(page), from, 1211bfc1af65SNick Piggin to, &partial, write_end_fn); 1212ac27a0ecSDave Kleikamp if (!partial) 1213ac27a0ecSDave Kleikamp SetPageUptodate(page); 12143fdcfb66STao Ma } 1215cf17fea6SAneesh Kumar K.V new_i_size = pos + copied; 1216cf17fea6SAneesh Kumar K.V if (new_i_size > inode->i_size) 1217bfc1af65SNick Piggin i_size_write(inode, pos+copied); 121819f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 12192d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1220cf17fea6SAneesh Kumar K.V if (new_i_size > EXT4_I(inode)->i_disksize) { 1221cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_i_size); 1222617ba13bSMingming Cao ret2 = ext4_mark_inode_dirty(handle, inode); 1223ac27a0ecSDave Kleikamp if (!ret) 1224ac27a0ecSDave Kleikamp ret = ret2; 1225ac27a0ecSDave Kleikamp } 1226bfc1af65SNick Piggin 1227cf108bcaSJan Kara unlock_page(page); 1228f8514083SAneesh Kumar K.V page_cache_release(page); 1229ffacfa7aSJan Kara if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1230f8514083SAneesh Kumar K.V /* if we have allocated more blocks and copied 1231f8514083SAneesh Kumar K.V * less. We will have blocks allocated outside 1232f8514083SAneesh Kumar K.V * inode->i_size. So truncate them 1233f8514083SAneesh Kumar K.V */ 1234f8514083SAneesh Kumar K.V ext4_orphan_add(handle, inode); 1235f8514083SAneesh Kumar K.V 1236617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1237ac27a0ecSDave Kleikamp if (!ret) 1238ac27a0ecSDave Kleikamp ret = ret2; 1239f8514083SAneesh Kumar K.V if (pos + len > inode->i_size) { 1240b9a4207dSJan Kara ext4_truncate_failed_write(inode); 1241f8514083SAneesh Kumar K.V /* 1242ffacfa7aSJan Kara * If truncate failed early the inode might still be 1243f8514083SAneesh Kumar K.V * on the orphan list; we need to make sure the inode 1244f8514083SAneesh Kumar K.V * is removed from the orphan list in that case. 1245f8514083SAneesh Kumar K.V */ 1246f8514083SAneesh Kumar K.V if (inode->i_nlink) 1247f8514083SAneesh Kumar K.V ext4_orphan_del(NULL, inode); 1248f8514083SAneesh Kumar K.V } 1249bfc1af65SNick Piggin 1250bfc1af65SNick Piggin return ret ? ret : copied; 1251ac27a0ecSDave Kleikamp } 1252d2a17637SMingming Cao 12539d0be502STheodore Ts'o /* 1254386ad67cSLukas Czerner * Reserve a metadata for a single block located at lblock 1255386ad67cSLukas Czerner */ 1256386ad67cSLukas Czerner static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) 1257386ad67cSLukas Czerner { 1258386ad67cSLukas Czerner int retries = 0; 1259386ad67cSLukas Czerner struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1260386ad67cSLukas Czerner struct ext4_inode_info *ei = EXT4_I(inode); 1261386ad67cSLukas Czerner unsigned int md_needed; 1262386ad67cSLukas Czerner ext4_lblk_t save_last_lblock; 1263386ad67cSLukas Czerner int save_len; 1264386ad67cSLukas Czerner 1265386ad67cSLukas Czerner /* 1266386ad67cSLukas Czerner * recalculate the amount of metadata blocks to reserve 1267386ad67cSLukas Czerner * in order to allocate nrblocks 1268386ad67cSLukas Czerner * worse case is one extent per block 1269386ad67cSLukas Czerner */ 1270386ad67cSLukas Czerner repeat: 1271386ad67cSLukas Czerner spin_lock(&ei->i_block_reservation_lock); 1272386ad67cSLukas Czerner /* 1273386ad67cSLukas Czerner * ext4_calc_metadata_amount() has side effects, which we have 1274386ad67cSLukas Czerner * to be prepared undo if we fail to claim space. 1275386ad67cSLukas Czerner */ 1276386ad67cSLukas Czerner save_len = ei->i_da_metadata_calc_len; 1277386ad67cSLukas Czerner save_last_lblock = ei->i_da_metadata_calc_last_lblock; 1278386ad67cSLukas Czerner md_needed = EXT4_NUM_B2C(sbi, 1279386ad67cSLukas Czerner ext4_calc_metadata_amount(inode, lblock)); 1280386ad67cSLukas Czerner trace_ext4_da_reserve_space(inode, md_needed); 1281386ad67cSLukas Czerner 1282386ad67cSLukas Czerner /* 1283386ad67cSLukas Czerner * We do still charge estimated metadata to the sb though; 1284386ad67cSLukas Czerner * we cannot afford to run out of free blocks. 1285386ad67cSLukas Czerner */ 1286386ad67cSLukas Czerner if (ext4_claim_free_clusters(sbi, md_needed, 0)) { 1287386ad67cSLukas Czerner ei->i_da_metadata_calc_len = save_len; 1288386ad67cSLukas Czerner ei->i_da_metadata_calc_last_lblock = save_last_lblock; 1289386ad67cSLukas Czerner spin_unlock(&ei->i_block_reservation_lock); 1290386ad67cSLukas Czerner if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1291386ad67cSLukas Czerner cond_resched(); 1292386ad67cSLukas Czerner goto repeat; 1293386ad67cSLukas Czerner } 1294386ad67cSLukas Czerner return -ENOSPC; 1295386ad67cSLukas Czerner } 1296386ad67cSLukas Czerner ei->i_reserved_meta_blocks += md_needed; 1297386ad67cSLukas Czerner spin_unlock(&ei->i_block_reservation_lock); 1298386ad67cSLukas Czerner 1299386ad67cSLukas Czerner return 0; /* success */ 1300386ad67cSLukas Czerner } 1301386ad67cSLukas Czerner 1302386ad67cSLukas Czerner /* 13037b415bf6SAditya Kali * Reserve a single cluster located at lblock 13049d0be502STheodore Ts'o */ 130501f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) 1306d2a17637SMingming Cao { 1307030ba6bcSAneesh Kumar K.V int retries = 0; 1308d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 13090637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 13107b415bf6SAditya Kali unsigned int md_needed; 13115dd4056dSChristoph Hellwig int ret; 131203179fe9STheodore Ts'o ext4_lblk_t save_last_lblock; 131303179fe9STheodore Ts'o int save_len; 1314d2a17637SMingming Cao 131560e58e0fSMingming Cao /* 131672b8ab9dSEric Sandeen * We will charge metadata quota at writeout time; this saves 131772b8ab9dSEric Sandeen * us from metadata over-estimation, though we may go over by 131872b8ab9dSEric Sandeen * a small amount in the end. Here we just reserve for data. 131960e58e0fSMingming Cao */ 13207b415bf6SAditya Kali ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); 13215dd4056dSChristoph Hellwig if (ret) 13225dd4056dSChristoph Hellwig return ret; 132303179fe9STheodore Ts'o 132403179fe9STheodore Ts'o /* 132503179fe9STheodore Ts'o * recalculate the amount of metadata blocks to reserve 132603179fe9STheodore Ts'o * in order to allocate nrblocks 132703179fe9STheodore Ts'o * worse case is one extent per block 132803179fe9STheodore Ts'o */ 132903179fe9STheodore Ts'o repeat: 133003179fe9STheodore Ts'o spin_lock(&ei->i_block_reservation_lock); 133103179fe9STheodore Ts'o /* 133203179fe9STheodore Ts'o * ext4_calc_metadata_amount() has side effects, which we have 133303179fe9STheodore Ts'o * to be prepared undo if we fail to claim space. 133403179fe9STheodore Ts'o */ 133503179fe9STheodore Ts'o save_len = ei->i_da_metadata_calc_len; 133603179fe9STheodore Ts'o save_last_lblock = ei->i_da_metadata_calc_last_lblock; 133703179fe9STheodore Ts'o md_needed = EXT4_NUM_B2C(sbi, 133803179fe9STheodore Ts'o ext4_calc_metadata_amount(inode, lblock)); 133903179fe9STheodore Ts'o trace_ext4_da_reserve_space(inode, md_needed); 134003179fe9STheodore Ts'o 134172b8ab9dSEric Sandeen /* 134272b8ab9dSEric Sandeen * We do still charge estimated metadata to the sb though; 134372b8ab9dSEric Sandeen * we cannot afford to run out of free blocks. 134472b8ab9dSEric Sandeen */ 1345e7d5f315STheodore Ts'o if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) { 134603179fe9STheodore Ts'o ei->i_da_metadata_calc_len = save_len; 134703179fe9STheodore Ts'o ei->i_da_metadata_calc_last_lblock = save_last_lblock; 134803179fe9STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 1349030ba6bcSAneesh Kumar K.V if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1350bb8b20edSLukas Czerner cond_resched(); 1351030ba6bcSAneesh Kumar K.V goto repeat; 1352030ba6bcSAneesh Kumar K.V } 135303179fe9STheodore Ts'o dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1354d2a17637SMingming Cao return -ENOSPC; 1355d2a17637SMingming Cao } 13569d0be502STheodore Ts'o ei->i_reserved_data_blocks++; 13570637c6f4STheodore Ts'o ei->i_reserved_meta_blocks += md_needed; 13580637c6f4STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 135939bc680aSDmitry Monakhov 1360d2a17637SMingming Cao return 0; /* success */ 1361d2a17637SMingming Cao } 1362d2a17637SMingming Cao 136312219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1364d2a17637SMingming Cao { 1365d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 13660637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 1367d2a17637SMingming Cao 1368cd213226SMingming Cao if (!to_free) 1369cd213226SMingming Cao return; /* Nothing to release, exit */ 1370cd213226SMingming Cao 1371d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1372cd213226SMingming Cao 13735a58ec87SLi Zefan trace_ext4_da_release_space(inode, to_free); 13740637c6f4STheodore Ts'o if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1375cd213226SMingming Cao /* 13760637c6f4STheodore Ts'o * if there aren't enough reserved blocks, then the 13770637c6f4STheodore Ts'o * counter is messed up somewhere. Since this 13780637c6f4STheodore Ts'o * function is called from invalidate page, it's 13790637c6f4STheodore Ts'o * harmless to return without any action. 1380cd213226SMingming Cao */ 13818de5c325STheodore Ts'o ext4_warning(inode->i_sb, "ext4_da_release_space: " 13820637c6f4STheodore Ts'o "ino %lu, to_free %d with only %d reserved " 13831084f252STheodore Ts'o "data blocks", inode->i_ino, to_free, 13840637c6f4STheodore Ts'o ei->i_reserved_data_blocks); 13850637c6f4STheodore Ts'o WARN_ON(1); 13860637c6f4STheodore Ts'o to_free = ei->i_reserved_data_blocks; 13870637c6f4STheodore Ts'o } 13880637c6f4STheodore Ts'o ei->i_reserved_data_blocks -= to_free; 13890637c6f4STheodore Ts'o 13900637c6f4STheodore Ts'o if (ei->i_reserved_data_blocks == 0) { 13910637c6f4STheodore Ts'o /* 13920637c6f4STheodore Ts'o * We can release all of the reserved metadata blocks 13930637c6f4STheodore Ts'o * only when we have written all of the delayed 13940637c6f4STheodore Ts'o * allocation blocks. 13957b415bf6SAditya Kali * Note that in case of bigalloc, i_reserved_meta_blocks, 13967b415bf6SAditya Kali * i_reserved_data_blocks, etc. refer to number of clusters. 13970637c6f4STheodore Ts'o */ 139857042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 139972b8ab9dSEric Sandeen ei->i_reserved_meta_blocks); 1400ee5f4d9cSTheodore Ts'o ei->i_reserved_meta_blocks = 0; 14019d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 1402cd213226SMingming Cao } 1403cd213226SMingming Cao 140472b8ab9dSEric Sandeen /* update fs dirty data blocks counter */ 140557042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); 1406d2a17637SMingming Cao 1407d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 140860e58e0fSMingming Cao 14097b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); 1410d2a17637SMingming Cao } 1411d2a17637SMingming Cao 1412d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1413d2a17637SMingming Cao unsigned long offset) 1414d2a17637SMingming Cao { 1415d2a17637SMingming Cao int to_release = 0; 1416d2a17637SMingming Cao struct buffer_head *head, *bh; 1417d2a17637SMingming Cao unsigned int curr_off = 0; 14187b415bf6SAditya Kali struct inode *inode = page->mapping->host; 14197b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 14207b415bf6SAditya Kali int num_clusters; 142151865fdaSZheng Liu ext4_fsblk_t lblk; 1422d2a17637SMingming Cao 1423d2a17637SMingming Cao head = page_buffers(page); 1424d2a17637SMingming Cao bh = head; 1425d2a17637SMingming Cao do { 1426d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1427d2a17637SMingming Cao 1428d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1429d2a17637SMingming Cao to_release++; 1430d2a17637SMingming Cao clear_buffer_delay(bh); 1431d2a17637SMingming Cao } 1432d2a17637SMingming Cao curr_off = next_off; 1433d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 14347b415bf6SAditya Kali 143551865fdaSZheng Liu if (to_release) { 143651865fdaSZheng Liu lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 143751865fdaSZheng Liu ext4_es_remove_extent(inode, lblk, to_release); 143851865fdaSZheng Liu } 143951865fdaSZheng Liu 14407b415bf6SAditya Kali /* If we have released all the blocks belonging to a cluster, then we 14417b415bf6SAditya Kali * need to release the reserved space for that cluster. */ 14427b415bf6SAditya Kali num_clusters = EXT4_NUM_B2C(sbi, to_release); 14437b415bf6SAditya Kali while (num_clusters > 0) { 14447b415bf6SAditya Kali lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + 14457b415bf6SAditya Kali ((num_clusters - 1) << sbi->s_cluster_bits); 14467b415bf6SAditya Kali if (sbi->s_cluster_ratio == 1 || 14477d1b1fbcSZheng Liu !ext4_find_delalloc_cluster(inode, lblk)) 14487b415bf6SAditya Kali ext4_da_release_space(inode, 1); 14497b415bf6SAditya Kali 14507b415bf6SAditya Kali num_clusters--; 14517b415bf6SAditya Kali } 1452d2a17637SMingming Cao } 1453ac27a0ecSDave Kleikamp 1454ac27a0ecSDave Kleikamp /* 145564769240SAlex Tomas * Delayed allocation stuff 145664769240SAlex Tomas */ 145764769240SAlex Tomas 145864769240SAlex Tomas /* 145964769240SAlex Tomas * mpage_da_submit_io - walks through extent of pages and try to write 1460a1d6cc56SAneesh Kumar K.V * them with writepage() call back 146164769240SAlex Tomas * 146264769240SAlex Tomas * @mpd->inode: inode 146364769240SAlex Tomas * @mpd->first_page: first page of the extent 146464769240SAlex Tomas * @mpd->next_page: page after the last page of the extent 146564769240SAlex Tomas * 146664769240SAlex Tomas * By the time mpage_da_submit_io() is called we expect all blocks 146764769240SAlex Tomas * to be allocated. this may be wrong if allocation failed. 146864769240SAlex Tomas * 146964769240SAlex Tomas * As pages are already locked by write_cache_pages(), we can't use it 147064769240SAlex Tomas */ 14711de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd, 14721de3e3dfSTheodore Ts'o struct ext4_map_blocks *map) 147364769240SAlex Tomas { 1474791b7f08SAneesh Kumar K.V struct pagevec pvec; 1475791b7f08SAneesh Kumar K.V unsigned long index, end; 1476791b7f08SAneesh Kumar K.V int ret = 0, err, nr_pages, i; 1477791b7f08SAneesh Kumar K.V struct inode *inode = mpd->inode; 1478791b7f08SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 1479cb20d518STheodore Ts'o loff_t size = i_size_read(inode); 14803ecdb3a1STheodore Ts'o unsigned int len, block_start; 14813ecdb3a1STheodore Ts'o struct buffer_head *bh, *page_bufs = NULL; 14821de3e3dfSTheodore Ts'o sector_t pblock = 0, cur_logical = 0; 1483bd2d0210STheodore Ts'o struct ext4_io_submit io_submit; 148464769240SAlex Tomas 148564769240SAlex Tomas BUG_ON(mpd->next_page <= mpd->first_page); 1486bd2d0210STheodore Ts'o memset(&io_submit, 0, sizeof(io_submit)); 1487791b7f08SAneesh Kumar K.V /* 1488791b7f08SAneesh Kumar K.V * We need to start from the first_page to the next_page - 1 1489791b7f08SAneesh Kumar K.V * to make sure we also write the mapped dirty buffer_heads. 14908dc207c0STheodore Ts'o * If we look at mpd->b_blocknr we would only be looking 1491791b7f08SAneesh Kumar K.V * at the currently mapped buffer_heads. 1492791b7f08SAneesh Kumar K.V */ 149364769240SAlex Tomas index = mpd->first_page; 149464769240SAlex Tomas end = mpd->next_page - 1; 149564769240SAlex Tomas 1496791b7f08SAneesh Kumar K.V pagevec_init(&pvec, 0); 149764769240SAlex Tomas while (index <= end) { 1498791b7f08SAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 149964769240SAlex Tomas if (nr_pages == 0) 150064769240SAlex Tomas break; 150164769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 1502f8bec370SJan Kara int skip_page = 0; 150364769240SAlex Tomas struct page *page = pvec.pages[i]; 150464769240SAlex Tomas 1505791b7f08SAneesh Kumar K.V index = page->index; 1506791b7f08SAneesh Kumar K.V if (index > end) 1507791b7f08SAneesh Kumar K.V break; 1508cb20d518STheodore Ts'o 1509cb20d518STheodore Ts'o if (index == size >> PAGE_CACHE_SHIFT) 1510cb20d518STheodore Ts'o len = size & ~PAGE_CACHE_MASK; 1511cb20d518STheodore Ts'o else 1512cb20d518STheodore Ts'o len = PAGE_CACHE_SIZE; 15131de3e3dfSTheodore Ts'o if (map) { 15141de3e3dfSTheodore Ts'o cur_logical = index << (PAGE_CACHE_SHIFT - 15151de3e3dfSTheodore Ts'o inode->i_blkbits); 15161de3e3dfSTheodore Ts'o pblock = map->m_pblk + (cur_logical - 15171de3e3dfSTheodore Ts'o map->m_lblk); 15181de3e3dfSTheodore Ts'o } 1519791b7f08SAneesh Kumar K.V index++; 1520791b7f08SAneesh Kumar K.V 1521791b7f08SAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1522791b7f08SAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1523791b7f08SAneesh Kumar K.V 15243ecdb3a1STheodore Ts'o bh = page_bufs = page_buffers(page); 15253ecdb3a1STheodore Ts'o block_start = 0; 15263ecdb3a1STheodore Ts'o do { 15271de3e3dfSTheodore Ts'o if (map && (cur_logical >= map->m_lblk) && 15281de3e3dfSTheodore Ts'o (cur_logical <= (map->m_lblk + 15291de3e3dfSTheodore Ts'o (map->m_len - 1)))) { 15301de3e3dfSTheodore Ts'o if (buffer_delay(bh)) { 15311de3e3dfSTheodore Ts'o clear_buffer_delay(bh); 15321de3e3dfSTheodore Ts'o bh->b_blocknr = pblock; 15331de3e3dfSTheodore Ts'o } 15341de3e3dfSTheodore Ts'o if (buffer_unwritten(bh) || 15351de3e3dfSTheodore Ts'o buffer_mapped(bh)) 15361de3e3dfSTheodore Ts'o BUG_ON(bh->b_blocknr != pblock); 15371de3e3dfSTheodore Ts'o if (map->m_flags & EXT4_MAP_UNINIT) 15381de3e3dfSTheodore Ts'o set_buffer_uninit(bh); 15391de3e3dfSTheodore Ts'o clear_buffer_unwritten(bh); 15401de3e3dfSTheodore Ts'o } 15411de3e3dfSTheodore Ts'o 154213a79a47SYongqiang Yang /* 154313a79a47SYongqiang Yang * skip page if block allocation undone and 154413a79a47SYongqiang Yang * block is dirty 154513a79a47SYongqiang Yang */ 154613a79a47SYongqiang Yang if (ext4_bh_delay_or_unwritten(NULL, bh)) 154797498956STheodore Ts'o skip_page = 1; 15483ecdb3a1STheodore Ts'o bh = bh->b_this_page; 15493ecdb3a1STheodore Ts'o block_start += bh->b_size; 15501de3e3dfSTheodore Ts'o cur_logical++; 15511de3e3dfSTheodore Ts'o pblock++; 15521de3e3dfSTheodore Ts'o } while (bh != page_bufs); 15531de3e3dfSTheodore Ts'o 1554f8bec370SJan Kara if (skip_page) { 1555f8bec370SJan Kara unlock_page(page); 1556f8bec370SJan Kara continue; 1557f8bec370SJan Kara } 1558cb20d518STheodore Ts'o 155997498956STheodore Ts'o clear_page_dirty_for_io(page); 1560fe089c77SJan Kara err = ext4_bio_write_page(&io_submit, page, len, 1561fe089c77SJan Kara mpd->wbc); 1562cb20d518STheodore Ts'o if (!err) 1563a1d6cc56SAneesh Kumar K.V mpd->pages_written++; 156464769240SAlex Tomas /* 156564769240SAlex Tomas * In error case, we have to continue because 156664769240SAlex Tomas * remaining pages are still locked 156764769240SAlex Tomas */ 156864769240SAlex Tomas if (ret == 0) 156964769240SAlex Tomas ret = err; 157064769240SAlex Tomas } 157164769240SAlex Tomas pagevec_release(&pvec); 157264769240SAlex Tomas } 1573bd2d0210STheodore Ts'o ext4_io_submit(&io_submit); 157464769240SAlex Tomas return ret; 157564769240SAlex Tomas } 157664769240SAlex Tomas 1577c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 1578c4a0c46eSAneesh Kumar K.V { 1579c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1580c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1581c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1582c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1583c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 158451865fdaSZheng Liu ext4_lblk_t start, last; 1585c4a0c46eSAneesh Kumar K.V 1586c7f5938aSCurt Wohlgemuth index = mpd->first_page; 1587c7f5938aSCurt Wohlgemuth end = mpd->next_page - 1; 158851865fdaSZheng Liu 158951865fdaSZheng Liu start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 159051865fdaSZheng Liu last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); 159151865fdaSZheng Liu ext4_es_remove_extent(inode, start, last - start + 1); 159251865fdaSZheng Liu 159366bea92cSEric Sandeen pagevec_init(&pvec, 0); 1594c4a0c46eSAneesh Kumar K.V while (index <= end) { 1595c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1596c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1597c4a0c46eSAneesh Kumar K.V break; 1598c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1599c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 16009b1d0998SJan Kara if (page->index > end) 1601c4a0c46eSAneesh Kumar K.V break; 1602c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1603c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1604c4a0c46eSAneesh Kumar K.V block_invalidatepage(page, 0); 1605c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 1606c4a0c46eSAneesh Kumar K.V unlock_page(page); 1607c4a0c46eSAneesh Kumar K.V } 16089b1d0998SJan Kara index = pvec.pages[nr_pages - 1]->index + 1; 16099b1d0998SJan Kara pagevec_release(&pvec); 1610c4a0c46eSAneesh Kumar K.V } 1611c4a0c46eSAneesh Kumar K.V return; 1612c4a0c46eSAneesh Kumar K.V } 1613c4a0c46eSAneesh Kumar K.V 1614df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1615df22291fSAneesh Kumar K.V { 1616df22291fSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 161792b97816STheodore Ts'o struct super_block *sb = inode->i_sb; 1618f78ee70dSLukas Czerner struct ext4_inode_info *ei = EXT4_I(inode); 161992b97816STheodore Ts'o 162092b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", 16215dee5437STheodore Ts'o EXT4_C2B(EXT4_SB(inode->i_sb), 1622f78ee70dSLukas Czerner ext4_count_free_clusters(sb))); 162392b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Free/Dirty block details"); 162492b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "free_blocks=%lld", 1625f78ee70dSLukas Czerner (long long) EXT4_C2B(EXT4_SB(sb), 162657042651STheodore Ts'o percpu_counter_sum(&sbi->s_freeclusters_counter))); 162792b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld", 1628f78ee70dSLukas Czerner (long long) EXT4_C2B(EXT4_SB(sb), 16297b415bf6SAditya Kali percpu_counter_sum(&sbi->s_dirtyclusters_counter))); 163092b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Block reservation details"); 163192b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u", 1632f78ee70dSLukas Czerner ei->i_reserved_data_blocks); 163392b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u", 1634f78ee70dSLukas Czerner ei->i_reserved_meta_blocks); 1635f78ee70dSLukas Czerner ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u", 1636f78ee70dSLukas Czerner ei->i_allocated_meta_blocks); 1637df22291fSAneesh Kumar K.V return; 1638df22291fSAneesh Kumar K.V } 1639df22291fSAneesh Kumar K.V 1640b920c755STheodore Ts'o /* 16415a87b7a5STheodore Ts'o * mpage_da_map_and_submit - go through given space, map them 16425a87b7a5STheodore Ts'o * if necessary, and then submit them for I/O 164364769240SAlex Tomas * 16448dc207c0STheodore Ts'o * @mpd - bh describing space 164564769240SAlex Tomas * 164664769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 164764769240SAlex Tomas * 164864769240SAlex Tomas */ 16495a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd) 165064769240SAlex Tomas { 16512ac3b6e0STheodore Ts'o int err, blks, get_blocks_flags; 16521de3e3dfSTheodore Ts'o struct ext4_map_blocks map, *mapp = NULL; 16532fa3cdfbSTheodore Ts'o sector_t next = mpd->b_blocknr; 16542fa3cdfbSTheodore Ts'o unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 16552fa3cdfbSTheodore Ts'o loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 16562fa3cdfbSTheodore Ts'o handle_t *handle = NULL; 165764769240SAlex Tomas 165864769240SAlex Tomas /* 16595a87b7a5STheodore Ts'o * If the blocks are mapped already, or we couldn't accumulate 16605a87b7a5STheodore Ts'o * any blocks, then proceed immediately to the submission stage. 166164769240SAlex Tomas */ 16625a87b7a5STheodore Ts'o if ((mpd->b_size == 0) || 16635a87b7a5STheodore Ts'o ((mpd->b_state & (1 << BH_Mapped)) && 166429fa89d0SAneesh Kumar K.V !(mpd->b_state & (1 << BH_Delay)) && 16655a87b7a5STheodore Ts'o !(mpd->b_state & (1 << BH_Unwritten)))) 16665a87b7a5STheodore Ts'o goto submit_io; 16672fa3cdfbSTheodore Ts'o 16682fa3cdfbSTheodore Ts'o handle = ext4_journal_current_handle(); 16692fa3cdfbSTheodore Ts'o BUG_ON(!handle); 16702fa3cdfbSTheodore Ts'o 167179ffab34SAneesh Kumar K.V /* 167279e83036SEric Sandeen * Call ext4_map_blocks() to allocate any delayed allocation 16732ac3b6e0STheodore Ts'o * blocks, or to convert an uninitialized extent to be 16742ac3b6e0STheodore Ts'o * initialized (in the case where we have written into 16752ac3b6e0STheodore Ts'o * one or more preallocated blocks). 16762ac3b6e0STheodore Ts'o * 16772ac3b6e0STheodore Ts'o * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 16782ac3b6e0STheodore Ts'o * indicate that we are on the delayed allocation path. This 16792ac3b6e0STheodore Ts'o * affects functions in many different parts of the allocation 16802ac3b6e0STheodore Ts'o * call path. This flag exists primarily because we don't 168179e83036SEric Sandeen * want to change *many* call functions, so ext4_map_blocks() 1682f2321097STheodore Ts'o * will set the EXT4_STATE_DELALLOC_RESERVED flag once the 16832ac3b6e0STheodore Ts'o * inode's allocation semaphore is taken. 16842ac3b6e0STheodore Ts'o * 16852ac3b6e0STheodore Ts'o * If the blocks in questions were delalloc blocks, set 16862ac3b6e0STheodore Ts'o * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 16872ac3b6e0STheodore Ts'o * variables are updated after the blocks have been allocated. 168879ffab34SAneesh Kumar K.V */ 16892ed88685STheodore Ts'o map.m_lblk = next; 16902ed88685STheodore Ts'o map.m_len = max_blocks; 169127dd4385SLukas Czerner /* 169227dd4385SLukas Czerner * We're in delalloc path and it is possible that we're going to 169327dd4385SLukas Czerner * need more metadata blocks than previously reserved. However 169427dd4385SLukas Czerner * we must not fail because we're in writeback and there is 169527dd4385SLukas Czerner * nothing we can do about it so it might result in data loss. 169627dd4385SLukas Czerner * So use reserved blocks to allocate metadata if possible. 169727dd4385SLukas Czerner */ 169827dd4385SLukas Czerner get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 169927dd4385SLukas Czerner EXT4_GET_BLOCKS_METADATA_NOFAIL; 1700744692dcSJiaying Zhang if (ext4_should_dioread_nolock(mpd->inode)) 1701744692dcSJiaying Zhang get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 17022ac3b6e0STheodore Ts'o if (mpd->b_state & (1 << BH_Delay)) 17031296cc85SAneesh Kumar K.V get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 17041296cc85SAneesh Kumar K.V 170527dd4385SLukas Czerner 17062ed88685STheodore Ts'o blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); 17072fa3cdfbSTheodore Ts'o if (blks < 0) { 1708e3570639SEric Sandeen struct super_block *sb = mpd->inode->i_sb; 1709e3570639SEric Sandeen 17102fa3cdfbSTheodore Ts'o err = blks; 1711ed5bde0bSTheodore Ts'o /* 17125a87b7a5STheodore Ts'o * If get block returns EAGAIN or ENOSPC and there 171397498956STheodore Ts'o * appears to be free blocks we will just let 171497498956STheodore Ts'o * mpage_da_submit_io() unlock all of the pages. 1715c4a0c46eSAneesh Kumar K.V */ 1716c4a0c46eSAneesh Kumar K.V if (err == -EAGAIN) 17175a87b7a5STheodore Ts'o goto submit_io; 1718df22291fSAneesh Kumar K.V 17195dee5437STheodore Ts'o if (err == -ENOSPC && ext4_count_free_clusters(sb)) { 1720df22291fSAneesh Kumar K.V mpd->retval = err; 17215a87b7a5STheodore Ts'o goto submit_io; 1722df22291fSAneesh Kumar K.V } 1723df22291fSAneesh Kumar K.V 1724c4a0c46eSAneesh Kumar K.V /* 1725ed5bde0bSTheodore Ts'o * get block failure will cause us to loop in 1726ed5bde0bSTheodore Ts'o * writepages, because a_ops->writepage won't be able 1727ed5bde0bSTheodore Ts'o * to make progress. The page will be redirtied by 1728ed5bde0bSTheodore Ts'o * writepage and writepages will again try to write 1729ed5bde0bSTheodore Ts'o * the same. 1730c4a0c46eSAneesh Kumar K.V */ 1731e3570639SEric Sandeen if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { 1732e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1733e3570639SEric Sandeen "delayed block allocation failed for inode %lu " 1734e3570639SEric Sandeen "at logical offset %llu with max blocks %zd " 1735e3570639SEric Sandeen "with error %d", mpd->inode->i_ino, 1736c4a0c46eSAneesh Kumar K.V (unsigned long long) next, 17378dc207c0STheodore Ts'o mpd->b_size >> mpd->inode->i_blkbits, err); 1738e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 173901a523ebSTheodore Ts'o "This should not happen!! Data will be lost"); 1740e3570639SEric Sandeen if (err == -ENOSPC) 1741df22291fSAneesh Kumar K.V ext4_print_free_blocks(mpd->inode); 1742030ba6bcSAneesh Kumar K.V } 17432fa3cdfbSTheodore Ts'o /* invalidate all the pages */ 1744c7f5938aSCurt Wohlgemuth ext4_da_block_invalidatepages(mpd); 1745e0fd9b90SCurt Wohlgemuth 1746e0fd9b90SCurt Wohlgemuth /* Mark this page range as having been completed */ 1747e0fd9b90SCurt Wohlgemuth mpd->io_done = 1; 17485a87b7a5STheodore Ts'o return; 1749c4a0c46eSAneesh Kumar K.V } 17502fa3cdfbSTheodore Ts'o BUG_ON(blks == 0); 17512fa3cdfbSTheodore Ts'o 17521de3e3dfSTheodore Ts'o mapp = ↦ 17532ed88685STheodore Ts'o if (map.m_flags & EXT4_MAP_NEW) { 17542ed88685STheodore Ts'o struct block_device *bdev = mpd->inode->i_sb->s_bdev; 17552ed88685STheodore Ts'o int i; 175664769240SAlex Tomas 17572ed88685STheodore Ts'o for (i = 0; i < map.m_len; i++) 17582ed88685STheodore Ts'o unmap_underlying_metadata(bdev, map.m_pblk + i); 17592fa3cdfbSTheodore Ts'o } 17602fa3cdfbSTheodore Ts'o 17612fa3cdfbSTheodore Ts'o /* 176203f5d8bcSJan Kara * Update on-disk size along with block allocation. 17632fa3cdfbSTheodore Ts'o */ 17642fa3cdfbSTheodore Ts'o disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 17652fa3cdfbSTheodore Ts'o if (disksize > i_size_read(mpd->inode)) 17662fa3cdfbSTheodore Ts'o disksize = i_size_read(mpd->inode); 17672fa3cdfbSTheodore Ts'o if (disksize > EXT4_I(mpd->inode)->i_disksize) { 17682fa3cdfbSTheodore Ts'o ext4_update_i_disksize(mpd->inode, disksize); 17695a87b7a5STheodore Ts'o err = ext4_mark_inode_dirty(handle, mpd->inode); 17705a87b7a5STheodore Ts'o if (err) 17715a87b7a5STheodore Ts'o ext4_error(mpd->inode->i_sb, 17725a87b7a5STheodore Ts'o "Failed to mark inode %lu dirty", 17735a87b7a5STheodore Ts'o mpd->inode->i_ino); 17742fa3cdfbSTheodore Ts'o } 17752fa3cdfbSTheodore Ts'o 17765a87b7a5STheodore Ts'o submit_io: 17771de3e3dfSTheodore Ts'o mpage_da_submit_io(mpd, mapp); 17785a87b7a5STheodore Ts'o mpd->io_done = 1; 177964769240SAlex Tomas } 178064769240SAlex Tomas 1781bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 1782bf068ee2SAneesh Kumar K.V (1 << BH_Delay) | (1 << BH_Unwritten)) 178364769240SAlex Tomas 178464769240SAlex Tomas /* 178564769240SAlex Tomas * mpage_add_bh_to_extent - try to add one more block to extent of blocks 178664769240SAlex Tomas * 178764769240SAlex Tomas * @mpd->lbh - extent of blocks 178864769240SAlex Tomas * @logical - logical number of the block in the file 1789b6a8e62fSJan Kara * @b_state - b_state of the buffer head added 179064769240SAlex Tomas * 179164769240SAlex Tomas * the function is used to collect contig. blocks in same state 179264769240SAlex Tomas */ 1793b6a8e62fSJan Kara static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical, 17948dc207c0STheodore Ts'o unsigned long b_state) 179564769240SAlex Tomas { 179664769240SAlex Tomas sector_t next; 1797b6a8e62fSJan Kara int blkbits = mpd->inode->i_blkbits; 1798b6a8e62fSJan Kara int nrblocks = mpd->b_size >> blkbits; 179964769240SAlex Tomas 1800c445e3e0SEric Sandeen /* 1801c445e3e0SEric Sandeen * XXX Don't go larger than mballoc is willing to allocate 1802c445e3e0SEric Sandeen * This is a stopgap solution. We eventually need to fold 1803c445e3e0SEric Sandeen * mpage_da_submit_io() into this function and then call 180479e83036SEric Sandeen * ext4_map_blocks() multiple times in a loop 1805c445e3e0SEric Sandeen */ 1806b6a8e62fSJan Kara if (nrblocks >= (8*1024*1024 >> blkbits)) 1807c445e3e0SEric Sandeen goto flush_it; 1808c445e3e0SEric Sandeen 1809525f4ed8SMingming Cao /* check if the reserved journal credits might overflow */ 1810b6a8e62fSJan Kara if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) { 1811525f4ed8SMingming Cao if (nrblocks >= EXT4_MAX_TRANS_DATA) { 1812525f4ed8SMingming Cao /* 1813525f4ed8SMingming Cao * With non-extent format we are limited by the journal 1814525f4ed8SMingming Cao * credit available. Total credit needed to insert 1815525f4ed8SMingming Cao * nrblocks contiguous blocks is dependent on the 1816525f4ed8SMingming Cao * nrblocks. So limit nrblocks. 1817525f4ed8SMingming Cao */ 1818525f4ed8SMingming Cao goto flush_it; 1819525f4ed8SMingming Cao } 1820525f4ed8SMingming Cao } 182164769240SAlex Tomas /* 182264769240SAlex Tomas * First block in the extent 182364769240SAlex Tomas */ 18248dc207c0STheodore Ts'o if (mpd->b_size == 0) { 18258dc207c0STheodore Ts'o mpd->b_blocknr = logical; 1826b6a8e62fSJan Kara mpd->b_size = 1 << blkbits; 18278dc207c0STheodore Ts'o mpd->b_state = b_state & BH_FLAGS; 182864769240SAlex Tomas return; 182964769240SAlex Tomas } 183064769240SAlex Tomas 18318dc207c0STheodore Ts'o next = mpd->b_blocknr + nrblocks; 183264769240SAlex Tomas /* 183364769240SAlex Tomas * Can we merge the block to our big extent? 183464769240SAlex Tomas */ 18358dc207c0STheodore Ts'o if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { 1836b6a8e62fSJan Kara mpd->b_size += 1 << blkbits; 183764769240SAlex Tomas return; 183864769240SAlex Tomas } 183964769240SAlex Tomas 1840525f4ed8SMingming Cao flush_it: 184164769240SAlex Tomas /* 184264769240SAlex Tomas * We couldn't merge the block to our extent, so we 184364769240SAlex Tomas * need to flush current extent and start new one 184464769240SAlex Tomas */ 18455a87b7a5STheodore Ts'o mpage_da_map_and_submit(mpd); 1846a1d6cc56SAneesh Kumar K.V return; 184764769240SAlex Tomas } 184864769240SAlex Tomas 1849c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 185029fa89d0SAneesh Kumar K.V { 1851c364b22cSAneesh Kumar K.V return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); 185229fa89d0SAneesh Kumar K.V } 185329fa89d0SAneesh Kumar K.V 185464769240SAlex Tomas /* 18555356f261SAditya Kali * This function is grabs code from the very beginning of 18565356f261SAditya Kali * ext4_map_blocks, but assumes that the caller is from delayed write 18575356f261SAditya Kali * time. This function looks up the requested blocks and sets the 18585356f261SAditya Kali * buffer delay bit under the protection of i_data_sem. 18595356f261SAditya Kali */ 18605356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, 18615356f261SAditya Kali struct ext4_map_blocks *map, 18625356f261SAditya Kali struct buffer_head *bh) 18635356f261SAditya Kali { 1864d100eef2SZheng Liu struct extent_status es; 18655356f261SAditya Kali int retval; 18665356f261SAditya Kali sector_t invalid_block = ~((sector_t) 0xffff); 1867921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 1868921f266bSDmitry Monakhov struct ext4_map_blocks orig_map; 1869921f266bSDmitry Monakhov 1870921f266bSDmitry Monakhov memcpy(&orig_map, map, sizeof(*map)); 1871921f266bSDmitry Monakhov #endif 18725356f261SAditya Kali 18735356f261SAditya Kali if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 18745356f261SAditya Kali invalid_block = ~0; 18755356f261SAditya Kali 18765356f261SAditya Kali map->m_flags = 0; 18775356f261SAditya Kali ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," 18785356f261SAditya Kali "logical block %lu\n", inode->i_ino, map->m_len, 18795356f261SAditya Kali (unsigned long) map->m_lblk); 1880d100eef2SZheng Liu 1881d100eef2SZheng Liu /* Lookup extent status tree firstly */ 1882d100eef2SZheng Liu if (ext4_es_lookup_extent(inode, iblock, &es)) { 1883d100eef2SZheng Liu 1884d100eef2SZheng Liu if (ext4_es_is_hole(&es)) { 1885d100eef2SZheng Liu retval = 0; 1886d100eef2SZheng Liu down_read((&EXT4_I(inode)->i_data_sem)); 1887d100eef2SZheng Liu goto add_delayed; 1888d100eef2SZheng Liu } 1889d100eef2SZheng Liu 1890d100eef2SZheng Liu /* 1891d100eef2SZheng Liu * Delayed extent could be allocated by fallocate. 1892d100eef2SZheng Liu * So we need to check it. 1893d100eef2SZheng Liu */ 1894d100eef2SZheng Liu if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) { 1895d100eef2SZheng Liu map_bh(bh, inode->i_sb, invalid_block); 1896d100eef2SZheng Liu set_buffer_new(bh); 1897d100eef2SZheng Liu set_buffer_delay(bh); 1898d100eef2SZheng Liu return 0; 1899d100eef2SZheng Liu } 1900d100eef2SZheng Liu 1901d100eef2SZheng Liu map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk; 1902d100eef2SZheng Liu retval = es.es_len - (iblock - es.es_lblk); 1903d100eef2SZheng Liu if (retval > map->m_len) 1904d100eef2SZheng Liu retval = map->m_len; 1905d100eef2SZheng Liu map->m_len = retval; 1906d100eef2SZheng Liu if (ext4_es_is_written(&es)) 1907d100eef2SZheng Liu map->m_flags |= EXT4_MAP_MAPPED; 1908d100eef2SZheng Liu else if (ext4_es_is_unwritten(&es)) 1909d100eef2SZheng Liu map->m_flags |= EXT4_MAP_UNWRITTEN; 1910d100eef2SZheng Liu else 1911d100eef2SZheng Liu BUG_ON(1); 1912d100eef2SZheng Liu 1913921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 1914921f266bSDmitry Monakhov ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); 1915921f266bSDmitry Monakhov #endif 1916d100eef2SZheng Liu return retval; 1917d100eef2SZheng Liu } 1918d100eef2SZheng Liu 19195356f261SAditya Kali /* 19205356f261SAditya Kali * Try to see if we can get the block without requesting a new 19215356f261SAditya Kali * file system block. 19225356f261SAditya Kali */ 19235356f261SAditya Kali down_read((&EXT4_I(inode)->i_data_sem)); 19249c3569b5STao Ma if (ext4_has_inline_data(inode)) { 19259c3569b5STao Ma /* 19269c3569b5STao Ma * We will soon create blocks for this page, and let 19279c3569b5STao Ma * us pretend as if the blocks aren't allocated yet. 19289c3569b5STao Ma * In case of clusters, we have to handle the work 19299c3569b5STao Ma * of mapping from cluster so that the reserved space 19309c3569b5STao Ma * is calculated properly. 19319c3569b5STao Ma */ 19329c3569b5STao Ma if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) && 19339c3569b5STao Ma ext4_find_delalloc_cluster(inode, map->m_lblk)) 19349c3569b5STao Ma map->m_flags |= EXT4_MAP_FROM_CLUSTER; 19359c3569b5STao Ma retval = 0; 19369c3569b5STao Ma } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 1937d100eef2SZheng Liu retval = ext4_ext_map_blocks(NULL, inode, map, 1938d100eef2SZheng Liu EXT4_GET_BLOCKS_NO_PUT_HOLE); 19395356f261SAditya Kali else 1940d100eef2SZheng Liu retval = ext4_ind_map_blocks(NULL, inode, map, 1941d100eef2SZheng Liu EXT4_GET_BLOCKS_NO_PUT_HOLE); 19425356f261SAditya Kali 1943d100eef2SZheng Liu add_delayed: 19445356f261SAditya Kali if (retval == 0) { 1945f7fec032SZheng Liu int ret; 19465356f261SAditya Kali /* 19475356f261SAditya Kali * XXX: __block_prepare_write() unmaps passed block, 19485356f261SAditya Kali * is it OK? 19495356f261SAditya Kali */ 1950386ad67cSLukas Czerner /* 1951386ad67cSLukas Czerner * If the block was allocated from previously allocated cluster, 1952386ad67cSLukas Czerner * then we don't need to reserve it again. However we still need 1953386ad67cSLukas Czerner * to reserve metadata for every block we're going to write. 1954386ad67cSLukas Czerner */ 19555356f261SAditya Kali if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { 1956f7fec032SZheng Liu ret = ext4_da_reserve_space(inode, iblock); 1957f7fec032SZheng Liu if (ret) { 19585356f261SAditya Kali /* not enough space to reserve */ 1959f7fec032SZheng Liu retval = ret; 19605356f261SAditya Kali goto out_unlock; 19615356f261SAditya Kali } 1962386ad67cSLukas Czerner } else { 1963386ad67cSLukas Czerner ret = ext4_da_reserve_metadata(inode, iblock); 1964386ad67cSLukas Czerner if (ret) { 1965386ad67cSLukas Czerner /* not enough space to reserve */ 1966386ad67cSLukas Czerner retval = ret; 1967386ad67cSLukas Czerner goto out_unlock; 1968386ad67cSLukas Czerner } 1969f7fec032SZheng Liu } 19705356f261SAditya Kali 1971f7fec032SZheng Liu ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 1972fdc0212eSZheng Liu ~0, EXTENT_STATUS_DELAYED); 1973f7fec032SZheng Liu if (ret) { 1974f7fec032SZheng Liu retval = ret; 197551865fdaSZheng Liu goto out_unlock; 1976f7fec032SZheng Liu } 197751865fdaSZheng Liu 19785356f261SAditya Kali /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served 19795356f261SAditya Kali * and it should not appear on the bh->b_state. 19805356f261SAditya Kali */ 19815356f261SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 19825356f261SAditya Kali 19835356f261SAditya Kali map_bh(bh, inode->i_sb, invalid_block); 19845356f261SAditya Kali set_buffer_new(bh); 19855356f261SAditya Kali set_buffer_delay(bh); 1986f7fec032SZheng Liu } else if (retval > 0) { 1987f7fec032SZheng Liu int ret; 1988f7fec032SZheng Liu unsigned long long status; 1989f7fec032SZheng Liu 1990921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST 1991921f266bSDmitry Monakhov if (retval != map->m_len) { 1992921f266bSDmitry Monakhov printk("ES len assertation failed for inode: %lu " 1993921f266bSDmitry Monakhov "retval %d != map->m_len %d " 1994921f266bSDmitry Monakhov "in %s (lookup)\n", inode->i_ino, retval, 1995921f266bSDmitry Monakhov map->m_len, __func__); 1996921f266bSDmitry Monakhov } 1997921f266bSDmitry Monakhov #endif 1998921f266bSDmitry Monakhov 1999f7fec032SZheng Liu status = map->m_flags & EXT4_MAP_UNWRITTEN ? 2000f7fec032SZheng Liu EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; 2001f7fec032SZheng Liu ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, 2002f7fec032SZheng Liu map->m_pblk, status); 2003f7fec032SZheng Liu if (ret != 0) 2004f7fec032SZheng Liu retval = ret; 20055356f261SAditya Kali } 20065356f261SAditya Kali 20075356f261SAditya Kali out_unlock: 20085356f261SAditya Kali up_read((&EXT4_I(inode)->i_data_sem)); 20095356f261SAditya Kali 20105356f261SAditya Kali return retval; 20115356f261SAditya Kali } 20125356f261SAditya Kali 20135356f261SAditya Kali /* 2014b920c755STheodore Ts'o * This is a special get_blocks_t callback which is used by 2015b920c755STheodore Ts'o * ext4_da_write_begin(). It will either return mapped block or 2016b920c755STheodore Ts'o * reserve space for a single block. 201729fa89d0SAneesh Kumar K.V * 201829fa89d0SAneesh Kumar K.V * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 201929fa89d0SAneesh Kumar K.V * We also have b_blocknr = -1 and b_bdev initialized properly 202029fa89d0SAneesh Kumar K.V * 202129fa89d0SAneesh Kumar K.V * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 202229fa89d0SAneesh Kumar K.V * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev 202329fa89d0SAneesh Kumar K.V * initialized properly. 202464769240SAlex Tomas */ 20259c3569b5STao Ma int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 20262ed88685STheodore Ts'o struct buffer_head *bh, int create) 202764769240SAlex Tomas { 20282ed88685STheodore Ts'o struct ext4_map_blocks map; 202964769240SAlex Tomas int ret = 0; 203064769240SAlex Tomas 203164769240SAlex Tomas BUG_ON(create == 0); 20322ed88685STheodore Ts'o BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 20332ed88685STheodore Ts'o 20342ed88685STheodore Ts'o map.m_lblk = iblock; 20352ed88685STheodore Ts'o map.m_len = 1; 203664769240SAlex Tomas 203764769240SAlex Tomas /* 203864769240SAlex Tomas * first, we need to know whether the block is allocated already 203964769240SAlex Tomas * preallocated blocks are unmapped but should treated 204064769240SAlex Tomas * the same as allocated blocks. 204164769240SAlex Tomas */ 20425356f261SAditya Kali ret = ext4_da_map_blocks(inode, iblock, &map, bh); 20435356f261SAditya Kali if (ret <= 0) 20442ed88685STheodore Ts'o return ret; 204564769240SAlex Tomas 20462ed88685STheodore Ts'o map_bh(bh, inode->i_sb, map.m_pblk); 20472ed88685STheodore Ts'o bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 20482ed88685STheodore Ts'o 20492ed88685STheodore Ts'o if (buffer_unwritten(bh)) { 20502ed88685STheodore Ts'o /* A delayed write to unwritten bh should be marked 20512ed88685STheodore Ts'o * new and mapped. Mapped ensures that we don't do 20522ed88685STheodore Ts'o * get_block multiple times when we write to the same 20532ed88685STheodore Ts'o * offset and new ensures that we do proper zero out 20542ed88685STheodore Ts'o * for partial write. 20552ed88685STheodore Ts'o */ 20562ed88685STheodore Ts'o set_buffer_new(bh); 2057c8205636STheodore Ts'o set_buffer_mapped(bh); 20582ed88685STheodore Ts'o } 20592ed88685STheodore Ts'o return 0; 206064769240SAlex Tomas } 206161628a3fSMingming Cao 206262e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh) 206362e086beSAneesh Kumar K.V { 206462e086beSAneesh Kumar K.V get_bh(bh); 206562e086beSAneesh Kumar K.V return 0; 206662e086beSAneesh Kumar K.V } 206762e086beSAneesh Kumar K.V 206862e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh) 206962e086beSAneesh Kumar K.V { 207062e086beSAneesh Kumar K.V put_bh(bh); 207162e086beSAneesh Kumar K.V return 0; 207262e086beSAneesh Kumar K.V } 207362e086beSAneesh Kumar K.V 207462e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page, 207562e086beSAneesh Kumar K.V unsigned int len) 207662e086beSAneesh Kumar K.V { 207762e086beSAneesh Kumar K.V struct address_space *mapping = page->mapping; 207862e086beSAneesh Kumar K.V struct inode *inode = mapping->host; 20793fdcfb66STao Ma struct buffer_head *page_bufs = NULL; 208062e086beSAneesh Kumar K.V handle_t *handle = NULL; 20813fdcfb66STao Ma int ret = 0, err = 0; 20823fdcfb66STao Ma int inline_data = ext4_has_inline_data(inode); 20833fdcfb66STao Ma struct buffer_head *inode_bh = NULL; 208462e086beSAneesh Kumar K.V 2085cb20d518STheodore Ts'o ClearPageChecked(page); 20863fdcfb66STao Ma 20873fdcfb66STao Ma if (inline_data) { 20883fdcfb66STao Ma BUG_ON(page->index != 0); 20893fdcfb66STao Ma BUG_ON(len > ext4_get_max_inline_size(inode)); 20903fdcfb66STao Ma inode_bh = ext4_journalled_write_inline_data(inode, len, page); 20913fdcfb66STao Ma if (inode_bh == NULL) 20923fdcfb66STao Ma goto out; 20933fdcfb66STao Ma } else { 209462e086beSAneesh Kumar K.V page_bufs = page_buffers(page); 20953fdcfb66STao Ma if (!page_bufs) { 20963fdcfb66STao Ma BUG(); 20973fdcfb66STao Ma goto out; 20983fdcfb66STao Ma } 20993fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 21003fdcfb66STao Ma NULL, bget_one); 21013fdcfb66STao Ma } 210262e086beSAneesh Kumar K.V /* As soon as we unlock the page, it can go away, but we have 210362e086beSAneesh Kumar K.V * references to buffers so we are safe */ 210462e086beSAneesh Kumar K.V unlock_page(page); 210562e086beSAneesh Kumar K.V 21069924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 21079924a92aSTheodore Ts'o ext4_writepage_trans_blocks(inode)); 210862e086beSAneesh Kumar K.V if (IS_ERR(handle)) { 210962e086beSAneesh Kumar K.V ret = PTR_ERR(handle); 211062e086beSAneesh Kumar K.V goto out; 211162e086beSAneesh Kumar K.V } 211262e086beSAneesh Kumar K.V 2113441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 2114441c8508SCurt Wohlgemuth 21153fdcfb66STao Ma if (inline_data) { 21163fdcfb66STao Ma ret = ext4_journal_get_write_access(handle, inode_bh); 21173fdcfb66STao Ma 21183fdcfb66STao Ma err = ext4_handle_dirty_metadata(handle, inode, inode_bh); 21193fdcfb66STao Ma 21203fdcfb66STao Ma } else { 2121f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 212262e086beSAneesh Kumar K.V do_journal_get_write_access); 212362e086beSAneesh Kumar K.V 2124f19d5870STao Ma err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 212562e086beSAneesh Kumar K.V write_end_fn); 21263fdcfb66STao Ma } 212762e086beSAneesh Kumar K.V if (ret == 0) 212862e086beSAneesh Kumar K.V ret = err; 21292d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 213062e086beSAneesh Kumar K.V err = ext4_journal_stop(handle); 213162e086beSAneesh Kumar K.V if (!ret) 213262e086beSAneesh Kumar K.V ret = err; 213362e086beSAneesh Kumar K.V 21343fdcfb66STao Ma if (!ext4_has_inline_data(inode)) 21353fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 21363fdcfb66STao Ma NULL, bput_one); 213719f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 213862e086beSAneesh Kumar K.V out: 21393fdcfb66STao Ma brelse(inode_bh); 214062e086beSAneesh Kumar K.V return ret; 214162e086beSAneesh Kumar K.V } 214262e086beSAneesh Kumar K.V 214361628a3fSMingming Cao /* 214443ce1d23SAneesh Kumar K.V * Note that we don't need to start a transaction unless we're journaling data 214543ce1d23SAneesh Kumar K.V * because we should have holes filled from ext4_page_mkwrite(). We even don't 214643ce1d23SAneesh Kumar K.V * need to file the inode to the transaction's list in ordered mode because if 214743ce1d23SAneesh Kumar K.V * we are writing back data added by write(), the inode is already there and if 214843ce1d23SAneesh Kumar K.V * we are writing back data modified via mmap(), no one guarantees in which 214943ce1d23SAneesh Kumar K.V * transaction the data will hit the disk. In case we are journaling data, we 215043ce1d23SAneesh Kumar K.V * cannot start transaction directly because transaction start ranks above page 215143ce1d23SAneesh Kumar K.V * lock so we have to do some magic. 215243ce1d23SAneesh Kumar K.V * 2153b920c755STheodore Ts'o * This function can get called via... 2154b920c755STheodore Ts'o * - ext4_da_writepages after taking page lock (have journal handle) 2155b920c755STheodore Ts'o * - journal_submit_inode_data_buffers (no journal handle) 2156f6463b0dSArtem Bityutskiy * - shrink_page_list via the kswapd/direct reclaim (no journal handle) 2157b920c755STheodore Ts'o * - grab_page_cache when doing write_begin (have journal handle) 215843ce1d23SAneesh Kumar K.V * 215943ce1d23SAneesh Kumar K.V * We don't do any block allocation in this function. If we have page with 216043ce1d23SAneesh Kumar K.V * multiple blocks we need to write those buffer_heads that are mapped. This 216143ce1d23SAneesh Kumar K.V * is important for mmaped based write. So if we do with blocksize 1K 216243ce1d23SAneesh Kumar K.V * truncate(f, 1024); 216343ce1d23SAneesh Kumar K.V * a = mmap(f, 0, 4096); 216443ce1d23SAneesh Kumar K.V * a[0] = 'a'; 216543ce1d23SAneesh Kumar K.V * truncate(f, 4096); 216643ce1d23SAneesh Kumar K.V * we have in the page first buffer_head mapped via page_mkwrite call back 216790802ed9SPaul Bolle * but other buffer_heads would be unmapped but dirty (dirty done via the 216843ce1d23SAneesh Kumar K.V * do_wp_page). So writepage should write the first block. If we modify 216943ce1d23SAneesh Kumar K.V * the mmap area beyond 1024 we will again get a page_fault and the 217043ce1d23SAneesh Kumar K.V * page_mkwrite callback will do the block allocation and mark the 217143ce1d23SAneesh Kumar K.V * buffer_heads mapped. 217243ce1d23SAneesh Kumar K.V * 217343ce1d23SAneesh Kumar K.V * We redirty the page if we have any buffer_heads that is either delay or 217443ce1d23SAneesh Kumar K.V * unwritten in the page. 217543ce1d23SAneesh Kumar K.V * 217643ce1d23SAneesh Kumar K.V * We can get recursively called as show below. 217743ce1d23SAneesh Kumar K.V * 217843ce1d23SAneesh Kumar K.V * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 217943ce1d23SAneesh Kumar K.V * ext4_writepage() 218043ce1d23SAneesh Kumar K.V * 218143ce1d23SAneesh Kumar K.V * But since we don't do any block allocation we should not deadlock. 218243ce1d23SAneesh Kumar K.V * Page also have the dirty flag cleared so we don't get recurive page_lock. 218361628a3fSMingming Cao */ 218443ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page, 218564769240SAlex Tomas struct writeback_control *wbc) 218664769240SAlex Tomas { 2187f8bec370SJan Kara int ret = 0; 218861628a3fSMingming Cao loff_t size; 2189498e5f24STheodore Ts'o unsigned int len; 2190744692dcSJiaying Zhang struct buffer_head *page_bufs = NULL; 219161628a3fSMingming Cao struct inode *inode = page->mapping->host; 219236ade451SJan Kara struct ext4_io_submit io_submit; 219364769240SAlex Tomas 2194a9c667f8SLukas Czerner trace_ext4_writepage(page); 219561628a3fSMingming Cao size = i_size_read(inode); 219661628a3fSMingming Cao if (page->index == size >> PAGE_CACHE_SHIFT) 219761628a3fSMingming Cao len = size & ~PAGE_CACHE_MASK; 219861628a3fSMingming Cao else 219961628a3fSMingming Cao len = PAGE_CACHE_SIZE; 220061628a3fSMingming Cao 2201f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page); 220264769240SAlex Tomas /* 2203fe386132SJan Kara * We cannot do block allocation or other extent handling in this 2204fe386132SJan Kara * function. If there are buffers needing that, we have to redirty 2205fe386132SJan Kara * the page. But we may reach here when we do a journal commit via 2206fe386132SJan Kara * journal_submit_inode_data_buffers() and in that case we must write 2207fe386132SJan Kara * allocated buffers to achieve data=ordered mode guarantees. 220864769240SAlex Tomas */ 2209f19d5870STao Ma if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2210c364b22cSAneesh Kumar K.V ext4_bh_delay_or_unwritten)) { 221161628a3fSMingming Cao redirty_page_for_writepage(wbc, page); 2212fe386132SJan Kara if (current->flags & PF_MEMALLOC) { 2213fe386132SJan Kara /* 2214fe386132SJan Kara * For memory cleaning there's no point in writing only 2215fe386132SJan Kara * some buffers. So just bail out. Warn if we came here 2216fe386132SJan Kara * from direct reclaim. 2217fe386132SJan Kara */ 2218fe386132SJan Kara WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) 2219fe386132SJan Kara == PF_MEMALLOC); 222061628a3fSMingming Cao unlock_page(page); 222161628a3fSMingming Cao return 0; 222261628a3fSMingming Cao } 2223f0e6c985SAneesh Kumar K.V } 222464769240SAlex Tomas 2225cb20d518STheodore Ts'o if (PageChecked(page) && ext4_should_journal_data(inode)) 222643ce1d23SAneesh Kumar K.V /* 222743ce1d23SAneesh Kumar K.V * It's mmapped pagecache. Add buffers and journal it. There 222843ce1d23SAneesh Kumar K.V * doesn't seem much point in redirtying the page here. 222943ce1d23SAneesh Kumar K.V */ 22303f0ca309SWu Fengguang return __ext4_journalled_writepage(page, len); 223143ce1d23SAneesh Kumar K.V 223236ade451SJan Kara memset(&io_submit, 0, sizeof(io_submit)); 223336ade451SJan Kara ret = ext4_bio_write_page(&io_submit, page, len, wbc); 223436ade451SJan Kara ext4_io_submit(&io_submit); 223564769240SAlex Tomas return ret; 223664769240SAlex Tomas } 223764769240SAlex Tomas 223861628a3fSMingming Cao /* 2239525f4ed8SMingming Cao * This is called via ext4_da_writepages() to 224025985edcSLucas De Marchi * calculate the total number of credits to reserve to fit 2241525f4ed8SMingming Cao * a single extent allocation into a single transaction, 2242525f4ed8SMingming Cao * ext4_da_writpeages() will loop calling this before 2243525f4ed8SMingming Cao * the block allocation. 224461628a3fSMingming Cao */ 2245525f4ed8SMingming Cao 2246525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode) 2247525f4ed8SMingming Cao { 2248525f4ed8SMingming Cao int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 2249525f4ed8SMingming Cao 2250525f4ed8SMingming Cao /* 2251525f4ed8SMingming Cao * With non-extent format the journal credit needed to 2252525f4ed8SMingming Cao * insert nrblocks contiguous block is dependent on 2253525f4ed8SMingming Cao * number of contiguous block. So we will limit 2254525f4ed8SMingming Cao * number of contiguous block to a sane value 2255525f4ed8SMingming Cao */ 225612e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && 2257525f4ed8SMingming Cao (max_blocks > EXT4_MAX_TRANS_DATA)) 2258525f4ed8SMingming Cao max_blocks = EXT4_MAX_TRANS_DATA; 2259525f4ed8SMingming Cao 2260525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, max_blocks); 2261525f4ed8SMingming Cao } 226261628a3fSMingming Cao 22638e48dcfbSTheodore Ts'o /* 22648e48dcfbSTheodore Ts'o * write_cache_pages_da - walk the list of dirty pages of the given 22658eb9e5ceSTheodore Ts'o * address space and accumulate pages that need writing, and call 2266168fc022STheodore Ts'o * mpage_da_map_and_submit to map a single contiguous memory region 2267168fc022STheodore Ts'o * and then write them. 22688e48dcfbSTheodore Ts'o */ 22699c3569b5STao Ma static int write_cache_pages_da(handle_t *handle, 22709c3569b5STao Ma struct address_space *mapping, 22718e48dcfbSTheodore Ts'o struct writeback_control *wbc, 227272f84e65SEric Sandeen struct mpage_da_data *mpd, 227372f84e65SEric Sandeen pgoff_t *done_index) 22748e48dcfbSTheodore Ts'o { 22758eb9e5ceSTheodore Ts'o struct buffer_head *bh, *head; 2276168fc022STheodore Ts'o struct inode *inode = mapping->host; 22778e48dcfbSTheodore Ts'o struct pagevec pvec; 22784f01b02cSTheodore Ts'o unsigned int nr_pages; 22794f01b02cSTheodore Ts'o sector_t logical; 22804f01b02cSTheodore Ts'o pgoff_t index, end; 22818e48dcfbSTheodore Ts'o long nr_to_write = wbc->nr_to_write; 22824f01b02cSTheodore Ts'o int i, tag, ret = 0; 22838e48dcfbSTheodore Ts'o 2284168fc022STheodore Ts'o memset(mpd, 0, sizeof(struct mpage_da_data)); 2285168fc022STheodore Ts'o mpd->wbc = wbc; 2286168fc022STheodore Ts'o mpd->inode = inode; 22878e48dcfbSTheodore Ts'o pagevec_init(&pvec, 0); 22888e48dcfbSTheodore Ts'o index = wbc->range_start >> PAGE_CACHE_SHIFT; 22898e48dcfbSTheodore Ts'o end = wbc->range_end >> PAGE_CACHE_SHIFT; 22908e48dcfbSTheodore Ts'o 22916e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 22925b41d924SEric Sandeen tag = PAGECACHE_TAG_TOWRITE; 22935b41d924SEric Sandeen else 22945b41d924SEric Sandeen tag = PAGECACHE_TAG_DIRTY; 22955b41d924SEric Sandeen 229672f84e65SEric Sandeen *done_index = index; 22974f01b02cSTheodore Ts'o while (index <= end) { 22985b41d924SEric Sandeen nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 22998e48dcfbSTheodore Ts'o min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 23008e48dcfbSTheodore Ts'o if (nr_pages == 0) 23014f01b02cSTheodore Ts'o return 0; 23028e48dcfbSTheodore Ts'o 23038e48dcfbSTheodore Ts'o for (i = 0; i < nr_pages; i++) { 23048e48dcfbSTheodore Ts'o struct page *page = pvec.pages[i]; 23058e48dcfbSTheodore Ts'o 23068e48dcfbSTheodore Ts'o /* 23078e48dcfbSTheodore Ts'o * At this point, the page may be truncated or 23088e48dcfbSTheodore Ts'o * invalidated (changing page->mapping to NULL), or 23098e48dcfbSTheodore Ts'o * even swizzled back from swapper_space to tmpfs file 23108e48dcfbSTheodore Ts'o * mapping. However, page->index will not change 23118e48dcfbSTheodore Ts'o * because we have a reference on the page. 23128e48dcfbSTheodore Ts'o */ 23134f01b02cSTheodore Ts'o if (page->index > end) 23144f01b02cSTheodore Ts'o goto out; 23158e48dcfbSTheodore Ts'o 231672f84e65SEric Sandeen *done_index = page->index + 1; 231772f84e65SEric Sandeen 231878aaced3STheodore Ts'o /* 231978aaced3STheodore Ts'o * If we can't merge this page, and we have 232078aaced3STheodore Ts'o * accumulated an contiguous region, write it 232178aaced3STheodore Ts'o */ 232278aaced3STheodore Ts'o if ((mpd->next_page != page->index) && 232378aaced3STheodore Ts'o (mpd->next_page != mpd->first_page)) { 232478aaced3STheodore Ts'o mpage_da_map_and_submit(mpd); 232578aaced3STheodore Ts'o goto ret_extent_tail; 232678aaced3STheodore Ts'o } 232778aaced3STheodore Ts'o 23288e48dcfbSTheodore Ts'o lock_page(page); 23298e48dcfbSTheodore Ts'o 23308e48dcfbSTheodore Ts'o /* 23314f01b02cSTheodore Ts'o * If the page is no longer dirty, or its 23324f01b02cSTheodore Ts'o * mapping no longer corresponds to inode we 23334f01b02cSTheodore Ts'o * are writing (which means it has been 23344f01b02cSTheodore Ts'o * truncated or invalidated), or the page is 23354f01b02cSTheodore Ts'o * already under writeback and we are not 23364f01b02cSTheodore Ts'o * doing a data integrity writeback, skip the page 23378e48dcfbSTheodore Ts'o */ 23384f01b02cSTheodore Ts'o if (!PageDirty(page) || 23394f01b02cSTheodore Ts'o (PageWriteback(page) && 23404f01b02cSTheodore Ts'o (wbc->sync_mode == WB_SYNC_NONE)) || 23414f01b02cSTheodore Ts'o unlikely(page->mapping != mapping)) { 23428e48dcfbSTheodore Ts'o unlock_page(page); 23438e48dcfbSTheodore Ts'o continue; 23448e48dcfbSTheodore Ts'o } 23458e48dcfbSTheodore Ts'o 23468e48dcfbSTheodore Ts'o wait_on_page_writeback(page); 23478e48dcfbSTheodore Ts'o BUG_ON(PageWriteback(page)); 23488e48dcfbSTheodore Ts'o 23499c3569b5STao Ma /* 23509c3569b5STao Ma * If we have inline data and arrive here, it means that 23519c3569b5STao Ma * we will soon create the block for the 1st page, so 23529c3569b5STao Ma * we'd better clear the inline data here. 23539c3569b5STao Ma */ 23549c3569b5STao Ma if (ext4_has_inline_data(inode)) { 23559c3569b5STao Ma BUG_ON(ext4_test_inode_state(inode, 23569c3569b5STao Ma EXT4_STATE_MAY_INLINE_DATA)); 23579c3569b5STao Ma ext4_destroy_inline_data(handle, inode); 23589c3569b5STao Ma } 23599c3569b5STao Ma 2360168fc022STheodore Ts'o if (mpd->next_page != page->index) 23618eb9e5ceSTheodore Ts'o mpd->first_page = page->index; 23628eb9e5ceSTheodore Ts'o mpd->next_page = page->index + 1; 23638eb9e5ceSTheodore Ts'o logical = (sector_t) page->index << 23648eb9e5ceSTheodore Ts'o (PAGE_CACHE_SHIFT - inode->i_blkbits); 23658eb9e5ceSTheodore Ts'o 2366f8bec370SJan Kara /* Add all dirty buffers to mpd */ 23678eb9e5ceSTheodore Ts'o head = page_buffers(page); 23688eb9e5ceSTheodore Ts'o bh = head; 23698eb9e5ceSTheodore Ts'o do { 23708eb9e5ceSTheodore Ts'o BUG_ON(buffer_locked(bh)); 23718eb9e5ceSTheodore Ts'o /* 2372f8bec370SJan Kara * We need to try to allocate unmapped blocks 2373f8bec370SJan Kara * in the same page. Otherwise we won't make 2374f8bec370SJan Kara * progress with the page in ext4_writepage 23758eb9e5ceSTheodore Ts'o */ 23768eb9e5ceSTheodore Ts'o if (ext4_bh_delay_or_unwritten(NULL, bh)) { 23778eb9e5ceSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical, 23788eb9e5ceSTheodore Ts'o bh->b_state); 23794f01b02cSTheodore Ts'o if (mpd->io_done) 23804f01b02cSTheodore Ts'o goto ret_extent_tail; 2381f8bec370SJan Kara } else if (buffer_dirty(bh) && 2382f8bec370SJan Kara buffer_mapped(bh)) { 23838eb9e5ceSTheodore Ts'o /* 2384f8bec370SJan Kara * mapped dirty buffer. We need to 2385f8bec370SJan Kara * update the b_state because we look 2386f8bec370SJan Kara * at b_state in mpage_da_map_blocks. 2387f8bec370SJan Kara * We don't update b_size because if we 2388f8bec370SJan Kara * find an unmapped buffer_head later 2389f8bec370SJan Kara * we need to use the b_state flag of 2390f8bec370SJan Kara * that buffer_head. 23918eb9e5ceSTheodore Ts'o */ 23928eb9e5ceSTheodore Ts'o if (mpd->b_size == 0) 2393f8bec370SJan Kara mpd->b_state = 2394f8bec370SJan Kara bh->b_state & BH_FLAGS; 23958e48dcfbSTheodore Ts'o } 23968eb9e5ceSTheodore Ts'o logical++; 23978eb9e5ceSTheodore Ts'o } while ((bh = bh->b_this_page) != head); 23988e48dcfbSTheodore Ts'o 23998e48dcfbSTheodore Ts'o if (nr_to_write > 0) { 24008e48dcfbSTheodore Ts'o nr_to_write--; 24018e48dcfbSTheodore Ts'o if (nr_to_write == 0 && 24024f01b02cSTheodore Ts'o wbc->sync_mode == WB_SYNC_NONE) 24038e48dcfbSTheodore Ts'o /* 24048e48dcfbSTheodore Ts'o * We stop writing back only if we are 24058e48dcfbSTheodore Ts'o * not doing integrity sync. In case of 24068e48dcfbSTheodore Ts'o * integrity sync we have to keep going 24078e48dcfbSTheodore Ts'o * because someone may be concurrently 24088e48dcfbSTheodore Ts'o * dirtying pages, and we might have 24098e48dcfbSTheodore Ts'o * synced a lot of newly appeared dirty 24108e48dcfbSTheodore Ts'o * pages, but have not synced all of the 24118e48dcfbSTheodore Ts'o * old dirty pages. 24128e48dcfbSTheodore Ts'o */ 24134f01b02cSTheodore Ts'o goto out; 24148e48dcfbSTheodore Ts'o } 24158e48dcfbSTheodore Ts'o } 24168e48dcfbSTheodore Ts'o pagevec_release(&pvec); 24178e48dcfbSTheodore Ts'o cond_resched(); 24188e48dcfbSTheodore Ts'o } 24194f01b02cSTheodore Ts'o return 0; 24204f01b02cSTheodore Ts'o ret_extent_tail: 24214f01b02cSTheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL; 24228eb9e5ceSTheodore Ts'o out: 24238eb9e5ceSTheodore Ts'o pagevec_release(&pvec); 24248eb9e5ceSTheodore Ts'o cond_resched(); 24258e48dcfbSTheodore Ts'o return ret; 24268e48dcfbSTheodore Ts'o } 24278e48dcfbSTheodore Ts'o 24288e48dcfbSTheodore Ts'o 242964769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping, 243064769240SAlex Tomas struct writeback_control *wbc) 243164769240SAlex Tomas { 243222208dedSAneesh Kumar K.V pgoff_t index; 243322208dedSAneesh Kumar K.V int range_whole = 0; 243461628a3fSMingming Cao handle_t *handle = NULL; 2435df22291fSAneesh Kumar K.V struct mpage_da_data mpd; 24365e745b04SAneesh Kumar K.V struct inode *inode = mapping->host; 2437498e5f24STheodore Ts'o int pages_written = 0; 243855138e0bSTheodore Ts'o unsigned int max_pages; 24392acf2c26SAneesh Kumar K.V int range_cyclic, cycled = 1, io_done = 0; 244055138e0bSTheodore Ts'o int needed_blocks, ret = 0; 244155138e0bSTheodore Ts'o long desired_nr_to_write, nr_to_writebump = 0; 2442de89de6eSTheodore Ts'o loff_t range_start = wbc->range_start; 24435e745b04SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 244472f84e65SEric Sandeen pgoff_t done_index = 0; 24455b41d924SEric Sandeen pgoff_t end; 24461bce63d1SShaohua Li struct blk_plug plug; 244761628a3fSMingming Cao 24489bffad1eSTheodore Ts'o trace_ext4_da_writepages(inode, wbc); 2449ba80b101STheodore Ts'o 245061628a3fSMingming Cao /* 245161628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting 245261628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput() 245361628a3fSMingming Cao * because that could violate lock ordering on umount 245461628a3fSMingming Cao */ 2455a1d6cc56SAneesh Kumar K.V if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 245661628a3fSMingming Cao return 0; 24572a21e37eSTheodore Ts'o 24582a21e37eSTheodore Ts'o /* 24592a21e37eSTheodore Ts'o * If the filesystem has aborted, it is read-only, so return 24602a21e37eSTheodore Ts'o * right away instead of dumping stack traces later on that 24612a21e37eSTheodore Ts'o * will obscure the real source of the problem. We test 24624ab2f15bSTheodore Ts'o * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 24632a21e37eSTheodore Ts'o * the latter could be true if the filesystem is mounted 24642a21e37eSTheodore Ts'o * read-only, and in that case, ext4_da_writepages should 24652a21e37eSTheodore Ts'o * *never* be called, so if that ever happens, we would want 24662a21e37eSTheodore Ts'o * the stack trace. 24672a21e37eSTheodore Ts'o */ 24684ab2f15bSTheodore Ts'o if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 24692a21e37eSTheodore Ts'o return -EROFS; 24702a21e37eSTheodore Ts'o 247122208dedSAneesh Kumar K.V if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 247222208dedSAneesh Kumar K.V range_whole = 1; 247361628a3fSMingming Cao 24742acf2c26SAneesh Kumar K.V range_cyclic = wbc->range_cyclic; 24752acf2c26SAneesh Kumar K.V if (wbc->range_cyclic) { 247622208dedSAneesh Kumar K.V index = mapping->writeback_index; 24772acf2c26SAneesh Kumar K.V if (index) 24782acf2c26SAneesh Kumar K.V cycled = 0; 24792acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 24802acf2c26SAneesh Kumar K.V wbc->range_end = LLONG_MAX; 24812acf2c26SAneesh Kumar K.V wbc->range_cyclic = 0; 24825b41d924SEric Sandeen end = -1; 24835b41d924SEric Sandeen } else { 248422208dedSAneesh Kumar K.V index = wbc->range_start >> PAGE_CACHE_SHIFT; 24855b41d924SEric Sandeen end = wbc->range_end >> PAGE_CACHE_SHIFT; 24865b41d924SEric Sandeen } 2487a1d6cc56SAneesh Kumar K.V 248855138e0bSTheodore Ts'o /* 248955138e0bSTheodore Ts'o * This works around two forms of stupidity. The first is in 249055138e0bSTheodore Ts'o * the writeback code, which caps the maximum number of pages 249155138e0bSTheodore Ts'o * written to be 1024 pages. This is wrong on multiple 249255138e0bSTheodore Ts'o * levels; different architectues have a different page size, 249355138e0bSTheodore Ts'o * which changes the maximum amount of data which gets 249455138e0bSTheodore Ts'o * written. Secondly, 4 megabytes is way too small. XFS 249555138e0bSTheodore Ts'o * forces this value to be 16 megabytes by multiplying 249655138e0bSTheodore Ts'o * nr_to_write parameter by four, and then relies on its 249755138e0bSTheodore Ts'o * allocator to allocate larger extents to make them 249855138e0bSTheodore Ts'o * contiguous. Unfortunately this brings us to the second 249955138e0bSTheodore Ts'o * stupidity, which is that ext4's mballoc code only allocates 250055138e0bSTheodore Ts'o * at most 2048 blocks. So we force contiguous writes up to 250155138e0bSTheodore Ts'o * the number of dirty blocks in the inode, or 250255138e0bSTheodore Ts'o * sbi->max_writeback_mb_bump whichever is smaller. 250355138e0bSTheodore Ts'o */ 250455138e0bSTheodore Ts'o max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); 2505b443e733SEric Sandeen if (!range_cyclic && range_whole) { 2506b443e733SEric Sandeen if (wbc->nr_to_write == LONG_MAX) 2507b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write; 250855138e0bSTheodore Ts'o else 2509b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write * 8; 2510b443e733SEric Sandeen } else 251155138e0bSTheodore Ts'o desired_nr_to_write = ext4_num_dirty_pages(inode, index, 251255138e0bSTheodore Ts'o max_pages); 251355138e0bSTheodore Ts'o if (desired_nr_to_write > max_pages) 251455138e0bSTheodore Ts'o desired_nr_to_write = max_pages; 251555138e0bSTheodore Ts'o 251655138e0bSTheodore Ts'o if (wbc->nr_to_write < desired_nr_to_write) { 251755138e0bSTheodore Ts'o nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; 251855138e0bSTheodore Ts'o wbc->nr_to_write = desired_nr_to_write; 251955138e0bSTheodore Ts'o } 252055138e0bSTheodore Ts'o 25212acf2c26SAneesh Kumar K.V retry: 25226e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 25235b41d924SEric Sandeen tag_pages_for_writeback(mapping, index, end); 25245b41d924SEric Sandeen 25251bce63d1SShaohua Li blk_start_plug(&plug); 252622208dedSAneesh Kumar K.V while (!ret && wbc->nr_to_write > 0) { 2527a1d6cc56SAneesh Kumar K.V 2528a1d6cc56SAneesh Kumar K.V /* 2529a1d6cc56SAneesh Kumar K.V * we insert one extent at a time. So we need 2530a1d6cc56SAneesh Kumar K.V * credit needed for single extent allocation. 2531a1d6cc56SAneesh Kumar K.V * journalled mode is currently not supported 2532a1d6cc56SAneesh Kumar K.V * by delalloc 2533a1d6cc56SAneesh Kumar K.V */ 2534a1d6cc56SAneesh Kumar K.V BUG_ON(ext4_should_journal_data(inode)); 2535525f4ed8SMingming Cao needed_blocks = ext4_da_writepages_trans_blocks(inode); 2536a1d6cc56SAneesh Kumar K.V 253761628a3fSMingming Cao /* start a new transaction*/ 25389924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 25399924a92aSTheodore Ts'o needed_blocks); 254061628a3fSMingming Cao if (IS_ERR(handle)) { 254161628a3fSMingming Cao ret = PTR_ERR(handle); 25421693918eSTheodore Ts'o ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2543fbe845ddSCurt Wohlgemuth "%ld pages, ino %lu; err %d", __func__, 2544a1d6cc56SAneesh Kumar K.V wbc->nr_to_write, inode->i_ino, ret); 25453c1fcb2cSNamjae Jeon blk_finish_plug(&plug); 254661628a3fSMingming Cao goto out_writepages; 254761628a3fSMingming Cao } 2548f63e6005STheodore Ts'o 2549f63e6005STheodore Ts'o /* 25508eb9e5ceSTheodore Ts'o * Now call write_cache_pages_da() to find the next 2551f63e6005STheodore Ts'o * contiguous region of logical blocks that need 25528eb9e5ceSTheodore Ts'o * blocks to be allocated by ext4 and submit them. 2553f63e6005STheodore Ts'o */ 25549c3569b5STao Ma ret = write_cache_pages_da(handle, mapping, 25559c3569b5STao Ma wbc, &mpd, &done_index); 2556f63e6005STheodore Ts'o /* 2557af901ca1SAndré Goddard Rosa * If we have a contiguous extent of pages and we 2558f63e6005STheodore Ts'o * haven't done the I/O yet, map the blocks and submit 2559f63e6005STheodore Ts'o * them for I/O. 2560f63e6005STheodore Ts'o */ 2561f63e6005STheodore Ts'o if (!mpd.io_done && mpd.next_page != mpd.first_page) { 25625a87b7a5STheodore Ts'o mpage_da_map_and_submit(&mpd); 2563f63e6005STheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL; 2564f63e6005STheodore Ts'o } 2565b3a3ca8cSTheodore Ts'o trace_ext4_da_write_pages(inode, &mpd); 2566f63e6005STheodore Ts'o wbc->nr_to_write -= mpd.pages_written; 2567df22291fSAneesh Kumar K.V 256861628a3fSMingming Cao ext4_journal_stop(handle); 2569df22291fSAneesh Kumar K.V 25708f64b32eSEric Sandeen if ((mpd.retval == -ENOSPC) && sbi->s_journal) { 257122208dedSAneesh Kumar K.V /* commit the transaction which would 257222208dedSAneesh Kumar K.V * free blocks released in the transaction 257322208dedSAneesh Kumar K.V * and try again 257422208dedSAneesh Kumar K.V */ 2575df22291fSAneesh Kumar K.V jbd2_journal_force_commit_nested(sbi->s_journal); 257622208dedSAneesh Kumar K.V ret = 0; 257722208dedSAneesh Kumar K.V } else if (ret == MPAGE_DA_EXTENT_TAIL) { 2578a1d6cc56SAneesh Kumar K.V /* 25798de49e67SKazuya Mio * Got one extent now try with rest of the pages. 25808de49e67SKazuya Mio * If mpd.retval is set -EIO, journal is aborted. 25818de49e67SKazuya Mio * So we don't need to write any more. 2582a1d6cc56SAneesh Kumar K.V */ 258322208dedSAneesh Kumar K.V pages_written += mpd.pages_written; 25848de49e67SKazuya Mio ret = mpd.retval; 25852acf2c26SAneesh Kumar K.V io_done = 1; 258622208dedSAneesh Kumar K.V } else if (wbc->nr_to_write) 258761628a3fSMingming Cao /* 258861628a3fSMingming Cao * There is no more writeout needed 258961628a3fSMingming Cao * or we requested for a noblocking writeout 259061628a3fSMingming Cao * and we found the device congested 259161628a3fSMingming Cao */ 259261628a3fSMingming Cao break; 259361628a3fSMingming Cao } 25941bce63d1SShaohua Li blk_finish_plug(&plug); 25952acf2c26SAneesh Kumar K.V if (!io_done && !cycled) { 25962acf2c26SAneesh Kumar K.V cycled = 1; 25972acf2c26SAneesh Kumar K.V index = 0; 25982acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 25992acf2c26SAneesh Kumar K.V wbc->range_end = mapping->writeback_index - 1; 26002acf2c26SAneesh Kumar K.V goto retry; 26012acf2c26SAneesh Kumar K.V } 260261628a3fSMingming Cao 260322208dedSAneesh Kumar K.V /* Update index */ 26042acf2c26SAneesh Kumar K.V wbc->range_cyclic = range_cyclic; 260522208dedSAneesh Kumar K.V if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 260622208dedSAneesh Kumar K.V /* 260722208dedSAneesh Kumar K.V * set the writeback_index so that range_cyclic 260822208dedSAneesh Kumar K.V * mode will write it back later 260922208dedSAneesh Kumar K.V */ 261072f84e65SEric Sandeen mapping->writeback_index = done_index; 2611a1d6cc56SAneesh Kumar K.V 261261628a3fSMingming Cao out_writepages: 261322208dedSAneesh Kumar K.V wbc->nr_to_write -= nr_to_writebump; 2614de89de6eSTheodore Ts'o wbc->range_start = range_start; 26159bffad1eSTheodore Ts'o trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 261661628a3fSMingming Cao return ret; 261764769240SAlex Tomas } 261864769240SAlex Tomas 261979f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb) 262079f0be8dSAneesh Kumar K.V { 26215c1ff336SEric Whitney s64 free_clusters, dirty_clusters; 262279f0be8dSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 262379f0be8dSAneesh Kumar K.V 262479f0be8dSAneesh Kumar K.V /* 262579f0be8dSAneesh Kumar K.V * switch to non delalloc mode if we are running low 262679f0be8dSAneesh Kumar K.V * on free block. The free block accounting via percpu 2627179f7ebfSEric Dumazet * counters can get slightly wrong with percpu_counter_batch getting 262879f0be8dSAneesh Kumar K.V * accumulated on each CPU without updating global counters 262979f0be8dSAneesh Kumar K.V * Delalloc need an accurate free block accounting. So switch 263079f0be8dSAneesh Kumar K.V * to non delalloc when we are near to error range. 263179f0be8dSAneesh Kumar K.V */ 26325c1ff336SEric Whitney free_clusters = 26335c1ff336SEric Whitney percpu_counter_read_positive(&sbi->s_freeclusters_counter); 26345c1ff336SEric Whitney dirty_clusters = 26355c1ff336SEric Whitney percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 263600d4e736STheodore Ts'o /* 263700d4e736STheodore Ts'o * Start pushing delalloc when 1/2 of free blocks are dirty. 263800d4e736STheodore Ts'o */ 26395c1ff336SEric Whitney if (dirty_clusters && (free_clusters < 2 * dirty_clusters)) 264010ee27a0SMiao Xie try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 264100d4e736STheodore Ts'o 26425c1ff336SEric Whitney if (2 * free_clusters < 3 * dirty_clusters || 26435c1ff336SEric Whitney free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) { 264479f0be8dSAneesh Kumar K.V /* 2645c8afb446SEric Sandeen * free block count is less than 150% of dirty blocks 2646c8afb446SEric Sandeen * or free blocks is less than watermark 264779f0be8dSAneesh Kumar K.V */ 264879f0be8dSAneesh Kumar K.V return 1; 264979f0be8dSAneesh Kumar K.V } 265079f0be8dSAneesh Kumar K.V return 0; 265179f0be8dSAneesh Kumar K.V } 265279f0be8dSAneesh Kumar K.V 265364769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 265464769240SAlex Tomas loff_t pos, unsigned len, unsigned flags, 265564769240SAlex Tomas struct page **pagep, void **fsdata) 265664769240SAlex Tomas { 265772b8ab9dSEric Sandeen int ret, retries = 0; 265864769240SAlex Tomas struct page *page; 265964769240SAlex Tomas pgoff_t index; 266064769240SAlex Tomas struct inode *inode = mapping->host; 266164769240SAlex Tomas handle_t *handle; 266264769240SAlex Tomas 266364769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT; 266479f0be8dSAneesh Kumar K.V 266579f0be8dSAneesh Kumar K.V if (ext4_nonda_switch(inode->i_sb)) { 266679f0be8dSAneesh Kumar K.V *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 266779f0be8dSAneesh Kumar K.V return ext4_write_begin(file, mapping, pos, 266879f0be8dSAneesh Kumar K.V len, flags, pagep, fsdata); 266979f0be8dSAneesh Kumar K.V } 267079f0be8dSAneesh Kumar K.V *fsdata = (void *)0; 26719bffad1eSTheodore Ts'o trace_ext4_da_write_begin(inode, pos, len, flags); 26729c3569b5STao Ma 26739c3569b5STao Ma if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 26749c3569b5STao Ma ret = ext4_da_write_inline_data_begin(mapping, inode, 26759c3569b5STao Ma pos, len, flags, 26769c3569b5STao Ma pagep, fsdata); 26779c3569b5STao Ma if (ret < 0) 267847564bfbSTheodore Ts'o return ret; 267947564bfbSTheodore Ts'o if (ret == 1) 268047564bfbSTheodore Ts'o return 0; 26819c3569b5STao Ma } 26829c3569b5STao Ma 268347564bfbSTheodore Ts'o /* 268447564bfbSTheodore Ts'o * grab_cache_page_write_begin() can take a long time if the 268547564bfbSTheodore Ts'o * system is thrashing due to memory pressure, or if the page 268647564bfbSTheodore Ts'o * is being written back. So grab it first before we start 268747564bfbSTheodore Ts'o * the transaction handle. This also allows us to allocate 268847564bfbSTheodore Ts'o * the page (if needed) without using GFP_NOFS. 268947564bfbSTheodore Ts'o */ 269047564bfbSTheodore Ts'o retry_grab: 269147564bfbSTheodore Ts'o page = grab_cache_page_write_begin(mapping, index, flags); 269247564bfbSTheodore Ts'o if (!page) 269347564bfbSTheodore Ts'o return -ENOMEM; 269447564bfbSTheodore Ts'o unlock_page(page); 269547564bfbSTheodore Ts'o 269664769240SAlex Tomas /* 269764769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update 269864769240SAlex Tomas * if there is delayed block allocation. But we still need 269964769240SAlex Tomas * to journalling the i_disksize update if writes to the end 270064769240SAlex Tomas * of file which has an already mapped buffer. 270164769240SAlex Tomas */ 270247564bfbSTheodore Ts'o retry_journal: 27039924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1); 270464769240SAlex Tomas if (IS_ERR(handle)) { 270547564bfbSTheodore Ts'o page_cache_release(page); 270647564bfbSTheodore Ts'o return PTR_ERR(handle); 270764769240SAlex Tomas } 270864769240SAlex Tomas 270947564bfbSTheodore Ts'o lock_page(page); 271047564bfbSTheodore Ts'o if (page->mapping != mapping) { 271147564bfbSTheodore Ts'o /* The page got truncated from under us */ 271247564bfbSTheodore Ts'o unlock_page(page); 271347564bfbSTheodore Ts'o page_cache_release(page); 2714d5a0d4f7SEric Sandeen ext4_journal_stop(handle); 271547564bfbSTheodore Ts'o goto retry_grab; 2716d5a0d4f7SEric Sandeen } 271747564bfbSTheodore Ts'o /* In case writeback began while the page was unlocked */ 271847564bfbSTheodore Ts'o wait_on_page_writeback(page); 271964769240SAlex Tomas 27206e1db88dSChristoph Hellwig ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 272164769240SAlex Tomas if (ret < 0) { 272264769240SAlex Tomas unlock_page(page); 272364769240SAlex Tomas ext4_journal_stop(handle); 2724ae4d5372SAneesh Kumar K.V /* 2725ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks 2726ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need 2727ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex. 2728ae4d5372SAneesh Kumar K.V */ 2729ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size) 2730b9a4207dSJan Kara ext4_truncate_failed_write(inode); 273147564bfbSTheodore Ts'o 273247564bfbSTheodore Ts'o if (ret == -ENOSPC && 273347564bfbSTheodore Ts'o ext4_should_retry_alloc(inode->i_sb, &retries)) 273447564bfbSTheodore Ts'o goto retry_journal; 273547564bfbSTheodore Ts'o 273647564bfbSTheodore Ts'o page_cache_release(page); 273747564bfbSTheodore Ts'o return ret; 273864769240SAlex Tomas } 273964769240SAlex Tomas 274047564bfbSTheodore Ts'o *pagep = page; 274164769240SAlex Tomas return ret; 274264769240SAlex Tomas } 274364769240SAlex Tomas 2744632eaeabSMingming Cao /* 2745632eaeabSMingming Cao * Check if we should update i_disksize 2746632eaeabSMingming Cao * when write to the end of file but not require block allocation 2747632eaeabSMingming Cao */ 2748632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page, 2749632eaeabSMingming Cao unsigned long offset) 2750632eaeabSMingming Cao { 2751632eaeabSMingming Cao struct buffer_head *bh; 2752632eaeabSMingming Cao struct inode *inode = page->mapping->host; 2753632eaeabSMingming Cao unsigned int idx; 2754632eaeabSMingming Cao int i; 2755632eaeabSMingming Cao 2756632eaeabSMingming Cao bh = page_buffers(page); 2757632eaeabSMingming Cao idx = offset >> inode->i_blkbits; 2758632eaeabSMingming Cao 2759632eaeabSMingming Cao for (i = 0; i < idx; i++) 2760632eaeabSMingming Cao bh = bh->b_this_page; 2761632eaeabSMingming Cao 276229fa89d0SAneesh Kumar K.V if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 2763632eaeabSMingming Cao return 0; 2764632eaeabSMingming Cao return 1; 2765632eaeabSMingming Cao } 2766632eaeabSMingming Cao 276764769240SAlex Tomas static int ext4_da_write_end(struct file *file, 276864769240SAlex Tomas struct address_space *mapping, 276964769240SAlex Tomas loff_t pos, unsigned len, unsigned copied, 277064769240SAlex Tomas struct page *page, void *fsdata) 277164769240SAlex Tomas { 277264769240SAlex Tomas struct inode *inode = mapping->host; 277364769240SAlex Tomas int ret = 0, ret2; 277464769240SAlex Tomas handle_t *handle = ext4_journal_current_handle(); 277564769240SAlex Tomas loff_t new_i_size; 2776632eaeabSMingming Cao unsigned long start, end; 277779f0be8dSAneesh Kumar K.V int write_mode = (int)(unsigned long)fsdata; 277879f0be8dSAneesh Kumar K.V 277974d553aaSTheodore Ts'o if (write_mode == FALL_BACK_TO_NONDELALLOC) 278074d553aaSTheodore Ts'o return ext4_write_end(file, mapping, pos, 278179f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 2782632eaeabSMingming Cao 27839bffad1eSTheodore Ts'o trace_ext4_da_write_end(inode, pos, len, copied); 2784632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1); 2785632eaeabSMingming Cao end = start + copied - 1; 278664769240SAlex Tomas 278764769240SAlex Tomas /* 278864769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size 278964769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty 279064769240SAlex Tomas * into that. 279164769240SAlex Tomas */ 279264769240SAlex Tomas new_i_size = pos + copied; 2793ea51d132SAndrea Arcangeli if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 27949c3569b5STao Ma if (ext4_has_inline_data(inode) || 27959c3569b5STao Ma ext4_da_should_update_i_disksize(page, end)) { 2796632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem); 2797f3b59291STheodore Ts'o if (new_i_size > EXT4_I(inode)->i_disksize) 279864769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size; 2799632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem); 2800cf17fea6SAneesh Kumar K.V /* We need to mark inode dirty even if 2801cf17fea6SAneesh Kumar K.V * new_i_size is less that inode->i_size 2802cf17fea6SAneesh Kumar K.V * bu greater than i_disksize.(hint delalloc) 2803cf17fea6SAneesh Kumar K.V */ 2804cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 2805632eaeabSMingming Cao } 2806632eaeabSMingming Cao } 28079c3569b5STao Ma 28089c3569b5STao Ma if (write_mode != CONVERT_INLINE_DATA && 28099c3569b5STao Ma ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 28109c3569b5STao Ma ext4_has_inline_data(inode)) 28119c3569b5STao Ma ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, 28129c3569b5STao Ma page); 28139c3569b5STao Ma else 281464769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied, 281564769240SAlex Tomas page, fsdata); 28169c3569b5STao Ma 281764769240SAlex Tomas copied = ret2; 281864769240SAlex Tomas if (ret2 < 0) 281964769240SAlex Tomas ret = ret2; 282064769240SAlex Tomas ret2 = ext4_journal_stop(handle); 282164769240SAlex Tomas if (!ret) 282264769240SAlex Tomas ret = ret2; 282364769240SAlex Tomas 282464769240SAlex Tomas return ret ? ret : copied; 282564769240SAlex Tomas } 282664769240SAlex Tomas 282764769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 282864769240SAlex Tomas { 282964769240SAlex Tomas /* 283064769240SAlex Tomas * Drop reserved blocks 283164769240SAlex Tomas */ 283264769240SAlex Tomas BUG_ON(!PageLocked(page)); 283364769240SAlex Tomas if (!page_has_buffers(page)) 283464769240SAlex Tomas goto out; 283564769240SAlex Tomas 2836d2a17637SMingming Cao ext4_da_page_release_reservation(page, offset); 283764769240SAlex Tomas 283864769240SAlex Tomas out: 283964769240SAlex Tomas ext4_invalidatepage(page, offset); 284064769240SAlex Tomas 284164769240SAlex Tomas return; 284264769240SAlex Tomas } 284364769240SAlex Tomas 2844ccd2506bSTheodore Ts'o /* 2845ccd2506bSTheodore Ts'o * Force all delayed allocation blocks to be allocated for a given inode. 2846ccd2506bSTheodore Ts'o */ 2847ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode) 2848ccd2506bSTheodore Ts'o { 2849fb40ba0dSTheodore Ts'o trace_ext4_alloc_da_blocks(inode); 2850fb40ba0dSTheodore Ts'o 2851ccd2506bSTheodore Ts'o if (!EXT4_I(inode)->i_reserved_data_blocks && 2852ccd2506bSTheodore Ts'o !EXT4_I(inode)->i_reserved_meta_blocks) 2853ccd2506bSTheodore Ts'o return 0; 2854ccd2506bSTheodore Ts'o 2855ccd2506bSTheodore Ts'o /* 2856ccd2506bSTheodore Ts'o * We do something simple for now. The filemap_flush() will 2857ccd2506bSTheodore Ts'o * also start triggering a write of the data blocks, which is 2858ccd2506bSTheodore Ts'o * not strictly speaking necessary (and for users of 2859ccd2506bSTheodore Ts'o * laptop_mode, not even desirable). However, to do otherwise 2860ccd2506bSTheodore Ts'o * would require replicating code paths in: 2861ccd2506bSTheodore Ts'o * 2862ccd2506bSTheodore Ts'o * ext4_da_writepages() -> 2863ccd2506bSTheodore Ts'o * write_cache_pages() ---> (via passed in callback function) 2864ccd2506bSTheodore Ts'o * __mpage_da_writepage() --> 2865ccd2506bSTheodore Ts'o * mpage_add_bh_to_extent() 2866ccd2506bSTheodore Ts'o * mpage_da_map_blocks() 2867ccd2506bSTheodore Ts'o * 2868ccd2506bSTheodore Ts'o * The problem is that write_cache_pages(), located in 2869ccd2506bSTheodore Ts'o * mm/page-writeback.c, marks pages clean in preparation for 2870ccd2506bSTheodore Ts'o * doing I/O, which is not desirable if we're not planning on 2871ccd2506bSTheodore Ts'o * doing I/O at all. 2872ccd2506bSTheodore Ts'o * 2873ccd2506bSTheodore Ts'o * We could call write_cache_pages(), and then redirty all of 2874380cf090SWu Fengguang * the pages by calling redirty_page_for_writepage() but that 2875ccd2506bSTheodore Ts'o * would be ugly in the extreme. So instead we would need to 2876ccd2506bSTheodore Ts'o * replicate parts of the code in the above functions, 287725985edcSLucas De Marchi * simplifying them because we wouldn't actually intend to 2878ccd2506bSTheodore Ts'o * write out the pages, but rather only collect contiguous 2879ccd2506bSTheodore Ts'o * logical block extents, call the multi-block allocator, and 2880ccd2506bSTheodore Ts'o * then update the buffer heads with the block allocations. 2881ccd2506bSTheodore Ts'o * 2882ccd2506bSTheodore Ts'o * For now, though, we'll cheat by calling filemap_flush(), 2883ccd2506bSTheodore Ts'o * which will map the blocks, and start the I/O, but not 2884ccd2506bSTheodore Ts'o * actually wait for the I/O to complete. 2885ccd2506bSTheodore Ts'o */ 2886ccd2506bSTheodore Ts'o return filemap_flush(inode->i_mapping); 2887ccd2506bSTheodore Ts'o } 288864769240SAlex Tomas 288964769240SAlex Tomas /* 2890ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by 2891ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data. 2892ac27a0ecSDave Kleikamp * 2893ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the 2894617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling 2895ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the 2896ac27a0ecSDave Kleikamp * data getting swapped to that swapfile suddenly gets overwritten by 2897ac27a0ecSDave Kleikamp * the original zero's written out previously to the journal and 2898ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache. 2899ac27a0ecSDave Kleikamp * 2900ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file, 2901ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache. 2902ac27a0ecSDave Kleikamp */ 2903617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2904ac27a0ecSDave Kleikamp { 2905ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 2906ac27a0ecSDave Kleikamp journal_t *journal; 2907ac27a0ecSDave Kleikamp int err; 2908ac27a0ecSDave Kleikamp 290946c7f254STao Ma /* 291046c7f254STao Ma * We can get here for an inline file via the FIBMAP ioctl 291146c7f254STao Ma */ 291246c7f254STao Ma if (ext4_has_inline_data(inode)) 291346c7f254STao Ma return 0; 291446c7f254STao Ma 291564769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 291664769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) { 291764769240SAlex Tomas /* 291864769240SAlex Tomas * With delalloc we want to sync the file 291964769240SAlex Tomas * so that we can make sure we allocate 292064769240SAlex Tomas * blocks for file 292164769240SAlex Tomas */ 292264769240SAlex Tomas filemap_write_and_wait(mapping); 292364769240SAlex Tomas } 292464769240SAlex Tomas 292519f5fb7aSTheodore Ts'o if (EXT4_JOURNAL(inode) && 292619f5fb7aSTheodore Ts'o ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 2927ac27a0ecSDave Kleikamp /* 2928ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of 2929ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare: 2930ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file 2931ac27a0ecSDave Kleikamp * do we expect this to happen. 2932ac27a0ecSDave Kleikamp * 2933ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not 2934ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be 2935ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at 2936ac27a0ecSDave Kleikamp * will.) 2937ac27a0ecSDave Kleikamp * 2938617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than 2939ac27a0ecSDave Kleikamp * regular files. If somebody wants to bmap a directory 2940ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer 2941ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve 2942ac27a0ecSDave Kleikamp * everything they get. 2943ac27a0ecSDave Kleikamp */ 2944ac27a0ecSDave Kleikamp 294519f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 2946617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2947dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2948dab291afSMingming Cao err = jbd2_journal_flush(journal); 2949dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2950ac27a0ecSDave Kleikamp 2951ac27a0ecSDave Kleikamp if (err) 2952ac27a0ecSDave Kleikamp return 0; 2953ac27a0ecSDave Kleikamp } 2954ac27a0ecSDave Kleikamp 2955617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block); 2956ac27a0ecSDave Kleikamp } 2957ac27a0ecSDave Kleikamp 2958617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page) 2959ac27a0ecSDave Kleikamp { 296046c7f254STao Ma int ret = -EAGAIN; 296146c7f254STao Ma struct inode *inode = page->mapping->host; 296246c7f254STao Ma 29630562e0baSJiaying Zhang trace_ext4_readpage(page); 296446c7f254STao Ma 296546c7f254STao Ma if (ext4_has_inline_data(inode)) 296646c7f254STao Ma ret = ext4_readpage_inline(inode, page); 296746c7f254STao Ma 296846c7f254STao Ma if (ret == -EAGAIN) 2969617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block); 297046c7f254STao Ma 297146c7f254STao Ma return ret; 2972ac27a0ecSDave Kleikamp } 2973ac27a0ecSDave Kleikamp 2974ac27a0ecSDave Kleikamp static int 2975617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping, 2976ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages) 2977ac27a0ecSDave Kleikamp { 297846c7f254STao Ma struct inode *inode = mapping->host; 297946c7f254STao Ma 298046c7f254STao Ma /* If the file has inline data, no need to do readpages. */ 298146c7f254STao Ma if (ext4_has_inline_data(inode)) 298246c7f254STao Ma return 0; 298346c7f254STao Ma 2984617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2985ac27a0ecSDave Kleikamp } 2986ac27a0ecSDave Kleikamp 2987617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset) 2988ac27a0ecSDave Kleikamp { 29890562e0baSJiaying Zhang trace_ext4_invalidatepage(page, offset); 29900562e0baSJiaying Zhang 29914520fb3cSJan Kara /* No journalling happens on data buffers when this function is used */ 29924520fb3cSJan Kara WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 29934520fb3cSJan Kara 29944520fb3cSJan Kara block_invalidatepage(page, offset); 29954520fb3cSJan Kara } 29964520fb3cSJan Kara 299753e87268SJan Kara static int __ext4_journalled_invalidatepage(struct page *page, 29984520fb3cSJan Kara unsigned long offset) 29994520fb3cSJan Kara { 30004520fb3cSJan Kara journal_t *journal = EXT4_JOURNAL(page->mapping->host); 30014520fb3cSJan Kara 30024520fb3cSJan Kara trace_ext4_journalled_invalidatepage(page, offset); 30034520fb3cSJan Kara 3004744692dcSJiaying Zhang /* 3005ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying 3006ac27a0ecSDave Kleikamp */ 3007ac27a0ecSDave Kleikamp if (offset == 0) 3008ac27a0ecSDave Kleikamp ClearPageChecked(page); 3009ac27a0ecSDave Kleikamp 301053e87268SJan Kara return jbd2_journal_invalidatepage(journal, page, offset); 301153e87268SJan Kara } 301253e87268SJan Kara 301353e87268SJan Kara /* Wrapper for aops... */ 301453e87268SJan Kara static void ext4_journalled_invalidatepage(struct page *page, 301553e87268SJan Kara unsigned long offset) 301653e87268SJan Kara { 301753e87268SJan Kara WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0); 3018ac27a0ecSDave Kleikamp } 3019ac27a0ecSDave Kleikamp 3020617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait) 3021ac27a0ecSDave Kleikamp { 3022617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3023ac27a0ecSDave Kleikamp 30240562e0baSJiaying Zhang trace_ext4_releasepage(page); 30250562e0baSJiaying Zhang 3026e1c36595SJan Kara /* Page has dirty journalled data -> cannot release */ 3027e1c36595SJan Kara if (PageChecked(page)) 3028ac27a0ecSDave Kleikamp return 0; 30290390131bSFrank Mayhar if (journal) 3030dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait); 30310390131bSFrank Mayhar else 30320390131bSFrank Mayhar return try_to_free_buffers(page); 3033ac27a0ecSDave Kleikamp } 3034ac27a0ecSDave Kleikamp 3035ac27a0ecSDave Kleikamp /* 30362ed88685STheodore Ts'o * ext4_get_block used when preparing for a DIO write or buffer write. 30372ed88685STheodore Ts'o * We allocate an uinitialized extent if blocks haven't been allocated. 30382ed88685STheodore Ts'o * The extent will be converted to initialized after the IO is complete. 30392ed88685STheodore Ts'o */ 3040f19d5870STao Ma int ext4_get_block_write(struct inode *inode, sector_t iblock, 30414c0425ffSMingming Cao struct buffer_head *bh_result, int create) 30424c0425ffSMingming Cao { 3043c7064ef1SJiaying Zhang ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", 30448d5d02e6SMingming Cao inode->i_ino, create); 30452ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result, 30462ed88685STheodore Ts'o EXT4_GET_BLOCKS_IO_CREATE_EXT); 30474c0425ffSMingming Cao } 30484c0425ffSMingming Cao 3049729f52c6SZheng Liu static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock, 30508b0f165fSAnatol Pomozov struct buffer_head *bh_result, int create) 3051729f52c6SZheng Liu { 30528b0f165fSAnatol Pomozov ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n", 30538b0f165fSAnatol Pomozov inode->i_ino, create); 30548b0f165fSAnatol Pomozov return _ext4_get_block(inode, iblock, bh_result, 30558b0f165fSAnatol Pomozov EXT4_GET_BLOCKS_NO_LOCK); 3056729f52c6SZheng Liu } 3057729f52c6SZheng Liu 30584c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 3059552ef802SChristoph Hellwig ssize_t size, void *private, int ret, 3060552ef802SChristoph Hellwig bool is_async) 30614c0425ffSMingming Cao { 3062496ad9aaSAl Viro struct inode *inode = file_inode(iocb->ki_filp); 30634c0425ffSMingming Cao ext4_io_end_t *io_end = iocb->private; 30644c0425ffSMingming Cao 30654b70df18SMingming /* if not async direct IO or dio with 0 bytes write, just return */ 30664b70df18SMingming if (!io_end || !size) 3067552ef802SChristoph Hellwig goto out; 30684b70df18SMingming 30698d5d02e6SMingming Cao ext_debug("ext4_end_io_dio(): io_end 0x%p " 3070ace36ad4SJoe Perches "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 30718d5d02e6SMingming Cao iocb->private, io_end->inode->i_ino, iocb, offset, 30728d5d02e6SMingming Cao size); 30738d5d02e6SMingming Cao 3074b5a7e970STheodore Ts'o iocb->private = NULL; 3075b5a7e970STheodore Ts'o 30768d5d02e6SMingming Cao /* if not aio dio with unwritten extents, just free io and return */ 3077bd2d0210STheodore Ts'o if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 30788d5d02e6SMingming Cao ext4_free_io_end(io_end); 30795b3ff237Sjiayingz@google.com (Jiaying Zhang) out: 3080091e26dfSJan Kara inode_dio_done(inode); 30815b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) 30825b3ff237Sjiayingz@google.com (Jiaying Zhang) aio_complete(iocb, ret, 0); 30835b3ff237Sjiayingz@google.com (Jiaying Zhang) return; 30848d5d02e6SMingming Cao } 30858d5d02e6SMingming Cao 30864c0425ffSMingming Cao io_end->offset = offset; 30874c0425ffSMingming Cao io_end->size = size; 30885b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) { 30895b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->iocb = iocb; 30905b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->result = ret; 30915b3ff237Sjiayingz@google.com (Jiaying Zhang) } 30924c0425ffSMingming Cao 309328a535f9SDmitry Monakhov ext4_add_complete_io(io_end); 30944c0425ffSMingming Cao } 3095c7064ef1SJiaying Zhang 30964c0425ffSMingming Cao /* 30974c0425ffSMingming Cao * For ext4 extent files, ext4 will do direct-io write to holes, 30984c0425ffSMingming Cao * preallocated extents, and those write extend the file, no need to 30994c0425ffSMingming Cao * fall back to buffered IO. 31004c0425ffSMingming Cao * 3101b595076aSUwe Kleine-König * For holes, we fallocate those blocks, mark them as uninitialized 310269c499d1STheodore Ts'o * If those blocks were preallocated, we mark sure they are split, but 3103b595076aSUwe Kleine-König * still keep the range to write as uninitialized. 31044c0425ffSMingming Cao * 310569c499d1STheodore Ts'o * The unwritten extents will be converted to written when DIO is completed. 31068d5d02e6SMingming Cao * For async direct IO, since the IO may still pending when return, we 310725985edcSLucas De Marchi * set up an end_io call back function, which will do the conversion 31088d5d02e6SMingming Cao * when async direct IO completed. 31094c0425ffSMingming Cao * 31104c0425ffSMingming Cao * If the O_DIRECT write will extend the file then add this inode to the 31114c0425ffSMingming Cao * orphan list. So recovery will truncate it back to the original size 31124c0425ffSMingming Cao * if the machine crashes during the write. 31134c0425ffSMingming Cao * 31144c0425ffSMingming Cao */ 31154c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, 31164c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 31174c0425ffSMingming Cao unsigned long nr_segs) 31184c0425ffSMingming Cao { 31194c0425ffSMingming Cao struct file *file = iocb->ki_filp; 31204c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 31214c0425ffSMingming Cao ssize_t ret; 31224c0425ffSMingming Cao size_t count = iov_length(iov, nr_segs); 3123729f52c6SZheng Liu int overwrite = 0; 31248b0f165fSAnatol Pomozov get_block_t *get_block_func = NULL; 31258b0f165fSAnatol Pomozov int dio_flags = 0; 312669c499d1STheodore Ts'o loff_t final_size = offset + count; 312769c499d1STheodore Ts'o 312869c499d1STheodore Ts'o /* Use the old path for reads and writes beyond i_size. */ 312969c499d1STheodore Ts'o if (rw != WRITE || final_size > inode->i_size) 313069c499d1STheodore Ts'o return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3131729f52c6SZheng Liu 31324bd809dbSZheng Liu BUG_ON(iocb->private == NULL); 31334bd809dbSZheng Liu 31344bd809dbSZheng Liu /* If we do a overwrite dio, i_mutex locking can be released */ 31354bd809dbSZheng Liu overwrite = *((int *)iocb->private); 31364bd809dbSZheng Liu 31374bd809dbSZheng Liu if (overwrite) { 31381f555cfaSDmitry Monakhov atomic_inc(&inode->i_dio_count); 31394bd809dbSZheng Liu down_read(&EXT4_I(inode)->i_data_sem); 31404bd809dbSZheng Liu mutex_unlock(&inode->i_mutex); 31414bd809dbSZheng Liu } 31424bd809dbSZheng Liu 31434c0425ffSMingming Cao /* 31448d5d02e6SMingming Cao * We could direct write to holes and fallocate. 31458d5d02e6SMingming Cao * 314669c499d1STheodore Ts'o * Allocated blocks to fill the hole are marked as 314769c499d1STheodore Ts'o * uninitialized to prevent parallel buffered read to expose 314869c499d1STheodore Ts'o * the stale data before DIO complete the data IO. 31498d5d02e6SMingming Cao * 315069c499d1STheodore Ts'o * As to previously fallocated extents, ext4 get_block will 315169c499d1STheodore Ts'o * just simply mark the buffer mapped but still keep the 315269c499d1STheodore Ts'o * extents uninitialized. 31534c0425ffSMingming Cao * 315469c499d1STheodore Ts'o * For non AIO case, we will convert those unwritten extents 31558d5d02e6SMingming Cao * to written after return back from blockdev_direct_IO. 31564c0425ffSMingming Cao * 315769c499d1STheodore Ts'o * For async DIO, the conversion needs to be deferred when the 315869c499d1STheodore Ts'o * IO is completed. The ext4 end_io callback function will be 315969c499d1STheodore Ts'o * called to take care of the conversion work. Here for async 316069c499d1STheodore Ts'o * case, we allocate an io_end structure to hook to the iocb. 31614c0425ffSMingming Cao */ 31628d5d02e6SMingming Cao iocb->private = NULL; 3163f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, NULL); 31648d5d02e6SMingming Cao if (!is_sync_kiocb(iocb)) { 316569c499d1STheodore Ts'o ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS); 31664bd809dbSZheng Liu if (!io_end) { 31674bd809dbSZheng Liu ret = -ENOMEM; 31684bd809dbSZheng Liu goto retake_lock; 31694bd809dbSZheng Liu } 3170266991b1SJeff Moyer io_end->flag |= EXT4_IO_END_DIRECT; 3171266991b1SJeff Moyer iocb->private = io_end; 31728d5d02e6SMingming Cao /* 317369c499d1STheodore Ts'o * we save the io structure for current async direct 317469c499d1STheodore Ts'o * IO, so that later ext4_map_blocks() could flag the 317569c499d1STheodore Ts'o * io structure whether there is a unwritten extents 317669c499d1STheodore Ts'o * needs to be converted when IO is completed. 31778d5d02e6SMingming Cao */ 3178f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, io_end); 31798d5d02e6SMingming Cao } 31808d5d02e6SMingming Cao 31818b0f165fSAnatol Pomozov if (overwrite) { 31828b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write_nolock; 31838b0f165fSAnatol Pomozov } else { 31848b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write; 31858b0f165fSAnatol Pomozov dio_flags = DIO_LOCKING; 31868b0f165fSAnatol Pomozov } 3187729f52c6SZheng Liu ret = __blockdev_direct_IO(rw, iocb, inode, 3188729f52c6SZheng Liu inode->i_sb->s_bdev, iov, 3189729f52c6SZheng Liu offset, nr_segs, 31908b0f165fSAnatol Pomozov get_block_func, 3191729f52c6SZheng Liu ext4_end_io_dio, 3192729f52c6SZheng Liu NULL, 31938b0f165fSAnatol Pomozov dio_flags); 31948b0f165fSAnatol Pomozov 31958d5d02e6SMingming Cao if (iocb->private) 3196f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, NULL); 31978d5d02e6SMingming Cao /* 319869c499d1STheodore Ts'o * The io_end structure takes a reference to the inode, that 319969c499d1STheodore Ts'o * structure needs to be destroyed and the reference to the 320069c499d1STheodore Ts'o * inode need to be dropped, when IO is complete, even with 0 320169c499d1STheodore Ts'o * byte write, or failed. 32028d5d02e6SMingming Cao * 320369c499d1STheodore Ts'o * In the successful AIO DIO case, the io_end structure will 320469c499d1STheodore Ts'o * be destroyed and the reference to the inode will be dropped 32058d5d02e6SMingming Cao * after the end_io call back function is called. 32068d5d02e6SMingming Cao * 320769c499d1STheodore Ts'o * In the case there is 0 byte write, or error case, since VFS 320869c499d1STheodore Ts'o * direct IO won't invoke the end_io call back function, we 320969c499d1STheodore Ts'o * need to free the end_io structure here. 32108d5d02e6SMingming Cao */ 32118d5d02e6SMingming Cao if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { 32128d5d02e6SMingming Cao ext4_free_io_end(iocb->private); 32138d5d02e6SMingming Cao iocb->private = NULL; 3214729f52c6SZheng Liu } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 32155f524950SMingming EXT4_STATE_DIO_UNWRITTEN)) { 3216109f5565SMingming int err; 32178d5d02e6SMingming Cao /* 32188d5d02e6SMingming Cao * for non AIO case, since the IO is already 321925985edcSLucas De Marchi * completed, we could do the conversion right here 32208d5d02e6SMingming Cao */ 3221109f5565SMingming err = ext4_convert_unwritten_extents(inode, 32228d5d02e6SMingming Cao offset, ret); 3223109f5565SMingming if (err < 0) 3224109f5565SMingming ret = err; 322519f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3226109f5565SMingming } 32274bd809dbSZheng Liu 32284bd809dbSZheng Liu retake_lock: 32294bd809dbSZheng Liu /* take i_mutex locking again if we do a ovewrite dio */ 32304bd809dbSZheng Liu if (overwrite) { 32311f555cfaSDmitry Monakhov inode_dio_done(inode); 32324bd809dbSZheng Liu up_read(&EXT4_I(inode)->i_data_sem); 32334bd809dbSZheng Liu mutex_lock(&inode->i_mutex); 32344bd809dbSZheng Liu } 32354bd809dbSZheng Liu 32364c0425ffSMingming Cao return ret; 32374c0425ffSMingming Cao } 32388d5d02e6SMingming Cao 32394c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 32404c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 32414c0425ffSMingming Cao unsigned long nr_segs) 32424c0425ffSMingming Cao { 32434c0425ffSMingming Cao struct file *file = iocb->ki_filp; 32444c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 32450562e0baSJiaying Zhang ssize_t ret; 32464c0425ffSMingming Cao 324784ebd795STheodore Ts'o /* 324884ebd795STheodore Ts'o * If we are doing data journalling we don't support O_DIRECT 324984ebd795STheodore Ts'o */ 325084ebd795STheodore Ts'o if (ext4_should_journal_data(inode)) 325184ebd795STheodore Ts'o return 0; 325284ebd795STheodore Ts'o 325346c7f254STao Ma /* Let buffer I/O handle the inline data case. */ 325446c7f254STao Ma if (ext4_has_inline_data(inode)) 325546c7f254STao Ma return 0; 325646c7f254STao Ma 32570562e0baSJiaying Zhang trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); 325812e9b892SDmitry Monakhov if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 32590562e0baSJiaying Zhang ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 32600562e0baSJiaying Zhang else 32610562e0baSJiaying Zhang ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 32620562e0baSJiaying Zhang trace_ext4_direct_IO_exit(inode, offset, 32630562e0baSJiaying Zhang iov_length(iov, nr_segs), rw, ret); 32640562e0baSJiaying Zhang return ret; 32654c0425ffSMingming Cao } 32664c0425ffSMingming Cao 3267ac27a0ecSDave Kleikamp /* 3268617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling 3269ac27a0ecSDave Kleikamp * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3270ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is 3271ac27a0ecSDave Kleikamp * not necessarily locked. 3272ac27a0ecSDave Kleikamp * 3273ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the 3274ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3275ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 3276ac27a0ecSDave Kleikamp * 3277ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 3278ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 3279ac27a0ecSDave Kleikamp */ 3280617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 3281ac27a0ecSDave Kleikamp { 3282ac27a0ecSDave Kleikamp SetPageChecked(page); 3283ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 3284ac27a0ecSDave Kleikamp } 3285ac27a0ecSDave Kleikamp 328674d553aaSTheodore Ts'o static const struct address_space_operations ext4_aops = { 3287617ba13bSMingming Cao .readpage = ext4_readpage, 3288617ba13bSMingming Cao .readpages = ext4_readpages, 328943ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3290bfc1af65SNick Piggin .write_begin = ext4_write_begin, 329174d553aaSTheodore Ts'o .write_end = ext4_write_end, 3292617ba13bSMingming Cao .bmap = ext4_bmap, 3293617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3294617ba13bSMingming Cao .releasepage = ext4_releasepage, 3295617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3296ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 32978ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3298aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3299ac27a0ecSDave Kleikamp }; 3300ac27a0ecSDave Kleikamp 3301617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = { 3302617ba13bSMingming Cao .readpage = ext4_readpage, 3303617ba13bSMingming Cao .readpages = ext4_readpages, 330443ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3305bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3306bfc1af65SNick Piggin .write_end = ext4_journalled_write_end, 3307617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty, 3308617ba13bSMingming Cao .bmap = ext4_bmap, 33094520fb3cSJan Kara .invalidatepage = ext4_journalled_invalidatepage, 3310617ba13bSMingming Cao .releasepage = ext4_releasepage, 331184ebd795STheodore Ts'o .direct_IO = ext4_direct_IO, 33128ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3313aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3314ac27a0ecSDave Kleikamp }; 3315ac27a0ecSDave Kleikamp 331664769240SAlex Tomas static const struct address_space_operations ext4_da_aops = { 331764769240SAlex Tomas .readpage = ext4_readpage, 331864769240SAlex Tomas .readpages = ext4_readpages, 331943ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 332064769240SAlex Tomas .writepages = ext4_da_writepages, 332164769240SAlex Tomas .write_begin = ext4_da_write_begin, 332264769240SAlex Tomas .write_end = ext4_da_write_end, 332364769240SAlex Tomas .bmap = ext4_bmap, 332464769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage, 332564769240SAlex Tomas .releasepage = ext4_releasepage, 332664769240SAlex Tomas .direct_IO = ext4_direct_IO, 332764769240SAlex Tomas .migratepage = buffer_migrate_page, 33288ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3329aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 333064769240SAlex Tomas }; 333164769240SAlex Tomas 3332617ba13bSMingming Cao void ext4_set_aops(struct inode *inode) 3333ac27a0ecSDave Kleikamp { 33343d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) { 33353d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE: 333674d553aaSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE); 33373d2b1582SLukas Czerner break; 33383d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE: 333974d553aaSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE); 33403d2b1582SLukas Czerner break; 33413d2b1582SLukas Czerner case EXT4_INODE_JOURNAL_DATA_MODE: 3342617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops; 334374d553aaSTheodore Ts'o return; 33443d2b1582SLukas Czerner default: 33453d2b1582SLukas Czerner BUG(); 33463d2b1582SLukas Czerner } 334774d553aaSTheodore Ts'o if (test_opt(inode->i_sb, DELALLOC)) 334874d553aaSTheodore Ts'o inode->i_mapping->a_ops = &ext4_da_aops; 334974d553aaSTheodore Ts'o else 335074d553aaSTheodore Ts'o inode->i_mapping->a_ops = &ext4_aops; 3351ac27a0ecSDave Kleikamp } 3352ac27a0ecSDave Kleikamp 33534e96b2dbSAllison Henderson 33544e96b2dbSAllison Henderson /* 33554e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers() 33564e96b2dbSAllison Henderson * Wrapper function for ext4_discard_partial_page_buffers_no_lock. 33574e96b2dbSAllison Henderson * This function finds and locks the page containing the offset 33584e96b2dbSAllison Henderson * "from" and passes it to ext4_discard_partial_page_buffers_no_lock. 33594e96b2dbSAllison Henderson * Calling functions that already have the page locked should call 33604e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock directly. 33614e96b2dbSAllison Henderson */ 33624e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle, 33634e96b2dbSAllison Henderson struct address_space *mapping, loff_t from, 33644e96b2dbSAllison Henderson loff_t length, int flags) 33654e96b2dbSAllison Henderson { 33664e96b2dbSAllison Henderson struct inode *inode = mapping->host; 33674e96b2dbSAllison Henderson struct page *page; 33684e96b2dbSAllison Henderson int err = 0; 33694e96b2dbSAllison Henderson 33704e96b2dbSAllison Henderson page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 33714e96b2dbSAllison Henderson mapping_gfp_mask(mapping) & ~__GFP_FS); 33724e96b2dbSAllison Henderson if (!page) 33735129d05fSYongqiang Yang return -ENOMEM; 33744e96b2dbSAllison Henderson 33754e96b2dbSAllison Henderson err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page, 33764e96b2dbSAllison Henderson from, length, flags); 33774e96b2dbSAllison Henderson 33784e96b2dbSAllison Henderson unlock_page(page); 33794e96b2dbSAllison Henderson page_cache_release(page); 33804e96b2dbSAllison Henderson return err; 33814e96b2dbSAllison Henderson } 33824e96b2dbSAllison Henderson 33834e96b2dbSAllison Henderson /* 33844e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock() 33854e96b2dbSAllison Henderson * Zeros a page range of length 'length' starting from offset 'from'. 33864e96b2dbSAllison Henderson * Buffer heads that correspond to the block aligned regions of the 33874e96b2dbSAllison Henderson * zeroed range will be unmapped. Unblock aligned regions 33884e96b2dbSAllison Henderson * will have the corresponding buffer head mapped if needed so that 33894e96b2dbSAllison Henderson * that region of the page can be updated with the partial zero out. 33904e96b2dbSAllison Henderson * 33914e96b2dbSAllison Henderson * This function assumes that the page has already been locked. The 33924e96b2dbSAllison Henderson * The range to be discarded must be contained with in the given page. 33934e96b2dbSAllison Henderson * If the specified range exceeds the end of the page it will be shortened 33944e96b2dbSAllison Henderson * to the end of the page that corresponds to 'from'. This function is 33954e96b2dbSAllison Henderson * appropriate for updating a page and it buffer heads to be unmapped and 33964e96b2dbSAllison Henderson * zeroed for blocks that have been either released, or are going to be 33974e96b2dbSAllison Henderson * released. 33984e96b2dbSAllison Henderson * 33994e96b2dbSAllison Henderson * handle: The journal handle 34004e96b2dbSAllison Henderson * inode: The files inode 34014e96b2dbSAllison Henderson * page: A locked page that contains the offset "from" 34024907cb7bSAnatol Pomozov * from: The starting byte offset (from the beginning of the file) 34034e96b2dbSAllison Henderson * to begin discarding 34044e96b2dbSAllison Henderson * len: The length of bytes to discard 34054e96b2dbSAllison Henderson * flags: Optional flags that may be used: 34064e96b2dbSAllison Henderson * 34074e96b2dbSAllison Henderson * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED 34084e96b2dbSAllison Henderson * Only zero the regions of the page whose buffer heads 34094e96b2dbSAllison Henderson * have already been unmapped. This flag is appropriate 34104907cb7bSAnatol Pomozov * for updating the contents of a page whose blocks may 34114e96b2dbSAllison Henderson * have already been released, and we only want to zero 34124e96b2dbSAllison Henderson * out the regions that correspond to those released blocks. 34134e96b2dbSAllison Henderson * 34144907cb7bSAnatol Pomozov * Returns zero on success or negative on failure. 34154e96b2dbSAllison Henderson */ 34165f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, 34174e96b2dbSAllison Henderson struct inode *inode, struct page *page, loff_t from, 34184e96b2dbSAllison Henderson loff_t length, int flags) 34194e96b2dbSAllison Henderson { 34204e96b2dbSAllison Henderson ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 34214e96b2dbSAllison Henderson unsigned int offset = from & (PAGE_CACHE_SIZE-1); 34224e96b2dbSAllison Henderson unsigned int blocksize, max, pos; 34234e96b2dbSAllison Henderson ext4_lblk_t iblock; 34244e96b2dbSAllison Henderson struct buffer_head *bh; 34254e96b2dbSAllison Henderson int err = 0; 34264e96b2dbSAllison Henderson 34274e96b2dbSAllison Henderson blocksize = inode->i_sb->s_blocksize; 34284e96b2dbSAllison Henderson max = PAGE_CACHE_SIZE - offset; 34294e96b2dbSAllison Henderson 34304e96b2dbSAllison Henderson if (index != page->index) 34314e96b2dbSAllison Henderson return -EINVAL; 34324e96b2dbSAllison Henderson 34334e96b2dbSAllison Henderson /* 34344e96b2dbSAllison Henderson * correct length if it does not fall between 34354e96b2dbSAllison Henderson * 'from' and the end of the page 34364e96b2dbSAllison Henderson */ 34374e96b2dbSAllison Henderson if (length > max || length < 0) 34384e96b2dbSAllison Henderson length = max; 34394e96b2dbSAllison Henderson 34404e96b2dbSAllison Henderson iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 34414e96b2dbSAllison Henderson 3442093e6e36SYongqiang Yang if (!page_has_buffers(page)) 34434e96b2dbSAllison Henderson create_empty_buffers(page, blocksize, 0); 34444e96b2dbSAllison Henderson 34454e96b2dbSAllison Henderson /* Find the buffer that contains "offset" */ 34464e96b2dbSAllison Henderson bh = page_buffers(page); 34474e96b2dbSAllison Henderson pos = blocksize; 34484e96b2dbSAllison Henderson while (offset >= pos) { 34494e96b2dbSAllison Henderson bh = bh->b_this_page; 34504e96b2dbSAllison Henderson iblock++; 34514e96b2dbSAllison Henderson pos += blocksize; 34524e96b2dbSAllison Henderson } 34534e96b2dbSAllison Henderson 34544e96b2dbSAllison Henderson pos = offset; 34554e96b2dbSAllison Henderson while (pos < offset + length) { 3456e260daf2SYongqiang Yang unsigned int end_of_block, range_to_discard; 3457e260daf2SYongqiang Yang 34584e96b2dbSAllison Henderson err = 0; 34594e96b2dbSAllison Henderson 34604e96b2dbSAllison Henderson /* The length of space left to zero and unmap */ 34614e96b2dbSAllison Henderson range_to_discard = offset + length - pos; 34624e96b2dbSAllison Henderson 34634e96b2dbSAllison Henderson /* The length of space until the end of the block */ 34644e96b2dbSAllison Henderson end_of_block = blocksize - (pos & (blocksize-1)); 34654e96b2dbSAllison Henderson 34664e96b2dbSAllison Henderson /* 34674e96b2dbSAllison Henderson * Do not unmap or zero past end of block 34684e96b2dbSAllison Henderson * for this buffer head 34694e96b2dbSAllison Henderson */ 34704e96b2dbSAllison Henderson if (range_to_discard > end_of_block) 34714e96b2dbSAllison Henderson range_to_discard = end_of_block; 34724e96b2dbSAllison Henderson 34734e96b2dbSAllison Henderson 34744e96b2dbSAllison Henderson /* 34754e96b2dbSAllison Henderson * Skip this buffer head if we are only zeroing unampped 34764e96b2dbSAllison Henderson * regions of the page 34774e96b2dbSAllison Henderson */ 34784e96b2dbSAllison Henderson if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED && 34794e96b2dbSAllison Henderson buffer_mapped(bh)) 34804e96b2dbSAllison Henderson goto next; 34814e96b2dbSAllison Henderson 34824e96b2dbSAllison Henderson /* If the range is block aligned, unmap */ 34834e96b2dbSAllison Henderson if (range_to_discard == blocksize) { 34844e96b2dbSAllison Henderson clear_buffer_dirty(bh); 34854e96b2dbSAllison Henderson bh->b_bdev = NULL; 34864e96b2dbSAllison Henderson clear_buffer_mapped(bh); 34874e96b2dbSAllison Henderson clear_buffer_req(bh); 34884e96b2dbSAllison Henderson clear_buffer_new(bh); 34894e96b2dbSAllison Henderson clear_buffer_delay(bh); 34904e96b2dbSAllison Henderson clear_buffer_unwritten(bh); 34914e96b2dbSAllison Henderson clear_buffer_uptodate(bh); 34924e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 34934e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Buffer discarded"); 34944e96b2dbSAllison Henderson goto next; 34954e96b2dbSAllison Henderson } 34964e96b2dbSAllison Henderson 34974e96b2dbSAllison Henderson /* 34984e96b2dbSAllison Henderson * If this block is not completely contained in the range 34994e96b2dbSAllison Henderson * to be discarded, then it is not going to be released. Because 35004e96b2dbSAllison Henderson * we need to keep this block, we need to make sure this part 35014e96b2dbSAllison Henderson * of the page is uptodate before we modify it by writeing 35024e96b2dbSAllison Henderson * partial zeros on it. 35034e96b2dbSAllison Henderson */ 35044e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 35054e96b2dbSAllison Henderson /* 35064e96b2dbSAllison Henderson * Buffer head must be mapped before we can read 35074e96b2dbSAllison Henderson * from the block 35084e96b2dbSAllison Henderson */ 35094e96b2dbSAllison Henderson BUFFER_TRACE(bh, "unmapped"); 35104e96b2dbSAllison Henderson ext4_get_block(inode, iblock, bh, 0); 35114e96b2dbSAllison Henderson /* unmapped? It's a hole - nothing to do */ 35124e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 35134e96b2dbSAllison Henderson BUFFER_TRACE(bh, "still unmapped"); 35144e96b2dbSAllison Henderson goto next; 35154e96b2dbSAllison Henderson } 35164e96b2dbSAllison Henderson } 35174e96b2dbSAllison Henderson 35184e96b2dbSAllison Henderson /* Ok, it's mapped. Make sure it's up-to-date */ 35194e96b2dbSAllison Henderson if (PageUptodate(page)) 35204e96b2dbSAllison Henderson set_buffer_uptodate(bh); 35214e96b2dbSAllison Henderson 35224e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) { 35234e96b2dbSAllison Henderson err = -EIO; 35244e96b2dbSAllison Henderson ll_rw_block(READ, 1, &bh); 35254e96b2dbSAllison Henderson wait_on_buffer(bh); 35264e96b2dbSAllison Henderson /* Uhhuh. Read error. Complain and punt.*/ 35274e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) 35284e96b2dbSAllison Henderson goto next; 35294e96b2dbSAllison Henderson } 35304e96b2dbSAllison Henderson 35314e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 35324e96b2dbSAllison Henderson BUFFER_TRACE(bh, "get write access"); 35334e96b2dbSAllison Henderson err = ext4_journal_get_write_access(handle, bh); 35344e96b2dbSAllison Henderson if (err) 35354e96b2dbSAllison Henderson goto next; 35364e96b2dbSAllison Henderson } 35374e96b2dbSAllison Henderson 35384e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 35394e96b2dbSAllison Henderson 35404e96b2dbSAllison Henderson err = 0; 35414e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 35424e96b2dbSAllison Henderson err = ext4_handle_dirty_metadata(handle, inode, bh); 3543decbd919STheodore Ts'o } else 35444e96b2dbSAllison Henderson mark_buffer_dirty(bh); 35454e96b2dbSAllison Henderson 35464e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Partial buffer zeroed"); 35474e96b2dbSAllison Henderson next: 35484e96b2dbSAllison Henderson bh = bh->b_this_page; 35494e96b2dbSAllison Henderson iblock++; 35504e96b2dbSAllison Henderson pos += range_to_discard; 35514e96b2dbSAllison Henderson } 35524e96b2dbSAllison Henderson 35534e96b2dbSAllison Henderson return err; 35544e96b2dbSAllison Henderson } 35554e96b2dbSAllison Henderson 355691ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode) 355791ef4cafSDuane Griffin { 355891ef4cafSDuane Griffin if (S_ISREG(inode->i_mode)) 355991ef4cafSDuane Griffin return 1; 356091ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode)) 356191ef4cafSDuane Griffin return 1; 356291ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode)) 356391ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode); 356491ef4cafSDuane Griffin return 0; 356591ef4cafSDuane Griffin } 356691ef4cafSDuane Griffin 3567ac27a0ecSDave Kleikamp /* 3568a4bb6b64SAllison Henderson * ext4_punch_hole: punches a hole in a file by releaseing the blocks 3569a4bb6b64SAllison Henderson * associated with the given offset and length 3570a4bb6b64SAllison Henderson * 3571a4bb6b64SAllison Henderson * @inode: File inode 3572a4bb6b64SAllison Henderson * @offset: The offset where the hole will begin 3573a4bb6b64SAllison Henderson * @len: The length of the hole 3574a4bb6b64SAllison Henderson * 35754907cb7bSAnatol Pomozov * Returns: 0 on success or negative on failure 3576a4bb6b64SAllison Henderson */ 3577a4bb6b64SAllison Henderson 3578a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) 3579a4bb6b64SAllison Henderson { 3580496ad9aaSAl Viro struct inode *inode = file_inode(file); 358126a4c0c6STheodore Ts'o struct super_block *sb = inode->i_sb; 358226a4c0c6STheodore Ts'o ext4_lblk_t first_block, stop_block; 358326a4c0c6STheodore Ts'o struct address_space *mapping = inode->i_mapping; 358426a4c0c6STheodore Ts'o loff_t first_page, last_page, page_len; 358526a4c0c6STheodore Ts'o loff_t first_page_offset, last_page_offset; 358626a4c0c6STheodore Ts'o handle_t *handle; 358726a4c0c6STheodore Ts'o unsigned int credits; 358826a4c0c6STheodore Ts'o int ret = 0; 358926a4c0c6STheodore Ts'o 3590a4bb6b64SAllison Henderson if (!S_ISREG(inode->i_mode)) 359173355192SAllison Henderson return -EOPNOTSUPP; 3592a4bb6b64SAllison Henderson 359326a4c0c6STheodore Ts'o if (EXT4_SB(sb)->s_cluster_ratio > 1) { 3594bab08ab9STheodore Ts'o /* TODO: Add support for bigalloc file systems */ 359573355192SAllison Henderson return -EOPNOTSUPP; 3596bab08ab9STheodore Ts'o } 3597bab08ab9STheodore Ts'o 3598aaddea81SZheng Liu trace_ext4_punch_hole(inode, offset, length); 3599aaddea81SZheng Liu 360026a4c0c6STheodore Ts'o /* 360126a4c0c6STheodore Ts'o * Write out all dirty pages to avoid race conditions 360226a4c0c6STheodore Ts'o * Then release them. 360326a4c0c6STheodore Ts'o */ 360426a4c0c6STheodore Ts'o if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 360526a4c0c6STheodore Ts'o ret = filemap_write_and_wait_range(mapping, offset, 360626a4c0c6STheodore Ts'o offset + length - 1); 360726a4c0c6STheodore Ts'o if (ret) 360826a4c0c6STheodore Ts'o return ret; 360926a4c0c6STheodore Ts'o } 361026a4c0c6STheodore Ts'o 361126a4c0c6STheodore Ts'o mutex_lock(&inode->i_mutex); 361226a4c0c6STheodore Ts'o /* It's not possible punch hole on append only file */ 361326a4c0c6STheodore Ts'o if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) { 361426a4c0c6STheodore Ts'o ret = -EPERM; 361526a4c0c6STheodore Ts'o goto out_mutex; 361626a4c0c6STheodore Ts'o } 361726a4c0c6STheodore Ts'o if (IS_SWAPFILE(inode)) { 361826a4c0c6STheodore Ts'o ret = -ETXTBSY; 361926a4c0c6STheodore Ts'o goto out_mutex; 362026a4c0c6STheodore Ts'o } 362126a4c0c6STheodore Ts'o 362226a4c0c6STheodore Ts'o /* No need to punch hole beyond i_size */ 362326a4c0c6STheodore Ts'o if (offset >= inode->i_size) 362426a4c0c6STheodore Ts'o goto out_mutex; 362526a4c0c6STheodore Ts'o 362626a4c0c6STheodore Ts'o /* 362726a4c0c6STheodore Ts'o * If the hole extends beyond i_size, set the hole 362826a4c0c6STheodore Ts'o * to end after the page that contains i_size 362926a4c0c6STheodore Ts'o */ 363026a4c0c6STheodore Ts'o if (offset + length > inode->i_size) { 363126a4c0c6STheodore Ts'o length = inode->i_size + 363226a4c0c6STheodore Ts'o PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - 363326a4c0c6STheodore Ts'o offset; 363426a4c0c6STheodore Ts'o } 363526a4c0c6STheodore Ts'o 363626a4c0c6STheodore Ts'o first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 363726a4c0c6STheodore Ts'o last_page = (offset + length) >> PAGE_CACHE_SHIFT; 363826a4c0c6STheodore Ts'o 363926a4c0c6STheodore Ts'o first_page_offset = first_page << PAGE_CACHE_SHIFT; 364026a4c0c6STheodore Ts'o last_page_offset = last_page << PAGE_CACHE_SHIFT; 364126a4c0c6STheodore Ts'o 364226a4c0c6STheodore Ts'o /* Now release the pages */ 364326a4c0c6STheodore Ts'o if (last_page_offset > first_page_offset) { 364426a4c0c6STheodore Ts'o truncate_pagecache_range(inode, first_page_offset, 364526a4c0c6STheodore Ts'o last_page_offset - 1); 364626a4c0c6STheodore Ts'o } 364726a4c0c6STheodore Ts'o 364826a4c0c6STheodore Ts'o /* Wait all existing dio workers, newcomers will block on i_mutex */ 364926a4c0c6STheodore Ts'o ext4_inode_block_unlocked_dio(inode); 365026a4c0c6STheodore Ts'o ret = ext4_flush_unwritten_io(inode); 365126a4c0c6STheodore Ts'o if (ret) 365226a4c0c6STheodore Ts'o goto out_dio; 365326a4c0c6STheodore Ts'o inode_dio_wait(inode); 365426a4c0c6STheodore Ts'o 365526a4c0c6STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 365626a4c0c6STheodore Ts'o credits = ext4_writepage_trans_blocks(inode); 365726a4c0c6STheodore Ts'o else 365826a4c0c6STheodore Ts'o credits = ext4_blocks_for_truncate(inode); 365926a4c0c6STheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 366026a4c0c6STheodore Ts'o if (IS_ERR(handle)) { 366126a4c0c6STheodore Ts'o ret = PTR_ERR(handle); 366226a4c0c6STheodore Ts'o ext4_std_error(sb, ret); 366326a4c0c6STheodore Ts'o goto out_dio; 366426a4c0c6STheodore Ts'o } 366526a4c0c6STheodore Ts'o 366626a4c0c6STheodore Ts'o /* 366726a4c0c6STheodore Ts'o * Now we need to zero out the non-page-aligned data in the 366826a4c0c6STheodore Ts'o * pages at the start and tail of the hole, and unmap the 366926a4c0c6STheodore Ts'o * buffer heads for the block aligned regions of the page that 367026a4c0c6STheodore Ts'o * were completely zeroed. 367126a4c0c6STheodore Ts'o */ 367226a4c0c6STheodore Ts'o if (first_page > last_page) { 367326a4c0c6STheodore Ts'o /* 367426a4c0c6STheodore Ts'o * If the file space being truncated is contained 367526a4c0c6STheodore Ts'o * within a page just zero out and unmap the middle of 367626a4c0c6STheodore Ts'o * that page 367726a4c0c6STheodore Ts'o */ 367826a4c0c6STheodore Ts'o ret = ext4_discard_partial_page_buffers(handle, 367926a4c0c6STheodore Ts'o mapping, offset, length, 0); 368026a4c0c6STheodore Ts'o 368126a4c0c6STheodore Ts'o if (ret) 368226a4c0c6STheodore Ts'o goto out_stop; 368326a4c0c6STheodore Ts'o } else { 368426a4c0c6STheodore Ts'o /* 368526a4c0c6STheodore Ts'o * zero out and unmap the partial page that contains 368626a4c0c6STheodore Ts'o * the start of the hole 368726a4c0c6STheodore Ts'o */ 368826a4c0c6STheodore Ts'o page_len = first_page_offset - offset; 368926a4c0c6STheodore Ts'o if (page_len > 0) { 369026a4c0c6STheodore Ts'o ret = ext4_discard_partial_page_buffers(handle, mapping, 369126a4c0c6STheodore Ts'o offset, page_len, 0); 369226a4c0c6STheodore Ts'o if (ret) 369326a4c0c6STheodore Ts'o goto out_stop; 369426a4c0c6STheodore Ts'o } 369526a4c0c6STheodore Ts'o 369626a4c0c6STheodore Ts'o /* 369726a4c0c6STheodore Ts'o * zero out and unmap the partial page that contains 369826a4c0c6STheodore Ts'o * the end of the hole 369926a4c0c6STheodore Ts'o */ 370026a4c0c6STheodore Ts'o page_len = offset + length - last_page_offset; 370126a4c0c6STheodore Ts'o if (page_len > 0) { 370226a4c0c6STheodore Ts'o ret = ext4_discard_partial_page_buffers(handle, mapping, 370326a4c0c6STheodore Ts'o last_page_offset, page_len, 0); 370426a4c0c6STheodore Ts'o if (ret) 370526a4c0c6STheodore Ts'o goto out_stop; 370626a4c0c6STheodore Ts'o } 370726a4c0c6STheodore Ts'o } 370826a4c0c6STheodore Ts'o 370926a4c0c6STheodore Ts'o /* 371026a4c0c6STheodore Ts'o * If i_size is contained in the last page, we need to 371126a4c0c6STheodore Ts'o * unmap and zero the partial page after i_size 371226a4c0c6STheodore Ts'o */ 371326a4c0c6STheodore Ts'o if (inode->i_size >> PAGE_CACHE_SHIFT == last_page && 371426a4c0c6STheodore Ts'o inode->i_size % PAGE_CACHE_SIZE != 0) { 371526a4c0c6STheodore Ts'o page_len = PAGE_CACHE_SIZE - 371626a4c0c6STheodore Ts'o (inode->i_size & (PAGE_CACHE_SIZE - 1)); 371726a4c0c6STheodore Ts'o 371826a4c0c6STheodore Ts'o if (page_len > 0) { 371926a4c0c6STheodore Ts'o ret = ext4_discard_partial_page_buffers(handle, 372026a4c0c6STheodore Ts'o mapping, inode->i_size, page_len, 0); 372126a4c0c6STheodore Ts'o 372226a4c0c6STheodore Ts'o if (ret) 372326a4c0c6STheodore Ts'o goto out_stop; 372426a4c0c6STheodore Ts'o } 372526a4c0c6STheodore Ts'o } 372626a4c0c6STheodore Ts'o 372726a4c0c6STheodore Ts'o first_block = (offset + sb->s_blocksize - 1) >> 372826a4c0c6STheodore Ts'o EXT4_BLOCK_SIZE_BITS(sb); 372926a4c0c6STheodore Ts'o stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); 373026a4c0c6STheodore Ts'o 373126a4c0c6STheodore Ts'o /* If there are no blocks to remove, return now */ 373226a4c0c6STheodore Ts'o if (first_block >= stop_block) 373326a4c0c6STheodore Ts'o goto out_stop; 373426a4c0c6STheodore Ts'o 373526a4c0c6STheodore Ts'o down_write(&EXT4_I(inode)->i_data_sem); 373626a4c0c6STheodore Ts'o ext4_discard_preallocations(inode); 373726a4c0c6STheodore Ts'o 373826a4c0c6STheodore Ts'o ret = ext4_es_remove_extent(inode, first_block, 373926a4c0c6STheodore Ts'o stop_block - first_block); 374026a4c0c6STheodore Ts'o if (ret) { 374126a4c0c6STheodore Ts'o up_write(&EXT4_I(inode)->i_data_sem); 374226a4c0c6STheodore Ts'o goto out_stop; 374326a4c0c6STheodore Ts'o } 374426a4c0c6STheodore Ts'o 374526a4c0c6STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 374626a4c0c6STheodore Ts'o ret = ext4_ext_remove_space(inode, first_block, 374726a4c0c6STheodore Ts'o stop_block - 1); 374826a4c0c6STheodore Ts'o else 374926a4c0c6STheodore Ts'o ret = ext4_free_hole_blocks(handle, inode, first_block, 375026a4c0c6STheodore Ts'o stop_block); 375126a4c0c6STheodore Ts'o 375226a4c0c6STheodore Ts'o ext4_discard_preallocations(inode); 3753819c4920STheodore Ts'o up_write(&EXT4_I(inode)->i_data_sem); 375426a4c0c6STheodore Ts'o if (IS_SYNC(inode)) 375526a4c0c6STheodore Ts'o ext4_handle_sync(handle); 375626a4c0c6STheodore Ts'o inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 375726a4c0c6STheodore Ts'o ext4_mark_inode_dirty(handle, inode); 375826a4c0c6STheodore Ts'o out_stop: 375926a4c0c6STheodore Ts'o ext4_journal_stop(handle); 376026a4c0c6STheodore Ts'o out_dio: 376126a4c0c6STheodore Ts'o ext4_inode_resume_unlocked_dio(inode); 376226a4c0c6STheodore Ts'o out_mutex: 376326a4c0c6STheodore Ts'o mutex_unlock(&inode->i_mutex); 376426a4c0c6STheodore Ts'o return ret; 3765a4bb6b64SAllison Henderson } 3766a4bb6b64SAllison Henderson 3767a4bb6b64SAllison Henderson /* 3768617ba13bSMingming Cao * ext4_truncate() 3769ac27a0ecSDave Kleikamp * 3770617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3771617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3772ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 3773ac27a0ecSDave Kleikamp * 377442b2aa86SJustin P. Mattock * As we work through the truncate and commit bits of it to the journal there 3775ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3776ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3777ac27a0ecSDave Kleikamp * 3778ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3779ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3780ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3781ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3782ac27a0ecSDave Kleikamp * left-to-right works OK too). 3783ac27a0ecSDave Kleikamp * 3784ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3785ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3786ac27a0ecSDave Kleikamp * 3787ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3788617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3789ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3790617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3791617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3792ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3793617ba13bSMingming Cao * ext4_truncate() run will find them and release them. 3794ac27a0ecSDave Kleikamp */ 3795617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3796ac27a0ecSDave Kleikamp { 3797819c4920STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 3798819c4920STheodore Ts'o unsigned int credits; 3799819c4920STheodore Ts'o handle_t *handle; 3800819c4920STheodore Ts'o struct address_space *mapping = inode->i_mapping; 3801819c4920STheodore Ts'o loff_t page_len; 3802819c4920STheodore Ts'o 380319b5ef61STheodore Ts'o /* 380419b5ef61STheodore Ts'o * There is a possibility that we're either freeing the inode 380519b5ef61STheodore Ts'o * or it completely new indode. In those cases we might not 380619b5ef61STheodore Ts'o * have i_mutex locked because it's not necessary. 380719b5ef61STheodore Ts'o */ 380819b5ef61STheodore Ts'o if (!(inode->i_state & (I_NEW|I_FREEING))) 380919b5ef61STheodore Ts'o WARN_ON(!mutex_is_locked(&inode->i_mutex)); 38100562e0baSJiaying Zhang trace_ext4_truncate_enter(inode); 38110562e0baSJiaying Zhang 381291ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3813ac27a0ecSDave Kleikamp return; 3814ac27a0ecSDave Kleikamp 381512e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3816c8d46e41SJiaying Zhang 38175534fb5bSTheodore Ts'o if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 381819f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 38197d8f9f7dSTheodore Ts'o 3820aef1c851STao Ma if (ext4_has_inline_data(inode)) { 3821aef1c851STao Ma int has_inline = 1; 3822aef1c851STao Ma 3823aef1c851STao Ma ext4_inline_data_truncate(inode, &has_inline); 3824aef1c851STao Ma if (has_inline) 3825aef1c851STao Ma return; 3826aef1c851STao Ma } 3827aef1c851STao Ma 3828819c4920STheodore Ts'o /* 3829819c4920STheodore Ts'o * finish any pending end_io work so we won't run the risk of 3830819c4920STheodore Ts'o * converting any truncated blocks to initialized later 3831819c4920STheodore Ts'o */ 3832819c4920STheodore Ts'o ext4_flush_unwritten_io(inode); 3833819c4920STheodore Ts'o 3834ff9893dcSAmir Goldstein if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3835819c4920STheodore Ts'o credits = ext4_writepage_trans_blocks(inode); 3836ff9893dcSAmir Goldstein else 3837819c4920STheodore Ts'o credits = ext4_blocks_for_truncate(inode); 3838819c4920STheodore Ts'o 3839819c4920STheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); 3840819c4920STheodore Ts'o if (IS_ERR(handle)) { 3841819c4920STheodore Ts'o ext4_std_error(inode->i_sb, PTR_ERR(handle)); 3842819c4920STheodore Ts'o return; 3843819c4920STheodore Ts'o } 3844819c4920STheodore Ts'o 3845819c4920STheodore Ts'o if (inode->i_size % PAGE_CACHE_SIZE != 0) { 3846819c4920STheodore Ts'o page_len = PAGE_CACHE_SIZE - 3847819c4920STheodore Ts'o (inode->i_size & (PAGE_CACHE_SIZE - 1)); 3848819c4920STheodore Ts'o 3849819c4920STheodore Ts'o if (ext4_discard_partial_page_buffers(handle, 3850819c4920STheodore Ts'o mapping, inode->i_size, page_len, 0)) 3851819c4920STheodore Ts'o goto out_stop; 3852819c4920STheodore Ts'o } 3853819c4920STheodore Ts'o 3854819c4920STheodore Ts'o /* 3855819c4920STheodore Ts'o * We add the inode to the orphan list, so that if this 3856819c4920STheodore Ts'o * truncate spans multiple transactions, and we crash, we will 3857819c4920STheodore Ts'o * resume the truncate when the filesystem recovers. It also 3858819c4920STheodore Ts'o * marks the inode dirty, to catch the new size. 3859819c4920STheodore Ts'o * 3860819c4920STheodore Ts'o * Implication: the file must always be in a sane, consistent 3861819c4920STheodore Ts'o * truncatable state while each transaction commits. 3862819c4920STheodore Ts'o */ 3863819c4920STheodore Ts'o if (ext4_orphan_add(handle, inode)) 3864819c4920STheodore Ts'o goto out_stop; 3865819c4920STheodore Ts'o 3866819c4920STheodore Ts'o down_write(&EXT4_I(inode)->i_data_sem); 3867819c4920STheodore Ts'o 3868819c4920STheodore Ts'o ext4_discard_preallocations(inode); 3869819c4920STheodore Ts'o 3870819c4920STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3871819c4920STheodore Ts'o ext4_ext_truncate(handle, inode); 3872819c4920STheodore Ts'o else 3873819c4920STheodore Ts'o ext4_ind_truncate(handle, inode); 3874819c4920STheodore Ts'o 3875819c4920STheodore Ts'o up_write(&ei->i_data_sem); 3876819c4920STheodore Ts'o 3877819c4920STheodore Ts'o if (IS_SYNC(inode)) 3878819c4920STheodore Ts'o ext4_handle_sync(handle); 3879819c4920STheodore Ts'o 3880819c4920STheodore Ts'o out_stop: 3881819c4920STheodore Ts'o /* 3882819c4920STheodore Ts'o * If this was a simple ftruncate() and the file will remain alive, 3883819c4920STheodore Ts'o * then we need to clear up the orphan record which we created above. 3884819c4920STheodore Ts'o * However, if this was a real unlink then we were called by 3885819c4920STheodore Ts'o * ext4_delete_inode(), and we allow that function to clean up the 3886819c4920STheodore Ts'o * orphan info for us. 3887819c4920STheodore Ts'o */ 3888819c4920STheodore Ts'o if (inode->i_nlink) 3889819c4920STheodore Ts'o ext4_orphan_del(handle, inode); 3890819c4920STheodore Ts'o 3891819c4920STheodore Ts'o inode->i_mtime = inode->i_ctime = ext4_current_time(inode); 3892819c4920STheodore Ts'o ext4_mark_inode_dirty(handle, inode); 3893819c4920STheodore Ts'o ext4_journal_stop(handle); 3894a86c6181SAlex Tomas 38950562e0baSJiaying Zhang trace_ext4_truncate_exit(inode); 3896ac27a0ecSDave Kleikamp } 3897ac27a0ecSDave Kleikamp 3898ac27a0ecSDave Kleikamp /* 3899617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3900ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3901ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3902ac27a0ecSDave Kleikamp * inode. 3903ac27a0ecSDave Kleikamp */ 3904617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3905617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3906ac27a0ecSDave Kleikamp { 3907240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3908ac27a0ecSDave Kleikamp struct buffer_head *bh; 3909240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3910240799cdSTheodore Ts'o ext4_fsblk_t block; 3911240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3912ac27a0ecSDave Kleikamp 39133a06d778SAneesh Kumar K.V iloc->bh = NULL; 3914240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3915ac27a0ecSDave Kleikamp return -EIO; 3916ac27a0ecSDave Kleikamp 3917240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3918240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3919240799cdSTheodore Ts'o if (!gdp) 3920240799cdSTheodore Ts'o return -EIO; 3921240799cdSTheodore Ts'o 3922240799cdSTheodore Ts'o /* 3923240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 3924240799cdSTheodore Ts'o */ 392500d09882STao Ma inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 3926240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 3927240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 3928240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3929240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3930240799cdSTheodore Ts'o 3931240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 3932aebf0243SWang Shilong if (unlikely(!bh)) 3933860d21e2STheodore Ts'o return -ENOMEM; 3934ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3935ac27a0ecSDave Kleikamp lock_buffer(bh); 39369c83a923SHidehiro Kawai 39379c83a923SHidehiro Kawai /* 39389c83a923SHidehiro Kawai * If the buffer has the write error flag, we have failed 39399c83a923SHidehiro Kawai * to write out another inode in the same block. In this 39409c83a923SHidehiro Kawai * case, we don't have to read the block because we may 39419c83a923SHidehiro Kawai * read the old inode data successfully. 39429c83a923SHidehiro Kawai */ 39439c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 39449c83a923SHidehiro Kawai set_buffer_uptodate(bh); 39459c83a923SHidehiro Kawai 3946ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3947ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3948ac27a0ecSDave Kleikamp unlock_buffer(bh); 3949ac27a0ecSDave Kleikamp goto has_buffer; 3950ac27a0ecSDave Kleikamp } 3951ac27a0ecSDave Kleikamp 3952ac27a0ecSDave Kleikamp /* 3953ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3954ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3955ac27a0ecSDave Kleikamp * block. 3956ac27a0ecSDave Kleikamp */ 3957ac27a0ecSDave Kleikamp if (in_mem) { 3958ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3959240799cdSTheodore Ts'o int i, start; 3960ac27a0ecSDave Kleikamp 3961240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 3962ac27a0ecSDave Kleikamp 3963ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? */ 3964240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3965aebf0243SWang Shilong if (unlikely(!bitmap_bh)) 3966ac27a0ecSDave Kleikamp goto make_io; 3967ac27a0ecSDave Kleikamp 3968ac27a0ecSDave Kleikamp /* 3969ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3970ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3971ac27a0ecSDave Kleikamp * of one, so skip it. 3972ac27a0ecSDave Kleikamp */ 3973ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3974ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3975ac27a0ecSDave Kleikamp goto make_io; 3976ac27a0ecSDave Kleikamp } 3977240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 3978ac27a0ecSDave Kleikamp if (i == inode_offset) 3979ac27a0ecSDave Kleikamp continue; 3980617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3981ac27a0ecSDave Kleikamp break; 3982ac27a0ecSDave Kleikamp } 3983ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3984240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 3985ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3986ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3987ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3988ac27a0ecSDave Kleikamp unlock_buffer(bh); 3989ac27a0ecSDave Kleikamp goto has_buffer; 3990ac27a0ecSDave Kleikamp } 3991ac27a0ecSDave Kleikamp } 3992ac27a0ecSDave Kleikamp 3993ac27a0ecSDave Kleikamp make_io: 3994ac27a0ecSDave Kleikamp /* 3995240799cdSTheodore Ts'o * If we need to do any I/O, try to pre-readahead extra 3996240799cdSTheodore Ts'o * blocks from the inode table. 3997240799cdSTheodore Ts'o */ 3998240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 3999240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 4000240799cdSTheodore Ts'o unsigned num; 4001240799cdSTheodore Ts'o 4002240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 4003b713a5ecSTheodore Ts'o /* s_inode_readahead_blks is always a power of 2 */ 4004240799cdSTheodore Ts'o b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 4005240799cdSTheodore Ts'o if (table > b) 4006240799cdSTheodore Ts'o b = table; 4007240799cdSTheodore Ts'o end = b + EXT4_SB(sb)->s_inode_readahead_blks; 4008240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 4009feb0ab32SDarrick J. Wong if (ext4_has_group_desc_csum(sb)) 4010560671a0SAneesh Kumar K.V num -= ext4_itable_unused_count(sb, gdp); 4011240799cdSTheodore Ts'o table += num / inodes_per_block; 4012240799cdSTheodore Ts'o if (end > table) 4013240799cdSTheodore Ts'o end = table; 4014240799cdSTheodore Ts'o while (b <= end) 4015240799cdSTheodore Ts'o sb_breadahead(sb, b++); 4016240799cdSTheodore Ts'o } 4017240799cdSTheodore Ts'o 4018240799cdSTheodore Ts'o /* 4019ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 4020ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 4021ac27a0ecSDave Kleikamp * Read the block from disk. 4022ac27a0ecSDave Kleikamp */ 40230562e0baSJiaying Zhang trace_ext4_load_inode(inode); 4024ac27a0ecSDave Kleikamp get_bh(bh); 4025ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 402665299a3bSChristoph Hellwig submit_bh(READ | REQ_META | REQ_PRIO, bh); 4027ac27a0ecSDave Kleikamp wait_on_buffer(bh); 4028ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 4029c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 4030c398eda0STheodore Ts'o "unable to read itable block"); 4031ac27a0ecSDave Kleikamp brelse(bh); 4032ac27a0ecSDave Kleikamp return -EIO; 4033ac27a0ecSDave Kleikamp } 4034ac27a0ecSDave Kleikamp } 4035ac27a0ecSDave Kleikamp has_buffer: 4036ac27a0ecSDave Kleikamp iloc->bh = bh; 4037ac27a0ecSDave Kleikamp return 0; 4038ac27a0ecSDave Kleikamp } 4039ac27a0ecSDave Kleikamp 4040617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 4041ac27a0ecSDave Kleikamp { 4042ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. */ 4043617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 404419f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 4045ac27a0ecSDave Kleikamp } 4046ac27a0ecSDave Kleikamp 4047617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 4048ac27a0ecSDave Kleikamp { 4049617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 4050ac27a0ecSDave Kleikamp 4051ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 4052617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 4053ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 4054617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 4055ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 4056617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 4057ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 4058617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 4059ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 4060617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 4061ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 4062ac27a0ecSDave Kleikamp } 4063ac27a0ecSDave Kleikamp 4064ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 4065ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 4066ff9ddf7eSJan Kara { 406784a8dce2SDmitry Monakhov unsigned int vfs_fl; 406884a8dce2SDmitry Monakhov unsigned long old_fl, new_fl; 4069ff9ddf7eSJan Kara 407084a8dce2SDmitry Monakhov do { 407184a8dce2SDmitry Monakhov vfs_fl = ei->vfs_inode.i_flags; 407284a8dce2SDmitry Monakhov old_fl = ei->i_flags; 407384a8dce2SDmitry Monakhov new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 407484a8dce2SDmitry Monakhov EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| 407584a8dce2SDmitry Monakhov EXT4_DIRSYNC_FL); 407684a8dce2SDmitry Monakhov if (vfs_fl & S_SYNC) 407784a8dce2SDmitry Monakhov new_fl |= EXT4_SYNC_FL; 407884a8dce2SDmitry Monakhov if (vfs_fl & S_APPEND) 407984a8dce2SDmitry Monakhov new_fl |= EXT4_APPEND_FL; 408084a8dce2SDmitry Monakhov if (vfs_fl & S_IMMUTABLE) 408184a8dce2SDmitry Monakhov new_fl |= EXT4_IMMUTABLE_FL; 408284a8dce2SDmitry Monakhov if (vfs_fl & S_NOATIME) 408384a8dce2SDmitry Monakhov new_fl |= EXT4_NOATIME_FL; 408484a8dce2SDmitry Monakhov if (vfs_fl & S_DIRSYNC) 408584a8dce2SDmitry Monakhov new_fl |= EXT4_DIRSYNC_FL; 408684a8dce2SDmitry Monakhov } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); 4087ff9ddf7eSJan Kara } 4088de9a55b8STheodore Ts'o 40890fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 40900fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 40910fc1b451SAneesh Kumar K.V { 40920fc1b451SAneesh Kumar K.V blkcnt_t i_blocks ; 40938180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 40948180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 40950fc1b451SAneesh Kumar K.V 40960fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 40970fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 40980fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 40990fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 41000fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 410107a03824STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 41028180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 41038180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 41048180a562SAneesh Kumar K.V } else { 41050fc1b451SAneesh Kumar K.V return i_blocks; 41068180a562SAneesh Kumar K.V } 41070fc1b451SAneesh Kumar K.V } else { 41080fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 41090fc1b451SAneesh Kumar K.V } 41100fc1b451SAneesh Kumar K.V } 4111ff9ddf7eSJan Kara 4112152a7b0aSTao Ma static inline void ext4_iget_extra_inode(struct inode *inode, 4113152a7b0aSTao Ma struct ext4_inode *raw_inode, 4114152a7b0aSTao Ma struct ext4_inode_info *ei) 4115152a7b0aSTao Ma { 4116152a7b0aSTao Ma __le32 *magic = (void *)raw_inode + 4117152a7b0aSTao Ma EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 411867cf5b09STao Ma if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 4119152a7b0aSTao Ma ext4_set_inode_state(inode, EXT4_STATE_XATTR); 412067cf5b09STao Ma ext4_find_inline_data_nolock(inode); 4121f19d5870STao Ma } else 4122f19d5870STao Ma EXT4_I(inode)->i_inline_off = 0; 4123152a7b0aSTao Ma } 4124152a7b0aSTao Ma 41251d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 4126ac27a0ecSDave Kleikamp { 4127617ba13bSMingming Cao struct ext4_iloc iloc; 4128617ba13bSMingming Cao struct ext4_inode *raw_inode; 41291d1fe1eeSDavid Howells struct ext4_inode_info *ei; 41301d1fe1eeSDavid Howells struct inode *inode; 4131b436b9beSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 41321d1fe1eeSDavid Howells long ret; 4133ac27a0ecSDave Kleikamp int block; 413408cefc7aSEric W. Biederman uid_t i_uid; 413508cefc7aSEric W. Biederman gid_t i_gid; 4136ac27a0ecSDave Kleikamp 41371d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 41381d1fe1eeSDavid Howells if (!inode) 41391d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 41401d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 41411d1fe1eeSDavid Howells return inode; 41421d1fe1eeSDavid Howells 41431d1fe1eeSDavid Howells ei = EXT4_I(inode); 41447dc57615SPeter Huewe iloc.bh = NULL; 4145ac27a0ecSDave Kleikamp 41461d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 41471d1fe1eeSDavid Howells if (ret < 0) 4148ac27a0ecSDave Kleikamp goto bad_inode; 4149617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 4150814525f4SDarrick J. Wong 4151814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4152814525f4SDarrick J. Wong ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 4153814525f4SDarrick J. Wong if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 4154814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)) { 4155814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)", 4156814525f4SDarrick J. Wong EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize, 4157814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)); 4158814525f4SDarrick J. Wong ret = -EIO; 4159814525f4SDarrick J. Wong goto bad_inode; 4160814525f4SDarrick J. Wong } 4161814525f4SDarrick J. Wong } else 4162814525f4SDarrick J. Wong ei->i_extra_isize = 0; 4163814525f4SDarrick J. Wong 4164814525f4SDarrick J. Wong /* Precompute checksum seed for inode metadata */ 4165814525f4SDarrick J. Wong if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 4166814525f4SDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { 4167814525f4SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4168814525f4SDarrick J. Wong __u32 csum; 4169814525f4SDarrick J. Wong __le32 inum = cpu_to_le32(inode->i_ino); 4170814525f4SDarrick J. Wong __le32 gen = raw_inode->i_generation; 4171814525f4SDarrick J. Wong csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 4172814525f4SDarrick J. Wong sizeof(inum)); 4173814525f4SDarrick J. Wong ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 4174814525f4SDarrick J. Wong sizeof(gen)); 4175814525f4SDarrick J. Wong } 4176814525f4SDarrick J. Wong 4177814525f4SDarrick J. Wong if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 4178814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "checksum invalid"); 4179814525f4SDarrick J. Wong ret = -EIO; 4180814525f4SDarrick J. Wong goto bad_inode; 4181814525f4SDarrick J. Wong } 4182814525f4SDarrick J. Wong 4183ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 418408cefc7aSEric W. Biederman i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 418508cefc7aSEric W. Biederman i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 4186ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 418708cefc7aSEric W. Biederman i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 418808cefc7aSEric W. Biederman i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 4189ac27a0ecSDave Kleikamp } 419008cefc7aSEric W. Biederman i_uid_write(inode, i_uid); 419108cefc7aSEric W. Biederman i_gid_write(inode, i_gid); 4192bfe86848SMiklos Szeredi set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 4193ac27a0ecSDave Kleikamp 4194353eb83cSTheodore Ts'o ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 419567cf5b09STao Ma ei->i_inline_off = 0; 4196ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 4197ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 4198ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 4199ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes 4200ac27a0ecSDave Kleikamp * the test is that same one that e2fsck uses 4201ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 4202ac27a0ecSDave Kleikamp */ 4203ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 4204393d1d1dSDr. Tilmann Bubeck if ((inode->i_mode == 0 || 4205393d1d1dSDr. Tilmann Bubeck !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) && 4206393d1d1dSDr. Tilmann Bubeck ino != EXT4_BOOT_LOADER_INO) { 4207ac27a0ecSDave Kleikamp /* this inode is deleted */ 42081d1fe1eeSDavid Howells ret = -ESTALE; 4209ac27a0ecSDave Kleikamp goto bad_inode; 4210ac27a0ecSDave Kleikamp } 4211ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 4212ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 4213ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 4214393d1d1dSDr. Tilmann Bubeck * the process of deleting those. 4215393d1d1dSDr. Tilmann Bubeck * OR it is the EXT4_BOOT_LOADER_INO which is 4216393d1d1dSDr. Tilmann Bubeck * not initialized on a new filesystem. */ 4217ac27a0ecSDave Kleikamp } 4218ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 42190fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 42207973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4221a9e81742STheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 4222a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 4223a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 4224a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 4225ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 4226a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 4227a9e7f447SDmitry Monakhov ei->i_reserved_quota = 0; 4228a9e7f447SDmitry Monakhov #endif 4229ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 4230ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 4231a4912123STheodore Ts'o ei->i_last_alloc_group = ~0; 4232ac27a0ecSDave Kleikamp /* 4233ac27a0ecSDave Kleikamp * NOTE! The in-memory inode i_data array is in little-endian order 4234ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 4235ac27a0ecSDave Kleikamp */ 4236617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 4237ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 4238ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 4239ac27a0ecSDave Kleikamp 4240b436b9beSJan Kara /* 4241b436b9beSJan Kara * Set transaction id's of transactions that have to be committed 4242b436b9beSJan Kara * to finish f[data]sync. We set them to currently running transaction 4243b436b9beSJan Kara * as we cannot be sure that the inode or some of its metadata isn't 4244b436b9beSJan Kara * part of the transaction - the inode could have been reclaimed and 4245b436b9beSJan Kara * now it is reread from disk. 4246b436b9beSJan Kara */ 4247b436b9beSJan Kara if (journal) { 4248b436b9beSJan Kara transaction_t *transaction; 4249b436b9beSJan Kara tid_t tid; 4250b436b9beSJan Kara 4251a931da6aSTheodore Ts'o read_lock(&journal->j_state_lock); 4252b436b9beSJan Kara if (journal->j_running_transaction) 4253b436b9beSJan Kara transaction = journal->j_running_transaction; 4254b436b9beSJan Kara else 4255b436b9beSJan Kara transaction = journal->j_committing_transaction; 4256b436b9beSJan Kara if (transaction) 4257b436b9beSJan Kara tid = transaction->t_tid; 4258b436b9beSJan Kara else 4259b436b9beSJan Kara tid = journal->j_commit_sequence; 4260a931da6aSTheodore Ts'o read_unlock(&journal->j_state_lock); 4261b436b9beSJan Kara ei->i_sync_tid = tid; 4262b436b9beSJan Kara ei->i_datasync_tid = tid; 4263b436b9beSJan Kara } 4264b436b9beSJan Kara 42650040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 4266ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 4267ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. */ 4268617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 4269617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 4270ac27a0ecSDave Kleikamp } else { 4271152a7b0aSTao Ma ext4_iget_extra_inode(inode, raw_inode, ei); 4272ac27a0ecSDave Kleikamp } 4273814525f4SDarrick J. Wong } 4274ac27a0ecSDave Kleikamp 4275ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 4276ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 4277ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 4278ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 4279ef7f3835SKalpak Shah 428025ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 428125ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 428225ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 428325ec56b5SJean Noel Cordenner inode->i_version |= 428425ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 428525ec56b5SJean Noel Cordenner } 428625ec56b5SJean Noel Cordenner 4287c4b5a614STheodore Ts'o ret = 0; 4288485c26ecSTheodore Ts'o if (ei->i_file_acl && 42891032988cSTheodore Ts'o !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 429024676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 429124676da4STheodore Ts'o ei->i_file_acl); 4292485c26ecSTheodore Ts'o ret = -EIO; 4293485c26ecSTheodore Ts'o goto bad_inode; 4294f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 4295f19d5870STao Ma if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 4296f19d5870STao Ma if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4297c4b5a614STheodore Ts'o (S_ISLNK(inode->i_mode) && 4298f19d5870STao Ma !ext4_inode_is_fast_symlink(inode)))) 42997a262f7cSAneesh Kumar K.V /* Validate extent which is part of inode */ 43007a262f7cSAneesh Kumar K.V ret = ext4_ext_check_inode(inode); 4301fe2c8191SThiemo Nagel } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4302fe2c8191SThiemo Nagel (S_ISLNK(inode->i_mode) && 4303fe2c8191SThiemo Nagel !ext4_inode_is_fast_symlink(inode))) { 4304fe2c8191SThiemo Nagel /* Validate block references which are part of inode */ 43051f7d1e77STheodore Ts'o ret = ext4_ind_check_inode(inode); 4306fe2c8191SThiemo Nagel } 4307f19d5870STao Ma } 4308567f3e9aSTheodore Ts'o if (ret) 43097a262f7cSAneesh Kumar K.V goto bad_inode; 43107a262f7cSAneesh Kumar K.V 4311ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 4312617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 4313617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 4314617ba13bSMingming Cao ext4_set_aops(inode); 4315ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 4316617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 4317617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 4318ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 4319e83c1397SDuane Griffin if (ext4_inode_is_fast_symlink(inode)) { 4320617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 4321e83c1397SDuane Griffin nd_terminate_link(ei->i_data, inode->i_size, 4322e83c1397SDuane Griffin sizeof(ei->i_data) - 1); 4323e83c1397SDuane Griffin } else { 4324617ba13bSMingming Cao inode->i_op = &ext4_symlink_inode_operations; 4325617ba13bSMingming Cao ext4_set_aops(inode); 4326ac27a0ecSDave Kleikamp } 4327563bdd61STheodore Ts'o } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4328563bdd61STheodore Ts'o S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4329617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 4330ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 4331ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4332ac27a0ecSDave Kleikamp old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4333ac27a0ecSDave Kleikamp else 4334ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4335ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4336393d1d1dSDr. Tilmann Bubeck } else if (ino == EXT4_BOOT_LOADER_INO) { 4337393d1d1dSDr. Tilmann Bubeck make_bad_inode(inode); 4338563bdd61STheodore Ts'o } else { 4339563bdd61STheodore Ts'o ret = -EIO; 434024676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 4341563bdd61STheodore Ts'o goto bad_inode; 4342ac27a0ecSDave Kleikamp } 4343ac27a0ecSDave Kleikamp brelse(iloc.bh); 4344617ba13bSMingming Cao ext4_set_inode_flags(inode); 43451d1fe1eeSDavid Howells unlock_new_inode(inode); 43461d1fe1eeSDavid Howells return inode; 4347ac27a0ecSDave Kleikamp 4348ac27a0ecSDave Kleikamp bad_inode: 4349567f3e9aSTheodore Ts'o brelse(iloc.bh); 43501d1fe1eeSDavid Howells iget_failed(inode); 43511d1fe1eeSDavid Howells return ERR_PTR(ret); 4352ac27a0ecSDave Kleikamp } 4353ac27a0ecSDave Kleikamp 43540fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 43550fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 43560fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 43570fc1b451SAneesh Kumar K.V { 43580fc1b451SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 43590fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks; 43600fc1b451SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 43610fc1b451SAneesh Kumar K.V 43620fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) { 43630fc1b451SAneesh Kumar K.V /* 43644907cb7bSAnatol Pomozov * i_blocks can be represented in a 32 bit variable 43650fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 43660fc1b451SAneesh Kumar K.V */ 43678180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 43680fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0; 436984a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4370f287a1a5STheodore Ts'o return 0; 4371f287a1a5STheodore Ts'o } 4372f287a1a5STheodore Ts'o if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) 4373f287a1a5STheodore Ts'o return -EFBIG; 4374f287a1a5STheodore Ts'o 4375f287a1a5STheodore Ts'o if (i_blocks <= 0xffffffffffffULL) { 43760fc1b451SAneesh Kumar K.V /* 43770fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable 43780fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 43790fc1b451SAneesh Kumar K.V */ 43808180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 43810fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 438284a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 43830fc1b451SAneesh Kumar K.V } else { 438484a8dce2SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 43858180a562SAneesh Kumar K.V /* i_block is stored in file system block size */ 43868180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9); 43878180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 43888180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 43890fc1b451SAneesh Kumar K.V } 4390f287a1a5STheodore Ts'o return 0; 43910fc1b451SAneesh Kumar K.V } 43920fc1b451SAneesh Kumar K.V 4393ac27a0ecSDave Kleikamp /* 4394ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the 4395ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the 4396ac27a0ecSDave Kleikamp * buffer_head in the inode location struct. 4397ac27a0ecSDave Kleikamp * 4398ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh. 4399ac27a0ecSDave Kleikamp */ 4400617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle, 4401ac27a0ecSDave Kleikamp struct inode *inode, 4402830156c7SFrank Mayhar struct ext4_iloc *iloc) 4403ac27a0ecSDave Kleikamp { 4404617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4405617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 4406ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh; 4407ac27a0ecSDave Kleikamp int err = 0, rc, block; 4408b71fc079SJan Kara int need_datasync = 0; 440908cefc7aSEric W. Biederman uid_t i_uid; 441008cefc7aSEric W. Biederman gid_t i_gid; 4411ac27a0ecSDave Kleikamp 4412ac27a0ecSDave Kleikamp /* For fields not not tracking in the in-memory inode, 4413ac27a0ecSDave Kleikamp * initialise them to zero for new inodes. */ 441419f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 4415617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4416ac27a0ecSDave Kleikamp 4417ff9ddf7eSJan Kara ext4_get_inode_flags(ei); 4418ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode); 441908cefc7aSEric W. Biederman i_uid = i_uid_read(inode); 442008cefc7aSEric W. Biederman i_gid = i_gid_read(inode); 4421ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 442208cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 442308cefc7aSEric W. Biederman raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 4424ac27a0ecSDave Kleikamp /* 4425ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels. Otherwise, old inodes get 4426ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact 4427ac27a0ecSDave Kleikamp */ 4428ac27a0ecSDave Kleikamp if (!ei->i_dtime) { 4429ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 443008cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_uid)); 4431ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 443208cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_gid)); 4433ac27a0ecSDave Kleikamp } else { 4434ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4435ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4436ac27a0ecSDave Kleikamp } 4437ac27a0ecSDave Kleikamp } else { 443808cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 443908cefc7aSEric W. Biederman raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 4440ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4441ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4442ac27a0ecSDave Kleikamp } 4443ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4444ef7f3835SKalpak Shah 4445ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4446ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4447ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4448ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4449ef7f3835SKalpak Shah 44500fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 44510fc1b451SAneesh Kumar K.V goto out_brelse; 4452ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4453353eb83cSTheodore Ts'o raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 44549b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 44559b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 4456a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 4457a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 44587973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4459b71fc079SJan Kara if (ei->i_disksize != ext4_isize(raw_inode)) { 4460a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 4461b71fc079SJan Kara need_datasync = 1; 4462b71fc079SJan Kara } 4463ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 4464ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 4465617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4466617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4467617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 4468617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4469ac27a0ecSDave Kleikamp /* If this is the first large file 4470ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 4471ac27a0ecSDave Kleikamp */ 4472617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, 4473617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 4474ac27a0ecSDave Kleikamp if (err) 4475ac27a0ecSDave Kleikamp goto out_brelse; 4476617ba13bSMingming Cao ext4_update_dynamic_rev(sb); 4477617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb, 4478617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 44790390131bSFrank Mayhar ext4_handle_sync(handle); 4480b50924c2SArtem Bityutskiy err = ext4_handle_dirty_super(handle, sb); 4481ac27a0ecSDave Kleikamp } 4482ac27a0ecSDave Kleikamp } 4483ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4484ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 4485ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) { 4486ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 4487ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev)); 4488ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0; 4489ac27a0ecSDave Kleikamp } else { 4490ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0; 4491ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 4492ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev)); 4493ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0; 4494ac27a0ecSDave Kleikamp } 4495f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 4496de9a55b8STheodore Ts'o for (block = 0; block < EXT4_N_BLOCKS; block++) 4497ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block]; 4498f19d5870STao Ma } 4499ac27a0ecSDave Kleikamp 450025ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 450125ec56b5SJean Noel Cordenner if (ei->i_extra_isize) { 450225ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 450325ec56b5SJean Noel Cordenner raw_inode->i_version_hi = 450425ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32); 4505ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 450625ec56b5SJean Noel Cordenner } 450725ec56b5SJean Noel Cordenner 4508814525f4SDarrick J. Wong ext4_inode_csum_set(inode, raw_inode, ei); 4509814525f4SDarrick J. Wong 45100390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 451173b50c1cSCurt Wohlgemuth rc = ext4_handle_dirty_metadata(handle, NULL, bh); 4512ac27a0ecSDave Kleikamp if (!err) 4513ac27a0ecSDave Kleikamp err = rc; 451419f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_NEW); 4515ac27a0ecSDave Kleikamp 4516b71fc079SJan Kara ext4_update_inode_fsync_trans(handle, inode, need_datasync); 4517ac27a0ecSDave Kleikamp out_brelse: 4518ac27a0ecSDave Kleikamp brelse(bh); 4519617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4520ac27a0ecSDave Kleikamp return err; 4521ac27a0ecSDave Kleikamp } 4522ac27a0ecSDave Kleikamp 4523ac27a0ecSDave Kleikamp /* 4524617ba13bSMingming Cao * ext4_write_inode() 4525ac27a0ecSDave Kleikamp * 4526ac27a0ecSDave Kleikamp * We are called from a few places: 4527ac27a0ecSDave Kleikamp * 4528ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files. 4529ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running 45304907cb7bSAnatol Pomozov * transaction to commit. 4531ac27a0ecSDave Kleikamp * 4532ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such. 4533ac27a0ecSDave Kleikamp * We wait on commit, if tol to. 4534ac27a0ecSDave Kleikamp * 4535ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true) 4536ac27a0ecSDave Kleikamp * Here we simply return. We can't afford to block kswapd on the 4537ac27a0ecSDave Kleikamp * journal commit. 4538ac27a0ecSDave Kleikamp * 4539ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything, 4540ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in 4541617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4542ac27a0ecSDave Kleikamp * knfsd. 4543ac27a0ecSDave Kleikamp * 4544ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the 4545ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in 4546ac27a0ecSDave Kleikamp * which we are interested. 4547ac27a0ecSDave Kleikamp * 4548ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code: 4549ac27a0ecSDave Kleikamp * 4550ac27a0ecSDave Kleikamp * mark_inode_dirty(inode) 4551ac27a0ecSDave Kleikamp * stuff(); 4552ac27a0ecSDave Kleikamp * inode->i_size = expr; 4553ac27a0ecSDave Kleikamp * 4554ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while 4555ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode 4556ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list. 4557ac27a0ecSDave Kleikamp */ 4558a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 4559ac27a0ecSDave Kleikamp { 456091ac6f43SFrank Mayhar int err; 456191ac6f43SFrank Mayhar 4562ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC) 4563ac27a0ecSDave Kleikamp return 0; 4564ac27a0ecSDave Kleikamp 456591ac6f43SFrank Mayhar if (EXT4_SB(inode->i_sb)->s_journal) { 4566617ba13bSMingming Cao if (ext4_journal_current_handle()) { 4567b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4568ac27a0ecSDave Kleikamp dump_stack(); 4569ac27a0ecSDave Kleikamp return -EIO; 4570ac27a0ecSDave Kleikamp } 4571ac27a0ecSDave Kleikamp 4572a9185b41SChristoph Hellwig if (wbc->sync_mode != WB_SYNC_ALL) 4573ac27a0ecSDave Kleikamp return 0; 4574ac27a0ecSDave Kleikamp 457591ac6f43SFrank Mayhar err = ext4_force_commit(inode->i_sb); 457691ac6f43SFrank Mayhar } else { 457791ac6f43SFrank Mayhar struct ext4_iloc iloc; 457891ac6f43SFrank Mayhar 45798b472d73SCurt Wohlgemuth err = __ext4_get_inode_loc(inode, &iloc, 0); 458091ac6f43SFrank Mayhar if (err) 458191ac6f43SFrank Mayhar return err; 4582a9185b41SChristoph Hellwig if (wbc->sync_mode == WB_SYNC_ALL) 4583830156c7SFrank Mayhar sync_dirty_buffer(iloc.bh); 4584830156c7SFrank Mayhar if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 4585c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 4586c398eda0STheodore Ts'o "IO error syncing inode"); 4587830156c7SFrank Mayhar err = -EIO; 4588830156c7SFrank Mayhar } 4589fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 459091ac6f43SFrank Mayhar } 459191ac6f43SFrank Mayhar return err; 4592ac27a0ecSDave Kleikamp } 4593ac27a0ecSDave Kleikamp 4594ac27a0ecSDave Kleikamp /* 459553e87268SJan Kara * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate 459653e87268SJan Kara * buffers that are attached to a page stradding i_size and are undergoing 459753e87268SJan Kara * commit. In that case we have to wait for commit to finish and try again. 459853e87268SJan Kara */ 459953e87268SJan Kara static void ext4_wait_for_tail_page_commit(struct inode *inode) 460053e87268SJan Kara { 460153e87268SJan Kara struct page *page; 460253e87268SJan Kara unsigned offset; 460353e87268SJan Kara journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 460453e87268SJan Kara tid_t commit_tid = 0; 460553e87268SJan Kara int ret; 460653e87268SJan Kara 460753e87268SJan Kara offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 460853e87268SJan Kara /* 460953e87268SJan Kara * All buffers in the last page remain valid? Then there's nothing to 461053e87268SJan Kara * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == 461153e87268SJan Kara * blocksize case 461253e87268SJan Kara */ 461353e87268SJan Kara if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits)) 461453e87268SJan Kara return; 461553e87268SJan Kara while (1) { 461653e87268SJan Kara page = find_lock_page(inode->i_mapping, 461753e87268SJan Kara inode->i_size >> PAGE_CACHE_SHIFT); 461853e87268SJan Kara if (!page) 461953e87268SJan Kara return; 462053e87268SJan Kara ret = __ext4_journalled_invalidatepage(page, offset); 462153e87268SJan Kara unlock_page(page); 462253e87268SJan Kara page_cache_release(page); 462353e87268SJan Kara if (ret != -EBUSY) 462453e87268SJan Kara return; 462553e87268SJan Kara commit_tid = 0; 462653e87268SJan Kara read_lock(&journal->j_state_lock); 462753e87268SJan Kara if (journal->j_committing_transaction) 462853e87268SJan Kara commit_tid = journal->j_committing_transaction->t_tid; 462953e87268SJan Kara read_unlock(&journal->j_state_lock); 463053e87268SJan Kara if (commit_tid) 463153e87268SJan Kara jbd2_log_wait_commit(journal, commit_tid); 463253e87268SJan Kara } 463353e87268SJan Kara } 463453e87268SJan Kara 463553e87268SJan Kara /* 4636617ba13bSMingming Cao * ext4_setattr() 4637ac27a0ecSDave Kleikamp * 4638ac27a0ecSDave Kleikamp * Called from notify_change. 4639ac27a0ecSDave Kleikamp * 4640ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as 4641ac27a0ecSDave Kleikamp * possible. In particular, we want to make sure that when the VFS 4642ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify 4643ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of 4644ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any 4645ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on 4646ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will 4647ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will 4648ac27a0ecSDave Kleikamp * leave these blocks visible to the user.) 4649ac27a0ecSDave Kleikamp * 4650678aaf48SJan Kara * Another thing we have to assure is that if we are in ordered mode 4651678aaf48SJan Kara * and inode is still attached to the committing transaction, we must 4652678aaf48SJan Kara * we start writeout of all the dirty pages which are being truncated. 4653678aaf48SJan Kara * This way we are sure that all the data written in the previous 4654678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under 4655678aaf48SJan Kara * writeback). 4656678aaf48SJan Kara * 4657678aaf48SJan Kara * Called with inode->i_mutex down. 4658ac27a0ecSDave Kleikamp */ 4659617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr) 4660ac27a0ecSDave Kleikamp { 4661ac27a0ecSDave Kleikamp struct inode *inode = dentry->d_inode; 4662ac27a0ecSDave Kleikamp int error, rc = 0; 46633d287de3SDmitry Monakhov int orphan = 0; 4664ac27a0ecSDave Kleikamp const unsigned int ia_valid = attr->ia_valid; 4665ac27a0ecSDave Kleikamp 4666ac27a0ecSDave Kleikamp error = inode_change_ok(inode, attr); 4667ac27a0ecSDave Kleikamp if (error) 4668ac27a0ecSDave Kleikamp return error; 4669ac27a0ecSDave Kleikamp 467012755627SDmitry Monakhov if (is_quota_modification(inode, attr)) 4671871a2931SChristoph Hellwig dquot_initialize(inode); 467208cefc7aSEric W. Biederman if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 467308cefc7aSEric W. Biederman (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 4674ac27a0ecSDave Kleikamp handle_t *handle; 4675ac27a0ecSDave Kleikamp 4676ac27a0ecSDave Kleikamp /* (user+group)*(old+new) structure, inode write (sb, 4677ac27a0ecSDave Kleikamp * inode block, ? - but truncate inode update has it) */ 46789924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 46799924a92aSTheodore Ts'o (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + 4680194074acSDmitry Monakhov EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); 4681ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4682ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4683ac27a0ecSDave Kleikamp goto err_out; 4684ac27a0ecSDave Kleikamp } 4685b43fa828SChristoph Hellwig error = dquot_transfer(inode, attr); 4686ac27a0ecSDave Kleikamp if (error) { 4687617ba13bSMingming Cao ext4_journal_stop(handle); 4688ac27a0ecSDave Kleikamp return error; 4689ac27a0ecSDave Kleikamp } 4690ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4691ac27a0ecSDave Kleikamp * one transaction */ 4692ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4693ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4694ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4695ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4696617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4697617ba13bSMingming Cao ext4_journal_stop(handle); 4698ac27a0ecSDave Kleikamp } 4699ac27a0ecSDave Kleikamp 4700e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4701562c72aaSChristoph Hellwig 470212e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4703e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4704e2b46574SEric Sandeen 47050c095c7fSTheodore Ts'o if (attr->ia_size > sbi->s_bitmap_maxbytes) 47060c095c7fSTheodore Ts'o return -EFBIG; 4707e2b46574SEric Sandeen } 4708e2b46574SEric Sandeen } 4709e2b46574SEric Sandeen 4710ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4711c8d46e41SJiaying Zhang attr->ia_valid & ATTR_SIZE && 4712072bd7eaSTheodore Ts'o (attr->ia_size < inode->i_size)) { 4713ac27a0ecSDave Kleikamp handle_t *handle; 4714ac27a0ecSDave Kleikamp 47159924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); 4716ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4717ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4718ac27a0ecSDave Kleikamp goto err_out; 4719ac27a0ecSDave Kleikamp } 47203d287de3SDmitry Monakhov if (ext4_handle_valid(handle)) { 4721617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 47223d287de3SDmitry Monakhov orphan = 1; 47233d287de3SDmitry Monakhov } 4724617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 4725617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 4726ac27a0ecSDave Kleikamp if (!error) 4727ac27a0ecSDave Kleikamp error = rc; 4728617ba13bSMingming Cao ext4_journal_stop(handle); 4729678aaf48SJan Kara 4730678aaf48SJan Kara if (ext4_should_order_data(inode)) { 4731678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode, 4732678aaf48SJan Kara attr->ia_size); 4733678aaf48SJan Kara if (error) { 4734678aaf48SJan Kara /* Do as much error cleanup as possible */ 47359924a92aSTheodore Ts'o handle = ext4_journal_start(inode, 47369924a92aSTheodore Ts'o EXT4_HT_INODE, 3); 4737678aaf48SJan Kara if (IS_ERR(handle)) { 4738678aaf48SJan Kara ext4_orphan_del(NULL, inode); 4739678aaf48SJan Kara goto err_out; 4740678aaf48SJan Kara } 4741678aaf48SJan Kara ext4_orphan_del(handle, inode); 47423d287de3SDmitry Monakhov orphan = 0; 4743678aaf48SJan Kara ext4_journal_stop(handle); 4744678aaf48SJan Kara goto err_out; 4745678aaf48SJan Kara } 4746678aaf48SJan Kara } 4747ac27a0ecSDave Kleikamp } 4748ac27a0ecSDave Kleikamp 4749072bd7eaSTheodore Ts'o if (attr->ia_valid & ATTR_SIZE) { 475053e87268SJan Kara if (attr->ia_size != inode->i_size) { 475153e87268SJan Kara loff_t oldsize = inode->i_size; 475253e87268SJan Kara 475353e87268SJan Kara i_size_write(inode, attr->ia_size); 475453e87268SJan Kara /* 475553e87268SJan Kara * Blocks are going to be removed from the inode. Wait 475653e87268SJan Kara * for dio in flight. Temporarily disable 475753e87268SJan Kara * dioread_nolock to prevent livelock. 475853e87268SJan Kara */ 47591b65007eSDmitry Monakhov if (orphan) { 476053e87268SJan Kara if (!ext4_should_journal_data(inode)) { 47611b65007eSDmitry Monakhov ext4_inode_block_unlocked_dio(inode); 47621c9114f9SDmitry Monakhov inode_dio_wait(inode); 47631b65007eSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode); 476453e87268SJan Kara } else 476553e87268SJan Kara ext4_wait_for_tail_page_commit(inode); 47661b65007eSDmitry Monakhov } 476753e87268SJan Kara /* 476853e87268SJan Kara * Truncate pagecache after we've waited for commit 476953e87268SJan Kara * in data=journal mode to make pages freeable. 477053e87268SJan Kara */ 477153e87268SJan Kara truncate_pagecache(inode, oldsize, inode->i_size); 47721c9114f9SDmitry Monakhov } 4773072bd7eaSTheodore Ts'o ext4_truncate(inode); 4774072bd7eaSTheodore Ts'o } 4775ac27a0ecSDave Kleikamp 47761025774cSChristoph Hellwig if (!rc) { 47771025774cSChristoph Hellwig setattr_copy(inode, attr); 47781025774cSChristoph Hellwig mark_inode_dirty(inode); 47791025774cSChristoph Hellwig } 47801025774cSChristoph Hellwig 47811025774cSChristoph Hellwig /* 47821025774cSChristoph Hellwig * If the call to ext4_truncate failed to get a transaction handle at 47831025774cSChristoph Hellwig * all, we need to clean up the in-core orphan list manually. 47841025774cSChristoph Hellwig */ 47853d287de3SDmitry Monakhov if (orphan && inode->i_nlink) 4786617ba13bSMingming Cao ext4_orphan_del(NULL, inode); 4787ac27a0ecSDave Kleikamp 4788ac27a0ecSDave Kleikamp if (!rc && (ia_valid & ATTR_MODE)) 4789617ba13bSMingming Cao rc = ext4_acl_chmod(inode); 4790ac27a0ecSDave Kleikamp 4791ac27a0ecSDave Kleikamp err_out: 4792617ba13bSMingming Cao ext4_std_error(inode->i_sb, error); 4793ac27a0ecSDave Kleikamp if (!error) 4794ac27a0ecSDave Kleikamp error = rc; 4795ac27a0ecSDave Kleikamp return error; 4796ac27a0ecSDave Kleikamp } 4797ac27a0ecSDave Kleikamp 47983e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 47993e3398a0SMingming Cao struct kstat *stat) 48003e3398a0SMingming Cao { 48013e3398a0SMingming Cao struct inode *inode; 48023e3398a0SMingming Cao unsigned long delalloc_blocks; 48033e3398a0SMingming Cao 48043e3398a0SMingming Cao inode = dentry->d_inode; 48053e3398a0SMingming Cao generic_fillattr(inode, stat); 48063e3398a0SMingming Cao 48073e3398a0SMingming Cao /* 48083e3398a0SMingming Cao * We can't update i_blocks if the block allocation is delayed 48093e3398a0SMingming Cao * otherwise in the case of system crash before the real block 48103e3398a0SMingming Cao * allocation is done, we will have i_blocks inconsistent with 48113e3398a0SMingming Cao * on-disk file blocks. 48123e3398a0SMingming Cao * We always keep i_blocks updated together with real 48133e3398a0SMingming Cao * allocation. But to not confuse with user, stat 48143e3398a0SMingming Cao * will return the blocks that include the delayed allocation 48153e3398a0SMingming Cao * blocks for this file. 48163e3398a0SMingming Cao */ 481796607551STao Ma delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), 481896607551STao Ma EXT4_I(inode)->i_reserved_data_blocks); 48193e3398a0SMingming Cao 48203e3398a0SMingming Cao stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 48213e3398a0SMingming Cao return 0; 48223e3398a0SMingming Cao } 4823ac27a0ecSDave Kleikamp 4824a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4825a02908f1SMingming Cao { 482612e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 48278bb2b247SAmir Goldstein return ext4_ind_trans_blocks(inode, nrblocks, chunk); 4828ac51d837STheodore Ts'o return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); 4829a02908f1SMingming Cao } 4830ac51d837STheodore Ts'o 4831a02908f1SMingming Cao /* 4832a02908f1SMingming Cao * Account for index blocks, block groups bitmaps and block group 4833a02908f1SMingming Cao * descriptor blocks if modify datablocks and index blocks 4834a02908f1SMingming Cao * worse case, the indexs blocks spread over different block groups 4835a02908f1SMingming Cao * 4836a02908f1SMingming Cao * If datablocks are discontiguous, they are possible to spread over 48374907cb7bSAnatol Pomozov * different block groups too. If they are contiguous, with flexbg, 4838a02908f1SMingming Cao * they could still across block group boundary. 4839a02908f1SMingming Cao * 4840a02908f1SMingming Cao * Also account for superblock, inode, quota and xattr blocks 4841a02908f1SMingming Cao */ 48421f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4843a02908f1SMingming Cao { 48448df9675fSTheodore Ts'o ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); 48458df9675fSTheodore Ts'o int gdpblocks; 4846a02908f1SMingming Cao int idxblocks; 4847a02908f1SMingming Cao int ret = 0; 4848a02908f1SMingming Cao 4849a02908f1SMingming Cao /* 4850a02908f1SMingming Cao * How many index blocks need to touch to modify nrblocks? 4851a02908f1SMingming Cao * The "Chunk" flag indicating whether the nrblocks is 4852a02908f1SMingming Cao * physically contiguous on disk 4853a02908f1SMingming Cao * 4854a02908f1SMingming Cao * For Direct IO and fallocate, they calls get_block to allocate 4855a02908f1SMingming Cao * one single extent at a time, so they could set the "Chunk" flag 4856a02908f1SMingming Cao */ 4857a02908f1SMingming Cao idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); 4858a02908f1SMingming Cao 4859a02908f1SMingming Cao ret = idxblocks; 4860a02908f1SMingming Cao 4861a02908f1SMingming Cao /* 4862a02908f1SMingming Cao * Now let's see how many group bitmaps and group descriptors need 4863a02908f1SMingming Cao * to account 4864a02908f1SMingming Cao */ 4865a02908f1SMingming Cao groups = idxblocks; 4866a02908f1SMingming Cao if (chunk) 4867a02908f1SMingming Cao groups += 1; 4868ac27a0ecSDave Kleikamp else 4869a02908f1SMingming Cao groups += nrblocks; 4870ac27a0ecSDave Kleikamp 4871a02908f1SMingming Cao gdpblocks = groups; 48728df9675fSTheodore Ts'o if (groups > ngroups) 48738df9675fSTheodore Ts'o groups = ngroups; 4874a02908f1SMingming Cao if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) 4875a02908f1SMingming Cao gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; 4876a02908f1SMingming Cao 4877a02908f1SMingming Cao /* bitmaps and block group descriptor blocks */ 4878a02908f1SMingming Cao ret += groups + gdpblocks; 4879a02908f1SMingming Cao 4880a02908f1SMingming Cao /* Blocks for super block, inode, quota and xattr blocks */ 4881a02908f1SMingming Cao ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); 4882ac27a0ecSDave Kleikamp 4883ac27a0ecSDave Kleikamp return ret; 4884ac27a0ecSDave Kleikamp } 4885ac27a0ecSDave Kleikamp 4886ac27a0ecSDave Kleikamp /* 488725985edcSLucas De Marchi * Calculate the total number of credits to reserve to fit 4888f3bd1f3fSMingming Cao * the modification of a single pages into a single transaction, 4889f3bd1f3fSMingming Cao * which may include multiple chunks of block allocations. 4890a02908f1SMingming Cao * 4891525f4ed8SMingming Cao * This could be called via ext4_write_begin() 4892a02908f1SMingming Cao * 4893525f4ed8SMingming Cao * We need to consider the worse case, when 4894a02908f1SMingming Cao * one new block per extent. 4895a02908f1SMingming Cao */ 4896a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode) 4897a02908f1SMingming Cao { 4898a02908f1SMingming Cao int bpp = ext4_journal_blocks_per_page(inode); 4899a02908f1SMingming Cao int ret; 4900a02908f1SMingming Cao 4901a02908f1SMingming Cao ret = ext4_meta_trans_blocks(inode, bpp, 0); 4902a02908f1SMingming Cao 4903a02908f1SMingming Cao /* Account for data blocks for journalled mode */ 4904a02908f1SMingming Cao if (ext4_should_journal_data(inode)) 4905a02908f1SMingming Cao ret += bpp; 4906a02908f1SMingming Cao return ret; 4907a02908f1SMingming Cao } 4908f3bd1f3fSMingming Cao 4909f3bd1f3fSMingming Cao /* 4910f3bd1f3fSMingming Cao * Calculate the journal credits for a chunk of data modification. 4911f3bd1f3fSMingming Cao * 4912f3bd1f3fSMingming Cao * This is called from DIO, fallocate or whoever calling 491379e83036SEric Sandeen * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks. 4914f3bd1f3fSMingming Cao * 4915f3bd1f3fSMingming Cao * journal buffers for data blocks are not included here, as DIO 4916f3bd1f3fSMingming Cao * and fallocate do no need to journal data buffers. 4917f3bd1f3fSMingming Cao */ 4918f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) 4919f3bd1f3fSMingming Cao { 4920f3bd1f3fSMingming Cao return ext4_meta_trans_blocks(inode, nrblocks, 1); 4921f3bd1f3fSMingming Cao } 4922f3bd1f3fSMingming Cao 4923a02908f1SMingming Cao /* 4924617ba13bSMingming Cao * The caller must have previously called ext4_reserve_inode_write(). 4925ac27a0ecSDave Kleikamp * Give this, we know that the caller already has write access to iloc->bh. 4926ac27a0ecSDave Kleikamp */ 4927617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle, 4928617ba13bSMingming Cao struct inode *inode, struct ext4_iloc *iloc) 4929ac27a0ecSDave Kleikamp { 4930ac27a0ecSDave Kleikamp int err = 0; 4931ac27a0ecSDave Kleikamp 4932c64db50eSTheodore Ts'o if (IS_I_VERSION(inode)) 493325ec56b5SJean Noel Cordenner inode_inc_iversion(inode); 493425ec56b5SJean Noel Cordenner 4935ac27a0ecSDave Kleikamp /* the do_update_inode consumes one bh->b_count */ 4936ac27a0ecSDave Kleikamp get_bh(iloc->bh); 4937ac27a0ecSDave Kleikamp 4938dab291afSMingming Cao /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ 4939830156c7SFrank Mayhar err = ext4_do_update_inode(handle, inode, iloc); 4940ac27a0ecSDave Kleikamp put_bh(iloc->bh); 4941ac27a0ecSDave Kleikamp return err; 4942ac27a0ecSDave Kleikamp } 4943ac27a0ecSDave Kleikamp 4944ac27a0ecSDave Kleikamp /* 4945ac27a0ecSDave Kleikamp * On success, We end up with an outstanding reference count against 4946ac27a0ecSDave Kleikamp * iloc->bh. This _must_ be cleaned up later. 4947ac27a0ecSDave Kleikamp */ 4948ac27a0ecSDave Kleikamp 4949ac27a0ecSDave Kleikamp int 4950617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 4951617ba13bSMingming Cao struct ext4_iloc *iloc) 4952ac27a0ecSDave Kleikamp { 49530390131bSFrank Mayhar int err; 49540390131bSFrank Mayhar 4955617ba13bSMingming Cao err = ext4_get_inode_loc(inode, iloc); 4956ac27a0ecSDave Kleikamp if (!err) { 4957ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc->bh, "get_write_access"); 4958617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, iloc->bh); 4959ac27a0ecSDave Kleikamp if (err) { 4960ac27a0ecSDave Kleikamp brelse(iloc->bh); 4961ac27a0ecSDave Kleikamp iloc->bh = NULL; 4962ac27a0ecSDave Kleikamp } 4963ac27a0ecSDave Kleikamp } 4964617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4965ac27a0ecSDave Kleikamp return err; 4966ac27a0ecSDave Kleikamp } 4967ac27a0ecSDave Kleikamp 4968ac27a0ecSDave Kleikamp /* 49696dd4ee7cSKalpak Shah * Expand an inode by new_extra_isize bytes. 49706dd4ee7cSKalpak Shah * Returns 0 on success or negative error number on failure. 49716dd4ee7cSKalpak Shah */ 49721d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode, 49731d03ec98SAneesh Kumar K.V unsigned int new_extra_isize, 49741d03ec98SAneesh Kumar K.V struct ext4_iloc iloc, 49751d03ec98SAneesh Kumar K.V handle_t *handle) 49766dd4ee7cSKalpak Shah { 49776dd4ee7cSKalpak Shah struct ext4_inode *raw_inode; 49786dd4ee7cSKalpak Shah struct ext4_xattr_ibody_header *header; 49796dd4ee7cSKalpak Shah 49806dd4ee7cSKalpak Shah if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 49816dd4ee7cSKalpak Shah return 0; 49826dd4ee7cSKalpak Shah 49836dd4ee7cSKalpak Shah raw_inode = ext4_raw_inode(&iloc); 49846dd4ee7cSKalpak Shah 49856dd4ee7cSKalpak Shah header = IHDR(inode, raw_inode); 49866dd4ee7cSKalpak Shah 49876dd4ee7cSKalpak Shah /* No extended attributes present */ 498819f5fb7aSTheodore Ts'o if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 49896dd4ee7cSKalpak Shah header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 49906dd4ee7cSKalpak Shah memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 49916dd4ee7cSKalpak Shah new_extra_isize); 49926dd4ee7cSKalpak Shah EXT4_I(inode)->i_extra_isize = new_extra_isize; 49936dd4ee7cSKalpak Shah return 0; 49946dd4ee7cSKalpak Shah } 49956dd4ee7cSKalpak Shah 49966dd4ee7cSKalpak Shah /* try to expand with EAs present */ 49976dd4ee7cSKalpak Shah return ext4_expand_extra_isize_ea(inode, new_extra_isize, 49986dd4ee7cSKalpak Shah raw_inode, handle); 49996dd4ee7cSKalpak Shah } 50006dd4ee7cSKalpak Shah 50016dd4ee7cSKalpak Shah /* 5002ac27a0ecSDave Kleikamp * What we do here is to mark the in-core inode as clean with respect to inode 5003ac27a0ecSDave Kleikamp * dirtiness (it may still be data-dirty). 5004ac27a0ecSDave Kleikamp * This means that the in-core inode may be reaped by prune_icache 5005ac27a0ecSDave Kleikamp * without having to perform any I/O. This is a very good thing, 5006ac27a0ecSDave Kleikamp * because *any* task may call prune_icache - even ones which 5007ac27a0ecSDave Kleikamp * have a transaction open against a different journal. 5008ac27a0ecSDave Kleikamp * 5009ac27a0ecSDave Kleikamp * Is this cheating? Not really. Sure, we haven't written the 5010ac27a0ecSDave Kleikamp * inode out, but prune_icache isn't a user-visible syncing function. 5011ac27a0ecSDave Kleikamp * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 5012ac27a0ecSDave Kleikamp * we start and wait on commits. 5013ac27a0ecSDave Kleikamp */ 5014617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 5015ac27a0ecSDave Kleikamp { 5016617ba13bSMingming Cao struct ext4_iloc iloc; 50176dd4ee7cSKalpak Shah struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 50186dd4ee7cSKalpak Shah static unsigned int mnt_count; 50196dd4ee7cSKalpak Shah int err, ret; 5020ac27a0ecSDave Kleikamp 5021ac27a0ecSDave Kleikamp might_sleep(); 50227ff9c073STheodore Ts'o trace_ext4_mark_inode_dirty(inode, _RET_IP_); 5023617ba13bSMingming Cao err = ext4_reserve_inode_write(handle, inode, &iloc); 50240390131bSFrank Mayhar if (ext4_handle_valid(handle) && 50250390131bSFrank Mayhar EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 502619f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { 50276dd4ee7cSKalpak Shah /* 50286dd4ee7cSKalpak Shah * We need extra buffer credits since we may write into EA block 50296dd4ee7cSKalpak Shah * with this same handle. If journal_extend fails, then it will 50306dd4ee7cSKalpak Shah * only result in a minor loss of functionality for that inode. 50316dd4ee7cSKalpak Shah * If this is felt to be critical, then e2fsck should be run to 50326dd4ee7cSKalpak Shah * force a large enough s_min_extra_isize. 50336dd4ee7cSKalpak Shah */ 50346dd4ee7cSKalpak Shah if ((jbd2_journal_extend(handle, 50356dd4ee7cSKalpak Shah EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 50366dd4ee7cSKalpak Shah ret = ext4_expand_extra_isize(inode, 50376dd4ee7cSKalpak Shah sbi->s_want_extra_isize, 50386dd4ee7cSKalpak Shah iloc, handle); 50396dd4ee7cSKalpak Shah if (ret) { 504019f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, 504119f5fb7aSTheodore Ts'o EXT4_STATE_NO_EXPAND); 5042c1bddad9SAneesh Kumar K.V if (mnt_count != 5043c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count)) { 504412062dddSEric Sandeen ext4_warning(inode->i_sb, 50456dd4ee7cSKalpak Shah "Unable to expand inode %lu. Delete" 50466dd4ee7cSKalpak Shah " some EAs or run e2fsck.", 50476dd4ee7cSKalpak Shah inode->i_ino); 5048c1bddad9SAneesh Kumar K.V mnt_count = 5049c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count); 50506dd4ee7cSKalpak Shah } 50516dd4ee7cSKalpak Shah } 50526dd4ee7cSKalpak Shah } 50536dd4ee7cSKalpak Shah } 5054ac27a0ecSDave Kleikamp if (!err) 5055617ba13bSMingming Cao err = ext4_mark_iloc_dirty(handle, inode, &iloc); 5056ac27a0ecSDave Kleikamp return err; 5057ac27a0ecSDave Kleikamp } 5058ac27a0ecSDave Kleikamp 5059ac27a0ecSDave Kleikamp /* 5060617ba13bSMingming Cao * ext4_dirty_inode() is called from __mark_inode_dirty() 5061ac27a0ecSDave Kleikamp * 5062ac27a0ecSDave Kleikamp * We're really interested in the case where a file is being extended. 5063ac27a0ecSDave Kleikamp * i_size has been changed by generic_commit_write() and we thus need 5064ac27a0ecSDave Kleikamp * to include the updated inode in the current transaction. 5065ac27a0ecSDave Kleikamp * 50665dd4056dSChristoph Hellwig * Also, dquot_alloc_block() will always dirty the inode when blocks 5067ac27a0ecSDave Kleikamp * are allocated to the file. 5068ac27a0ecSDave Kleikamp * 5069ac27a0ecSDave Kleikamp * If the inode is marked synchronous, we don't honour that here - doing 5070ac27a0ecSDave Kleikamp * so would cause a commit on atime updates, which we don't bother doing. 5071ac27a0ecSDave Kleikamp * We handle synchronous inodes at the highest possible level. 5072ac27a0ecSDave Kleikamp */ 5073aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags) 5074ac27a0ecSDave Kleikamp { 5075ac27a0ecSDave Kleikamp handle_t *handle; 5076ac27a0ecSDave Kleikamp 50779924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 5078ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 5079ac27a0ecSDave Kleikamp goto out; 5080f3dc272fSCurt Wohlgemuth 5081617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 5082f3dc272fSCurt Wohlgemuth 5083617ba13bSMingming Cao ext4_journal_stop(handle); 5084ac27a0ecSDave Kleikamp out: 5085ac27a0ecSDave Kleikamp return; 5086ac27a0ecSDave Kleikamp } 5087ac27a0ecSDave Kleikamp 5088ac27a0ecSDave Kleikamp #if 0 5089ac27a0ecSDave Kleikamp /* 5090ac27a0ecSDave Kleikamp * Bind an inode's backing buffer_head into this transaction, to prevent 5091ac27a0ecSDave Kleikamp * it from being flushed to disk early. Unlike 5092617ba13bSMingming Cao * ext4_reserve_inode_write, this leaves behind no bh reference and 5093ac27a0ecSDave Kleikamp * returns no iloc structure, so the caller needs to repeat the iloc 5094ac27a0ecSDave Kleikamp * lookup to mark the inode dirty later. 5095ac27a0ecSDave Kleikamp */ 5096617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode) 5097ac27a0ecSDave Kleikamp { 5098617ba13bSMingming Cao struct ext4_iloc iloc; 5099ac27a0ecSDave Kleikamp 5100ac27a0ecSDave Kleikamp int err = 0; 5101ac27a0ecSDave Kleikamp if (handle) { 5102617ba13bSMingming Cao err = ext4_get_inode_loc(inode, &iloc); 5103ac27a0ecSDave Kleikamp if (!err) { 5104ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc.bh, "get_write_access"); 5105dab291afSMingming Cao err = jbd2_journal_get_write_access(handle, iloc.bh); 5106ac27a0ecSDave Kleikamp if (!err) 51070390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, 510873b50c1cSCurt Wohlgemuth NULL, 5109ac27a0ecSDave Kleikamp iloc.bh); 5110ac27a0ecSDave Kleikamp brelse(iloc.bh); 5111ac27a0ecSDave Kleikamp } 5112ac27a0ecSDave Kleikamp } 5113617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 5114ac27a0ecSDave Kleikamp return err; 5115ac27a0ecSDave Kleikamp } 5116ac27a0ecSDave Kleikamp #endif 5117ac27a0ecSDave Kleikamp 5118617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val) 5119ac27a0ecSDave Kleikamp { 5120ac27a0ecSDave Kleikamp journal_t *journal; 5121ac27a0ecSDave Kleikamp handle_t *handle; 5122ac27a0ecSDave Kleikamp int err; 5123ac27a0ecSDave Kleikamp 5124ac27a0ecSDave Kleikamp /* 5125ac27a0ecSDave Kleikamp * We have to be very careful here: changing a data block's 5126ac27a0ecSDave Kleikamp * journaling status dynamically is dangerous. If we write a 5127ac27a0ecSDave Kleikamp * data block to the journal, change the status and then delete 5128ac27a0ecSDave Kleikamp * that block, we risk forgetting to revoke the old log record 5129ac27a0ecSDave Kleikamp * from the journal and so a subsequent replay can corrupt data. 5130ac27a0ecSDave Kleikamp * So, first we make sure that the journal is empty and that 5131ac27a0ecSDave Kleikamp * nobody is changing anything. 5132ac27a0ecSDave Kleikamp */ 5133ac27a0ecSDave Kleikamp 5134617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 51350390131bSFrank Mayhar if (!journal) 51360390131bSFrank Mayhar return 0; 5137d699594dSDave Hansen if (is_journal_aborted(journal)) 5138ac27a0ecSDave Kleikamp return -EROFS; 51392aff57b0SYongqiang Yang /* We have to allocate physical blocks for delalloc blocks 51402aff57b0SYongqiang Yang * before flushing journal. otherwise delalloc blocks can not 51412aff57b0SYongqiang Yang * be allocated any more. even more truncate on delalloc blocks 51422aff57b0SYongqiang Yang * could trigger BUG by flushing delalloc blocks in journal. 51432aff57b0SYongqiang Yang * There is no delalloc block in non-journal data mode. 51442aff57b0SYongqiang Yang */ 51452aff57b0SYongqiang Yang if (val && test_opt(inode->i_sb, DELALLOC)) { 51462aff57b0SYongqiang Yang err = ext4_alloc_da_blocks(inode); 51472aff57b0SYongqiang Yang if (err < 0) 51482aff57b0SYongqiang Yang return err; 51492aff57b0SYongqiang Yang } 5150ac27a0ecSDave Kleikamp 515117335dccSDmitry Monakhov /* Wait for all existing dio workers */ 515217335dccSDmitry Monakhov ext4_inode_block_unlocked_dio(inode); 515317335dccSDmitry Monakhov inode_dio_wait(inode); 515417335dccSDmitry Monakhov 5155dab291afSMingming Cao jbd2_journal_lock_updates(journal); 5156ac27a0ecSDave Kleikamp 5157ac27a0ecSDave Kleikamp /* 5158ac27a0ecSDave Kleikamp * OK, there are no updates running now, and all cached data is 5159ac27a0ecSDave Kleikamp * synced to disk. We are now in a completely consistent state 5160ac27a0ecSDave Kleikamp * which doesn't have anything in the journal, and we know that 5161ac27a0ecSDave Kleikamp * no filesystem updates are running, so it is safe to modify 5162ac27a0ecSDave Kleikamp * the inode's in-core data-journaling state flag now. 5163ac27a0ecSDave Kleikamp */ 5164ac27a0ecSDave Kleikamp 5165ac27a0ecSDave Kleikamp if (val) 516612e9b892SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 51675872ddaaSYongqiang Yang else { 51685872ddaaSYongqiang Yang jbd2_journal_flush(journal); 516912e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 51705872ddaaSYongqiang Yang } 5171617ba13bSMingming Cao ext4_set_aops(inode); 5172ac27a0ecSDave Kleikamp 5173dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 517417335dccSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode); 5175ac27a0ecSDave Kleikamp 5176ac27a0ecSDave Kleikamp /* Finally we can mark the inode as dirty. */ 5177ac27a0ecSDave Kleikamp 51789924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 5179ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 5180ac27a0ecSDave Kleikamp return PTR_ERR(handle); 5181ac27a0ecSDave Kleikamp 5182617ba13bSMingming Cao err = ext4_mark_inode_dirty(handle, inode); 51830390131bSFrank Mayhar ext4_handle_sync(handle); 5184617ba13bSMingming Cao ext4_journal_stop(handle); 5185617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 5186ac27a0ecSDave Kleikamp 5187ac27a0ecSDave Kleikamp return err; 5188ac27a0ecSDave Kleikamp } 51892e9ee850SAneesh Kumar K.V 51902e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 51912e9ee850SAneesh Kumar K.V { 51922e9ee850SAneesh Kumar K.V return !buffer_mapped(bh); 51932e9ee850SAneesh Kumar K.V } 51942e9ee850SAneesh Kumar K.V 5195c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 51962e9ee850SAneesh Kumar K.V { 5197c2ec175cSNick Piggin struct page *page = vmf->page; 51982e9ee850SAneesh Kumar K.V loff_t size; 51992e9ee850SAneesh Kumar K.V unsigned long len; 52009ea7df53SJan Kara int ret; 52012e9ee850SAneesh Kumar K.V struct file *file = vma->vm_file; 5202496ad9aaSAl Viro struct inode *inode = file_inode(file); 52032e9ee850SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 52049ea7df53SJan Kara handle_t *handle; 52059ea7df53SJan Kara get_block_t *get_block; 52069ea7df53SJan Kara int retries = 0; 52072e9ee850SAneesh Kumar K.V 52088e8ad8a5SJan Kara sb_start_pagefault(inode->i_sb); 5209041bbb6dSTheodore Ts'o file_update_time(vma->vm_file); 52109ea7df53SJan Kara /* Delalloc case is easy... */ 52119ea7df53SJan Kara if (test_opt(inode->i_sb, DELALLOC) && 52129ea7df53SJan Kara !ext4_should_journal_data(inode) && 52139ea7df53SJan Kara !ext4_nonda_switch(inode->i_sb)) { 52149ea7df53SJan Kara do { 52159ea7df53SJan Kara ret = __block_page_mkwrite(vma, vmf, 52169ea7df53SJan Kara ext4_da_get_block_prep); 52179ea7df53SJan Kara } while (ret == -ENOSPC && 52189ea7df53SJan Kara ext4_should_retry_alloc(inode->i_sb, &retries)); 52199ea7df53SJan Kara goto out_ret; 52202e9ee850SAneesh Kumar K.V } 52210e499890SDarrick J. Wong 52220e499890SDarrick J. Wong lock_page(page); 52239ea7df53SJan Kara size = i_size_read(inode); 52249ea7df53SJan Kara /* Page got truncated from under us? */ 52259ea7df53SJan Kara if (page->mapping != mapping || page_offset(page) > size) { 52269ea7df53SJan Kara unlock_page(page); 52279ea7df53SJan Kara ret = VM_FAULT_NOPAGE; 52289ea7df53SJan Kara goto out; 52290e499890SDarrick J. Wong } 52302e9ee850SAneesh Kumar K.V 52312e9ee850SAneesh Kumar K.V if (page->index == size >> PAGE_CACHE_SHIFT) 52322e9ee850SAneesh Kumar K.V len = size & ~PAGE_CACHE_MASK; 52332e9ee850SAneesh Kumar K.V else 52342e9ee850SAneesh Kumar K.V len = PAGE_CACHE_SIZE; 5235a827eaffSAneesh Kumar K.V /* 52369ea7df53SJan Kara * Return if we have all the buffers mapped. This avoids the need to do 52379ea7df53SJan Kara * journal_start/journal_stop which can block and take a long time 5238a827eaffSAneesh Kumar K.V */ 52392e9ee850SAneesh Kumar K.V if (page_has_buffers(page)) { 5240f19d5870STao Ma if (!ext4_walk_page_buffers(NULL, page_buffers(page), 5241f19d5870STao Ma 0, len, NULL, 5242a827eaffSAneesh Kumar K.V ext4_bh_unmapped)) { 52439ea7df53SJan Kara /* Wait so that we don't change page under IO */ 52441d1d1a76SDarrick J. Wong wait_for_stable_page(page); 52459ea7df53SJan Kara ret = VM_FAULT_LOCKED; 52469ea7df53SJan Kara goto out; 52472e9ee850SAneesh Kumar K.V } 5248a827eaffSAneesh Kumar K.V } 5249a827eaffSAneesh Kumar K.V unlock_page(page); 52509ea7df53SJan Kara /* OK, we need to fill the hole... */ 52519ea7df53SJan Kara if (ext4_should_dioread_nolock(inode)) 52529ea7df53SJan Kara get_block = ext4_get_block_write; 52539ea7df53SJan Kara else 52549ea7df53SJan Kara get_block = ext4_get_block; 52559ea7df53SJan Kara retry_alloc: 52569924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 52579924a92aSTheodore Ts'o ext4_writepage_trans_blocks(inode)); 52589ea7df53SJan Kara if (IS_ERR(handle)) { 5259c2ec175cSNick Piggin ret = VM_FAULT_SIGBUS; 52609ea7df53SJan Kara goto out; 52619ea7df53SJan Kara } 52629ea7df53SJan Kara ret = __block_page_mkwrite(vma, vmf, get_block); 52639ea7df53SJan Kara if (!ret && ext4_should_journal_data(inode)) { 5264f19d5870STao Ma if (ext4_walk_page_buffers(handle, page_buffers(page), 0, 52659ea7df53SJan Kara PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { 52669ea7df53SJan Kara unlock_page(page); 52679ea7df53SJan Kara ret = VM_FAULT_SIGBUS; 5268fcbb5515SYongqiang Yang ext4_journal_stop(handle); 52699ea7df53SJan Kara goto out; 52709ea7df53SJan Kara } 52719ea7df53SJan Kara ext4_set_inode_state(inode, EXT4_STATE_JDATA); 52729ea7df53SJan Kara } 52739ea7df53SJan Kara ext4_journal_stop(handle); 52749ea7df53SJan Kara if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 52759ea7df53SJan Kara goto retry_alloc; 52769ea7df53SJan Kara out_ret: 52779ea7df53SJan Kara ret = block_page_mkwrite_return(ret); 52789ea7df53SJan Kara out: 52798e8ad8a5SJan Kara sb_end_pagefault(inode->i_sb); 52802e9ee850SAneesh Kumar K.V return ret; 52812e9ee850SAneesh Kumar K.V } 5282