/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
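/*
 * Illustrative sketch (not from the original source): on an inode large
 * enough to hold i_checksum_hi, storing the full 32-bit checksum is
 * effectively
 *
 *	__u32 csum = ext4_inode_csum(inode, raw, ei);
 *	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
 *	raw->i_checksum_hi = cpu_to_le16(csum >> 16);
 *
 * while a 128-byte (GOOD_OLD) inode can only hold the low 16 bits, which
 * is why ext4_inode_csum_verify() masks "calculated" with 0xFFFF in that
 * case.
 */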
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
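/*
 * Illustrative caller pattern (a sketch, not taken from this file):
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed)) {
 *		err = ext4_truncate_restart_trans(handle, inode, needed);
 *		if (err)
 *			goto out;
 *	}
 *
 * Everything must be consistently dirtied before the call, because the
 * restart commits the running transaction; dropping i_data_sem around
 * ext4_journal_restart() is what avoids deadlocking against
 * ext4_map_blocks() callers.
 */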
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus user could see stale data if he tries to
		 * read them before the transaction is checkpointed. So be
		 * careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
				    ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}
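/*
 * Accounting sketch (illustrative, not from the original source): at
 * delayed-allocation write time the per-cluster reservation is roughly
 *
 *	md_needed = EXT4_NUM_B2C(sbi,
 *				 ext4_calc_metadata_amount(inode, lblock));
 *	// one data cluster + md_needed metadata clusters stay charged to
 *	// sbi->s_dirtyclusters_counter until writeback allocates them
 *
 * ext4_da_update_reserve_space() below is the matching release path once
 * blocks are actually allocated.
 */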
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			     "with only %d reserved data blocks",
			     __func__, inode->i_ino, used,
			     ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, allocated %d "
			"with only %d reserved metadata blocks\n", __func__,
			inode->i_ino, ei->i_allocated_meta_blocks,
			ei->i_reserved_meta_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}
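/*
 * Usage note (illustrative): the delalloc writeback path can use this to
 * size an allocation request, e.g.
 *
 *	desired_nr_to_write = ext4_num_dirty_pages(inode, index, max_pages);
 *
 * The scan deliberately stops at the first page that is clean, under
 * writeback, or discontiguous, since only a contiguous dirty range is
 * useful for building a single large extent allocation.
 */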
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem and allocates blocks
 * and stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping-based
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create==0 and the blocks are pre-allocated and uninitialized, the
 * result buffer head is unmapped. If create==1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated); in
 * that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret;
		if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
			/* delayed allocation may have been done by fallocate
			 * and converted to initialized by direct I/O;
			 * we need to handle delayed extents here.
			 */
			down_write((&EXT4_I(inode)->i_data_sem));
			goto delayed_mapped;
		}
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent. We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
			int ret;
delayed_mapped:
			/* delayed allocation blocks have been allocated */
			ret = ext4_es_remove_extent(inode, map->m_lblk,
						    map->m_len);
			if (ret < 0)
				retval = ret;
		}
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
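/*
 * Illustrative caller (a sketch, not from this file): a read-only lookup
 * of a single logical block looks roughly like
 *
 *	struct ext4_map_blocks map;
 *	int ret;
 *
 *	map.m_lblk = lblk;		// hypothetical logical block
 *	map.m_len = 1;
 *	ret = ext4_map_blocks(NULL, inode, &map, 0);
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		use(map.m_pblk);	// mapped, nothing was allocated
 *
 * Passing EXT4_GET_BLOCKS_CREATE (with a valid handle) switches to the
 * allocating path under the write lock of i_data_sem, as above.
 */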
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
					    dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
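/*
 * ext4_get_block() has the get_block_t signature, so it can be passed
 * straight to generic buffer-head helpers; for example (sketch):
 *
 *	ret = __block_write_begin(page, pos, len, ext4_get_block);
 *
 * which is exactly how ext4_write_begin() below uses it for the
 * non-dioread_nolock case.
 */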
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	/* ensure we send some value back into *errp */
	*errp = 0;

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh)) {
		*errp = -ENOMEM;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
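/*
 * Typical use of the walker (sketch): apply a journaling callback to the
 * buffers overlapping [from, to) in a page, e.g.
 *
 *	ret = ext4_walk_page_buffers(handle, page_buffers(page),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 *
 * as done by ext4_write_begin() for data=journal mounts; "partial" is
 * only needed by callers that must know whether buffers outside the
 * range were not uptodate (see ext4_journalled_write_end()).
 */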
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		page_cache_release(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		page_cache_release(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	wait_on_page_writeback(page);

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		page_cache_release(page);
		return ret;
	}
	*pagep = page;
	return ret;
}
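/*
 * Outline of the retry structure above (editorial summary):
 *
 *	retry_grab:	take the page first, without a handle, so page
 *			allocation cannot recurse into the journal
 *	retry_journal:	start the handle; if the page was truncated from
 *			under us while it was unlocked, stop the handle
 *			and re-grab the page
 *	-ENOSPC:	stop the handle and retry the journal start once
 *			ext4_should_retry_alloc() says a commit may free
 *			enough space
 */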
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	if (ext4_has_inline_data(inode))
		copied = ext4_write_inline_data_end(inode, pos, len,
						    copied, page);
	else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * less, we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
ret : copied; 1114ac27a0ecSDave Kleikamp } 1115ac27a0ecSDave Kleikamp 1116bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file, 1117bfc1af65SNick Piggin struct address_space *mapping, 1118bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1119bfc1af65SNick Piggin struct page *page, void *fsdata) 1120ac27a0ecSDave Kleikamp { 1121617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1122bfc1af65SNick Piggin struct inode *inode = mapping->host; 1123ac27a0ecSDave Kleikamp int ret = 0, ret2; 1124ac27a0ecSDave Kleikamp int partial = 0; 1125bfc1af65SNick Piggin unsigned from, to; 1126cf17fea6SAneesh Kumar K.V loff_t new_i_size; 1127ac27a0ecSDave Kleikamp 11289bffad1eSTheodore Ts'o trace_ext4_journalled_write_end(inode, pos, len, copied); 1129bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1130bfc1af65SNick Piggin to = from + len; 1131bfc1af65SNick Piggin 1132441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1133441c8508SCurt Wohlgemuth 11343fdcfb66STao Ma if (ext4_has_inline_data(inode)) 11353fdcfb66STao Ma copied = ext4_write_inline_data_end(inode, pos, len, 11363fdcfb66STao Ma copied, page); 11373fdcfb66STao Ma else { 1138bfc1af65SNick Piggin if (copied < len) { 1139bfc1af65SNick Piggin if (!PageUptodate(page)) 1140bfc1af65SNick Piggin copied = 0; 1141bfc1af65SNick Piggin page_zero_new_buffers(page, from+copied, to); 1142bfc1af65SNick Piggin } 1143ac27a0ecSDave Kleikamp 1144f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_buffers(page), from, 1145bfc1af65SNick Piggin to, &partial, write_end_fn); 1146ac27a0ecSDave Kleikamp if (!partial) 1147ac27a0ecSDave Kleikamp SetPageUptodate(page); 11483fdcfb66STao Ma } 1149cf17fea6SAneesh Kumar K.V new_i_size = pos + copied; 1150cf17fea6SAneesh Kumar K.V if (new_i_size > inode->i_size) 1151bfc1af65SNick Piggin i_size_write(inode, pos+copied); 115219f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 11532d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1154cf17fea6SAneesh Kumar K.V if (new_i_size > EXT4_I(inode)->i_disksize) { 1155cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_i_size); 1156617ba13bSMingming Cao ret2 = ext4_mark_inode_dirty(handle, inode); 1157ac27a0ecSDave Kleikamp if (!ret) 1158ac27a0ecSDave Kleikamp ret = ret2; 1159ac27a0ecSDave Kleikamp } 1160bfc1af65SNick Piggin 1161cf108bcaSJan Kara unlock_page(page); 1162f8514083SAneesh Kumar K.V page_cache_release(page); 1163ffacfa7aSJan Kara if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1164f8514083SAneesh Kumar K.V /* if we have allocated more blocks and copied 1165f8514083SAneesh Kumar K.V * less. We will have blocks allocated outside 1166f8514083SAneesh Kumar K.V * inode->i_size. So truncate them 1167f8514083SAneesh Kumar K.V */ 1168f8514083SAneesh Kumar K.V ext4_orphan_add(handle, inode); 1169f8514083SAneesh Kumar K.V 1170617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1171ac27a0ecSDave Kleikamp if (!ret) 1172ac27a0ecSDave Kleikamp ret = ret2; 1173f8514083SAneesh Kumar K.V if (pos + len > inode->i_size) { 1174b9a4207dSJan Kara ext4_truncate_failed_write(inode); 1175f8514083SAneesh Kumar K.V /* 1176ffacfa7aSJan Kara * If truncate failed early the inode might still be 1177f8514083SAneesh Kumar K.V * on the orphan list; we need to make sure the inode 1178f8514083SAneesh Kumar K.V * is removed from the orphan list in that case. 
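		 *
		 * Illustrative sketch (hypothetical numbers, not taken
		 * from the code above): a write at pos = 100 with
		 * len = 3000 that copies only 1000 bytes leaves i_size
		 * at 1100 while a whole 4k block may already be
		 * allocated past it; the earlier ext4_orphan_add()
		 * guarantees that block is reclaimed even if we crash
		 * before ext4_truncate_failed_write() completes.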
1179f8514083SAneesh Kumar K.V 		 */
1180f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1181f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1182f8514083SAneesh Kumar K.V 	}
1183bfc1af65SNick Piggin 
1184bfc1af65SNick Piggin 	return ret ? ret : copied;
1185ac27a0ecSDave Kleikamp }
1186d2a17637SMingming Cao 
11879d0be502STheodore Ts'o /*
11887b415bf6SAditya Kali  * Reserve a single cluster located at lblock
11899d0be502STheodore Ts'o  */
119001f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1191d2a17637SMingming Cao {
1192030ba6bcSAneesh Kumar K.V 	int retries = 0;
1193d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
11940637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
11957b415bf6SAditya Kali 	unsigned int md_needed;
11965dd4056dSChristoph Hellwig 	int ret;
119703179fe9STheodore Ts'o 	ext4_lblk_t save_last_lblock;
119803179fe9STheodore Ts'o 	int save_len;
1199d2a17637SMingming Cao 
120060e58e0fSMingming Cao 	/*
120172b8ab9dSEric Sandeen 	 * We will charge metadata quota at writeout time; this saves
120272b8ab9dSEric Sandeen 	 * us from metadata over-estimation, though we may go over by
120372b8ab9dSEric Sandeen 	 * a small amount in the end. Here we just reserve for data.
120460e58e0fSMingming Cao 	 */
12057b415bf6SAditya Kali 	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
12065dd4056dSChristoph Hellwig 	if (ret)
12075dd4056dSChristoph Hellwig 		return ret;
120803179fe9STheodore Ts'o 
120903179fe9STheodore Ts'o 	/*
121003179fe9STheodore Ts'o 	 * Recalculate the amount of metadata blocks to reserve in
121103179fe9STheodore Ts'o 	 * order to allocate nrblocks; the worst case is one extent
121203179fe9STheodore Ts'o 	 * per block.
121303179fe9STheodore Ts'o 	 */
121403179fe9STheodore Ts'o repeat:
121503179fe9STheodore Ts'o 	spin_lock(&ei->i_block_reservation_lock);
121603179fe9STheodore Ts'o 	/*
121703179fe9STheodore Ts'o 	 * ext4_calc_metadata_amount() has side effects, which we have
121803179fe9STheodore Ts'o 	 * to be prepared to undo if we fail to claim space.
121903179fe9STheodore Ts'o 	 */
122003179fe9STheodore Ts'o 	save_len = ei->i_da_metadata_calc_len;
122103179fe9STheodore Ts'o 	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
122203179fe9STheodore Ts'o 	md_needed = EXT4_NUM_B2C(sbi,
122303179fe9STheodore Ts'o 				 ext4_calc_metadata_amount(inode, lblock));
122403179fe9STheodore Ts'o 	trace_ext4_da_reserve_space(inode, md_needed);
122503179fe9STheodore Ts'o 
122672b8ab9dSEric Sandeen 	/*
122772b8ab9dSEric Sandeen 	 * We do still charge estimated metadata to the sb though;
122872b8ab9dSEric Sandeen 	 * we cannot afford to run out of free blocks.
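	 *
	 * A hedged sketch of the accounting (illustrative numbers):
	 * for one data cluster plus an estimate of md_needed = 2
	 * metadata clusters, the claim below asks the superblock for
	 * md_needed + 1 = 3 clusters, and on failure restores the two
	 * i_da_metadata_calc_* fields saved above before retrying.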
122972b8ab9dSEric Sandeen */ 1230e7d5f315STheodore Ts'o if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) { 123103179fe9STheodore Ts'o ei->i_da_metadata_calc_len = save_len; 123203179fe9STheodore Ts'o ei->i_da_metadata_calc_last_lblock = save_last_lblock; 123303179fe9STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 1234030ba6bcSAneesh Kumar K.V if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1235030ba6bcSAneesh Kumar K.V yield(); 1236030ba6bcSAneesh Kumar K.V goto repeat; 1237030ba6bcSAneesh Kumar K.V } 123803179fe9STheodore Ts'o dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1239d2a17637SMingming Cao return -ENOSPC; 1240d2a17637SMingming Cao } 12419d0be502STheodore Ts'o ei->i_reserved_data_blocks++; 12420637c6f4STheodore Ts'o ei->i_reserved_meta_blocks += md_needed; 12430637c6f4STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 124439bc680aSDmitry Monakhov 1245d2a17637SMingming Cao return 0; /* success */ 1246d2a17637SMingming Cao } 1247d2a17637SMingming Cao 124812219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1249d2a17637SMingming Cao { 1250d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 12510637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 1252d2a17637SMingming Cao 1253cd213226SMingming Cao if (!to_free) 1254cd213226SMingming Cao return; /* Nothing to release, exit */ 1255cd213226SMingming Cao 1256d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1257cd213226SMingming Cao 12585a58ec87SLi Zefan trace_ext4_da_release_space(inode, to_free); 12590637c6f4STheodore Ts'o if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1260cd213226SMingming Cao /* 12610637c6f4STheodore Ts'o * if there aren't enough reserved blocks, then the 12620637c6f4STheodore Ts'o * counter is messed up somewhere. Since this 12630637c6f4STheodore Ts'o * function is called from invalidate page, it's 12640637c6f4STheodore Ts'o * harmless to return without any action. 1265cd213226SMingming Cao */ 1266*8de5c325STheodore Ts'o ext4_warning(inode->i_sb, "ext4_da_release_space: " 12670637c6f4STheodore Ts'o "ino %lu, to_free %d with only %d reserved " 12681084f252STheodore Ts'o "data blocks", inode->i_ino, to_free, 12690637c6f4STheodore Ts'o ei->i_reserved_data_blocks); 12700637c6f4STheodore Ts'o WARN_ON(1); 12710637c6f4STheodore Ts'o to_free = ei->i_reserved_data_blocks; 12720637c6f4STheodore Ts'o } 12730637c6f4STheodore Ts'o ei->i_reserved_data_blocks -= to_free; 12740637c6f4STheodore Ts'o 12750637c6f4STheodore Ts'o if (ei->i_reserved_data_blocks == 0) { 12760637c6f4STheodore Ts'o /* 12770637c6f4STheodore Ts'o * We can release all of the reserved metadata blocks 12780637c6f4STheodore Ts'o * only when we have written all of the delayed 12790637c6f4STheodore Ts'o * allocation blocks. 12807b415bf6SAditya Kali * Note that in case of bigalloc, i_reserved_meta_blocks, 12817b415bf6SAditya Kali * i_reserved_data_blocks, etc. refer to number of clusters. 
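		 *
		 * For illustration (assumed geometry, not derived from
		 * this function): with bigalloc and s_cluster_bits = 4,
		 * i.e. 16 blocks per cluster, EXT4_NUM_B2C(sbi, 16) == 1,
		 * so freeing sixteen 4k blocks shows up here as a single
		 * cluster-sized unit.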
12820637c6f4STheodore Ts'o */ 128357042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 128472b8ab9dSEric Sandeen ei->i_reserved_meta_blocks); 1285ee5f4d9cSTheodore Ts'o ei->i_reserved_meta_blocks = 0; 12869d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 1287cd213226SMingming Cao } 1288cd213226SMingming Cao 128972b8ab9dSEric Sandeen /* update fs dirty data blocks counter */ 129057042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); 1291d2a17637SMingming Cao 1292d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 129360e58e0fSMingming Cao 12947b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); 1295d2a17637SMingming Cao } 1296d2a17637SMingming Cao 1297d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1298d2a17637SMingming Cao unsigned long offset) 1299d2a17637SMingming Cao { 1300d2a17637SMingming Cao int to_release = 0; 1301d2a17637SMingming Cao struct buffer_head *head, *bh; 1302d2a17637SMingming Cao unsigned int curr_off = 0; 13037b415bf6SAditya Kali struct inode *inode = page->mapping->host; 13047b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 13057b415bf6SAditya Kali int num_clusters; 130651865fdaSZheng Liu ext4_fsblk_t lblk; 1307d2a17637SMingming Cao 1308d2a17637SMingming Cao head = page_buffers(page); 1309d2a17637SMingming Cao bh = head; 1310d2a17637SMingming Cao do { 1311d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1312d2a17637SMingming Cao 1313d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1314d2a17637SMingming Cao to_release++; 1315d2a17637SMingming Cao clear_buffer_delay(bh); 1316d2a17637SMingming Cao } 1317d2a17637SMingming Cao curr_off = next_off; 1318d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 13197b415bf6SAditya Kali 132051865fdaSZheng Liu if (to_release) { 132151865fdaSZheng Liu lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 132251865fdaSZheng Liu ext4_es_remove_extent(inode, lblk, to_release); 132351865fdaSZheng Liu } 132451865fdaSZheng Liu 13257b415bf6SAditya Kali /* If we have released all the blocks belonging to a cluster, then we 13267b415bf6SAditya Kali * need to release the reserved space for that cluster. 
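	 *
	 * Sketch with assumed geometry: with 1k blocks and a cluster
	 * ratio of 4, a fully released 4k page yields to_release = 4
	 * and num_clusters = 1; the loop below then only calls
	 * ext4_da_release_space() if no other delalloc block still
	 * lives in that cluster.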
	 */
13277b415bf6SAditya Kali 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
13287b415bf6SAditya Kali 	while (num_clusters > 0) {
13297b415bf6SAditya Kali 		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
13307b415bf6SAditya Kali 			((num_clusters - 1) << sbi->s_cluster_bits);
13317b415bf6SAditya Kali 		if (sbi->s_cluster_ratio == 1 ||
13327d1b1fbcSZheng Liu 		    !ext4_find_delalloc_cluster(inode, lblk))
13337b415bf6SAditya Kali 			ext4_da_release_space(inode, 1);
13347b415bf6SAditya Kali 
13357b415bf6SAditya Kali 		num_clusters--;
13367b415bf6SAditya Kali 	}
1337d2a17637SMingming Cao }
1338ac27a0ecSDave Kleikamp 
1339ac27a0ecSDave Kleikamp /*
134064769240SAlex Tomas  * Delayed allocation stuff
134164769240SAlex Tomas  */
134264769240SAlex Tomas 
134364769240SAlex Tomas /*
134464769240SAlex Tomas  * mpage_da_submit_io - walks through the extent of pages and tries to
1345a1d6cc56SAneesh Kumar K.V  * write them with the writepage() callback
134664769240SAlex Tomas  *
134764769240SAlex Tomas  * @mpd->inode: inode
134864769240SAlex Tomas  * @mpd->first_page: first page of the extent
134964769240SAlex Tomas  * @mpd->next_page: page after the last page of the extent
135064769240SAlex Tomas  *
135164769240SAlex Tomas  * By the time mpage_da_submit_io() is called we expect all blocks
135264769240SAlex Tomas  * to be allocated. This may be wrong if allocation failed.
135364769240SAlex Tomas  *
135464769240SAlex Tomas  * As pages are already locked by write_cache_pages(), we can't use it.
135564769240SAlex Tomas  */
13561de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd,
13571de3e3dfSTheodore Ts'o 			      struct ext4_map_blocks *map)
135864769240SAlex Tomas {
1359791b7f08SAneesh Kumar K.V 	struct pagevec pvec;
1360791b7f08SAneesh Kumar K.V 	unsigned long index, end;
1361791b7f08SAneesh Kumar K.V 	int ret = 0, err, nr_pages, i;
1362791b7f08SAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1363791b7f08SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
1364cb20d518STheodore Ts'o 	loff_t size = i_size_read(inode);
13653ecdb3a1STheodore Ts'o 	unsigned int len, block_start;
13663ecdb3a1STheodore Ts'o 	struct buffer_head *bh, *page_bufs = NULL;
13671de3e3dfSTheodore Ts'o 	sector_t pblock = 0, cur_logical = 0;
1368bd2d0210STheodore Ts'o 	struct ext4_io_submit io_submit;
136964769240SAlex Tomas 
137064769240SAlex Tomas 	BUG_ON(mpd->next_page <= mpd->first_page);
1371bd2d0210STheodore Ts'o 	memset(&io_submit, 0, sizeof(io_submit));
1372791b7f08SAneesh Kumar K.V 	/*
1373791b7f08SAneesh Kumar K.V 	 * We need to start from the first_page to the next_page - 1
1374791b7f08SAneesh Kumar K.V 	 * to make sure we also write the mapped dirty buffer_heads.
13758dc207c0STheodore Ts'o 	 * If we look at mpd->b_blocknr we would only be looking
1376791b7f08SAneesh Kumar K.V 	 * at the currently mapped buffer_heads.
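	 *
	 * Illustrative walk (hypothetical extent): with first_page = 10
	 * and next_page = 14, the loop below visits page indexes 10..13
	 * via pagevec_lookup(), even if only part of that range ended
	 * up inside the mapped extent described by @map.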
1377791b7f08SAneesh Kumar K.V */ 137864769240SAlex Tomas index = mpd->first_page; 137964769240SAlex Tomas end = mpd->next_page - 1; 138064769240SAlex Tomas 1381791b7f08SAneesh Kumar K.V pagevec_init(&pvec, 0); 138264769240SAlex Tomas while (index <= end) { 1383791b7f08SAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 138464769240SAlex Tomas if (nr_pages == 0) 138564769240SAlex Tomas break; 138664769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 1387f8bec370SJan Kara int skip_page = 0; 138864769240SAlex Tomas struct page *page = pvec.pages[i]; 138964769240SAlex Tomas 1390791b7f08SAneesh Kumar K.V index = page->index; 1391791b7f08SAneesh Kumar K.V if (index > end) 1392791b7f08SAneesh Kumar K.V break; 1393cb20d518STheodore Ts'o 1394cb20d518STheodore Ts'o if (index == size >> PAGE_CACHE_SHIFT) 1395cb20d518STheodore Ts'o len = size & ~PAGE_CACHE_MASK; 1396cb20d518STheodore Ts'o else 1397cb20d518STheodore Ts'o len = PAGE_CACHE_SIZE; 13981de3e3dfSTheodore Ts'o if (map) { 13991de3e3dfSTheodore Ts'o cur_logical = index << (PAGE_CACHE_SHIFT - 14001de3e3dfSTheodore Ts'o inode->i_blkbits); 14011de3e3dfSTheodore Ts'o pblock = map->m_pblk + (cur_logical - 14021de3e3dfSTheodore Ts'o map->m_lblk); 14031de3e3dfSTheodore Ts'o } 1404791b7f08SAneesh Kumar K.V index++; 1405791b7f08SAneesh Kumar K.V 1406791b7f08SAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1407791b7f08SAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1408791b7f08SAneesh Kumar K.V 14093ecdb3a1STheodore Ts'o bh = page_bufs = page_buffers(page); 14103ecdb3a1STheodore Ts'o block_start = 0; 14113ecdb3a1STheodore Ts'o do { 14121de3e3dfSTheodore Ts'o if (map && (cur_logical >= map->m_lblk) && 14131de3e3dfSTheodore Ts'o (cur_logical <= (map->m_lblk + 14141de3e3dfSTheodore Ts'o (map->m_len - 1)))) { 14151de3e3dfSTheodore Ts'o if (buffer_delay(bh)) { 14161de3e3dfSTheodore Ts'o clear_buffer_delay(bh); 14171de3e3dfSTheodore Ts'o bh->b_blocknr = pblock; 14181de3e3dfSTheodore Ts'o } 14191de3e3dfSTheodore Ts'o if (buffer_unwritten(bh) || 14201de3e3dfSTheodore Ts'o buffer_mapped(bh)) 14211de3e3dfSTheodore Ts'o BUG_ON(bh->b_blocknr != pblock); 14221de3e3dfSTheodore Ts'o if (map->m_flags & EXT4_MAP_UNINIT) 14231de3e3dfSTheodore Ts'o set_buffer_uninit(bh); 14241de3e3dfSTheodore Ts'o clear_buffer_unwritten(bh); 14251de3e3dfSTheodore Ts'o } 14261de3e3dfSTheodore Ts'o 142713a79a47SYongqiang Yang /* 142813a79a47SYongqiang Yang * skip page if block allocation undone and 142913a79a47SYongqiang Yang * block is dirty 143013a79a47SYongqiang Yang */ 143113a79a47SYongqiang Yang if (ext4_bh_delay_or_unwritten(NULL, bh)) 143297498956STheodore Ts'o skip_page = 1; 14333ecdb3a1STheodore Ts'o bh = bh->b_this_page; 14343ecdb3a1STheodore Ts'o block_start += bh->b_size; 14351de3e3dfSTheodore Ts'o cur_logical++; 14361de3e3dfSTheodore Ts'o pblock++; 14371de3e3dfSTheodore Ts'o } while (bh != page_bufs); 14381de3e3dfSTheodore Ts'o 1439f8bec370SJan Kara if (skip_page) { 1440f8bec370SJan Kara unlock_page(page); 1441f8bec370SJan Kara continue; 1442f8bec370SJan Kara } 1443cb20d518STheodore Ts'o 144497498956STheodore Ts'o clear_page_dirty_for_io(page); 1445fe089c77SJan Kara err = ext4_bio_write_page(&io_submit, page, len, 1446fe089c77SJan Kara mpd->wbc); 1447cb20d518STheodore Ts'o if (!err) 1448a1d6cc56SAneesh Kumar K.V mpd->pages_written++; 144964769240SAlex Tomas /* 145064769240SAlex Tomas * In error case, we have to continue because 145164769240SAlex Tomas * remaining pages are still locked 145264769240SAlex Tomas */ 145364769240SAlex Tomas if (ret == 0) 
145464769240SAlex Tomas ret = err; 145564769240SAlex Tomas } 145664769240SAlex Tomas pagevec_release(&pvec); 145764769240SAlex Tomas } 1458bd2d0210STheodore Ts'o ext4_io_submit(&io_submit); 145964769240SAlex Tomas return ret; 146064769240SAlex Tomas } 146164769240SAlex Tomas 1462c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 1463c4a0c46eSAneesh Kumar K.V { 1464c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1465c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1466c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1467c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1468c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 146951865fdaSZheng Liu ext4_lblk_t start, last; 1470c4a0c46eSAneesh Kumar K.V 1471c7f5938aSCurt Wohlgemuth index = mpd->first_page; 1472c7f5938aSCurt Wohlgemuth end = mpd->next_page - 1; 147351865fdaSZheng Liu 147451865fdaSZheng Liu start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 147551865fdaSZheng Liu last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); 147651865fdaSZheng Liu ext4_es_remove_extent(inode, start, last - start + 1); 147751865fdaSZheng Liu 147866bea92cSEric Sandeen pagevec_init(&pvec, 0); 1479c4a0c46eSAneesh Kumar K.V while (index <= end) { 1480c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1481c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1482c4a0c46eSAneesh Kumar K.V break; 1483c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1484c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 14859b1d0998SJan Kara if (page->index > end) 1486c4a0c46eSAneesh Kumar K.V break; 1487c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1488c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1489c4a0c46eSAneesh Kumar K.V block_invalidatepage(page, 0); 1490c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 1491c4a0c46eSAneesh Kumar K.V unlock_page(page); 1492c4a0c46eSAneesh Kumar K.V } 14939b1d0998SJan Kara index = pvec.pages[nr_pages - 1]->index + 1; 14949b1d0998SJan Kara pagevec_release(&pvec); 1495c4a0c46eSAneesh Kumar K.V } 1496c4a0c46eSAneesh Kumar K.V return; 1497c4a0c46eSAneesh Kumar K.V } 1498c4a0c46eSAneesh Kumar K.V 1499df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1500df22291fSAneesh Kumar K.V { 1501df22291fSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 150292b97816STheodore Ts'o struct super_block *sb = inode->i_sb; 150392b97816STheodore Ts'o 150492b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", 15055dee5437STheodore Ts'o EXT4_C2B(EXT4_SB(inode->i_sb), 15065dee5437STheodore Ts'o ext4_count_free_clusters(inode->i_sb))); 150792b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Free/Dirty block details"); 150892b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "free_blocks=%lld", 150957042651STheodore Ts'o (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 151057042651STheodore Ts'o percpu_counter_sum(&sbi->s_freeclusters_counter))); 151192b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld", 15127b415bf6SAditya Kali (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 15137b415bf6SAditya Kali percpu_counter_sum(&sbi->s_dirtyclusters_counter))); 151492b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Block reservation details"); 151592b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u", 1516df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks); 151792b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u", 
1518df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_meta_blocks); 1519df22291fSAneesh Kumar K.V return; 1520df22291fSAneesh Kumar K.V } 1521df22291fSAneesh Kumar K.V 1522b920c755STheodore Ts'o /* 15235a87b7a5STheodore Ts'o * mpage_da_map_and_submit - go through given space, map them 15245a87b7a5STheodore Ts'o * if necessary, and then submit them for I/O 152564769240SAlex Tomas * 15268dc207c0STheodore Ts'o * @mpd - bh describing space 152764769240SAlex Tomas * 152864769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 152964769240SAlex Tomas * 153064769240SAlex Tomas */ 15315a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd) 153264769240SAlex Tomas { 15332ac3b6e0STheodore Ts'o int err, blks, get_blocks_flags; 15341de3e3dfSTheodore Ts'o struct ext4_map_blocks map, *mapp = NULL; 15352fa3cdfbSTheodore Ts'o sector_t next = mpd->b_blocknr; 15362fa3cdfbSTheodore Ts'o unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 15372fa3cdfbSTheodore Ts'o loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 15382fa3cdfbSTheodore Ts'o handle_t *handle = NULL; 153964769240SAlex Tomas 154064769240SAlex Tomas /* 15415a87b7a5STheodore Ts'o * If the blocks are mapped already, or we couldn't accumulate 15425a87b7a5STheodore Ts'o * any blocks, then proceed immediately to the submission stage. 154364769240SAlex Tomas */ 15445a87b7a5STheodore Ts'o if ((mpd->b_size == 0) || 15455a87b7a5STheodore Ts'o ((mpd->b_state & (1 << BH_Mapped)) && 154629fa89d0SAneesh Kumar K.V !(mpd->b_state & (1 << BH_Delay)) && 15475a87b7a5STheodore Ts'o !(mpd->b_state & (1 << BH_Unwritten)))) 15485a87b7a5STheodore Ts'o goto submit_io; 15492fa3cdfbSTheodore Ts'o 15502fa3cdfbSTheodore Ts'o handle = ext4_journal_current_handle(); 15512fa3cdfbSTheodore Ts'o BUG_ON(!handle); 15522fa3cdfbSTheodore Ts'o 155379ffab34SAneesh Kumar K.V /* 155479e83036SEric Sandeen * Call ext4_map_blocks() to allocate any delayed allocation 15552ac3b6e0STheodore Ts'o * blocks, or to convert an uninitialized extent to be 15562ac3b6e0STheodore Ts'o * initialized (in the case where we have written into 15572ac3b6e0STheodore Ts'o * one or more preallocated blocks). 15582ac3b6e0STheodore Ts'o * 15592ac3b6e0STheodore Ts'o * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 15602ac3b6e0STheodore Ts'o * indicate that we are on the delayed allocation path. This 15612ac3b6e0STheodore Ts'o * affects functions in many different parts of the allocation 15622ac3b6e0STheodore Ts'o * call path. This flag exists primarily because we don't 156379e83036SEric Sandeen * want to change *many* call functions, so ext4_map_blocks() 1564f2321097STheodore Ts'o * will set the EXT4_STATE_DELALLOC_RESERVED flag once the 15652ac3b6e0STheodore Ts'o * inode's allocation semaphore is taken. 15662ac3b6e0STheodore Ts'o * 15672ac3b6e0STheodore Ts'o * If the blocks in questions were delalloc blocks, set 15682ac3b6e0STheodore Ts'o * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 15692ac3b6e0STheodore Ts'o * variables are updated after the blocks have been allocated. 
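	 *
	 * A hedged sketch of the flag set assembled below (it mirrors
	 * the code that follows; the flags themselves are real):
	 *
	 *	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	 *	if (ext4_should_dioread_nolock(mpd->inode))
	 *		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	 *	if (mpd->b_state & (1 << BH_Delay))
	 *		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;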
157079ffab34SAneesh Kumar K.V */ 15712ed88685STheodore Ts'o map.m_lblk = next; 15722ed88685STheodore Ts'o map.m_len = max_blocks; 15731296cc85SAneesh Kumar K.V get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 1574744692dcSJiaying Zhang if (ext4_should_dioread_nolock(mpd->inode)) 1575744692dcSJiaying Zhang get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 15762ac3b6e0STheodore Ts'o if (mpd->b_state & (1 << BH_Delay)) 15771296cc85SAneesh Kumar K.V get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 15781296cc85SAneesh Kumar K.V 15792ed88685STheodore Ts'o blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); 15802fa3cdfbSTheodore Ts'o if (blks < 0) { 1581e3570639SEric Sandeen struct super_block *sb = mpd->inode->i_sb; 1582e3570639SEric Sandeen 15832fa3cdfbSTheodore Ts'o err = blks; 1584ed5bde0bSTheodore Ts'o /* 15855a87b7a5STheodore Ts'o * If get block returns EAGAIN or ENOSPC and there 158697498956STheodore Ts'o * appears to be free blocks we will just let 158797498956STheodore Ts'o * mpage_da_submit_io() unlock all of the pages. 1588c4a0c46eSAneesh Kumar K.V */ 1589c4a0c46eSAneesh Kumar K.V if (err == -EAGAIN) 15905a87b7a5STheodore Ts'o goto submit_io; 1591df22291fSAneesh Kumar K.V 15925dee5437STheodore Ts'o if (err == -ENOSPC && ext4_count_free_clusters(sb)) { 1593df22291fSAneesh Kumar K.V mpd->retval = err; 15945a87b7a5STheodore Ts'o goto submit_io; 1595df22291fSAneesh Kumar K.V } 1596df22291fSAneesh Kumar K.V 1597c4a0c46eSAneesh Kumar K.V /* 1598ed5bde0bSTheodore Ts'o * get block failure will cause us to loop in 1599ed5bde0bSTheodore Ts'o * writepages, because a_ops->writepage won't be able 1600ed5bde0bSTheodore Ts'o * to make progress. The page will be redirtied by 1601ed5bde0bSTheodore Ts'o * writepage and writepages will again try to write 1602ed5bde0bSTheodore Ts'o * the same. 1603c4a0c46eSAneesh Kumar K.V */ 1604e3570639SEric Sandeen if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { 1605e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1606e3570639SEric Sandeen "delayed block allocation failed for inode %lu " 1607e3570639SEric Sandeen "at logical offset %llu with max blocks %zd " 1608e3570639SEric Sandeen "with error %d", mpd->inode->i_ino, 1609c4a0c46eSAneesh Kumar K.V (unsigned long long) next, 16108dc207c0STheodore Ts'o mpd->b_size >> mpd->inode->i_blkbits, err); 1611e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1612e3570639SEric Sandeen "This should not happen!! Data will be lost\n"); 1613e3570639SEric Sandeen if (err == -ENOSPC) 1614df22291fSAneesh Kumar K.V ext4_print_free_blocks(mpd->inode); 1615030ba6bcSAneesh Kumar K.V } 16162fa3cdfbSTheodore Ts'o /* invalidate all the pages */ 1617c7f5938aSCurt Wohlgemuth ext4_da_block_invalidatepages(mpd); 1618e0fd9b90SCurt Wohlgemuth 1619e0fd9b90SCurt Wohlgemuth /* Mark this page range as having been completed */ 1620e0fd9b90SCurt Wohlgemuth mpd->io_done = 1; 16215a87b7a5STheodore Ts'o return; 1622c4a0c46eSAneesh Kumar K.V } 16232fa3cdfbSTheodore Ts'o BUG_ON(blks == 0); 16242fa3cdfbSTheodore Ts'o 16251de3e3dfSTheodore Ts'o mapp = ↦ 16262ed88685STheodore Ts'o if (map.m_flags & EXT4_MAP_NEW) { 16272ed88685STheodore Ts'o struct block_device *bdev = mpd->inode->i_sb->s_bdev; 16282ed88685STheodore Ts'o int i; 162964769240SAlex Tomas 16302ed88685STheodore Ts'o for (i = 0; i < map.m_len; i++) 16312ed88685STheodore Ts'o unmap_underlying_metadata(bdev, map.m_pblk + i); 16322fa3cdfbSTheodore Ts'o } 16332fa3cdfbSTheodore Ts'o 16342fa3cdfbSTheodore Ts'o /* 163503f5d8bcSJan Kara * Update on-disk size along with block allocation. 
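	 *
	 * For example (hypothetical values): with 4k blocks, next = 8
	 * and blks = 4 give a candidate disksize of (8 + 4) << 12 =
	 * 49152, which is clamped to i_size and written back only if
	 * it exceeds the current i_disksize.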
16362fa3cdfbSTheodore Ts'o */ 16372fa3cdfbSTheodore Ts'o disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 16382fa3cdfbSTheodore Ts'o if (disksize > i_size_read(mpd->inode)) 16392fa3cdfbSTheodore Ts'o disksize = i_size_read(mpd->inode); 16402fa3cdfbSTheodore Ts'o if (disksize > EXT4_I(mpd->inode)->i_disksize) { 16412fa3cdfbSTheodore Ts'o ext4_update_i_disksize(mpd->inode, disksize); 16425a87b7a5STheodore Ts'o err = ext4_mark_inode_dirty(handle, mpd->inode); 16435a87b7a5STheodore Ts'o if (err) 16445a87b7a5STheodore Ts'o ext4_error(mpd->inode->i_sb, 16455a87b7a5STheodore Ts'o "Failed to mark inode %lu dirty", 16465a87b7a5STheodore Ts'o mpd->inode->i_ino); 16472fa3cdfbSTheodore Ts'o } 16482fa3cdfbSTheodore Ts'o 16495a87b7a5STheodore Ts'o submit_io: 16501de3e3dfSTheodore Ts'o mpage_da_submit_io(mpd, mapp); 16515a87b7a5STheodore Ts'o mpd->io_done = 1; 165264769240SAlex Tomas } 165364769240SAlex Tomas 1654bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 1655bf068ee2SAneesh Kumar K.V (1 << BH_Delay) | (1 << BH_Unwritten)) 165664769240SAlex Tomas 165764769240SAlex Tomas /* 165864769240SAlex Tomas * mpage_add_bh_to_extent - try to add one more block to extent of blocks 165964769240SAlex Tomas * 166064769240SAlex Tomas * @mpd->lbh - extent of blocks 166164769240SAlex Tomas * @logical - logical number of the block in the file 1662b6a8e62fSJan Kara * @b_state - b_state of the buffer head added 166364769240SAlex Tomas * 166464769240SAlex Tomas * the function is used to collect contig. blocks in same state 166564769240SAlex Tomas */ 1666b6a8e62fSJan Kara static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical, 16678dc207c0STheodore Ts'o unsigned long b_state) 166864769240SAlex Tomas { 166964769240SAlex Tomas sector_t next; 1670b6a8e62fSJan Kara int blkbits = mpd->inode->i_blkbits; 1671b6a8e62fSJan Kara int nrblocks = mpd->b_size >> blkbits; 167264769240SAlex Tomas 1673c445e3e0SEric Sandeen /* 1674c445e3e0SEric Sandeen * XXX Don't go larger than mballoc is willing to allocate 1675c445e3e0SEric Sandeen * This is a stopgap solution. We eventually need to fold 1676c445e3e0SEric Sandeen * mpage_da_submit_io() into this function and then call 167779e83036SEric Sandeen * ext4_map_blocks() multiple times in a loop 1678c445e3e0SEric Sandeen */ 1679b6a8e62fSJan Kara if (nrblocks >= (8*1024*1024 >> blkbits)) 1680c445e3e0SEric Sandeen goto flush_it; 1681c445e3e0SEric Sandeen 1682525f4ed8SMingming Cao /* check if the reserved journal credits might overflow */ 1683b6a8e62fSJan Kara if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) { 1684525f4ed8SMingming Cao if (nrblocks >= EXT4_MAX_TRANS_DATA) { 1685525f4ed8SMingming Cao /* 1686525f4ed8SMingming Cao * With non-extent format we are limited by the journal 1687525f4ed8SMingming Cao * credit available. Total credit needed to insert 1688525f4ed8SMingming Cao * nrblocks contiguous blocks is dependent on the 1689525f4ed8SMingming Cao * nrblocks. So limit nrblocks. 
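			 *
			 * Illustrative numbers (assumed 4k block size):
			 * the cap above flushes once the extent reaches
			 * 8*1024*1024 >> 12 = 2048 blocks, and a
			 * non-extent file additionally flushes here once
			 * nrblocks reaches EXT4_MAX_TRANS_DATA.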
1690525f4ed8SMingming Cao 			 */
1691525f4ed8SMingming Cao 			goto flush_it;
1692525f4ed8SMingming Cao 		}
1693525f4ed8SMingming Cao 	}
169464769240SAlex Tomas 	/*
169564769240SAlex Tomas 	 * First block in the extent
169664769240SAlex Tomas 	 */
16978dc207c0STheodore Ts'o 	if (mpd->b_size == 0) {
16988dc207c0STheodore Ts'o 		mpd->b_blocknr = logical;
1699b6a8e62fSJan Kara 		mpd->b_size = 1 << blkbits;
17008dc207c0STheodore Ts'o 		mpd->b_state = b_state & BH_FLAGS;
170164769240SAlex Tomas 		return;
170264769240SAlex Tomas 	}
170364769240SAlex Tomas 
17048dc207c0STheodore Ts'o 	next = mpd->b_blocknr + nrblocks;
170564769240SAlex Tomas 	/*
170664769240SAlex Tomas 	 * Can we merge the block to our big extent?
170764769240SAlex Tomas 	 */
17088dc207c0STheodore Ts'o 	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
1709b6a8e62fSJan Kara 		mpd->b_size += 1 << blkbits;
171064769240SAlex Tomas 		return;
171164769240SAlex Tomas 	}
171264769240SAlex Tomas 
1713525f4ed8SMingming Cao flush_it:
171464769240SAlex Tomas 	/*
171564769240SAlex Tomas 	 * We couldn't merge the block to our extent, so we
171664769240SAlex Tomas 	 * need to flush the current extent and start a new one
171764769240SAlex Tomas 	 */
17185a87b7a5STheodore Ts'o 	mpage_da_map_and_submit(mpd);
1719a1d6cc56SAneesh Kumar K.V 	return;
172064769240SAlex Tomas }
172164769240SAlex Tomas 
1722c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
172329fa89d0SAneesh Kumar K.V {
1724c364b22cSAneesh Kumar K.V 	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
172529fa89d0SAneesh Kumar K.V }
172629fa89d0SAneesh Kumar K.V 
172764769240SAlex Tomas /*
17285356f261SAditya Kali  * This function grabs code from the very beginning of
17295356f261SAditya Kali  * ext4_map_blocks, but assumes that the caller is coming from the
17305356f261SAditya Kali  * delayed write path. This function looks up the requested blocks and
17315356f261SAditya Kali  * sets the buffer delay bit under the protection of i_data_sem.
17325356f261SAditya Kali  */
17335356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
17345356f261SAditya Kali 			      struct ext4_map_blocks *map,
17355356f261SAditya Kali 			      struct buffer_head *bh)
17365356f261SAditya Kali {
17375356f261SAditya Kali 	int retval;
17385356f261SAditya Kali 	sector_t invalid_block = ~((sector_t) 0xffff);
17395356f261SAditya Kali 
17405356f261SAditya Kali 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
17415356f261SAditya Kali 		invalid_block = ~0;
17425356f261SAditya Kali 
17435356f261SAditya Kali 	map->m_flags = 0;
17445356f261SAditya Kali 	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
17455356f261SAditya Kali 		  "logical block %lu\n", inode->i_ino, map->m_len,
17465356f261SAditya Kali 		  (unsigned long) map->m_lblk);
17475356f261SAditya Kali 	/*
17485356f261SAditya Kali 	 * Try to see if we can get the block without requesting a new
17495356f261SAditya Kali 	 * file system block.
17505356f261SAditya Kali 	 */
17515356f261SAditya Kali 	down_read((&EXT4_I(inode)->i_data_sem));
17529c3569b5STao Ma 	if (ext4_has_inline_data(inode)) {
17539c3569b5STao Ma 		/*
17549c3569b5STao Ma 		 * We will soon create blocks for this page, and let
17559c3569b5STao Ma 		 * us pretend as if the blocks aren't allocated yet.
17569c3569b5STao Ma 		 * In case of clusters, we have to handle the work
17579c3569b5STao Ma 		 * of mapping from cluster so that the reserved space
17589c3569b5STao Ma 		 * is calculated properly.
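		 *
		 * Sketch of the bigalloc case handled below (assumed
		 * ratio of 16 blocks per cluster): if another delalloc
		 * block already sits in the target cluster,
		 * EXT4_MAP_FROM_CLUSTER is set and the later
		 * ext4_da_reserve_space() call is skipped, since that
		 * cluster's reservation has already been taken.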
17599c3569b5STao Ma */ 17609c3569b5STao Ma if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) && 17619c3569b5STao Ma ext4_find_delalloc_cluster(inode, map->m_lblk)) 17629c3569b5STao Ma map->m_flags |= EXT4_MAP_FROM_CLUSTER; 17639c3569b5STao Ma retval = 0; 17649c3569b5STao Ma } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 17655356f261SAditya Kali retval = ext4_ext_map_blocks(NULL, inode, map, 0); 17665356f261SAditya Kali else 17675356f261SAditya Kali retval = ext4_ind_map_blocks(NULL, inode, map, 0); 17685356f261SAditya Kali 17695356f261SAditya Kali if (retval == 0) { 17705356f261SAditya Kali /* 17715356f261SAditya Kali * XXX: __block_prepare_write() unmaps passed block, 17725356f261SAditya Kali * is it OK? 17735356f261SAditya Kali */ 17745356f261SAditya Kali /* If the block was allocated from previously allocated cluster, 17755356f261SAditya Kali * then we dont need to reserve it again. */ 17765356f261SAditya Kali if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { 17775356f261SAditya Kali retval = ext4_da_reserve_space(inode, iblock); 17785356f261SAditya Kali if (retval) 17795356f261SAditya Kali /* not enough space to reserve */ 17805356f261SAditya Kali goto out_unlock; 17815356f261SAditya Kali } 17825356f261SAditya Kali 178351865fdaSZheng Liu retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len); 178451865fdaSZheng Liu if (retval) 178551865fdaSZheng Liu goto out_unlock; 178651865fdaSZheng Liu 17875356f261SAditya Kali /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served 17885356f261SAditya Kali * and it should not appear on the bh->b_state. 17895356f261SAditya Kali */ 17905356f261SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 17915356f261SAditya Kali 17925356f261SAditya Kali map_bh(bh, inode->i_sb, invalid_block); 17935356f261SAditya Kali set_buffer_new(bh); 17945356f261SAditya Kali set_buffer_delay(bh); 17955356f261SAditya Kali } 17965356f261SAditya Kali 17975356f261SAditya Kali out_unlock: 17985356f261SAditya Kali up_read((&EXT4_I(inode)->i_data_sem)); 17995356f261SAditya Kali 18005356f261SAditya Kali return retval; 18015356f261SAditya Kali } 18025356f261SAditya Kali 18035356f261SAditya Kali /* 1804b920c755STheodore Ts'o * This is a special get_blocks_t callback which is used by 1805b920c755STheodore Ts'o * ext4_da_write_begin(). It will either return mapped block or 1806b920c755STheodore Ts'o * reserve space for a single block. 180729fa89d0SAneesh Kumar K.V * 180829fa89d0SAneesh Kumar K.V * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 180929fa89d0SAneesh Kumar K.V * We also have b_blocknr = -1 and b_bdev initialized properly 181029fa89d0SAneesh Kumar K.V * 181129fa89d0SAneesh Kumar K.V * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 181229fa89d0SAneesh Kumar K.V * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev 181329fa89d0SAneesh Kumar K.V * initialized properly. 
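 *
 * Summary sketch of the two cases described above:
 *
 *	delayed:   BH_Mapped | BH_New | BH_Delay,     b_blocknr = -1
 *	unwritten: BH_Mapped | BH_New | BH_Unwritten, b_blocknr = phys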
181464769240SAlex Tomas */ 18159c3569b5STao Ma int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 18162ed88685STheodore Ts'o struct buffer_head *bh, int create) 181764769240SAlex Tomas { 18182ed88685STheodore Ts'o struct ext4_map_blocks map; 181964769240SAlex Tomas int ret = 0; 182064769240SAlex Tomas 182164769240SAlex Tomas BUG_ON(create == 0); 18222ed88685STheodore Ts'o BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 18232ed88685STheodore Ts'o 18242ed88685STheodore Ts'o map.m_lblk = iblock; 18252ed88685STheodore Ts'o map.m_len = 1; 182664769240SAlex Tomas 182764769240SAlex Tomas /* 182864769240SAlex Tomas * first, we need to know whether the block is allocated already 182964769240SAlex Tomas * preallocated blocks are unmapped but should treated 183064769240SAlex Tomas * the same as allocated blocks. 183164769240SAlex Tomas */ 18325356f261SAditya Kali ret = ext4_da_map_blocks(inode, iblock, &map, bh); 18335356f261SAditya Kali if (ret <= 0) 18342ed88685STheodore Ts'o return ret; 183564769240SAlex Tomas 18362ed88685STheodore Ts'o map_bh(bh, inode->i_sb, map.m_pblk); 18372ed88685STheodore Ts'o bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 18382ed88685STheodore Ts'o 18392ed88685STheodore Ts'o if (buffer_unwritten(bh)) { 18402ed88685STheodore Ts'o /* A delayed write to unwritten bh should be marked 18412ed88685STheodore Ts'o * new and mapped. Mapped ensures that we don't do 18422ed88685STheodore Ts'o * get_block multiple times when we write to the same 18432ed88685STheodore Ts'o * offset and new ensures that we do proper zero out 18442ed88685STheodore Ts'o * for partial write. 18452ed88685STheodore Ts'o */ 18462ed88685STheodore Ts'o set_buffer_new(bh); 1847c8205636STheodore Ts'o set_buffer_mapped(bh); 18482ed88685STheodore Ts'o } 18492ed88685STheodore Ts'o return 0; 185064769240SAlex Tomas } 185161628a3fSMingming Cao 185262e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh) 185362e086beSAneesh Kumar K.V { 185462e086beSAneesh Kumar K.V get_bh(bh); 185562e086beSAneesh Kumar K.V return 0; 185662e086beSAneesh Kumar K.V } 185762e086beSAneesh Kumar K.V 185862e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh) 185962e086beSAneesh Kumar K.V { 186062e086beSAneesh Kumar K.V put_bh(bh); 186162e086beSAneesh Kumar K.V return 0; 186262e086beSAneesh Kumar K.V } 186362e086beSAneesh Kumar K.V 186462e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page, 186562e086beSAneesh Kumar K.V unsigned int len) 186662e086beSAneesh Kumar K.V { 186762e086beSAneesh Kumar K.V struct address_space *mapping = page->mapping; 186862e086beSAneesh Kumar K.V struct inode *inode = mapping->host; 18693fdcfb66STao Ma struct buffer_head *page_bufs = NULL; 187062e086beSAneesh Kumar K.V handle_t *handle = NULL; 18713fdcfb66STao Ma int ret = 0, err = 0; 18723fdcfb66STao Ma int inline_data = ext4_has_inline_data(inode); 18733fdcfb66STao Ma struct buffer_head *inode_bh = NULL; 187462e086beSAneesh Kumar K.V 1875cb20d518STheodore Ts'o ClearPageChecked(page); 18763fdcfb66STao Ma 18773fdcfb66STao Ma if (inline_data) { 18783fdcfb66STao Ma BUG_ON(page->index != 0); 18793fdcfb66STao Ma BUG_ON(len > ext4_get_max_inline_size(inode)); 18803fdcfb66STao Ma inode_bh = ext4_journalled_write_inline_data(inode, len, page); 18813fdcfb66STao Ma if (inode_bh == NULL) 18823fdcfb66STao Ma goto out; 18833fdcfb66STao Ma } else { 188462e086beSAneesh Kumar K.V page_bufs = page_buffers(page); 18853fdcfb66STao Ma if (!page_bufs) { 
18863fdcfb66STao Ma BUG(); 18873fdcfb66STao Ma goto out; 18883fdcfb66STao Ma } 18893fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 18903fdcfb66STao Ma NULL, bget_one); 18913fdcfb66STao Ma } 189262e086beSAneesh Kumar K.V /* As soon as we unlock the page, it can go away, but we have 189362e086beSAneesh Kumar K.V * references to buffers so we are safe */ 189462e086beSAneesh Kumar K.V unlock_page(page); 189562e086beSAneesh Kumar K.V 18969924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 18979924a92aSTheodore Ts'o ext4_writepage_trans_blocks(inode)); 189862e086beSAneesh Kumar K.V if (IS_ERR(handle)) { 189962e086beSAneesh Kumar K.V ret = PTR_ERR(handle); 190062e086beSAneesh Kumar K.V goto out; 190162e086beSAneesh Kumar K.V } 190262e086beSAneesh Kumar K.V 1903441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1904441c8508SCurt Wohlgemuth 19053fdcfb66STao Ma if (inline_data) { 19063fdcfb66STao Ma ret = ext4_journal_get_write_access(handle, inode_bh); 19073fdcfb66STao Ma 19083fdcfb66STao Ma err = ext4_handle_dirty_metadata(handle, inode, inode_bh); 19093fdcfb66STao Ma 19103fdcfb66STao Ma } else { 1911f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 191262e086beSAneesh Kumar K.V do_journal_get_write_access); 191362e086beSAneesh Kumar K.V 1914f19d5870STao Ma err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 191562e086beSAneesh Kumar K.V write_end_fn); 19163fdcfb66STao Ma } 191762e086beSAneesh Kumar K.V if (ret == 0) 191862e086beSAneesh Kumar K.V ret = err; 19192d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 192062e086beSAneesh Kumar K.V err = ext4_journal_stop(handle); 192162e086beSAneesh Kumar K.V if (!ret) 192262e086beSAneesh Kumar K.V ret = err; 192362e086beSAneesh Kumar K.V 19243fdcfb66STao Ma if (!ext4_has_inline_data(inode)) 19253fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 19263fdcfb66STao Ma NULL, bput_one); 192719f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 192862e086beSAneesh Kumar K.V out: 19293fdcfb66STao Ma brelse(inode_bh); 193062e086beSAneesh Kumar K.V return ret; 193162e086beSAneesh Kumar K.V } 193262e086beSAneesh Kumar K.V 193361628a3fSMingming Cao /* 193443ce1d23SAneesh Kumar K.V * Note that we don't need to start a transaction unless we're journaling data 193543ce1d23SAneesh Kumar K.V * because we should have holes filled from ext4_page_mkwrite(). We even don't 193643ce1d23SAneesh Kumar K.V * need to file the inode to the transaction's list in ordered mode because if 193743ce1d23SAneesh Kumar K.V * we are writing back data added by write(), the inode is already there and if 193843ce1d23SAneesh Kumar K.V * we are writing back data modified via mmap(), no one guarantees in which 193943ce1d23SAneesh Kumar K.V * transaction the data will hit the disk. In case we are journaling data, we 194043ce1d23SAneesh Kumar K.V * cannot start transaction directly because transaction start ranks above page 194143ce1d23SAneesh Kumar K.V * lock so we have to do some magic. 194243ce1d23SAneesh Kumar K.V * 1943b920c755STheodore Ts'o * This function can get called via... 
1944b920c755STheodore Ts'o  *   - ext4_da_writepages after taking page lock (have journal handle)
1945b920c755STheodore Ts'o  *   - journal_submit_inode_data_buffers (no journal handle)
1946f6463b0dSArtem Bityutskiy  *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
1947b920c755STheodore Ts'o  *   - grab_page_cache when doing write_begin (have journal handle)
194843ce1d23SAneesh Kumar K.V  *
194943ce1d23SAneesh Kumar K.V  * We don't do any block allocation in this function. If we have a page with
195043ce1d23SAneesh Kumar K.V  * multiple blocks we need to write those buffer_heads that are mapped. This
195143ce1d23SAneesh Kumar K.V  * is important for mmap-based writes. So if, with a 1K block size, we do
195243ce1d23SAneesh Kumar K.V  *	truncate(f, 1024);
195343ce1d23SAneesh Kumar K.V  *	a = mmap(f, 0, 4096);
195443ce1d23SAneesh Kumar K.V  *	a[0] = 'a';
195543ce1d23SAneesh Kumar K.V  *	truncate(f, 4096);
195643ce1d23SAneesh Kumar K.V  * then the page has its first buffer_head mapped via the page_mkwrite callback
195790802ed9SPaul Bolle  * while the other buffer_heads are unmapped but dirty (dirtied via
195843ce1d23SAneesh Kumar K.V  * do_wp_page). So writepage should write the first block. If we modify
195943ce1d23SAneesh Kumar K.V  * the mmap area beyond 1024 we will again get a page_fault and the
196043ce1d23SAneesh Kumar K.V  * page_mkwrite callback will do the block allocation and mark the
196143ce1d23SAneesh Kumar K.V  * buffer_heads mapped.
196243ce1d23SAneesh Kumar K.V  *
196343ce1d23SAneesh Kumar K.V  * We redirty the page if we have any buffer_heads that are either delay or
196443ce1d23SAneesh Kumar K.V  * unwritten in the page.
196543ce1d23SAneesh Kumar K.V  *
196643ce1d23SAneesh Kumar K.V  * We can get recursively called as shown below.
196743ce1d23SAneesh Kumar K.V  *
196843ce1d23SAneesh Kumar K.V  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
196943ce1d23SAneesh Kumar K.V  *	ext4_writepage()
197043ce1d23SAneesh Kumar K.V  *
197143ce1d23SAneesh Kumar K.V  * But since we don't do any block allocation we should not deadlock.
197243ce1d23SAneesh Kumar K.V  * The page also has the dirty flag cleared, so we don't get a recursive page_lock.
197361628a3fSMingming Cao  */
197443ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page,
197564769240SAlex Tomas 			  struct writeback_control *wbc)
197664769240SAlex Tomas {
1977f8bec370SJan Kara 	int ret = 0;
197861628a3fSMingming Cao 	loff_t size;
1979498e5f24STheodore Ts'o 	unsigned int len;
1980744692dcSJiaying Zhang 	struct buffer_head *page_bufs = NULL;
198161628a3fSMingming Cao 	struct inode *inode = page->mapping->host;
198236ade451SJan Kara 	struct ext4_io_submit io_submit;
198364769240SAlex Tomas 
1984a9c667f8SLukas Czerner 	trace_ext4_writepage(page);
198561628a3fSMingming Cao 	size = i_size_read(inode);
198661628a3fSMingming Cao 	if (page->index == size >> PAGE_CACHE_SHIFT)
198761628a3fSMingming Cao 		len = size & ~PAGE_CACHE_MASK;
198861628a3fSMingming Cao 	else
198961628a3fSMingming Cao 		len = PAGE_CACHE_SIZE;
199061628a3fSMingming Cao 
1991f0e6c985SAneesh Kumar K.V 	page_bufs = page_buffers(page);
1992fe386132SJan Kara 	/*
1993fe386132SJan Kara 	 * We cannot do block allocation or other extent handling in this
1994fe386132SJan Kara 	 * function. If there are buffers needing that, we have to redirty
1995fe386132SJan Kara 	 * the page. But we may reach here when we do a journal commit via
1996fe386132SJan Kara 	 * journal_submit_inode_data_buffers() and in that case we must write
1997fe386132SJan Kara 	 * allocated buffers to achieve data=ordered mode guarantees.
1998fe386132SJan Kara 	 */
1999f19d5870STao Ma 	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2000c364b22cSAneesh Kumar K.V 				   ext4_bh_delay_or_unwritten)) {
2001f8bec370SJan Kara 		redirty_page_for_writepage(wbc, page);
2002fe386132SJan Kara 		if (current->flags & PF_MEMALLOC) {
2003fe386132SJan Kara 			/*
2004fe386132SJan Kara 			 * For memory cleaning there's no point in writing only
2005fe386132SJan Kara 			 * some buffers. So just bail out. Warn if we came here
2006fe386132SJan Kara 			 * from direct reclaim.
2007fe386132SJan Kara 			 */
2008fe386132SJan Kara 			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
2009fe386132SJan Kara 				     == PF_MEMALLOC);
2010f8bec370SJan Kara 			unlock_page(page);
2011f8bec370SJan Kara 			return 0;
2012f0e6c985SAneesh Kumar K.V 		}
2013fe386132SJan Kara 	}
201464769240SAlex Tomas 
2015cb20d518STheodore Ts'o 	if (PageChecked(page) && ext4_should_journal_data(inode))
201643ce1d23SAneesh Kumar K.V 		/*
201743ce1d23SAneesh Kumar K.V 		 * It's mmapped pagecache. Add buffers and journal it. There
201843ce1d23SAneesh Kumar K.V 		 * doesn't seem much point in redirtying the page here.
201943ce1d23SAneesh Kumar K.V 		 */
20203f0ca309SWu Fengguang 		return __ext4_journalled_writepage(page, len);
202143ce1d23SAneesh Kumar K.V 
202236ade451SJan Kara 	memset(&io_submit, 0, sizeof(io_submit));
202336ade451SJan Kara 	ret = ext4_bio_write_page(&io_submit, page, len, wbc);
202436ade451SJan Kara 	ext4_io_submit(&io_submit);
202564769240SAlex Tomas 	return ret;
202664769240SAlex Tomas }
202764769240SAlex Tomas 
202861628a3fSMingming Cao /*
2029525f4ed8SMingming Cao  * This is called via ext4_da_writepages() to
203025985edcSLucas De Marchi  * calculate the total number of credits to reserve to fit
2031525f4ed8SMingming Cao  * a single extent allocation into a single transaction;
2032525f4ed8SMingming Cao  * ext4_da_writepages() will loop calling this before
2033525f4ed8SMingming Cao  * the block allocation.
203461628a3fSMingming Cao  */
2035525f4ed8SMingming Cao 
2036525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode)
2037525f4ed8SMingming Cao {
2038525f4ed8SMingming Cao 	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2039525f4ed8SMingming Cao 
2040525f4ed8SMingming Cao 	/*
2041525f4ed8SMingming Cao 	 * With non-extent format the journal credit needed to
2042525f4ed8SMingming Cao 	 * insert nrblocks contiguous blocks depends on the
2043525f4ed8SMingming Cao 	 * number of contiguous blocks. So we will limit
2044525f4ed8SMingming Cao 	 * the number of contiguous blocks to a sane value.
2045525f4ed8SMingming Cao 	 */
204612e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2047525f4ed8SMingming Cao 	    (max_blocks > EXT4_MAX_TRANS_DATA))
2048525f4ed8SMingming Cao 		max_blocks = EXT4_MAX_TRANS_DATA;
2049525f4ed8SMingming Cao 
2050525f4ed8SMingming Cao 	return ext4_chunk_trans_blocks(inode, max_blocks);
2051525f4ed8SMingming Cao }
205261628a3fSMingming Cao 
20538e48dcfbSTheodore Ts'o /*
20548e48dcfbSTheodore Ts'o  * write_cache_pages_da - walk the list of dirty pages of the given
20558eb9e5ceSTheodore Ts'o  * address space and accumulate pages that need writing, and call
2056168fc022STheodore Ts'o  * mpage_da_map_and_submit to map a single contiguous memory region
2057168fc022STheodore Ts'o  * and then write them.
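 *
 * Rough pipeline of one pass (illustrative, not a call graph):
 *
 *	collect pages tagged DIRTY (or TOWRITE for integrity sync)
 *	  -> mpage_add_bh_to_extent() per delay/unwritten buffer
 *	  -> mpage_da_map_and_submit() once the extent cannot grow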
20588e48dcfbSTheodore Ts'o */ 20599c3569b5STao Ma static int write_cache_pages_da(handle_t *handle, 20609c3569b5STao Ma struct address_space *mapping, 20618e48dcfbSTheodore Ts'o struct writeback_control *wbc, 206272f84e65SEric Sandeen struct mpage_da_data *mpd, 206372f84e65SEric Sandeen pgoff_t *done_index) 20648e48dcfbSTheodore Ts'o { 20658eb9e5ceSTheodore Ts'o struct buffer_head *bh, *head; 2066168fc022STheodore Ts'o struct inode *inode = mapping->host; 20678e48dcfbSTheodore Ts'o struct pagevec pvec; 20684f01b02cSTheodore Ts'o unsigned int nr_pages; 20694f01b02cSTheodore Ts'o sector_t logical; 20704f01b02cSTheodore Ts'o pgoff_t index, end; 20718e48dcfbSTheodore Ts'o long nr_to_write = wbc->nr_to_write; 20724f01b02cSTheodore Ts'o int i, tag, ret = 0; 20738e48dcfbSTheodore Ts'o 2074168fc022STheodore Ts'o memset(mpd, 0, sizeof(struct mpage_da_data)); 2075168fc022STheodore Ts'o mpd->wbc = wbc; 2076168fc022STheodore Ts'o mpd->inode = inode; 20778e48dcfbSTheodore Ts'o pagevec_init(&pvec, 0); 20788e48dcfbSTheodore Ts'o index = wbc->range_start >> PAGE_CACHE_SHIFT; 20798e48dcfbSTheodore Ts'o end = wbc->range_end >> PAGE_CACHE_SHIFT; 20808e48dcfbSTheodore Ts'o 20816e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 20825b41d924SEric Sandeen tag = PAGECACHE_TAG_TOWRITE; 20835b41d924SEric Sandeen else 20845b41d924SEric Sandeen tag = PAGECACHE_TAG_DIRTY; 20855b41d924SEric Sandeen 208672f84e65SEric Sandeen *done_index = index; 20874f01b02cSTheodore Ts'o while (index <= end) { 20885b41d924SEric Sandeen nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 20898e48dcfbSTheodore Ts'o min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 20908e48dcfbSTheodore Ts'o if (nr_pages == 0) 20914f01b02cSTheodore Ts'o return 0; 20928e48dcfbSTheodore Ts'o 20938e48dcfbSTheodore Ts'o for (i = 0; i < nr_pages; i++) { 20948e48dcfbSTheodore Ts'o struct page *page = pvec.pages[i]; 20958e48dcfbSTheodore Ts'o 20968e48dcfbSTheodore Ts'o /* 20978e48dcfbSTheodore Ts'o * At this point, the page may be truncated or 20988e48dcfbSTheodore Ts'o * invalidated (changing page->mapping to NULL), or 20998e48dcfbSTheodore Ts'o * even swizzled back from swapper_space to tmpfs file 21008e48dcfbSTheodore Ts'o * mapping. However, page->index will not change 21018e48dcfbSTheodore Ts'o * because we have a reference on the page. 
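			 *
			 * Hence the re-checks after lock_page() below:
			 * an index beyond the end of the range stops
			 * the scan, while a page whose mapping changed,
			 * or that was cleaned while we waited for the
			 * lock, is simply skipped.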
21028e48dcfbSTheodore Ts'o 			 */
21034f01b02cSTheodore Ts'o 			if (page->index > end)
21044f01b02cSTheodore Ts'o 				goto out;
21058e48dcfbSTheodore Ts'o 
210672f84e65SEric Sandeen 			*done_index = page->index + 1;
210772f84e65SEric Sandeen 
210878aaced3STheodore Ts'o 			/*
210978aaced3STheodore Ts'o 			 * If we can't merge this page, and we have
211078aaced3STheodore Ts'o 			 * accumulated a contiguous region, write it
211178aaced3STheodore Ts'o 			 */
211278aaced3STheodore Ts'o 			if ((mpd->next_page != page->index) &&
211378aaced3STheodore Ts'o 			    (mpd->next_page != mpd->first_page)) {
211478aaced3STheodore Ts'o 				mpage_da_map_and_submit(mpd);
211578aaced3STheodore Ts'o 				goto ret_extent_tail;
211678aaced3STheodore Ts'o 			}
211778aaced3STheodore Ts'o 
21188e48dcfbSTheodore Ts'o 			lock_page(page);
21198e48dcfbSTheodore Ts'o 
21208e48dcfbSTheodore Ts'o 			/*
21214f01b02cSTheodore Ts'o 			 * If the page is no longer dirty, or its
21224f01b02cSTheodore Ts'o 			 * mapping no longer corresponds to the inode
21234f01b02cSTheodore Ts'o 			 * we are writing (which means it has been
21244f01b02cSTheodore Ts'o 			 * truncated or invalidated), or the page is
21254f01b02cSTheodore Ts'o 			 * already under writeback and we are not
21264f01b02cSTheodore Ts'o 			 * doing a data integrity writeback, skip the page
21278e48dcfbSTheodore Ts'o 			 */
21284f01b02cSTheodore Ts'o 			if (!PageDirty(page) ||
21294f01b02cSTheodore Ts'o 			    (PageWriteback(page) &&
21304f01b02cSTheodore Ts'o 			     (wbc->sync_mode == WB_SYNC_NONE)) ||
21314f01b02cSTheodore Ts'o 			    unlikely(page->mapping != mapping)) {
21328e48dcfbSTheodore Ts'o 				unlock_page(page);
21338e48dcfbSTheodore Ts'o 				continue;
21348e48dcfbSTheodore Ts'o 			}
21358e48dcfbSTheodore Ts'o 
21368e48dcfbSTheodore Ts'o 			wait_on_page_writeback(page);
21378e48dcfbSTheodore Ts'o 			BUG_ON(PageWriteback(page));
21388e48dcfbSTheodore Ts'o 
21399c3569b5STao Ma 			/*
21409c3569b5STao Ma 			 * If we have inline data and arrive here, it means that
21419c3569b5STao Ma 			 * we will soon create the block for the 1st page, so
21429c3569b5STao Ma 			 * we'd better clear the inline data here.
21439c3569b5STao Ma 			 */
21449c3569b5STao Ma 			if (ext4_has_inline_data(inode)) {
21459c3569b5STao Ma 				BUG_ON(ext4_test_inode_state(inode,
21469c3569b5STao Ma 						EXT4_STATE_MAY_INLINE_DATA));
21479c3569b5STao Ma 				ext4_destroy_inline_data(handle, inode);
21489c3569b5STao Ma 			}
21499c3569b5STao Ma 
2150168fc022STheodore Ts'o 			if (mpd->next_page != page->index)
21518eb9e5ceSTheodore Ts'o 				mpd->first_page = page->index;
21528eb9e5ceSTheodore Ts'o 			mpd->next_page = page->index + 1;
21538eb9e5ceSTheodore Ts'o 			logical = (sector_t) page->index <<
21548eb9e5ceSTheodore Ts'o 				(PAGE_CACHE_SHIFT - inode->i_blkbits);
21558eb9e5ceSTheodore Ts'o 
2156f8bec370SJan Kara 			/* Add all dirty buffers to mpd */
21578eb9e5ceSTheodore Ts'o 			head = page_buffers(page);
21588eb9e5ceSTheodore Ts'o 			bh = head;
21598eb9e5ceSTheodore Ts'o 			do {
21608eb9e5ceSTheodore Ts'o 				BUG_ON(buffer_locked(bh));
21618eb9e5ceSTheodore Ts'o 				/*
2162f8bec370SJan Kara 				 * We need to try to allocate unmapped blocks
2163f8bec370SJan Kara 				 * in the same page.
Otherwise we won't make 2164f8bec370SJan Kara * progress with the page in ext4_writepage 21658eb9e5ceSTheodore Ts'o */ 21668eb9e5ceSTheodore Ts'o if (ext4_bh_delay_or_unwritten(NULL, bh)) { 21678eb9e5ceSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical, 21688eb9e5ceSTheodore Ts'o bh->b_state); 21694f01b02cSTheodore Ts'o if (mpd->io_done) 21704f01b02cSTheodore Ts'o goto ret_extent_tail; 2171f8bec370SJan Kara } else if (buffer_dirty(bh) && 2172f8bec370SJan Kara buffer_mapped(bh)) { 21738eb9e5ceSTheodore Ts'o /* 2174f8bec370SJan Kara * mapped dirty buffer. We need to 2175f8bec370SJan Kara * update the b_state because we look 2176f8bec370SJan Kara * at b_state in mpage_da_map_blocks. 2177f8bec370SJan Kara * We don't update b_size because if we 2178f8bec370SJan Kara * find an unmapped buffer_head later 2179f8bec370SJan Kara * we need to use the b_state flag of 2180f8bec370SJan Kara * that buffer_head. 21818eb9e5ceSTheodore Ts'o */ 21828eb9e5ceSTheodore Ts'o if (mpd->b_size == 0) 2183f8bec370SJan Kara mpd->b_state = 2184f8bec370SJan Kara bh->b_state & BH_FLAGS; 21858e48dcfbSTheodore Ts'o } 21868eb9e5ceSTheodore Ts'o logical++; 21878eb9e5ceSTheodore Ts'o } while ((bh = bh->b_this_page) != head); 21888e48dcfbSTheodore Ts'o 21898e48dcfbSTheodore Ts'o if (nr_to_write > 0) { 21908e48dcfbSTheodore Ts'o nr_to_write--; 21918e48dcfbSTheodore Ts'o if (nr_to_write == 0 && 21924f01b02cSTheodore Ts'o wbc->sync_mode == WB_SYNC_NONE) 21938e48dcfbSTheodore Ts'o /* 21948e48dcfbSTheodore Ts'o * We stop writing back only if we are 21958e48dcfbSTheodore Ts'o * not doing integrity sync. In case of 21968e48dcfbSTheodore Ts'o * integrity sync we have to keep going 21978e48dcfbSTheodore Ts'o * because someone may be concurrently 21988e48dcfbSTheodore Ts'o * dirtying pages, and we might have 21998e48dcfbSTheodore Ts'o * synced a lot of newly appeared dirty 22008e48dcfbSTheodore Ts'o * pages, but have not synced all of the 22018e48dcfbSTheodore Ts'o * old dirty pages. 
22028e48dcfbSTheodore Ts'o */ 22034f01b02cSTheodore Ts'o goto out; 22048e48dcfbSTheodore Ts'o } 22058e48dcfbSTheodore Ts'o } 22068e48dcfbSTheodore Ts'o pagevec_release(&pvec); 22078e48dcfbSTheodore Ts'o cond_resched(); 22088e48dcfbSTheodore Ts'o } 22094f01b02cSTheodore Ts'o return 0; 22104f01b02cSTheodore Ts'o ret_extent_tail: 22114f01b02cSTheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL; 22128eb9e5ceSTheodore Ts'o out: 22138eb9e5ceSTheodore Ts'o pagevec_release(&pvec); 22148eb9e5ceSTheodore Ts'o cond_resched(); 22158e48dcfbSTheodore Ts'o return ret; 22168e48dcfbSTheodore Ts'o } 22178e48dcfbSTheodore Ts'o 22188e48dcfbSTheodore Ts'o 221964769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping, 222064769240SAlex Tomas struct writeback_control *wbc) 222164769240SAlex Tomas { 222222208dedSAneesh Kumar K.V pgoff_t index; 222322208dedSAneesh Kumar K.V int range_whole = 0; 222461628a3fSMingming Cao handle_t *handle = NULL; 2225df22291fSAneesh Kumar K.V struct mpage_da_data mpd; 22265e745b04SAneesh Kumar K.V struct inode *inode = mapping->host; 2227498e5f24STheodore Ts'o int pages_written = 0; 222855138e0bSTheodore Ts'o unsigned int max_pages; 22292acf2c26SAneesh Kumar K.V int range_cyclic, cycled = 1, io_done = 0; 223055138e0bSTheodore Ts'o int needed_blocks, ret = 0; 223155138e0bSTheodore Ts'o long desired_nr_to_write, nr_to_writebump = 0; 2232de89de6eSTheodore Ts'o loff_t range_start = wbc->range_start; 22335e745b04SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 223472f84e65SEric Sandeen pgoff_t done_index = 0; 22355b41d924SEric Sandeen pgoff_t end; 22361bce63d1SShaohua Li struct blk_plug plug; 223761628a3fSMingming Cao 22389bffad1eSTheodore Ts'o trace_ext4_da_writepages(inode, wbc); 2239ba80b101STheodore Ts'o 224061628a3fSMingming Cao /* 224161628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting 224261628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput() 224361628a3fSMingming Cao * because that could violate lock ordering on umount 224461628a3fSMingming Cao */ 2245a1d6cc56SAneesh Kumar K.V if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 224661628a3fSMingming Cao return 0; 22472a21e37eSTheodore Ts'o 22482a21e37eSTheodore Ts'o /* 22492a21e37eSTheodore Ts'o * If the filesystem has aborted, it is read-only, so return 22502a21e37eSTheodore Ts'o * right away instead of dumping stack traces later on that 22512a21e37eSTheodore Ts'o * will obscure the real source of the problem. We test 22524ab2f15bSTheodore Ts'o * EXT4_MF_FS_ABORTED instead of sb->s_flags' MS_RDONLY because 22532a21e37eSTheodore Ts'o * the latter could be true if the filesystem is mounted 22542a21e37eSTheodore Ts'o * read-only, and in that case, ext4_da_writepages should 22552a21e37eSTheodore Ts'o * *never* be called, so if that ever happens, we would want 22562a21e37eSTheodore Ts'o * the stack trace.
22572a21e37eSTheodore Ts'o */ 22584ab2f15bSTheodore Ts'o if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 22592a21e37eSTheodore Ts'o return -EROFS; 22602a21e37eSTheodore Ts'o 226122208dedSAneesh Kumar K.V if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 226222208dedSAneesh Kumar K.V range_whole = 1; 226361628a3fSMingming Cao 22642acf2c26SAneesh Kumar K.V range_cyclic = wbc->range_cyclic; 22652acf2c26SAneesh Kumar K.V if (wbc->range_cyclic) { 226622208dedSAneesh Kumar K.V index = mapping->writeback_index; 22672acf2c26SAneesh Kumar K.V if (index) 22682acf2c26SAneesh Kumar K.V cycled = 0; 22692acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 22702acf2c26SAneesh Kumar K.V wbc->range_end = LLONG_MAX; 22712acf2c26SAneesh Kumar K.V wbc->range_cyclic = 0; 22725b41d924SEric Sandeen end = -1; 22735b41d924SEric Sandeen } else { 227422208dedSAneesh Kumar K.V index = wbc->range_start >> PAGE_CACHE_SHIFT; 22755b41d924SEric Sandeen end = wbc->range_end >> PAGE_CACHE_SHIFT; 22765b41d924SEric Sandeen } 2277a1d6cc56SAneesh Kumar K.V 227855138e0bSTheodore Ts'o /* 227955138e0bSTheodore Ts'o * This works around two forms of stupidity. The first is in 228055138e0bSTheodore Ts'o * the writeback code, which caps the maximum number of pages 228155138e0bSTheodore Ts'o * written to be 1024 pages. This is wrong on multiple 228255138e0bSTheodore Ts'o * levels; different architectures have a different page size, 228355138e0bSTheodore Ts'o * which changes the maximum amount of data which gets 228455138e0bSTheodore Ts'o * written. Secondly, 4 megabytes is way too small. XFS 228555138e0bSTheodore Ts'o * forces this value to be 16 megabytes by multiplying the 228655138e0bSTheodore Ts'o * nr_to_write parameter by four, and then relies on its 228755138e0bSTheodore Ts'o * allocator to allocate larger extents to make them 228855138e0bSTheodore Ts'o * contiguous. Unfortunately this brings us to the second 228955138e0bSTheodore Ts'o * stupidity, which is that ext4's mballoc code only allocates 229055138e0bSTheodore Ts'o * at most 2048 blocks. So we force contiguous writes up to 229155138e0bSTheodore Ts'o * the number of dirty blocks in the inode, or 229255138e0bSTheodore Ts'o * sbi->s_max_writeback_mb_bump, whichever is smaller.
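	 * As a worked example (illustrative numbers only): with 4k pages
	 * (PAGE_CACHE_SHIFT == 12) and s_max_writeback_mb_bump == 128,
	 * max_pages below works out to 128 << (20 - 12) = 32768 pages,
	 * i.e. 128 megabytes per writeback pass.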
229355138e0bSTheodore Ts'o */ 229455138e0bSTheodore Ts'o max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); 2295b443e733SEric Sandeen if (!range_cyclic && range_whole) { 2296b443e733SEric Sandeen if (wbc->nr_to_write == LONG_MAX) 2297b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write; 229855138e0bSTheodore Ts'o else 2299b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write * 8; 2300b443e733SEric Sandeen } else 230155138e0bSTheodore Ts'o desired_nr_to_write = ext4_num_dirty_pages(inode, index, 230255138e0bSTheodore Ts'o max_pages); 230355138e0bSTheodore Ts'o if (desired_nr_to_write > max_pages) 230455138e0bSTheodore Ts'o desired_nr_to_write = max_pages; 230555138e0bSTheodore Ts'o 230655138e0bSTheodore Ts'o if (wbc->nr_to_write < desired_nr_to_write) { 230755138e0bSTheodore Ts'o nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; 230855138e0bSTheodore Ts'o wbc->nr_to_write = desired_nr_to_write; 230955138e0bSTheodore Ts'o } 231055138e0bSTheodore Ts'o 23112acf2c26SAneesh Kumar K.V retry: 23126e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 23135b41d924SEric Sandeen tag_pages_for_writeback(mapping, index, end); 23145b41d924SEric Sandeen 23151bce63d1SShaohua Li blk_start_plug(&plug); 231622208dedSAneesh Kumar K.V while (!ret && wbc->nr_to_write > 0) { 2317a1d6cc56SAneesh Kumar K.V 2318a1d6cc56SAneesh Kumar K.V /* 2319a1d6cc56SAneesh Kumar K.V * We insert one extent at a time, so we need the 2320a1d6cc56SAneesh Kumar K.V * credits needed for a single extent allocation. 2321a1d6cc56SAneesh Kumar K.V * Journalled mode is currently not supported 2322a1d6cc56SAneesh Kumar K.V * by delalloc 2323a1d6cc56SAneesh Kumar K.V */ 2324a1d6cc56SAneesh Kumar K.V BUG_ON(ext4_should_journal_data(inode)); 2325525f4ed8SMingming Cao needed_blocks = ext4_da_writepages_trans_blocks(inode); 2326a1d6cc56SAneesh Kumar K.V 232761628a3fSMingming Cao /* start a new transaction */ 23289924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 23299924a92aSTheodore Ts'o needed_blocks); 233061628a3fSMingming Cao if (IS_ERR(handle)) { 233161628a3fSMingming Cao ret = PTR_ERR(handle); 23321693918eSTheodore Ts'o ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2333fbe845ddSCurt Wohlgemuth "%ld pages, ino %lu; err %d", __func__, 2334a1d6cc56SAneesh Kumar K.V wbc->nr_to_write, inode->i_ino, ret); 23353c1fcb2cSNamjae Jeon blk_finish_plug(&plug); 233661628a3fSMingming Cao goto out_writepages; 233761628a3fSMingming Cao } 2338f63e6005STheodore Ts'o 2339f63e6005STheodore Ts'o /* 23408eb9e5ceSTheodore Ts'o * Now call write_cache_pages_da() to find the next 2341f63e6005STheodore Ts'o * contiguous region of logical blocks that need 23428eb9e5ceSTheodore Ts'o * blocks to be allocated by ext4 and submit them. 2343f63e6005STheodore Ts'o */ 23449c3569b5STao Ma ret = write_cache_pages_da(handle, mapping, 23459c3569b5STao Ma wbc, &mpd, &done_index); 2346f63e6005STheodore Ts'o /* 2347af901ca1SAndré Goddard Rosa * If we have a contiguous extent of pages and we 2348f63e6005STheodore Ts'o * haven't done the I/O yet, map the blocks and submit 2349f63e6005STheodore Ts'o * them for I/O.
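		 * (write_cache_pages_da() leaves mpd describing any extent
		 * it accumulated but did not get to submit itself.)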
2350f63e6005STheodore Ts'o */ 2351f63e6005STheodore Ts'o if (!mpd.io_done && mpd.next_page != mpd.first_page) { 23525a87b7a5STheodore Ts'o mpage_da_map_and_submit(&mpd); 2353f63e6005STheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL; 2354f63e6005STheodore Ts'o } 2355b3a3ca8cSTheodore Ts'o trace_ext4_da_write_pages(inode, &mpd); 2356f63e6005STheodore Ts'o wbc->nr_to_write -= mpd.pages_written; 2357df22291fSAneesh Kumar K.V 235861628a3fSMingming Cao ext4_journal_stop(handle); 2359df22291fSAneesh Kumar K.V 23608f64b32eSEric Sandeen if ((mpd.retval == -ENOSPC) && sbi->s_journal) { 236122208dedSAneesh Kumar K.V /* commit the transaction which would 236222208dedSAneesh Kumar K.V * free blocks released in the transaction 236322208dedSAneesh Kumar K.V * and try again 236422208dedSAneesh Kumar K.V */ 2365df22291fSAneesh Kumar K.V jbd2_journal_force_commit_nested(sbi->s_journal); 236622208dedSAneesh Kumar K.V ret = 0; 236722208dedSAneesh Kumar K.V } else if (ret == MPAGE_DA_EXTENT_TAIL) { 2368a1d6cc56SAneesh Kumar K.V /* 23698de49e67SKazuya Mio * Got one extent; now try with the rest of the pages. 23708de49e67SKazuya Mio * If mpd.retval is set to -EIO, the journal is aborted. 23718de49e67SKazuya Mio * So we don't need to write any more. 2372a1d6cc56SAneesh Kumar K.V */ 237322208dedSAneesh Kumar K.V pages_written += mpd.pages_written; 23748de49e67SKazuya Mio ret = mpd.retval; 23752acf2c26SAneesh Kumar K.V io_done = 1; 237622208dedSAneesh Kumar K.V } else if (wbc->nr_to_write) 237761628a3fSMingming Cao /* 237861628a3fSMingming Cao * There is no more writeout needed, 237961628a3fSMingming Cao * or we requested a non-blocking writeout 238061628a3fSMingming Cao * and we found the device congested 238161628a3fSMingming Cao */ 238261628a3fSMingming Cao break; 238361628a3fSMingming Cao } 23841bce63d1SShaohua Li blk_finish_plug(&plug); 23852acf2c26SAneesh Kumar K.V if (!io_done && !cycled) { 23862acf2c26SAneesh Kumar K.V cycled = 1; 23872acf2c26SAneesh Kumar K.V index = 0; 23882acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 23892acf2c26SAneesh Kumar K.V wbc->range_end = mapping->writeback_index - 1; 23902acf2c26SAneesh Kumar K.V goto retry; 23912acf2c26SAneesh Kumar K.V } 239261628a3fSMingming Cao 239322208dedSAneesh Kumar K.V /* Update index */ 23942acf2c26SAneesh Kumar K.V wbc->range_cyclic = range_cyclic; 239522208dedSAneesh Kumar K.V if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 239622208dedSAneesh Kumar K.V /* 239722208dedSAneesh Kumar K.V * set the writeback_index so that range_cyclic 239822208dedSAneesh Kumar K.V * mode will write it back later 239922208dedSAneesh Kumar K.V */ 240072f84e65SEric Sandeen mapping->writeback_index = done_index; 2401a1d6cc56SAneesh Kumar K.V 240261628a3fSMingming Cao out_writepages: 240322208dedSAneesh Kumar K.V wbc->nr_to_write -= nr_to_writebump; 2404de89de6eSTheodore Ts'o wbc->range_start = range_start; 24059bffad1eSTheodore Ts'o trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 240661628a3fSMingming Cao return ret; 240764769240SAlex Tomas } 240864769240SAlex Tomas 240979f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb) 241079f0be8dSAneesh Kumar K.V { 241179f0be8dSAneesh Kumar K.V s64 free_blocks, dirty_blocks; 241279f0be8dSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 241379f0be8dSAneesh Kumar K.V 241479f0be8dSAneesh Kumar K.V /* 241579f0be8dSAneesh Kumar K.V * switch to non delalloc mode if we are running low 241679f0be8dSAneesh Kumar K.V * on free blocks.
The free block accounting via percpu 2417179f7ebfSEric Dumazet * counters can get slightly wrong with percpu_counter_batch getting 241879f0be8dSAneesh Kumar K.V * accumulated on each CPU without updating the global counters. 241979f0be8dSAneesh Kumar K.V * Delalloc needs accurate free block accounting. So switch 242079f0be8dSAneesh Kumar K.V * to non delalloc when we are near the error range. 242179f0be8dSAneesh Kumar K.V */ 242257042651STheodore Ts'o free_blocks = EXT4_C2B(sbi, 242357042651STheodore Ts'o percpu_counter_read_positive(&sbi->s_freeclusters_counter)); 242457042651STheodore Ts'o dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 242500d4e736STheodore Ts'o /* 242600d4e736STheodore Ts'o * Start pushing delalloc when 1/2 of free blocks are dirty. 242700d4e736STheodore Ts'o */ 242800d4e736STheodore Ts'o if (dirty_blocks && (free_blocks < 2 * dirty_blocks) && 242900d4e736STheodore Ts'o !writeback_in_progress(sb->s_bdi) && 243000d4e736STheodore Ts'o down_read_trylock(&sb->s_umount)) { 243100d4e736STheodore Ts'o writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 243200d4e736STheodore Ts'o up_read(&sb->s_umount); 243300d4e736STheodore Ts'o } 243400d4e736STheodore Ts'o 243579f0be8dSAneesh Kumar K.V if (2 * free_blocks < 3 * dirty_blocks || 2436df55c99dSTheodore Ts'o free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) { 243779f0be8dSAneesh Kumar K.V /* 2438c8afb446SEric Sandeen * free block count is less than 150% of dirty blocks 2439c8afb446SEric Sandeen * or the free block count is less than the watermark 244079f0be8dSAneesh Kumar K.V */ 244179f0be8dSAneesh Kumar K.V return 1; 244279f0be8dSAneesh Kumar K.V } 244379f0be8dSAneesh Kumar K.V return 0; 244479f0be8dSAneesh Kumar K.V } 244579f0be8dSAneesh Kumar K.V 244664769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 244764769240SAlex Tomas loff_t pos, unsigned len, unsigned flags, 244864769240SAlex Tomas struct page **pagep, void **fsdata) 244964769240SAlex Tomas { 245072b8ab9dSEric Sandeen int ret, retries = 0; 245164769240SAlex Tomas struct page *page; 245264769240SAlex Tomas pgoff_t index; 245364769240SAlex Tomas struct inode *inode = mapping->host; 245464769240SAlex Tomas handle_t *handle; 245564769240SAlex Tomas 245664769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT; 245779f0be8dSAneesh Kumar K.V 245879f0be8dSAneesh Kumar K.V if (ext4_nonda_switch(inode->i_sb)) { 245979f0be8dSAneesh Kumar K.V *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 246079f0be8dSAneesh Kumar K.V return ext4_write_begin(file, mapping, pos, 246179f0be8dSAneesh Kumar K.V len, flags, pagep, fsdata); 246279f0be8dSAneesh Kumar K.V } 246379f0be8dSAneesh Kumar K.V *fsdata = (void *)0; 24649bffad1eSTheodore Ts'o trace_ext4_da_write_begin(inode, pos, len, flags); 24659c3569b5STao Ma 24669c3569b5STao Ma if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 24679c3569b5STao Ma ret = ext4_da_write_inline_data_begin(mapping, inode, 24689c3569b5STao Ma pos, len, flags, 24699c3569b5STao Ma pagep, fsdata); 24709c3569b5STao Ma if (ret < 0) 247147564bfbSTheodore Ts'o return ret; 247247564bfbSTheodore Ts'o if (ret == 1) 247347564bfbSTheodore Ts'o return 0; 24749c3569b5STao Ma } 24759c3569b5STao Ma 247647564bfbSTheodore Ts'o /* 247747564bfbSTheodore Ts'o * grab_cache_page_write_begin() can take a long time if the 247847564bfbSTheodore Ts'o * system is thrashing due to memory pressure, or if the page 247947564bfbSTheodore Ts'o * is being written back.
So grab it first before we start 248047564bfbSTheodore Ts'o * the transaction handle. This also allows us to allocate 248147564bfbSTheodore Ts'o * the page (if needed) without using GFP_NOFS. 248247564bfbSTheodore Ts'o */ 248347564bfbSTheodore Ts'o retry_grab: 248447564bfbSTheodore Ts'o page = grab_cache_page_write_begin(mapping, index, flags); 248547564bfbSTheodore Ts'o if (!page) 248647564bfbSTheodore Ts'o return -ENOMEM; 248747564bfbSTheodore Ts'o unlock_page(page); 248847564bfbSTheodore Ts'o 248964769240SAlex Tomas /* 249064769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update 249164769240SAlex Tomas * if there is delayed block allocation. But we still need 249264769240SAlex Tomas * to journal the i_disksize update if we write to the end 249364769240SAlex Tomas * of a file which has an already mapped buffer. 249464769240SAlex Tomas */ 249547564bfbSTheodore Ts'o retry_journal: 24969924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1); 249764769240SAlex Tomas if (IS_ERR(handle)) { 249847564bfbSTheodore Ts'o page_cache_release(page); 249947564bfbSTheodore Ts'o return PTR_ERR(handle); 250064769240SAlex Tomas } 250164769240SAlex Tomas 250247564bfbSTheodore Ts'o lock_page(page); 250347564bfbSTheodore Ts'o if (page->mapping != mapping) { 250447564bfbSTheodore Ts'o /* The page got truncated from under us */ 250547564bfbSTheodore Ts'o unlock_page(page); 250647564bfbSTheodore Ts'o page_cache_release(page); 2507d5a0d4f7SEric Sandeen ext4_journal_stop(handle); 250847564bfbSTheodore Ts'o goto retry_grab; 2509d5a0d4f7SEric Sandeen } 251047564bfbSTheodore Ts'o /* In case writeback began while the page was unlocked */ 251147564bfbSTheodore Ts'o wait_on_page_writeback(page); 251264769240SAlex Tomas 25136e1db88dSChristoph Hellwig ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 251464769240SAlex Tomas if (ret < 0) { 251564769240SAlex Tomas unlock_page(page); 251664769240SAlex Tomas ext4_journal_stop(handle); 2517ae4d5372SAneesh Kumar K.V /* 2518ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks 2519ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need 2520ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex.
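		 * (ext4_truncate_failed_write() below only removes blocks
		 * beyond i_size, so data already within i_size is untouched.)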
2521ae4d5372SAneesh Kumar K.V */ 2522ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size) 2523b9a4207dSJan Kara ext4_truncate_failed_write(inode); 252447564bfbSTheodore Ts'o 252547564bfbSTheodore Ts'o if (ret == -ENOSPC && 252647564bfbSTheodore Ts'o ext4_should_retry_alloc(inode->i_sb, &retries)) 252747564bfbSTheodore Ts'o goto retry_journal; 252847564bfbSTheodore Ts'o 252947564bfbSTheodore Ts'o page_cache_release(page); 253047564bfbSTheodore Ts'o return ret; 253164769240SAlex Tomas } 253264769240SAlex Tomas 253347564bfbSTheodore Ts'o *pagep = page; 253464769240SAlex Tomas return ret; 253564769240SAlex Tomas } 253664769240SAlex Tomas 2537632eaeabSMingming Cao /* 2538632eaeabSMingming Cao * Check if we should update i_disksize 2539632eaeabSMingming Cao * when writing to the end of file does not require block allocation 2540632eaeabSMingming Cao */ 2541632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page, 2542632eaeabSMingming Cao unsigned long offset) 2543632eaeabSMingming Cao { 2544632eaeabSMingming Cao struct buffer_head *bh; 2545632eaeabSMingming Cao struct inode *inode = page->mapping->host; 2546632eaeabSMingming Cao unsigned int idx; 2547632eaeabSMingming Cao int i; 2548632eaeabSMingming Cao 2549632eaeabSMingming Cao bh = page_buffers(page); 2550632eaeabSMingming Cao idx = offset >> inode->i_blkbits; 2551632eaeabSMingming Cao 2552632eaeabSMingming Cao for (i = 0; i < idx; i++) 2553632eaeabSMingming Cao bh = bh->b_this_page; 2554632eaeabSMingming Cao 255529fa89d0SAneesh Kumar K.V if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 2556632eaeabSMingming Cao return 0; 2557632eaeabSMingming Cao return 1; 2558632eaeabSMingming Cao } 2559632eaeabSMingming Cao 256064769240SAlex Tomas static int ext4_da_write_end(struct file *file, 256164769240SAlex Tomas struct address_space *mapping, 256264769240SAlex Tomas loff_t pos, unsigned len, unsigned copied, 256364769240SAlex Tomas struct page *page, void *fsdata) 256464769240SAlex Tomas { 256564769240SAlex Tomas struct inode *inode = mapping->host; 256664769240SAlex Tomas int ret = 0, ret2; 256764769240SAlex Tomas handle_t *handle = ext4_journal_current_handle(); 256864769240SAlex Tomas loff_t new_i_size; 2569632eaeabSMingming Cao unsigned long start, end; 257079f0be8dSAneesh Kumar K.V int write_mode = (int)(unsigned long)fsdata; 257179f0be8dSAneesh Kumar K.V 257279f0be8dSAneesh Kumar K.V if (write_mode == FALL_BACK_TO_NONDELALLOC) { 25733d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) { 25743d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE: 257579f0be8dSAneesh Kumar K.V return ext4_ordered_write_end(file, mapping, pos, 257679f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 25773d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE: 257879f0be8dSAneesh Kumar K.V return ext4_writeback_write_end(file, mapping, pos, 257979f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 25803d2b1582SLukas Czerner default: 258179f0be8dSAneesh Kumar K.V BUG(); 258279f0be8dSAneesh Kumar K.V } 258379f0be8dSAneesh Kumar K.V } 2584632eaeabSMingming Cao 25859bffad1eSTheodore Ts'o trace_ext4_da_write_end(inode, pos, len, copied); 2586632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1); 2587632eaeabSMingming Cao end = start + copied - 1; 258864769240SAlex Tomas 258964769240SAlex Tomas /* 259064769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size 259164769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty 259264769240SAlex Tomas * into that.
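	 * (i_disksize is the size for which blocks are actually allocated
	 * on disk; with delalloc it can lag behind i_size until writeback
	 * allocates the delayed blocks.)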
259364769240SAlex Tomas */ 259464769240SAlex Tomas new_i_size = pos + copied; 2595ea51d132SAndrea Arcangeli if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 25969c3569b5STao Ma if (ext4_has_inline_data(inode) || 25979c3569b5STao Ma ext4_da_should_update_i_disksize(page, end)) { 2598632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem); 2599f3b59291STheodore Ts'o if (new_i_size > EXT4_I(inode)->i_disksize) 260064769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size; 2601632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem); 2602cf17fea6SAneesh Kumar K.V /* We need to mark inode dirty even if 2603cf17fea6SAneesh Kumar K.V * new_i_size is less than inode->i_size 2604cf17fea6SAneesh Kumar K.V * but greater than i_disksize. (hint: delalloc) 2605cf17fea6SAneesh Kumar K.V */ 2606cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 2607632eaeabSMingming Cao } 2608632eaeabSMingming Cao } 26099c3569b5STao Ma 26109c3569b5STao Ma if (write_mode != CONVERT_INLINE_DATA && 26119c3569b5STao Ma ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 26129c3569b5STao Ma ext4_has_inline_data(inode)) 26139c3569b5STao Ma ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, 26149c3569b5STao Ma page); 26159c3569b5STao Ma else 261664769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied, 261764769240SAlex Tomas page, fsdata); 26189c3569b5STao Ma 261964769240SAlex Tomas copied = ret2; 262064769240SAlex Tomas if (ret2 < 0) 262164769240SAlex Tomas ret = ret2; 262264769240SAlex Tomas ret2 = ext4_journal_stop(handle); 262364769240SAlex Tomas if (!ret) 262464769240SAlex Tomas ret = ret2; 262564769240SAlex Tomas 262664769240SAlex Tomas return ret ? ret : copied; 262764769240SAlex Tomas } 262864769240SAlex Tomas 262964769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 263064769240SAlex Tomas { 263164769240SAlex Tomas /* 263264769240SAlex Tomas * Drop reserved blocks 263364769240SAlex Tomas */ 263464769240SAlex Tomas BUG_ON(!PageLocked(page)); 263564769240SAlex Tomas if (!page_has_buffers(page)) 263664769240SAlex Tomas goto out; 263764769240SAlex Tomas 2638d2a17637SMingming Cao ext4_da_page_release_reservation(page, offset); 263964769240SAlex Tomas 264064769240SAlex Tomas out: 264164769240SAlex Tomas ext4_invalidatepage(page, offset); 264264769240SAlex Tomas 264364769240SAlex Tomas return; 264464769240SAlex Tomas } 264564769240SAlex Tomas 2646ccd2506bSTheodore Ts'o /* 2647ccd2506bSTheodore Ts'o * Force all delayed allocation blocks to be allocated for a given inode. 2648ccd2506bSTheodore Ts'o */ 2649ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode) 2650ccd2506bSTheodore Ts'o { 2651fb40ba0dSTheodore Ts'o trace_ext4_alloc_da_blocks(inode); 2652fb40ba0dSTheodore Ts'o 2653ccd2506bSTheodore Ts'o if (!EXT4_I(inode)->i_reserved_data_blocks && 2654ccd2506bSTheodore Ts'o !EXT4_I(inode)->i_reserved_meta_blocks) 2655ccd2506bSTheodore Ts'o return 0; 2656ccd2506bSTheodore Ts'o 2657ccd2506bSTheodore Ts'o /* 2658ccd2506bSTheodore Ts'o * We do something simple for now. The filemap_flush() will 2659ccd2506bSTheodore Ts'o * also start triggering a write of the data blocks, which is 2660ccd2506bSTheodore Ts'o * not strictly speaking necessary (and for users of 2661ccd2506bSTheodore Ts'o * laptop_mode, not even desirable).
However, to do otherwise 2662ccd2506bSTheodore Ts'o * would require replicating code paths in: 2663ccd2506bSTheodore Ts'o * 2664ccd2506bSTheodore Ts'o * ext4_da_writepages() -> 2665ccd2506bSTheodore Ts'o * write_cache_pages() ---> (via passed in callback function) 2666ccd2506bSTheodore Ts'o * __mpage_da_writepage() --> 2667ccd2506bSTheodore Ts'o * mpage_add_bh_to_extent() 2668ccd2506bSTheodore Ts'o * mpage_da_map_blocks() 2669ccd2506bSTheodore Ts'o * 2670ccd2506bSTheodore Ts'o * The problem is that write_cache_pages(), located in 2671ccd2506bSTheodore Ts'o * mm/page-writeback.c, marks pages clean in preparation for 2672ccd2506bSTheodore Ts'o * doing I/O, which is not desirable if we're not planning on 2673ccd2506bSTheodore Ts'o * doing I/O at all. 2674ccd2506bSTheodore Ts'o * 2675ccd2506bSTheodore Ts'o * We could call write_cache_pages(), and then redirty all of 2676380cf090SWu Fengguang * the pages by calling redirty_page_for_writepage() but that 2677ccd2506bSTheodore Ts'o * would be ugly in the extreme. So instead we would need to 2678ccd2506bSTheodore Ts'o * replicate parts of the code in the above functions, 267925985edcSLucas De Marchi * simplifying them because we wouldn't actually intend to 2680ccd2506bSTheodore Ts'o * write out the pages, but rather only collect contiguous 2681ccd2506bSTheodore Ts'o * logical block extents, call the multi-block allocator, and 2682ccd2506bSTheodore Ts'o * then update the buffer heads with the block allocations. 2683ccd2506bSTheodore Ts'o * 2684ccd2506bSTheodore Ts'o * For now, though, we'll cheat by calling filemap_flush(), 2685ccd2506bSTheodore Ts'o * which will map the blocks, and start the I/O, but not 2686ccd2506bSTheodore Ts'o * actually wait for the I/O to complete. 2687ccd2506bSTheodore Ts'o */ 2688ccd2506bSTheodore Ts'o return filemap_flush(inode->i_mapping); 2689ccd2506bSTheodore Ts'o } 269064769240SAlex Tomas 269164769240SAlex Tomas /* 2692ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by 2693ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data. 2694ac27a0ecSDave Kleikamp * 2695ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the 2696617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling 2697ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the 2698ac27a0ecSDave Kleikamp * data getting swapped to that swapfile suddenly gets overwritten by 2699ac27a0ecSDave Kleikamp * the original zeros written out previously to the journal and 2700ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache. 2701ac27a0ecSDave Kleikamp * 2702ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file, 2703ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache.
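 *
 * A hypothetical user-space sequence that would end up here on a
 * data=journal file (illustrative sketch only, error handling omitted):
 *
 *	fd = open("/mnt/file", O_RDWR);
 *	write(fd, buf, len);		-- journalled, not yet checkpointed
 *	ioctl(fd, FIBMAP, &block);	-- ext4_bmap() must flush first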
2704ac27a0ecSDave Kleikamp */ 2705617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2706ac27a0ecSDave Kleikamp { 2707ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 2708ac27a0ecSDave Kleikamp journal_t *journal; 2709ac27a0ecSDave Kleikamp int err; 2710ac27a0ecSDave Kleikamp 271146c7f254STao Ma /* 271246c7f254STao Ma * We can get here for an inline file via the FIBMAP ioctl 271346c7f254STao Ma */ 271446c7f254STao Ma if (ext4_has_inline_data(inode)) 271546c7f254STao Ma return 0; 271646c7f254STao Ma 271764769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 271864769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) { 271964769240SAlex Tomas /* 272064769240SAlex Tomas * With delalloc we want to sync the file 272164769240SAlex Tomas * so that we can make sure we allocate 272264769240SAlex Tomas * blocks for the file 272364769240SAlex Tomas */ 272464769240SAlex Tomas filemap_write_and_wait(mapping); 272564769240SAlex Tomas } 272664769240SAlex Tomas 272719f5fb7aSTheodore Ts'o if (EXT4_JOURNAL(inode) && 272819f5fb7aSTheodore Ts'o ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 2729ac27a0ecSDave Kleikamp /* 2730ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of 2731ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare: 2732ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file 2733ac27a0ecSDave Kleikamp * do we expect this to happen. 2734ac27a0ecSDave Kleikamp * 2735ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not 2736ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be 2737ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at 2738ac27a0ecSDave Kleikamp * will.) 2739ac27a0ecSDave Kleikamp * 2740617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than 2741ac27a0ecSDave Kleikamp * regular files. If somebody wants to bmap a directory 2742ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer 2743ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve 2744ac27a0ecSDave Kleikamp * everything they get.
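		 * (jbd2_journal_flush() below commits the running
		 * transaction and checkpoints the whole journal, so every
		 * journalled data block reaches its final on-disk location
		 * before we answer the query.)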
2745ac27a0ecSDave Kleikamp */ 2746ac27a0ecSDave Kleikamp 274719f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 2748617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2749dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2750dab291afSMingming Cao err = jbd2_journal_flush(journal); 2751dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2752ac27a0ecSDave Kleikamp 2753ac27a0ecSDave Kleikamp if (err) 2754ac27a0ecSDave Kleikamp return 0; 2755ac27a0ecSDave Kleikamp } 2756ac27a0ecSDave Kleikamp 2757617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block); 2758ac27a0ecSDave Kleikamp } 2759ac27a0ecSDave Kleikamp 2760617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page) 2761ac27a0ecSDave Kleikamp { 276246c7f254STao Ma int ret = -EAGAIN; 276346c7f254STao Ma struct inode *inode = page->mapping->host; 276446c7f254STao Ma 27650562e0baSJiaying Zhang trace_ext4_readpage(page); 276646c7f254STao Ma 276746c7f254STao Ma if (ext4_has_inline_data(inode)) 276846c7f254STao Ma ret = ext4_readpage_inline(inode, page); 276946c7f254STao Ma 277046c7f254STao Ma if (ret == -EAGAIN) 2771617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block); 277246c7f254STao Ma 277346c7f254STao Ma return ret; 2774ac27a0ecSDave Kleikamp } 2775ac27a0ecSDave Kleikamp 2776ac27a0ecSDave Kleikamp static int 2777617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping, 2778ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages) 2779ac27a0ecSDave Kleikamp { 278046c7f254STao Ma struct inode *inode = mapping->host; 278146c7f254STao Ma 278246c7f254STao Ma /* If the file has inline data, no need to do readpages. */ 278346c7f254STao Ma if (ext4_has_inline_data(inode)) 278446c7f254STao Ma return 0; 278546c7f254STao Ma 2786617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2787ac27a0ecSDave Kleikamp } 2788ac27a0ecSDave Kleikamp 2789617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset) 2790ac27a0ecSDave Kleikamp { 27910562e0baSJiaying Zhang trace_ext4_invalidatepage(page, offset); 27920562e0baSJiaying Zhang 27934520fb3cSJan Kara /* No journalling happens on data buffers when this function is used */ 27944520fb3cSJan Kara WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 27954520fb3cSJan Kara 27964520fb3cSJan Kara block_invalidatepage(page, offset); 27974520fb3cSJan Kara } 27984520fb3cSJan Kara 279953e87268SJan Kara static int __ext4_journalled_invalidatepage(struct page *page, 28004520fb3cSJan Kara unsigned long offset) 28014520fb3cSJan Kara { 28024520fb3cSJan Kara journal_t *journal = EXT4_JOURNAL(page->mapping->host); 28034520fb3cSJan Kara 28044520fb3cSJan Kara trace_ext4_journalled_invalidatepage(page, offset); 28054520fb3cSJan Kara 2806744692dcSJiaying Zhang /* 2807ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying 2808ac27a0ecSDave Kleikamp */ 2809ac27a0ecSDave Kleikamp if (offset == 0) 2810ac27a0ecSDave Kleikamp ClearPageChecked(page); 2811ac27a0ecSDave Kleikamp 281253e87268SJan Kara return jbd2_journal_invalidatepage(journal, page, offset); 281353e87268SJan Kara } 281453e87268SJan Kara 281553e87268SJan Kara /* Wrapper for aops... 
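 * ->invalidatepage cannot report failure, so all we can do here is
 * WARN if the journalled variant returns an error.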
*/ 281653e87268SJan Kara static void ext4_journalled_invalidatepage(struct page *page, 281753e87268SJan Kara unsigned long offset) 281853e87268SJan Kara { 281953e87268SJan Kara WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0); 2820ac27a0ecSDave Kleikamp } 2821ac27a0ecSDave Kleikamp 2822617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait) 2823ac27a0ecSDave Kleikamp { 2824617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2825ac27a0ecSDave Kleikamp 28260562e0baSJiaying Zhang trace_ext4_releasepage(page); 28270562e0baSJiaying Zhang 2828ac27a0ecSDave Kleikamp WARN_ON(PageChecked(page)); 2829ac27a0ecSDave Kleikamp if (!page_has_buffers(page)) 2830ac27a0ecSDave Kleikamp return 0; 28310390131bSFrank Mayhar if (journal) 2832dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait); 28330390131bSFrank Mayhar else 28340390131bSFrank Mayhar return try_to_free_buffers(page); 2835ac27a0ecSDave Kleikamp } 2836ac27a0ecSDave Kleikamp 2837ac27a0ecSDave Kleikamp /* 28382ed88685STheodore Ts'o * ext4_get_block used when preparing for a DIO write or buffer write. 28392ed88685STheodore Ts'o * We allocate an uninitialized extent if blocks haven't been allocated. 28402ed88685STheodore Ts'o * The extent will be converted to initialized after the IO is complete. 28412ed88685STheodore Ts'o */ 2842f19d5870STao Ma int ext4_get_block_write(struct inode *inode, sector_t iblock, 28434c0425ffSMingming Cao struct buffer_head *bh_result, int create) 28444c0425ffSMingming Cao { 2845c7064ef1SJiaying Zhang ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", 28468d5d02e6SMingming Cao inode->i_ino, create); 28472ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result, 28482ed88685STheodore Ts'o EXT4_GET_BLOCKS_IO_CREATE_EXT); 28494c0425ffSMingming Cao } 28504c0425ffSMingming Cao 2851729f52c6SZheng Liu static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock, 28528b0f165fSAnatol Pomozov struct buffer_head *bh_result, int create) 2853729f52c6SZheng Liu { 28548b0f165fSAnatol Pomozov ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n", 28558b0f165fSAnatol Pomozov inode->i_ino, create); 28568b0f165fSAnatol Pomozov return _ext4_get_block(inode, iblock, bh_result, 28578b0f165fSAnatol Pomozov EXT4_GET_BLOCKS_NO_LOCK); 2858729f52c6SZheng Liu } 2859729f52c6SZheng Liu 28604c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 2861552ef802SChristoph Hellwig ssize_t size, void *private, int ret, 2862552ef802SChristoph Hellwig bool is_async) 28634c0425ffSMingming Cao { 286472c5052dSChristoph Hellwig struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; 28654c0425ffSMingming Cao ext4_io_end_t *io_end = iocb->private; 28664c0425ffSMingming Cao 28674b70df18SMingming /* if not async direct IO or dio with 0 bytes write, just return */ 28684b70df18SMingming if (!io_end || !size) 2869552ef802SChristoph Hellwig goto out; 28704b70df18SMingming 28718d5d02e6SMingming Cao ext_debug("ext4_end_io_dio(): io_end 0x%p " 2872ace36ad4SJoe Perches "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 28738d5d02e6SMingming Cao iocb->private, io_end->inode->i_ino, iocb, offset, 28748d5d02e6SMingming Cao size); 28758d5d02e6SMingming Cao 2876b5a7e970STheodore Ts'o iocb->private = NULL; 2877b5a7e970STheodore Ts'o 28788d5d02e6SMingming Cao /* if not aio dio with unwritten extents, just free io and return */ 2879bd2d0210STheodore Ts'o if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
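		/* nothing to convert: drop the io_end and complete the iocb */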
28808d5d02e6SMingming Cao ext4_free_io_end(io_end); 28815b3ff237Sjiayingz@google.com (Jiaying Zhang) out: 2882091e26dfSJan Kara inode_dio_done(inode); 28835b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) 28845b3ff237Sjiayingz@google.com (Jiaying Zhang) aio_complete(iocb, ret, 0); 28855b3ff237Sjiayingz@google.com (Jiaying Zhang) return; 28868d5d02e6SMingming Cao } 28878d5d02e6SMingming Cao 28884c0425ffSMingming Cao io_end->offset = offset; 28894c0425ffSMingming Cao io_end->size = size; 28905b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) { 28915b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->iocb = iocb; 28925b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->result = ret; 28935b3ff237Sjiayingz@google.com (Jiaying Zhang) } 28944c0425ffSMingming Cao 289528a535f9SDmitry Monakhov ext4_add_complete_io(io_end); 28964c0425ffSMingming Cao } 2897c7064ef1SJiaying Zhang 28984c0425ffSMingming Cao /* 28994c0425ffSMingming Cao * For ext4 extent files, ext4 will do direct-io write to holes, 29004c0425ffSMingming Cao * preallocated extents, and writes that extend the file; no need to 29014c0425ffSMingming Cao * fall back to buffered IO. 29024c0425ffSMingming Cao * 2903b595076aSUwe Kleine-König * For holes, we fallocate those blocks, mark them as uninitialized. 290469c499d1STheodore Ts'o * If those blocks were preallocated, we make sure they are split, but 2905b595076aSUwe Kleine-König * still keep the range to write as uninitialized. 29064c0425ffSMingming Cao * 290769c499d1STheodore Ts'o * The unwritten extents will be converted to written when DIO is completed. 29088d5d02e6SMingming Cao * For async direct IO, since the IO may still be pending when we return, we 290925985edcSLucas De Marchi * set up an end_io call back function, which will do the conversion 29108d5d02e6SMingming Cao * when the async direct IO is completed. 29114c0425ffSMingming Cao * 29124c0425ffSMingming Cao * If the O_DIRECT write will extend the file then add this inode to the 29134c0425ffSMingming Cao * orphan list. So recovery will truncate it back to the original size 29144c0425ffSMingming Cao * if the machine crashes during the write. 29154c0425ffSMingming Cao * 29164c0425ffSMingming Cao */ 29174c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, 29184c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 29194c0425ffSMingming Cao unsigned long nr_segs) 29204c0425ffSMingming Cao { 29214c0425ffSMingming Cao struct file *file = iocb->ki_filp; 29224c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 29234c0425ffSMingming Cao ssize_t ret; 29244c0425ffSMingming Cao size_t count = iov_length(iov, nr_segs); 2925729f52c6SZheng Liu int overwrite = 0; 29268b0f165fSAnatol Pomozov get_block_t *get_block_func = NULL; 29278b0f165fSAnatol Pomozov int dio_flags = 0; 292869c499d1STheodore Ts'o loff_t final_size = offset + count; 292969c499d1STheodore Ts'o 293069c499d1STheodore Ts'o /* Use the old path for reads and writes beyond i_size.
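	 * (ext4_ind_direct_IO() also handles the orphan-list protection
	 * described in the comment above for size-extending writes.)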
293169c499d1STheodore Ts'o if (rw != WRITE || final_size > inode->i_size) 293269c499d1STheodore Ts'o return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 2933729f52c6SZheng Liu 29344bd809dbSZheng Liu BUG_ON(iocb->private == NULL); 29354bd809dbSZheng Liu 29364bd809dbSZheng Liu /* If we do an overwrite dio, i_mutex locking can be released */ 29374bd809dbSZheng Liu overwrite = *((int *)iocb->private); 29384bd809dbSZheng Liu 29394bd809dbSZheng Liu if (overwrite) { 29401f555cfaSDmitry Monakhov atomic_inc(&inode->i_dio_count); 29414bd809dbSZheng Liu down_read(&EXT4_I(inode)->i_data_sem); 29424bd809dbSZheng Liu mutex_unlock(&inode->i_mutex); 29434bd809dbSZheng Liu } 29444bd809dbSZheng Liu 29454c0425ffSMingming Cao /* 29468d5d02e6SMingming Cao * We could direct write to holes and fallocate. 29478d5d02e6SMingming Cao * 294869c499d1STheodore Ts'o * Allocated blocks to fill the hole are marked as 294969c499d1STheodore Ts'o * uninitialized to prevent a parallel buffered read from exposing 295069c499d1STheodore Ts'o * the stale data before DIO completes the data IO. 29518d5d02e6SMingming Cao * 295269c499d1STheodore Ts'o * As to previously fallocated extents, ext4 get_block will 295369c499d1STheodore Ts'o * simply mark the buffer mapped but still keep the 295469c499d1STheodore Ts'o * extents uninitialized. 29554c0425ffSMingming Cao * 295669c499d1STheodore Ts'o * For the non AIO case, we will convert those unwritten extents 29578d5d02e6SMingming Cao * to written after returning from blockdev_direct_IO. 29584c0425ffSMingming Cao * 295969c499d1STheodore Ts'o * For async DIO, the conversion needs to be deferred when the 296069c499d1STheodore Ts'o * IO is completed. The ext4 end_io callback function will be 296169c499d1STheodore Ts'o * called to take care of the conversion work. Here for the async 296269c499d1STheodore Ts'o * case, we allocate an io_end structure to hook to the iocb. 29634c0425ffSMingming Cao */ 29648d5d02e6SMingming Cao iocb->private = NULL; 2965f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, NULL); 29668d5d02e6SMingming Cao if (!is_sync_kiocb(iocb)) { 296769c499d1STheodore Ts'o ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS); 29684bd809dbSZheng Liu if (!io_end) { 29694bd809dbSZheng Liu ret = -ENOMEM; 29704bd809dbSZheng Liu goto retake_lock; 29714bd809dbSZheng Liu } 2972266991b1SJeff Moyer io_end->flag |= EXT4_IO_END_DIRECT; 2973266991b1SJeff Moyer iocb->private = io_end; 29748d5d02e6SMingming Cao /* 297569c499d1STheodore Ts'o * we save the io structure for the current async direct 297669c499d1STheodore Ts'o * IO, so that later ext4_map_blocks() could flag the 297769c499d1STheodore Ts'o * io structure whether there are unwritten extents that 297869c499d1STheodore Ts'o * need to be converted when the IO is completed.
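		 * ext4_inode_aio_set() below publishes the io_end so that
		 * ext4_map_blocks() can find it while this DIO is in flight.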
29798d5d02e6SMingming Cao */ 2980f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, io_end); 29818d5d02e6SMingming Cao } 29828d5d02e6SMingming Cao 29838b0f165fSAnatol Pomozov if (overwrite) { 29848b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write_nolock; 29858b0f165fSAnatol Pomozov } else { 29868b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write; 29878b0f165fSAnatol Pomozov dio_flags = DIO_LOCKING; 29888b0f165fSAnatol Pomozov } 2989729f52c6SZheng Liu ret = __blockdev_direct_IO(rw, iocb, inode, 2990729f52c6SZheng Liu inode->i_sb->s_bdev, iov, 2991729f52c6SZheng Liu offset, nr_segs, 29928b0f165fSAnatol Pomozov get_block_func, 2993729f52c6SZheng Liu ext4_end_io_dio, 2994729f52c6SZheng Liu NULL, 29958b0f165fSAnatol Pomozov dio_flags); 29968b0f165fSAnatol Pomozov 29978d5d02e6SMingming Cao if (iocb->private) 2998f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, NULL); 29998d5d02e6SMingming Cao /* 300069c499d1STheodore Ts'o * The io_end structure takes a reference to the inode; that 300169c499d1STheodore Ts'o * structure needs to be destroyed and the reference to the 300269c499d1STheodore Ts'o * inode needs to be dropped when the IO is complete, even with a 0 300369c499d1STheodore Ts'o * byte write or a failed one. 30048d5d02e6SMingming Cao * 300569c499d1STheodore Ts'o * In the successful AIO DIO case, the io_end structure will 300669c499d1STheodore Ts'o * be destroyed and the reference to the inode will be dropped 30078d5d02e6SMingming Cao * after the end_io call back function is called. 30088d5d02e6SMingming Cao * 300969c499d1STheodore Ts'o * In the 0 byte write or error case, since VFS 301069c499d1STheodore Ts'o * direct IO won't invoke the end_io call back function, we 301169c499d1STheodore Ts'o * need to free the end_io structure here.
30128d5d02e6SMingming Cao */ 30138d5d02e6SMingming Cao if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { 30148d5d02e6SMingming Cao ext4_free_io_end(iocb->private); 30158d5d02e6SMingming Cao iocb->private = NULL; 3016729f52c6SZheng Liu } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 30175f524950SMingming EXT4_STATE_DIO_UNWRITTEN)) { 3018109f5565SMingming int err; 30198d5d02e6SMingming Cao /* 30208d5d02e6SMingming Cao * for non AIO case, since the IO is already 302125985edcSLucas De Marchi * completed, we could do the conversion right here 30228d5d02e6SMingming Cao */ 3023109f5565SMingming err = ext4_convert_unwritten_extents(inode, 30248d5d02e6SMingming Cao offset, ret); 3025109f5565SMingming if (err < 0) 3026109f5565SMingming ret = err; 302719f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3028109f5565SMingming } 30294bd809dbSZheng Liu 30304bd809dbSZheng Liu retake_lock: 30314bd809dbSZheng Liu /* take i_mutex locking again if we did an overwrite dio */ 30324bd809dbSZheng Liu if (overwrite) { 30331f555cfaSDmitry Monakhov inode_dio_done(inode); 30344bd809dbSZheng Liu up_read(&EXT4_I(inode)->i_data_sem); 30354bd809dbSZheng Liu mutex_lock(&inode->i_mutex); 30364bd809dbSZheng Liu } 30374bd809dbSZheng Liu 30384c0425ffSMingming Cao return ret; 30394c0425ffSMingming Cao } 30408d5d02e6SMingming Cao 30414c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 30424c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 30434c0425ffSMingming Cao unsigned long nr_segs) 30444c0425ffSMingming Cao { 30454c0425ffSMingming Cao struct file *file = iocb->ki_filp; 30464c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 30470562e0baSJiaying Zhang ssize_t ret; 30484c0425ffSMingming Cao 304984ebd795STheodore Ts'o /* 305084ebd795STheodore Ts'o * If we are doing data journalling we don't support O_DIRECT 305184ebd795STheodore Ts'o */ 305284ebd795STheodore Ts'o if (ext4_should_journal_data(inode)) 305384ebd795STheodore Ts'o return 0; 305484ebd795STheodore Ts'o 305546c7f254STao Ma /* Let buffer I/O handle the inline data case. */ 305646c7f254STao Ma if (ext4_has_inline_data(inode)) 305746c7f254STao Ma return 0; 305846c7f254STao Ma 30590562e0baSJiaying Zhang trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); 306012e9b892SDmitry Monakhov if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 30610562e0baSJiaying Zhang ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 30620562e0baSJiaying Zhang else 30630562e0baSJiaying Zhang ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 30640562e0baSJiaying Zhang trace_ext4_direct_IO_exit(inode, offset, 30650562e0baSJiaying Zhang iov_length(iov, nr_segs), rw, ret); 30660562e0baSJiaying Zhang return ret; 30674c0425ffSMingming Cao } 30684c0425ffSMingming Cao 3069ac27a0ecSDave Kleikamp /* 3070617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling 3071ac27a0ecSDave Kleikamp * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3072ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is 3073ac27a0ecSDave Kleikamp * not necessarily locked. 3074ac27a0ecSDave Kleikamp * 3075ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the 3076ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive".
We cannot just set the buffers dirty 3077ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 3078ac27a0ecSDave Kleikamp * 3079ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 3080ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 3081ac27a0ecSDave Kleikamp */ 3082617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 3083ac27a0ecSDave Kleikamp { 3084ac27a0ecSDave Kleikamp SetPageChecked(page); 3085ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 3086ac27a0ecSDave Kleikamp } 3087ac27a0ecSDave Kleikamp 3088617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = { 3089617ba13bSMingming Cao .readpage = ext4_readpage, 3090617ba13bSMingming Cao .readpages = ext4_readpages, 309143ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3092bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3093bfc1af65SNick Piggin .write_end = ext4_ordered_write_end, 3094617ba13bSMingming Cao .bmap = ext4_bmap, 3095617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3096617ba13bSMingming Cao .releasepage = ext4_releasepage, 3097617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3098ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 30998ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3100aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3101ac27a0ecSDave Kleikamp }; 3102ac27a0ecSDave Kleikamp 3103617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = { 3104617ba13bSMingming Cao .readpage = ext4_readpage, 3105617ba13bSMingming Cao .readpages = ext4_readpages, 310643ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3107bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3108bfc1af65SNick Piggin .write_end = ext4_writeback_write_end, 3109617ba13bSMingming Cao .bmap = ext4_bmap, 3110617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3111617ba13bSMingming Cao .releasepage = ext4_releasepage, 3112617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3113ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 31148ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3115aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3116ac27a0ecSDave Kleikamp }; 3117ac27a0ecSDave Kleikamp 3118617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = { 3119617ba13bSMingming Cao .readpage = ext4_readpage, 3120617ba13bSMingming Cao .readpages = ext4_readpages, 312143ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3122bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3123bfc1af65SNick Piggin .write_end = ext4_journalled_write_end, 3124617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty, 3125617ba13bSMingming Cao .bmap = ext4_bmap, 31264520fb3cSJan Kara .invalidatepage = ext4_journalled_invalidatepage, 3127617ba13bSMingming Cao .releasepage = ext4_releasepage, 312884ebd795STheodore Ts'o .direct_IO = ext4_direct_IO, 31298ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3130aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3131ac27a0ecSDave Kleikamp }; 3132ac27a0ecSDave Kleikamp 313364769240SAlex Tomas static const struct address_space_operations ext4_da_aops = { 313464769240SAlex Tomas .readpage = ext4_readpage, 313564769240SAlex Tomas .readpages = ext4_readpages, 313643ce1d23SAneesh Kumar K.V 
.writepage = ext4_writepage, 313764769240SAlex Tomas .writepages = ext4_da_writepages, 313864769240SAlex Tomas .write_begin = ext4_da_write_begin, 313964769240SAlex Tomas .write_end = ext4_da_write_end, 314064769240SAlex Tomas .bmap = ext4_bmap, 314164769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage, 314264769240SAlex Tomas .releasepage = ext4_releasepage, 314364769240SAlex Tomas .direct_IO = ext4_direct_IO, 314464769240SAlex Tomas .migratepage = buffer_migrate_page, 31458ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3146aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 314764769240SAlex Tomas }; 314864769240SAlex Tomas 3149617ba13bSMingming Cao void ext4_set_aops(struct inode *inode) 3150ac27a0ecSDave Kleikamp { 31513d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) { 31523d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE: 31533d2b1582SLukas Czerner if (test_opt(inode->i_sb, DELALLOC)) 3154cd1aac32SAneesh Kumar K.V inode->i_mapping->a_ops = &ext4_da_aops; 3155ac27a0ecSDave Kleikamp else 31563d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_ordered_aops; 31573d2b1582SLukas Czerner break; 31583d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE: 31593d2b1582SLukas Czerner if (test_opt(inode->i_sb, DELALLOC)) 31603d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_da_aops; 31613d2b1582SLukas Czerner else 31623d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_writeback_aops; 31633d2b1582SLukas Czerner break; 31643d2b1582SLukas Czerner case EXT4_INODE_JOURNAL_DATA_MODE: 3165617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops; 31663d2b1582SLukas Czerner break; 31673d2b1582SLukas Czerner default: 31683d2b1582SLukas Czerner BUG(); 31693d2b1582SLukas Czerner } 3170ac27a0ecSDave Kleikamp } 3171ac27a0ecSDave Kleikamp 31724e96b2dbSAllison Henderson 31734e96b2dbSAllison Henderson /* 31744e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers() 31754e96b2dbSAllison Henderson * Wrapper function for ext4_discard_partial_page_buffers_no_lock. 31764e96b2dbSAllison Henderson * This function finds and locks the page containing the offset 31774e96b2dbSAllison Henderson * "from" and passes it to ext4_discard_partial_page_buffers_no_lock. 31784e96b2dbSAllison Henderson * Calling functions that already have the page locked should call 31794e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock directly. 
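 *
 * A minimal usage sketch (hypothetical caller zeroing the partial
 * page at "offset" for "length" bytes):
 *
 *	err = ext4_discard_partial_page_buffers(handle,
 *			inode->i_mapping, offset, length, 0);
 *	if (err)
 *		goto out;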
31804e96b2dbSAllison Henderson */ 31814e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle, 31824e96b2dbSAllison Henderson struct address_space *mapping, loff_t from, 31834e96b2dbSAllison Henderson loff_t length, int flags) 31844e96b2dbSAllison Henderson { 31854e96b2dbSAllison Henderson struct inode *inode = mapping->host; 31864e96b2dbSAllison Henderson struct page *page; 31874e96b2dbSAllison Henderson int err = 0; 31884e96b2dbSAllison Henderson 31894e96b2dbSAllison Henderson page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 31904e96b2dbSAllison Henderson mapping_gfp_mask(mapping) & ~__GFP_FS); 31914e96b2dbSAllison Henderson if (!page) 31925129d05fSYongqiang Yang return -ENOMEM; 31934e96b2dbSAllison Henderson 31944e96b2dbSAllison Henderson err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page, 31954e96b2dbSAllison Henderson from, length, flags); 31964e96b2dbSAllison Henderson 31974e96b2dbSAllison Henderson unlock_page(page); 31984e96b2dbSAllison Henderson page_cache_release(page); 31994e96b2dbSAllison Henderson return err; 32004e96b2dbSAllison Henderson } 32014e96b2dbSAllison Henderson 32024e96b2dbSAllison Henderson /* 32034e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock() 32044e96b2dbSAllison Henderson * Zeros a page range of length 'length' starting from offset 'from'. 32054e96b2dbSAllison Henderson * Buffer heads that correspond to the block aligned regions of the 32064e96b2dbSAllison Henderson * zeroed range will be unmapped. Non-block-aligned regions 32074e96b2dbSAllison Henderson * will have the corresponding buffer head mapped if needed so that 32084e96b2dbSAllison Henderson * that region of the page can be updated with the partial zero out. 32094e96b2dbSAllison Henderson * 32104e96b2dbSAllison Henderson * This function assumes that the page has already been locked. 32114e96b2dbSAllison Henderson * The range to be discarded must be contained within the given page. 32124e96b2dbSAllison Henderson * If the specified range exceeds the end of the page it will be shortened 32134e96b2dbSAllison Henderson * to the end of the page that corresponds to 'from'. This function is 32144e96b2dbSAllison Henderson * appropriate for updating a page and its buffer heads to be unmapped and 32154e96b2dbSAllison Henderson * zeroed for blocks that have been either released, or are going to be 32164e96b2dbSAllison Henderson * released. 32174e96b2dbSAllison Henderson * 32184e96b2dbSAllison Henderson * handle: The journal handle 32194e96b2dbSAllison Henderson * inode: The file's inode 32204e96b2dbSAllison Henderson * page: A locked page that contains the offset "from" 32214907cb7bSAnatol Pomozov * from: The starting byte offset (from the beginning of the file) 32224e96b2dbSAllison Henderson * to begin discarding 32234e96b2dbSAllison Henderson * len: The length of bytes to discard 32244e96b2dbSAllison Henderson * flags: Optional flags that may be used: 32254e96b2dbSAllison Henderson * 32264e96b2dbSAllison Henderson * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED 32274e96b2dbSAllison Henderson * Only zero the regions of the page whose buffer heads 32284e96b2dbSAllison Henderson * have already been unmapped. This flag is appropriate 32294907cb7bSAnatol Pomozov * for updating the contents of a page whose blocks may 32304e96b2dbSAllison Henderson * have already been released, and we only want to zero 32314e96b2dbSAllison Henderson * out the regions that correspond to those released blocks.
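 *
 * Worked example (assumed geometry, 4k pages and 1k blocks):
 * from == 5120 and length == 1024 select page index 1, offset 1024;
 * the range covers exactly one block, so that buffer head is
 * unmapped and the region zeroed via the block-aligned case below.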
32324e96b2dbSAllison Henderson * 32334907cb7bSAnatol Pomozov * Returns zero on success or negative on failure. 32344e96b2dbSAllison Henderson */ 32355f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, 32364e96b2dbSAllison Henderson struct inode *inode, struct page *page, loff_t from, 32374e96b2dbSAllison Henderson loff_t length, int flags) 32384e96b2dbSAllison Henderson { 32394e96b2dbSAllison Henderson ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 32404e96b2dbSAllison Henderson unsigned int offset = from & (PAGE_CACHE_SIZE-1); 32414e96b2dbSAllison Henderson unsigned int blocksize, max, pos; 32424e96b2dbSAllison Henderson ext4_lblk_t iblock; 32434e96b2dbSAllison Henderson struct buffer_head *bh; 32444e96b2dbSAllison Henderson int err = 0; 32454e96b2dbSAllison Henderson 32464e96b2dbSAllison Henderson blocksize = inode->i_sb->s_blocksize; 32474e96b2dbSAllison Henderson max = PAGE_CACHE_SIZE - offset; 32484e96b2dbSAllison Henderson 32494e96b2dbSAllison Henderson if (index != page->index) 32504e96b2dbSAllison Henderson return -EINVAL; 32514e96b2dbSAllison Henderson 32524e96b2dbSAllison Henderson /* 32534e96b2dbSAllison Henderson * correct length if it does not fall between 32544e96b2dbSAllison Henderson * 'from' and the end of the page 32554e96b2dbSAllison Henderson */ 32564e96b2dbSAllison Henderson if (length > max || length < 0) 32574e96b2dbSAllison Henderson length = max; 32584e96b2dbSAllison Henderson 32594e96b2dbSAllison Henderson iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 32604e96b2dbSAllison Henderson 3261093e6e36SYongqiang Yang if (!page_has_buffers(page)) 32624e96b2dbSAllison Henderson create_empty_buffers(page, blocksize, 0); 32634e96b2dbSAllison Henderson 32644e96b2dbSAllison Henderson /* Find the buffer that contains "offset" */ 32654e96b2dbSAllison Henderson bh = page_buffers(page); 32664e96b2dbSAllison Henderson pos = blocksize; 32674e96b2dbSAllison Henderson while (offset >= pos) { 32684e96b2dbSAllison Henderson bh = bh->b_this_page; 32694e96b2dbSAllison Henderson iblock++; 32704e96b2dbSAllison Henderson pos += blocksize; 32714e96b2dbSAllison Henderson } 32724e96b2dbSAllison Henderson 32734e96b2dbSAllison Henderson pos = offset; 32744e96b2dbSAllison Henderson while (pos < offset + length) { 3275e260daf2SYongqiang Yang unsigned int end_of_block, range_to_discard; 3276e260daf2SYongqiang Yang 32774e96b2dbSAllison Henderson err = 0; 32784e96b2dbSAllison Henderson 32794e96b2dbSAllison Henderson /* The length of space left to zero and unmap */ 32804e96b2dbSAllison Henderson range_to_discard = offset + length - pos; 32814e96b2dbSAllison Henderson 32824e96b2dbSAllison Henderson /* The length of space until the end of the block */ 32834e96b2dbSAllison Henderson end_of_block = blocksize - (pos & (blocksize-1)); 32844e96b2dbSAllison Henderson 32854e96b2dbSAllison Henderson /* 32864e96b2dbSAllison Henderson * Do not unmap or zero past end of block 32874e96b2dbSAllison Henderson * for this buffer head 32884e96b2dbSAllison Henderson */ 32894e96b2dbSAllison Henderson if (range_to_discard > end_of_block) 32904e96b2dbSAllison Henderson range_to_discard = end_of_block; 32914e96b2dbSAllison Henderson 32924e96b2dbSAllison Henderson 32934e96b2dbSAllison Henderson /* 32944e96b2dbSAllison Henderson * Skip this buffer head if we are only zeroing unmapped 32954e96b2dbSAllison Henderson * regions of the page 32964e96b2dbSAllison Henderson */ 32974e96b2dbSAllison Henderson if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
32984e96b2dbSAllison Henderson buffer_mapped(bh)) 32994e96b2dbSAllison Henderson goto next; 33004e96b2dbSAllison Henderson 33014e96b2dbSAllison Henderson /* If the range is block aligned, unmap */ 33024e96b2dbSAllison Henderson if (range_to_discard == blocksize) { 33034e96b2dbSAllison Henderson clear_buffer_dirty(bh); 33044e96b2dbSAllison Henderson bh->b_bdev = NULL; 33054e96b2dbSAllison Henderson clear_buffer_mapped(bh); 33064e96b2dbSAllison Henderson clear_buffer_req(bh); 33074e96b2dbSAllison Henderson clear_buffer_new(bh); 33084e96b2dbSAllison Henderson clear_buffer_delay(bh); 33094e96b2dbSAllison Henderson clear_buffer_unwritten(bh); 33104e96b2dbSAllison Henderson clear_buffer_uptodate(bh); 33114e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 33124e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Buffer discarded"); 33134e96b2dbSAllison Henderson goto next; 33144e96b2dbSAllison Henderson } 33154e96b2dbSAllison Henderson 33164e96b2dbSAllison Henderson /* 33174e96b2dbSAllison Henderson * If this block is not completely contained in the range 33184e96b2dbSAllison Henderson * to be discarded, then it is not going to be released. Because 33194e96b2dbSAllison Henderson * we need to keep this block, we need to make sure this part 33204e96b2dbSAllison Henderson * of the page is uptodate before we modify it by writing 33214e96b2dbSAllison Henderson * partial zeros on it. 33224e96b2dbSAllison Henderson */ 33234e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 33244e96b2dbSAllison Henderson /* 33254e96b2dbSAllison Henderson * Buffer head must be mapped before we can read 33264e96b2dbSAllison Henderson * from the block 33274e96b2dbSAllison Henderson */ 33284e96b2dbSAllison Henderson BUFFER_TRACE(bh, "unmapped"); 33294e96b2dbSAllison Henderson ext4_get_block(inode, iblock, bh, 0); 33304e96b2dbSAllison Henderson /* unmapped? It's a hole - nothing to do */ 33314e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 33324e96b2dbSAllison Henderson BUFFER_TRACE(bh, "still unmapped"); 33334e96b2dbSAllison Henderson goto next; 33344e96b2dbSAllison Henderson } 33354e96b2dbSAllison Henderson } 33364e96b2dbSAllison Henderson 33374e96b2dbSAllison Henderson /* Ok, it's mapped. Make sure it's up-to-date */ 33384e96b2dbSAllison Henderson if (PageUptodate(page)) 33394e96b2dbSAllison Henderson set_buffer_uptodate(bh); 33404e96b2dbSAllison Henderson 33414e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) { 33424e96b2dbSAllison Henderson err = -EIO; 33434e96b2dbSAllison Henderson ll_rw_block(READ, 1, &bh); 33444e96b2dbSAllison Henderson wait_on_buffer(bh); 33454e96b2dbSAllison Henderson /* Uhhuh. Read error.
Complain and punt.*/ 33464e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) 33474e96b2dbSAllison Henderson goto next; 33484e96b2dbSAllison Henderson } 33494e96b2dbSAllison Henderson 33504e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 33514e96b2dbSAllison Henderson BUFFER_TRACE(bh, "get write access"); 33524e96b2dbSAllison Henderson err = ext4_journal_get_write_access(handle, bh); 33534e96b2dbSAllison Henderson if (err) 33544e96b2dbSAllison Henderson goto next; 33554e96b2dbSAllison Henderson } 33564e96b2dbSAllison Henderson 33574e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 33584e96b2dbSAllison Henderson 33594e96b2dbSAllison Henderson err = 0; 33604e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 33614e96b2dbSAllison Henderson err = ext4_handle_dirty_metadata(handle, inode, bh); 3362decbd919STheodore Ts'o } else 33634e96b2dbSAllison Henderson mark_buffer_dirty(bh); 33644e96b2dbSAllison Henderson 33654e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Partial buffer zeroed"); 33664e96b2dbSAllison Henderson next: 33674e96b2dbSAllison Henderson bh = bh->b_this_page; 33684e96b2dbSAllison Henderson iblock++; 33694e96b2dbSAllison Henderson pos += range_to_discard; 33704e96b2dbSAllison Henderson } 33714e96b2dbSAllison Henderson 33724e96b2dbSAllison Henderson return err; 33734e96b2dbSAllison Henderson } 33744e96b2dbSAllison Henderson 337591ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode) 337691ef4cafSDuane Griffin { 337791ef4cafSDuane Griffin if (S_ISREG(inode->i_mode)) 337891ef4cafSDuane Griffin return 1; 337991ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode)) 338091ef4cafSDuane Griffin return 1; 338191ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode)) 338291ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode); 338391ef4cafSDuane Griffin return 0; 338491ef4cafSDuane Griffin } 338591ef4cafSDuane Griffin 3386ac27a0ecSDave Kleikamp /* 3387a4bb6b64SAllison Henderson * ext4_punch_hole: punches a hole in a file by releasing the blocks 3388a4bb6b64SAllison Henderson * associated with the given offset and length 3389a4bb6b64SAllison Henderson * 3390a4bb6b64SAllison Henderson * @inode: File inode 3391a4bb6b64SAllison Henderson * @offset: The offset where the hole will begin 3392a4bb6b64SAllison Henderson * @length: The length of the hole 3393a4bb6b64SAllison Henderson * 33944907cb7bSAnatol Pomozov * Returns: 0 on success or negative on failure 3395a4bb6b64SAllison Henderson */ 3396a4bb6b64SAllison Henderson 3397a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) 3398a4bb6b64SAllison Henderson { 3399a4bb6b64SAllison Henderson struct inode *inode = file->f_path.dentry->d_inode; 3400a4bb6b64SAllison Henderson if (!S_ISREG(inode->i_mode)) 340173355192SAllison Henderson return -EOPNOTSUPP; 3402a4bb6b64SAllison Henderson 34038bad6fc8SZheng Liu if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 34048bad6fc8SZheng Liu return ext4_ind_punch_hole(file, offset, length); 3405a4bb6b64SAllison Henderson 3406bab08ab9STheodore Ts'o if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) { 3407bab08ab9STheodore Ts'o /* TODO: Add support for bigalloc file systems */ 340873355192SAllison Henderson return -EOPNOTSUPP; 3409bab08ab9STheodore Ts'o } 3410bab08ab9STheodore Ts'o 3411aaddea81SZheng Liu trace_ext4_punch_hole(inode, offset, length); 3412aaddea81SZheng Liu 3413a4bb6b64SAllison Henderson return ext4_ext_punch_hole(file, offset, length); 3414a4bb6b64SAllison Henderson } 3415a4bb6b64SAllison Henderson
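/*
 * (Illustrative userspace view, assuming the standard fallocate(2)
 * entry point rather than anything defined in this file: a hole is
 * punched with
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE, so
 * punching a hole never changes i_size; blocks fully contained in the
 * byte range are released, while partial blocks at either end are
 * zeroed rather than freed.)
 */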
3416a4bb6b64SAllison Henderson /* 3417617ba13bSMingming Cao * ext4_truncate() 3418ac27a0ecSDave Kleikamp * 3419617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3420617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3421ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 3422ac27a0ecSDave Kleikamp * 342342b2aa86SJustin P. Mattock * As we work through the truncate and commit bits of it to the journal there 3424ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3425ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3426ac27a0ecSDave Kleikamp * 3427ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3428ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3429ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3430ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3431ac27a0ecSDave Kleikamp * left-to-right works OK too). 3432ac27a0ecSDave Kleikamp * 3433ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3434ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3435ac27a0ecSDave Kleikamp * 3436ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3437617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3438ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3439617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3440617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3441ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3442617ba13bSMingming Cao * ext4_truncate() run will find them and release them. 
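 *
 * (Worked example, for illustration only: shrinking a file from 10MB
 * to 1MB may span several transactions. If we crash midway, journal
 * replay restores the last committed, consistent tree, and the orphan
 * list pass then re-runs ext4_truncate(), freeing whatever
 * instantiated blocks still sit past the 1MB truncation point.)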
3443ac27a0ecSDave Kleikamp */ 3444617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3445ac27a0ecSDave Kleikamp { 34460562e0baSJiaying Zhang trace_ext4_truncate_enter(inode); 34470562e0baSJiaying Zhang 344891ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3449ac27a0ecSDave Kleikamp return; 3450ac27a0ecSDave Kleikamp 345112e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3452c8d46e41SJiaying Zhang 34535534fb5bSTheodore Ts'o if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 345419f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 34557d8f9f7dSTheodore Ts'o 3456aef1c851STao Ma if (ext4_has_inline_data(inode)) { 3457aef1c851STao Ma int has_inline = 1; 3458aef1c851STao Ma 3459aef1c851STao Ma ext4_inline_data_truncate(inode, &has_inline); 3460aef1c851STao Ma if (has_inline) 3461aef1c851STao Ma return; 3462aef1c851STao Ma } 3463aef1c851STao Ma 3464ff9893dcSAmir Goldstein if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3465cf108bcaSJan Kara ext4_ext_truncate(inode); 3466ff9893dcSAmir Goldstein else 3467ff9893dcSAmir Goldstein ext4_ind_truncate(inode); 3468a86c6181SAlex Tomas 34690562e0baSJiaying Zhang trace_ext4_truncate_exit(inode); 3470ac27a0ecSDave Kleikamp } 3471ac27a0ecSDave Kleikamp 3472ac27a0ecSDave Kleikamp /* 3473617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3474ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3475ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3476ac27a0ecSDave Kleikamp * inode. 3477ac27a0ecSDave Kleikamp */ 3478617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3479617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3480ac27a0ecSDave Kleikamp { 3481240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3482ac27a0ecSDave Kleikamp struct buffer_head *bh; 3483240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3484240799cdSTheodore Ts'o ext4_fsblk_t block; 3485240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3486ac27a0ecSDave Kleikamp 34873a06d778SAneesh Kumar K.V iloc->bh = NULL; 3488240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3489ac27a0ecSDave Kleikamp return -EIO; 3490ac27a0ecSDave Kleikamp 3491240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3492240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3493240799cdSTheodore Ts'o if (!gdp) 3494240799cdSTheodore Ts'o return -EIO; 3495240799cdSTheodore Ts'o 3496240799cdSTheodore Ts'o /* 3497240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 3498240799cdSTheodore Ts'o */ 349900d09882STao Ma inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 3500240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 3501240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 3502240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3503240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3504240799cdSTheodore Ts'o 3505240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 3506aebf0243SWang Shilong if (unlikely(!bh)) 3507860d21e2STheodore Ts'o return -ENOMEM; 3508ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3509ac27a0ecSDave Kleikamp lock_buffer(bh); 35109c83a923SHidehiro Kawai 35119c83a923SHidehiro Kawai /* 35129c83a923SHidehiro Kawai * If the buffer has the write error flag, 
we have failed 35139c83a923SHidehiro Kawai * to write out another inode in the same block. In this 35149c83a923SHidehiro Kawai * case, we don't have to read the block because we may 35159c83a923SHidehiro Kawai * read the old inode data successfully. 35169c83a923SHidehiro Kawai */ 35179c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 35189c83a923SHidehiro Kawai set_buffer_uptodate(bh); 35199c83a923SHidehiro Kawai 3520ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3521ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3522ac27a0ecSDave Kleikamp unlock_buffer(bh); 3523ac27a0ecSDave Kleikamp goto has_buffer; 3524ac27a0ecSDave Kleikamp } 3525ac27a0ecSDave Kleikamp 3526ac27a0ecSDave Kleikamp /* 3527ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3528ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3529ac27a0ecSDave Kleikamp * block. 3530ac27a0ecSDave Kleikamp */ 3531ac27a0ecSDave Kleikamp if (in_mem) { 3532ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3533240799cdSTheodore Ts'o int i, start; 3534ac27a0ecSDave Kleikamp 3535240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 3536ac27a0ecSDave Kleikamp 3537ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? */ 3538240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3539aebf0243SWang Shilong if (unlikely(!bitmap_bh)) 3540ac27a0ecSDave Kleikamp goto make_io; 3541ac27a0ecSDave Kleikamp 3542ac27a0ecSDave Kleikamp /* 3543ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3544ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3545ac27a0ecSDave Kleikamp * of one, so skip it. 3546ac27a0ecSDave Kleikamp */ 3547ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3548ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3549ac27a0ecSDave Kleikamp goto make_io; 3550ac27a0ecSDave Kleikamp } 3551240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 3552ac27a0ecSDave Kleikamp if (i == inode_offset) 3553ac27a0ecSDave Kleikamp continue; 3554617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3555ac27a0ecSDave Kleikamp break; 3556ac27a0ecSDave Kleikamp } 3557ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3558240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 3559ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3560ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3561ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3562ac27a0ecSDave Kleikamp unlock_buffer(bh); 3563ac27a0ecSDave Kleikamp goto has_buffer; 3564ac27a0ecSDave Kleikamp } 3565ac27a0ecSDave Kleikamp } 3566ac27a0ecSDave Kleikamp 3567ac27a0ecSDave Kleikamp make_io: 3568ac27a0ecSDave Kleikamp /* 3569240799cdSTheodore Ts'o * If we need to do any I/O, try to pre-readahead extra 3570240799cdSTheodore Ts'o * blocks from the inode table. 
3571240799cdSTheodore Ts'o */ 3572240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 3573240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 3574240799cdSTheodore Ts'o unsigned num; 3575240799cdSTheodore Ts'o 3576240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 3577b713a5ecSTheodore Ts'o /* s_inode_readahead_blks is always a power of 2 */ 3578240799cdSTheodore Ts'o b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 3579240799cdSTheodore Ts'o if (table > b) 3580240799cdSTheodore Ts'o b = table; 3581240799cdSTheodore Ts'o end = b + EXT4_SB(sb)->s_inode_readahead_blks; 3582240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 3583feb0ab32SDarrick J. Wong if (ext4_has_group_desc_csum(sb)) 3584560671a0SAneesh Kumar K.V num -= ext4_itable_unused_count(sb, gdp); 3585240799cdSTheodore Ts'o table += num / inodes_per_block; 3586240799cdSTheodore Ts'o if (end > table) 3587240799cdSTheodore Ts'o end = table; 3588240799cdSTheodore Ts'o while (b <= end) 3589240799cdSTheodore Ts'o sb_breadahead(sb, b++); 3590240799cdSTheodore Ts'o } 3591240799cdSTheodore Ts'o 3592240799cdSTheodore Ts'o /* 3593ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 3594ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 3595ac27a0ecSDave Kleikamp * Read the block from disk. 3596ac27a0ecSDave Kleikamp */ 35970562e0baSJiaying Zhang trace_ext4_load_inode(inode); 3598ac27a0ecSDave Kleikamp get_bh(bh); 3599ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 360065299a3bSChristoph Hellwig submit_bh(READ | REQ_META | REQ_PRIO, bh); 3601ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3602ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3603c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3604c398eda0STheodore Ts'o "unable to read itable block"); 3605ac27a0ecSDave Kleikamp brelse(bh); 3606ac27a0ecSDave Kleikamp return -EIO; 3607ac27a0ecSDave Kleikamp } 3608ac27a0ecSDave Kleikamp } 3609ac27a0ecSDave Kleikamp has_buffer: 3610ac27a0ecSDave Kleikamp iloc->bh = bh; 3611ac27a0ecSDave Kleikamp return 0; 3612ac27a0ecSDave Kleikamp } 3613ac27a0ecSDave Kleikamp 3614617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 3615ac27a0ecSDave Kleikamp { 3616ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. 
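 * (Hence in_mem below is !EXT4_STATE_XATTR: an inode carrying in-inode
 * xattrs cannot be rebuilt from memory alone, so its block must be
 * read from disk.)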
*/ 3617617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 361819f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 3619ac27a0ecSDave Kleikamp } 3620ac27a0ecSDave Kleikamp 3621617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 3622ac27a0ecSDave Kleikamp { 3623617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 3624ac27a0ecSDave Kleikamp 3625ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 3626617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 3627ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 3628617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 3629ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 3630617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 3631ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 3632617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 3633ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 3634617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 3635ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 3636ac27a0ecSDave Kleikamp } 3637ac27a0ecSDave Kleikamp 3638ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 3639ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 3640ff9ddf7eSJan Kara { 364184a8dce2SDmitry Monakhov unsigned int vfs_fl; 364284a8dce2SDmitry Monakhov unsigned long old_fl, new_fl; 3643ff9ddf7eSJan Kara 364484a8dce2SDmitry Monakhov do { 364584a8dce2SDmitry Monakhov vfs_fl = ei->vfs_inode.i_flags; 364684a8dce2SDmitry Monakhov old_fl = ei->i_flags; 364784a8dce2SDmitry Monakhov new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 364884a8dce2SDmitry Monakhov EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| 364984a8dce2SDmitry Monakhov EXT4_DIRSYNC_FL); 365084a8dce2SDmitry Monakhov if (vfs_fl & S_SYNC) 365184a8dce2SDmitry Monakhov new_fl |= EXT4_SYNC_FL; 365284a8dce2SDmitry Monakhov if (vfs_fl & S_APPEND) 365384a8dce2SDmitry Monakhov new_fl |= EXT4_APPEND_FL; 365484a8dce2SDmitry Monakhov if (vfs_fl & S_IMMUTABLE) 365584a8dce2SDmitry Monakhov new_fl |= EXT4_IMMUTABLE_FL; 365684a8dce2SDmitry Monakhov if (vfs_fl & S_NOATIME) 365784a8dce2SDmitry Monakhov new_fl |= EXT4_NOATIME_FL; 365884a8dce2SDmitry Monakhov if (vfs_fl & S_DIRSYNC) 365984a8dce2SDmitry Monakhov new_fl |= EXT4_DIRSYNC_FL; 366084a8dce2SDmitry Monakhov } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); 3661ff9ddf7eSJan Kara } 3662de9a55b8STheodore Ts'o 36630fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 36640fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 36650fc1b451SAneesh Kumar K.V { 36660fc1b451SAneesh Kumar K.V blkcnt_t i_blocks ; 36678180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 36688180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 36690fc1b451SAneesh Kumar K.V 36700fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 36710fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 36720fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 36730fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 36740fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 367507a03824STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 36768180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 36778180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 36788180a562SAneesh Kumar K.V } else { 36790fc1b451SAneesh Kumar K.V return i_blocks; 36808180a562SAneesh Kumar K.V } 
36810fc1b451SAneesh Kumar K.V } else { 36820fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 36830fc1b451SAneesh Kumar K.V } 36840fc1b451SAneesh Kumar K.V } 3685ff9ddf7eSJan Kara 3686152a7b0aSTao Ma static inline void ext4_iget_extra_inode(struct inode *inode, 3687152a7b0aSTao Ma struct ext4_inode *raw_inode, 3688152a7b0aSTao Ma struct ext4_inode_info *ei) 3689152a7b0aSTao Ma { 3690152a7b0aSTao Ma __le32 *magic = (void *)raw_inode + 3691152a7b0aSTao Ma EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 369267cf5b09STao Ma if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 3693152a7b0aSTao Ma ext4_set_inode_state(inode, EXT4_STATE_XATTR); 369467cf5b09STao Ma ext4_find_inline_data_nolock(inode); 3695f19d5870STao Ma } else 3696f19d5870STao Ma EXT4_I(inode)->i_inline_off = 0; 3697152a7b0aSTao Ma } 3698152a7b0aSTao Ma 36991d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 3700ac27a0ecSDave Kleikamp { 3701617ba13bSMingming Cao struct ext4_iloc iloc; 3702617ba13bSMingming Cao struct ext4_inode *raw_inode; 37031d1fe1eeSDavid Howells struct ext4_inode_info *ei; 37041d1fe1eeSDavid Howells struct inode *inode; 3705b436b9beSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 37061d1fe1eeSDavid Howells long ret; 3707ac27a0ecSDave Kleikamp int block; 370808cefc7aSEric W. Biederman uid_t i_uid; 370908cefc7aSEric W. Biederman gid_t i_gid; 3710ac27a0ecSDave Kleikamp 37111d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 37121d1fe1eeSDavid Howells if (!inode) 37131d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 37141d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 37151d1fe1eeSDavid Howells return inode; 37161d1fe1eeSDavid Howells 37171d1fe1eeSDavid Howells ei = EXT4_I(inode); 37187dc57615SPeter Huewe iloc.bh = NULL; 3719ac27a0ecSDave Kleikamp 37201d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 37211d1fe1eeSDavid Howells if (ret < 0) 3722ac27a0ecSDave Kleikamp goto bad_inode; 3723617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 3724814525f4SDarrick J. Wong 3725814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3726814525f4SDarrick J. Wong ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 3727814525f4SDarrick J. Wong if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 3728814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)) { 3729814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)", 3730814525f4SDarrick J. Wong EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize, 3731814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)); 3732814525f4SDarrick J. Wong ret = -EIO; 3733814525f4SDarrick J. Wong goto bad_inode; 3734814525f4SDarrick J. Wong } 3735814525f4SDarrick J. Wong } else 3736814525f4SDarrick J. Wong ei->i_extra_isize = 0; 3737814525f4SDarrick J. Wong 3738814525f4SDarrick J. Wong /* Precompute checksum seed for inode metadata */ 3739814525f4SDarrick J. Wong if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3740814525f4SDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { 3741814525f4SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3742814525f4SDarrick J. Wong __u32 csum; 3743814525f4SDarrick J. Wong __le32 inum = cpu_to_le32(inode->i_ino); 3744814525f4SDarrick J. Wong __le32 gen = raw_inode->i_generation; 3745814525f4SDarrick J. Wong csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 3746814525f4SDarrick J. Wong sizeof(inum)); 3747814525f4SDarrick J. Wong ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 3748814525f4SDarrick J. 
Wong sizeof(gen)); 3749814525f4SDarrick J. Wong } 3750814525f4SDarrick J. Wong 3751814525f4SDarrick J. Wong if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 3752814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "checksum invalid"); 3753814525f4SDarrick J. Wong ret = -EIO; 3754814525f4SDarrick J. Wong goto bad_inode; 3755814525f4SDarrick J. Wong } 3756814525f4SDarrick J. Wong 3757ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 375808cefc7aSEric W. Biederman i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 375908cefc7aSEric W. Biederman i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 3760ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 376108cefc7aSEric W. Biederman i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 376208cefc7aSEric W. Biederman i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 3763ac27a0ecSDave Kleikamp } 376408cefc7aSEric W. Biederman i_uid_write(inode, i_uid); 376508cefc7aSEric W. Biederman i_gid_write(inode, i_gid); 3766bfe86848SMiklos Szeredi set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 3767ac27a0ecSDave Kleikamp 3768353eb83cSTheodore Ts'o ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 376967cf5b09STao Ma ei->i_inline_off = 0; 3770ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 3771ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 3772ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 3773ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes 3774ac27a0ecSDave Kleikamp * the test is that same one that e2fsck uses 3775ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 3776ac27a0ecSDave Kleikamp */ 3777ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 3778ac27a0ecSDave Kleikamp if (inode->i_mode == 0 || 3779617ba13bSMingming Cao !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 3780ac27a0ecSDave Kleikamp /* this inode is deleted */ 37811d1fe1eeSDavid Howells ret = -ESTALE; 3782ac27a0ecSDave Kleikamp goto bad_inode; 3783ac27a0ecSDave Kleikamp } 3784ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 3785ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 3786ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 3787ac27a0ecSDave Kleikamp * the process of deleting those. */ 3788ac27a0ecSDave Kleikamp } 3789ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 37900fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 37917973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 3792a9e81742STheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 3793a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 3794a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 3795a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 3796ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3797a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 3798a9e7f447SDmitry Monakhov ei->i_reserved_quota = 0; 3799a9e7f447SDmitry Monakhov #endif 3800ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 3801ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 3802a4912123STheodore Ts'o ei->i_last_alloc_group = ~0; 3803ac27a0ecSDave Kleikamp /* 3804ac27a0ecSDave Kleikamp * NOTE! 
The in-memory inode i_data array is in little-endian order 3805ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 3806ac27a0ecSDave Kleikamp */ 3807617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 3808ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 3809ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 3810ac27a0ecSDave Kleikamp 3811b436b9beSJan Kara /* 3812b436b9beSJan Kara * Set transaction id's of transactions that have to be committed 3813b436b9beSJan Kara * to finish f[data]sync. We set them to currently running transaction 3814b436b9beSJan Kara * as we cannot be sure that the inode or some of its metadata isn't 3815b436b9beSJan Kara * part of the transaction - the inode could have been reclaimed and 3816b436b9beSJan Kara * now it is reread from disk. 3817b436b9beSJan Kara */ 3818b436b9beSJan Kara if (journal) { 3819b436b9beSJan Kara transaction_t *transaction; 3820b436b9beSJan Kara tid_t tid; 3821b436b9beSJan Kara 3822a931da6aSTheodore Ts'o read_lock(&journal->j_state_lock); 3823b436b9beSJan Kara if (journal->j_running_transaction) 3824b436b9beSJan Kara transaction = journal->j_running_transaction; 3825b436b9beSJan Kara else 3826b436b9beSJan Kara transaction = journal->j_committing_transaction; 3827b436b9beSJan Kara if (transaction) 3828b436b9beSJan Kara tid = transaction->t_tid; 3829b436b9beSJan Kara else 3830b436b9beSJan Kara tid = journal->j_commit_sequence; 3831a931da6aSTheodore Ts'o read_unlock(&journal->j_state_lock); 3832b436b9beSJan Kara ei->i_sync_tid = tid; 3833b436b9beSJan Kara ei->i_datasync_tid = tid; 3834b436b9beSJan Kara } 3835b436b9beSJan Kara 38360040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3837ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 3838ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. */ 3839617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 3840617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 3841ac27a0ecSDave Kleikamp } else { 3842152a7b0aSTao Ma ext4_iget_extra_inode(inode, raw_inode, ei); 3843ac27a0ecSDave Kleikamp } 3844814525f4SDarrick J. 
Wong } 3845ac27a0ecSDave Kleikamp 3846ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 3847ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 3848ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 3849ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 3850ef7f3835SKalpak Shah 385125ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 385225ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 385325ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 385425ec56b5SJean Noel Cordenner inode->i_version |= 385525ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 385625ec56b5SJean Noel Cordenner } 385725ec56b5SJean Noel Cordenner 3858c4b5a614STheodore Ts'o ret = 0; 3859485c26ecSTheodore Ts'o if (ei->i_file_acl && 38601032988cSTheodore Ts'o !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 386124676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 386224676da4STheodore Ts'o ei->i_file_acl); 3863485c26ecSTheodore Ts'o ret = -EIO; 3864485c26ecSTheodore Ts'o goto bad_inode; 3865f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 3866f19d5870STao Ma if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 3867f19d5870STao Ma if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 3868c4b5a614STheodore Ts'o (S_ISLNK(inode->i_mode) && 3869f19d5870STao Ma !ext4_inode_is_fast_symlink(inode)))) 38707a262f7cSAneesh Kumar K.V /* Validate extent which is part of inode */ 38717a262f7cSAneesh Kumar K.V ret = ext4_ext_check_inode(inode); 3872fe2c8191SThiemo Nagel } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 3873fe2c8191SThiemo Nagel (S_ISLNK(inode->i_mode) && 3874fe2c8191SThiemo Nagel !ext4_inode_is_fast_symlink(inode))) { 3875fe2c8191SThiemo Nagel /* Validate block references which are part of inode */ 38761f7d1e77STheodore Ts'o ret = ext4_ind_check_inode(inode); 3877fe2c8191SThiemo Nagel } 3878f19d5870STao Ma } 3879567f3e9aSTheodore Ts'o if (ret) 38807a262f7cSAneesh Kumar K.V goto bad_inode; 38817a262f7cSAneesh Kumar K.V 3882ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 3883617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 3884617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 3885617ba13bSMingming Cao ext4_set_aops(inode); 3886ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 3887617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 3888617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 3889ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 3890e83c1397SDuane Griffin if (ext4_inode_is_fast_symlink(inode)) { 3891617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 3892e83c1397SDuane Griffin nd_terminate_link(ei->i_data, inode->i_size, 3893e83c1397SDuane Griffin sizeof(ei->i_data) - 1); 3894e83c1397SDuane Griffin } else { 3895617ba13bSMingming Cao inode->i_op = &ext4_symlink_inode_operations; 3896617ba13bSMingming Cao ext4_set_aops(inode); 3897ac27a0ecSDave Kleikamp } 3898563bdd61STheodore Ts'o } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 3899563bdd61STheodore Ts'o S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 3900617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 3901ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 3902ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3903ac27a0ecSDave Kleikamp 
old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 3904ac27a0ecSDave Kleikamp else 3905ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3906ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 3907563bdd61STheodore Ts'o } else { 3908563bdd61STheodore Ts'o ret = -EIO; 390924676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 3910563bdd61STheodore Ts'o goto bad_inode; 3911ac27a0ecSDave Kleikamp } 3912ac27a0ecSDave Kleikamp brelse(iloc.bh); 3913617ba13bSMingming Cao ext4_set_inode_flags(inode); 39141d1fe1eeSDavid Howells unlock_new_inode(inode); 39151d1fe1eeSDavid Howells return inode; 3916ac27a0ecSDave Kleikamp 3917ac27a0ecSDave Kleikamp bad_inode: 3918567f3e9aSTheodore Ts'o brelse(iloc.bh); 39191d1fe1eeSDavid Howells iget_failed(inode); 39201d1fe1eeSDavid Howells return ERR_PTR(ret); 3921ac27a0ecSDave Kleikamp } 3922ac27a0ecSDave Kleikamp 39230fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 39240fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 39250fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 39260fc1b451SAneesh Kumar K.V { 39270fc1b451SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 39280fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks; 39290fc1b451SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 39300fc1b451SAneesh Kumar K.V 39310fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) { 39320fc1b451SAneesh Kumar K.V /* 39334907cb7bSAnatol Pomozov * i_blocks can be represented in a 32 bit variable 39340fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 39350fc1b451SAneesh Kumar K.V */ 39368180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39370fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0; 393884a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 3939f287a1a5STheodore Ts'o return 0; 3940f287a1a5STheodore Ts'o } 3941f287a1a5STheodore Ts'o if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) 3942f287a1a5STheodore Ts'o return -EFBIG; 3943f287a1a5STheodore Ts'o 3944f287a1a5STheodore Ts'o if (i_blocks <= 0xffffffffffffULL) { 39450fc1b451SAneesh Kumar K.V /* 39460fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable 39470fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 39480fc1b451SAneesh Kumar K.V */ 39498180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39500fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 395184a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 39520fc1b451SAneesh Kumar K.V } else { 395384a8dce2SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 39548180a562SAneesh Kumar K.V /* i_block is stored in file system block size */ 39558180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9); 39568180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39578180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 39580fc1b451SAneesh Kumar K.V } 3959f287a1a5STheodore Ts'o return 0; 39600fc1b451SAneesh Kumar K.V } 39610fc1b451SAneesh Kumar K.V 3962ac27a0ecSDave Kleikamp /* 3963ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the 3964ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the 3965ac27a0ecSDave Kleikamp * buffer_head in the inode location struct. 
3966ac27a0ecSDave Kleikamp * 3967ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh. 3968ac27a0ecSDave Kleikamp */ 3969617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle, 3970ac27a0ecSDave Kleikamp struct inode *inode, 3971830156c7SFrank Mayhar struct ext4_iloc *iloc) 3972ac27a0ecSDave Kleikamp { 3973617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 3974617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 3975ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh; 3976ac27a0ecSDave Kleikamp int err = 0, rc, block; 3977b71fc079SJan Kara int need_datasync = 0; 397808cefc7aSEric W. Biederman uid_t i_uid; 397908cefc7aSEric W. Biederman gid_t i_gid; 3980ac27a0ecSDave Kleikamp 3981ac27a0ecSDave Kleikamp /* For fields not tracked in the in-memory inode, 3982ac27a0ecSDave Kleikamp * initialise them to zero for new inodes. */ 398319f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 3984617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 3985ac27a0ecSDave Kleikamp 3986ff9ddf7eSJan Kara ext4_get_inode_flags(ei); 3987ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode); 398808cefc7aSEric W. Biederman i_uid = i_uid_read(inode); 398908cefc7aSEric W. Biederman i_gid = i_gid_read(inode); 3990ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 399108cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 399208cefc7aSEric W. Biederman raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 3993ac27a0ecSDave Kleikamp /* 3994ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels. Otherwise, old inodes get 3995ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact 3996ac27a0ecSDave Kleikamp */ 3997ac27a0ecSDave Kleikamp if (!ei->i_dtime) { 3998ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 399908cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_uid)); 4000ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 400108cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_gid)); 4002ac27a0ecSDave Kleikamp } else { 4003ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4004ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4005ac27a0ecSDave Kleikamp } 4006ac27a0ecSDave Kleikamp } else { 400708cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 400808cefc7aSEric W.
Biederman raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 4009ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4010ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4011ac27a0ecSDave Kleikamp } 4012ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4013ef7f3835SKalpak Shah 4014ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4015ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4016ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4017ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4018ef7f3835SKalpak Shah 40190fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 40200fc1b451SAneesh Kumar K.V goto out_brelse; 4021ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4022353eb83cSTheodore Ts'o raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 40239b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 40249b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 4025a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 4026a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 40277973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4028b71fc079SJan Kara if (ei->i_disksize != ext4_isize(raw_inode)) { 4029a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 4030b71fc079SJan Kara need_datasync = 1; 4031b71fc079SJan Kara } 4032ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 4033ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 4034617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4035617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4036617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 4037617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4038ac27a0ecSDave Kleikamp /* If this is the first large file 4039ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
4040ac27a0ecSDave Kleikamp */ 4041617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, 4042617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 4043ac27a0ecSDave Kleikamp if (err) 4044ac27a0ecSDave Kleikamp goto out_brelse; 4045617ba13bSMingming Cao ext4_update_dynamic_rev(sb); 4046617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb, 4047617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 40480390131bSFrank Mayhar ext4_handle_sync(handle); 4049b50924c2SArtem Bityutskiy err = ext4_handle_dirty_super(handle, sb); 4050ac27a0ecSDave Kleikamp } 4051ac27a0ecSDave Kleikamp } 4052ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4053ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 4054ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) { 4055ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 4056ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev)); 4057ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0; 4058ac27a0ecSDave Kleikamp } else { 4059ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0; 4060ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 4061ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev)); 4062ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0; 4063ac27a0ecSDave Kleikamp } 4064f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 4065de9a55b8STheodore Ts'o for (block = 0; block < EXT4_N_BLOCKS; block++) 4066ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block]; 4067f19d5870STao Ma } 4068ac27a0ecSDave Kleikamp 406925ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 407025ec56b5SJean Noel Cordenner if (ei->i_extra_isize) { 407125ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 407225ec56b5SJean Noel Cordenner raw_inode->i_version_hi = 407325ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32); 4074ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 407525ec56b5SJean Noel Cordenner } 407625ec56b5SJean Noel Cordenner 4077814525f4SDarrick J. Wong ext4_inode_csum_set(inode, raw_inode, ei); 4078814525f4SDarrick J. Wong 40790390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 408073b50c1cSCurt Wohlgemuth rc = ext4_handle_dirty_metadata(handle, NULL, bh); 4081ac27a0ecSDave Kleikamp if (!err) 4082ac27a0ecSDave Kleikamp err = rc; 408319f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_NEW); 4084ac27a0ecSDave Kleikamp 4085b71fc079SJan Kara ext4_update_inode_fsync_trans(handle, inode, need_datasync); 4086ac27a0ecSDave Kleikamp out_brelse: 4087ac27a0ecSDave Kleikamp brelse(bh); 4088617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4089ac27a0ecSDave Kleikamp return err; 4090ac27a0ecSDave Kleikamp } 4091ac27a0ecSDave Kleikamp 4092ac27a0ecSDave Kleikamp /* 4093617ba13bSMingming Cao * ext4_write_inode() 4094ac27a0ecSDave Kleikamp * 4095ac27a0ecSDave Kleikamp * We are called from a few places: 4096ac27a0ecSDave Kleikamp * 4097ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files. 4098ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running 40994907cb7bSAnatol Pomozov * transaction to commit. 4100ac27a0ecSDave Kleikamp * 4101ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such. 4102ac27a0ecSDave Kleikamp * We wait on commit, if told to. 4103ac27a0ecSDave Kleikamp * 4104ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true) 4105ac27a0ecSDave Kleikamp * Here we simply return.
We can't afford to block kswapd on the 4106ac27a0ecSDave Kleikamp * journal commit. 4107ac27a0ecSDave Kleikamp * 4108ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything, 4109ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in 4110617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4111ac27a0ecSDave Kleikamp * knfsd. 4112ac27a0ecSDave Kleikamp * 4113ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the 4114ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in 4115ac27a0ecSDave Kleikamp * which we are interested. 4116ac27a0ecSDave Kleikamp * 4117ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code: 4118ac27a0ecSDave Kleikamp * 4119ac27a0ecSDave Kleikamp * mark_inode_dirty(inode) 4120ac27a0ecSDave Kleikamp * stuff(); 4121ac27a0ecSDave Kleikamp * inode->i_size = expr; 4122ac27a0ecSDave Kleikamp * 4123ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while 4124ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode 4125ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list. 4126ac27a0ecSDave Kleikamp */ 4127a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 4128ac27a0ecSDave Kleikamp { 412991ac6f43SFrank Mayhar int err; 413091ac6f43SFrank Mayhar 4131ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC) 4132ac27a0ecSDave Kleikamp return 0; 4133ac27a0ecSDave Kleikamp 413491ac6f43SFrank Mayhar if (EXT4_SB(inode->i_sb)->s_journal) { 4135617ba13bSMingming Cao if (ext4_journal_current_handle()) { 4136b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4137ac27a0ecSDave Kleikamp dump_stack(); 4138ac27a0ecSDave Kleikamp return -EIO; 4139ac27a0ecSDave Kleikamp } 4140ac27a0ecSDave Kleikamp 4141a9185b41SChristoph Hellwig if (wbc->sync_mode != WB_SYNC_ALL) 4142ac27a0ecSDave Kleikamp return 0; 4143ac27a0ecSDave Kleikamp 414491ac6f43SFrank Mayhar err = ext4_force_commit(inode->i_sb); 414591ac6f43SFrank Mayhar } else { 414691ac6f43SFrank Mayhar struct ext4_iloc iloc; 414791ac6f43SFrank Mayhar 41488b472d73SCurt Wohlgemuth err = __ext4_get_inode_loc(inode, &iloc, 0); 414991ac6f43SFrank Mayhar if (err) 415091ac6f43SFrank Mayhar return err; 4151a9185b41SChristoph Hellwig if (wbc->sync_mode == WB_SYNC_ALL) 4152830156c7SFrank Mayhar sync_dirty_buffer(iloc.bh); 4153830156c7SFrank Mayhar if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 4154c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 4155c398eda0STheodore Ts'o "IO error syncing inode"); 4156830156c7SFrank Mayhar err = -EIO; 4157830156c7SFrank Mayhar } 4158fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 415991ac6f43SFrank Mayhar } 416091ac6f43SFrank Mayhar return err; 4161ac27a0ecSDave Kleikamp } 4162ac27a0ecSDave Kleikamp 4163ac27a0ecSDave Kleikamp /* 416453e87268SJan Kara * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate 416553e87268SJan Kara * buffers that are attached to a page straddling i_size and are undergoing 416653e87268SJan Kara * commit. In that case we have to wait for commit to finish and try again.
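 * (The loop below keeps retrying as long as
 * __ext4_journalled_invalidatepage() returns -EBUSY, waiting on the
 * committing transaction's tid between attempts.)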
416753e87268SJan Kara */ 416853e87268SJan Kara static void ext4_wait_for_tail_page_commit(struct inode *inode) 416953e87268SJan Kara { 417053e87268SJan Kara struct page *page; 417153e87268SJan Kara unsigned offset; 417253e87268SJan Kara journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 417353e87268SJan Kara tid_t commit_tid = 0; 417453e87268SJan Kara int ret; 417553e87268SJan Kara 417653e87268SJan Kara offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 417753e87268SJan Kara /* 417853e87268SJan Kara * All buffers in the last page remain valid? Then there's nothing to 417953e87268SJan Kara * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == 418053e87268SJan Kara * blocksize case. 418153e87268SJan Kara */ 418253e87268SJan Kara if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits)) 418353e87268SJan Kara return; 418453e87268SJan Kara while (1) { 418553e87268SJan Kara page = find_lock_page(inode->i_mapping, 418653e87268SJan Kara inode->i_size >> PAGE_CACHE_SHIFT); 418753e87268SJan Kara if (!page) 418853e87268SJan Kara return; 418953e87268SJan Kara ret = __ext4_journalled_invalidatepage(page, offset); 419053e87268SJan Kara unlock_page(page); 419153e87268SJan Kara page_cache_release(page); 419253e87268SJan Kara if (ret != -EBUSY) 419353e87268SJan Kara return; 419453e87268SJan Kara commit_tid = 0; 419553e87268SJan Kara read_lock(&journal->j_state_lock); 419653e87268SJan Kara if (journal->j_committing_transaction) 419753e87268SJan Kara commit_tid = journal->j_committing_transaction->t_tid; 419853e87268SJan Kara read_unlock(&journal->j_state_lock); 419953e87268SJan Kara if (commit_tid) 420053e87268SJan Kara jbd2_log_wait_commit(journal, commit_tid); 420153e87268SJan Kara } 420253e87268SJan Kara } 420353e87268SJan Kara 420453e87268SJan Kara /* 4205617ba13bSMingming Cao * ext4_setattr() 4206ac27a0ecSDave Kleikamp * 4207ac27a0ecSDave Kleikamp * Called from notify_change. 4208ac27a0ecSDave Kleikamp * 4209ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as 4210ac27a0ecSDave Kleikamp * possible. In particular, we want to make sure that when the VFS 4211ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify 4212ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of 4213ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any 4214ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on 4215ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will 4216ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will 4217ac27a0ecSDave Kleikamp * leave these blocks visible to the user.) 4218ac27a0ecSDave Kleikamp * 4219678aaf48SJan Kara * Another thing we have to ensure is that if we are in ordered mode 4220678aaf48SJan Kara * and inode is still attached to the committing transaction, we must 4221678aaf48SJan Kara * start writeout of all the dirty pages which are being truncated. 4222678aaf48SJan Kara * This way we are sure that all the data written in the previous 4223678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under 4224678aaf48SJan Kara * writeback). 4225678aaf48SJan Kara * 4226678aaf48SJan Kara * Called with inode->i_mutex down.
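 *
 * (Illustrative ordering for a size-shrinking setattr, as implemented
 * below: add the inode to the orphan list and update i_disksize in one
 * transaction, write out ordered data if needed, then shrink the page
 * cache and truncate the blocks; a crash at any point leaves an orphan
 * that recovery finishes truncating.)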
4227ac27a0ecSDave Kleikamp */ 4228617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr) 4229ac27a0ecSDave Kleikamp { 4230ac27a0ecSDave Kleikamp struct inode *inode = dentry->d_inode; 4231ac27a0ecSDave Kleikamp int error, rc = 0; 42323d287de3SDmitry Monakhov int orphan = 0; 4233ac27a0ecSDave Kleikamp const unsigned int ia_valid = attr->ia_valid; 4234ac27a0ecSDave Kleikamp 4235ac27a0ecSDave Kleikamp error = inode_change_ok(inode, attr); 4236ac27a0ecSDave Kleikamp if (error) 4237ac27a0ecSDave Kleikamp return error; 4238ac27a0ecSDave Kleikamp 423912755627SDmitry Monakhov if (is_quota_modification(inode, attr)) 4240871a2931SChristoph Hellwig dquot_initialize(inode); 424108cefc7aSEric W. Biederman if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 424208cefc7aSEric W. Biederman (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 4243ac27a0ecSDave Kleikamp handle_t *handle; 4244ac27a0ecSDave Kleikamp 4245ac27a0ecSDave Kleikamp /* (user+group)*(old+new) structure, inode write (sb, 4246ac27a0ecSDave Kleikamp * inode block, ? - but truncate inode update has it) */ 42479924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 42489924a92aSTheodore Ts'o (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + 4249194074acSDmitry Monakhov EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3); 4250ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4251ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4252ac27a0ecSDave Kleikamp goto err_out; 4253ac27a0ecSDave Kleikamp } 4254b43fa828SChristoph Hellwig error = dquot_transfer(inode, attr); 4255ac27a0ecSDave Kleikamp if (error) { 4256617ba13bSMingming Cao ext4_journal_stop(handle); 4257ac27a0ecSDave Kleikamp return error; 4258ac27a0ecSDave Kleikamp } 4259ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4260ac27a0ecSDave Kleikamp * one transaction */ 4261ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4262ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4263ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4264ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4265617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4266617ba13bSMingming Cao ext4_journal_stop(handle); 4267ac27a0ecSDave Kleikamp } 4268ac27a0ecSDave Kleikamp 4269e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4270562c72aaSChristoph Hellwig 427112e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4272e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4273e2b46574SEric Sandeen 42740c095c7fSTheodore Ts'o if (attr->ia_size > sbi->s_bitmap_maxbytes) 42750c095c7fSTheodore Ts'o return -EFBIG; 4276e2b46574SEric Sandeen } 4277e2b46574SEric Sandeen } 4278e2b46574SEric Sandeen 4279ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4280c8d46e41SJiaying Zhang attr->ia_valid & ATTR_SIZE && 4281072bd7eaSTheodore Ts'o (attr->ia_size < inode->i_size)) { 4282ac27a0ecSDave Kleikamp handle_t *handle; 4283ac27a0ecSDave Kleikamp 42849924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_INODE, 3); 4285ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4286ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4287ac27a0ecSDave Kleikamp goto err_out; 4288ac27a0ecSDave Kleikamp } 42893d287de3SDmitry Monakhov if (ext4_handle_valid(handle)) { 4290617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 42913d287de3SDmitry Monakhov orphan = 1; 42923d287de3SDmitry Monakhov } 4293617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 
4294617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 4295ac27a0ecSDave Kleikamp if (!error) 4296ac27a0ecSDave Kleikamp error = rc; 4297617ba13bSMingming Cao ext4_journal_stop(handle); 4298678aaf48SJan Kara 4299678aaf48SJan Kara if (ext4_should_order_data(inode)) { 4300678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode, 4301678aaf48SJan Kara attr->ia_size); 4302678aaf48SJan Kara if (error) { 4303678aaf48SJan Kara /* Do as much error cleanup as possible */ 43049924a92aSTheodore Ts'o handle = ext4_journal_start(inode, 43059924a92aSTheodore Ts'o EXT4_HT_INODE, 3); 4306678aaf48SJan Kara if (IS_ERR(handle)) { 4307678aaf48SJan Kara ext4_orphan_del(NULL, inode); 4308678aaf48SJan Kara goto err_out; 4309678aaf48SJan Kara } 4310678aaf48SJan Kara ext4_orphan_del(handle, inode); 43113d287de3SDmitry Monakhov orphan = 0; 4312678aaf48SJan Kara ext4_journal_stop(handle); 4313678aaf48SJan Kara goto err_out; 4314678aaf48SJan Kara } 4315678aaf48SJan Kara } 4316ac27a0ecSDave Kleikamp } 4317ac27a0ecSDave Kleikamp 4318072bd7eaSTheodore Ts'o if (attr->ia_valid & ATTR_SIZE) { 431953e87268SJan Kara if (attr->ia_size != inode->i_size) { 432053e87268SJan Kara loff_t oldsize = inode->i_size; 432153e87268SJan Kara 432253e87268SJan Kara i_size_write(inode, attr->ia_size); 432353e87268SJan Kara /* 432453e87268SJan Kara * Blocks are going to be removed from the inode. Wait 432553e87268SJan Kara * for dio in flight. Temporarily disable 432653e87268SJan Kara * dioread_nolock to prevent livelock. 432753e87268SJan Kara */ 43281b65007eSDmitry Monakhov if (orphan) { 432953e87268SJan Kara if (!ext4_should_journal_data(inode)) { 43301b65007eSDmitry Monakhov ext4_inode_block_unlocked_dio(inode); 43311c9114f9SDmitry Monakhov inode_dio_wait(inode); 43321b65007eSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode); 433353e87268SJan Kara } else 433453e87268SJan Kara ext4_wait_for_tail_page_commit(inode); 43351b65007eSDmitry Monakhov } 433653e87268SJan Kara /* 433753e87268SJan Kara * Truncate pagecache after we've waited for commit 433853e87268SJan Kara * in data=journal mode to make pages freeable. 433953e87268SJan Kara */ 434053e87268SJan Kara truncate_pagecache(inode, oldsize, inode->i_size); 43411c9114f9SDmitry Monakhov } 4342072bd7eaSTheodore Ts'o ext4_truncate(inode); 4343072bd7eaSTheodore Ts'o } 4344ac27a0ecSDave Kleikamp 43451025774cSChristoph Hellwig if (!rc) { 43461025774cSChristoph Hellwig setattr_copy(inode, attr); 43471025774cSChristoph Hellwig mark_inode_dirty(inode); 43481025774cSChristoph Hellwig } 43491025774cSChristoph Hellwig 43501025774cSChristoph Hellwig /* 43511025774cSChristoph Hellwig * If the call to ext4_truncate failed to get a transaction handle at 43521025774cSChristoph Hellwig * all, we need to clean up the in-core orphan list manually. 
43531025774cSChristoph Hellwig */ 43543d287de3SDmitry Monakhov if (orphan && inode->i_nlink) 4355617ba13bSMingming Cao ext4_orphan_del(NULL, inode); 4356ac27a0ecSDave Kleikamp 4357ac27a0ecSDave Kleikamp if (!rc && (ia_valid & ATTR_MODE)) 4358617ba13bSMingming Cao rc = ext4_acl_chmod(inode); 4359ac27a0ecSDave Kleikamp 4360ac27a0ecSDave Kleikamp err_out: 4361617ba13bSMingming Cao ext4_std_error(inode->i_sb, error); 4362ac27a0ecSDave Kleikamp if (!error) 4363ac27a0ecSDave Kleikamp error = rc; 4364ac27a0ecSDave Kleikamp return error; 4365ac27a0ecSDave Kleikamp } 4366ac27a0ecSDave Kleikamp 43673e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 43683e3398a0SMingming Cao struct kstat *stat) 43693e3398a0SMingming Cao { 43703e3398a0SMingming Cao struct inode *inode; 43713e3398a0SMingming Cao unsigned long delalloc_blocks; 43723e3398a0SMingming Cao 43733e3398a0SMingming Cao inode = dentry->d_inode; 43743e3398a0SMingming Cao generic_fillattr(inode, stat); 43753e3398a0SMingming Cao 43763e3398a0SMingming Cao /* 43773e3398a0SMingming Cao * We can't update i_blocks if the block allocation is delayed; 43783e3398a0SMingming Cao * otherwise, in the case of a system crash before the real block 43793e3398a0SMingming Cao * allocation is done, we would have i_blocks inconsistent with 43803e3398a0SMingming Cao * the on-disk file blocks. 43813e3398a0SMingming Cao * We always keep i_blocks updated together with the real 43823e3398a0SMingming Cao * allocation. But so as not to confuse userspace, stat 43833e3398a0SMingming Cao * will return blocks that include the delayed allocation 43843e3398a0SMingming Cao * blocks for this file. 43853e3398a0SMingming Cao */ 438696607551STao Ma delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), 438796607551STao Ma EXT4_I(inode)->i_reserved_data_blocks); 43883e3398a0SMingming Cao 43893e3398a0SMingming Cao stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 43903e3398a0SMingming Cao return 0; 43913e3398a0SMingming Cao } 4392ac27a0ecSDave Kleikamp 4393a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4394a02908f1SMingming Cao { 439512e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 43968bb2b247SAmir Goldstein return ext4_ind_trans_blocks(inode, nrblocks, chunk); 4397ac51d837STheodore Ts'o return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); 4398a02908f1SMingming Cao } 4399ac51d837STheodore Ts'o 4400a02908f1SMingming Cao /* 4401a02908f1SMingming Cao * Account for index blocks, block group bitmaps and block group 4402a02908f1SMingming Cao * descriptor blocks when we modify data blocks and index blocks. 4403a02908f1SMingming Cao * In the worst case, the index blocks spread over different block groups. 4404a02908f1SMingming Cao * 4405a02908f1SMingming Cao * If data blocks are discontiguous, they can spread over 44064907cb7bSAnatol Pomozov * different block groups too. If they are contiguous, with flexbg, 4407a02908f1SMingming Cao * they could still cross a block group boundary.
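 *
 * For example (illustrative numbers only, mirroring the arithmetic in
 * the function body below): modifying nrblocks == 4 discontiguous data
 * blocks (chunk == 0) with idxblocks == 3 gives groups = 3 + 4 = 7, so
 * up to 7 bitmap blocks plus up to 7 group descriptor blocks (capped by
 * ngroups and s_gdb_count respectively) are added to the credit count.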
4408a02908f1SMingming Cao * 4409a02908f1SMingming Cao * Also account for superblock, inode, quota and xattr blocks. 4410a02908f1SMingming Cao */ 44111f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4412a02908f1SMingming Cao { 44138df9675fSTheodore Ts'o ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); 44148df9675fSTheodore Ts'o int gdpblocks; 4415a02908f1SMingming Cao int idxblocks; 4416a02908f1SMingming Cao int ret = 0; 4417a02908f1SMingming Cao 4418a02908f1SMingming Cao /* 4419a02908f1SMingming Cao * How many index blocks do we need to touch to modify nrblocks? 4420a02908f1SMingming Cao * The "chunk" flag indicates whether the nrblocks are 4421a02908f1SMingming Cao * physically contiguous on disk. 4422a02908f1SMingming Cao * 4423a02908f1SMingming Cao * Direct IO and fallocate call get_block to allocate 4424a02908f1SMingming Cao * a single extent at a time, so they can set the "chunk" flag. 4425a02908f1SMingming Cao */ 4426a02908f1SMingming Cao idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); 4427a02908f1SMingming Cao 4428a02908f1SMingming Cao ret = idxblocks; 4429a02908f1SMingming Cao 4430a02908f1SMingming Cao /* 4431a02908f1SMingming Cao * Now let's see how many group bitmaps and group descriptors need 4432a02908f1SMingming Cao * to be accounted for. 4433a02908f1SMingming Cao */ 4434a02908f1SMingming Cao groups = idxblocks; 4435a02908f1SMingming Cao if (chunk) 4436a02908f1SMingming Cao groups += 1; 4437ac27a0ecSDave Kleikamp else 4438a02908f1SMingming Cao groups += nrblocks; 4439ac27a0ecSDave Kleikamp 4440a02908f1SMingming Cao gdpblocks = groups; 44418df9675fSTheodore Ts'o if (groups > ngroups) 44428df9675fSTheodore Ts'o groups = ngroups; 4443a02908f1SMingming Cao if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) 4444a02908f1SMingming Cao gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; 4445a02908f1SMingming Cao 4446a02908f1SMingming Cao /* bitmaps and block group descriptor blocks */ 4447a02908f1SMingming Cao ret += groups + gdpblocks; 4448a02908f1SMingming Cao 4449a02908f1SMingming Cao /* Blocks for super block, inode, quota and xattr blocks */ 4450a02908f1SMingming Cao ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); 4451ac27a0ecSDave Kleikamp 4452ac27a0ecSDave Kleikamp return ret; 4453ac27a0ecSDave Kleikamp } 4454ac27a0ecSDave Kleikamp 4455ac27a0ecSDave Kleikamp /* 445625985edcSLucas De Marchi * Calculate the total number of credits to reserve to fit 4457f3bd1f3fSMingming Cao * the modification of a single page into a single transaction, 4458f3bd1f3fSMingming Cao * which may include multiple chunks of block allocations. 4459a02908f1SMingming Cao * 4460525f4ed8SMingming Cao * This could be called via ext4_write_begin(). 4461a02908f1SMingming Cao * 4462525f4ed8SMingming Cao * We need to consider the worst case, when we allocate 4463a02908f1SMingming Cao * one new block per extent.
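 *
 * For example (illustrative): with a 1KB block size and 4KB pages,
 * bpp = ext4_journal_blocks_per_page() == 4, and in the worst case each
 * of those four blocks starts a new extent; in data=journal mode the
 * four data blocks themselves are then added on top of the metadata
 * credits.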
4464a02908f1SMingming Cao */ 4465a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode) 4466a02908f1SMingming Cao { 4467a02908f1SMingming Cao int bpp = ext4_journal_blocks_per_page(inode); 4468a02908f1SMingming Cao int ret; 4469a02908f1SMingming Cao 4470a02908f1SMingming Cao ret = ext4_meta_trans_blocks(inode, bpp, 0); 4471a02908f1SMingming Cao 4472a02908f1SMingming Cao /* Account for data blocks for journalled mode */ 4473a02908f1SMingming Cao if (ext4_should_journal_data(inode)) 4474a02908f1SMingming Cao ret += bpp; 4475a02908f1SMingming Cao return ret; 4476a02908f1SMingming Cao } 4477f3bd1f3fSMingming Cao 4478f3bd1f3fSMingming Cao /* 4479f3bd1f3fSMingming Cao * Calculate the journal credits for a chunk of data modification. 4480f3bd1f3fSMingming Cao * 4481f3bd1f3fSMingming Cao * This is called from DIO, fallocate or anyone else calling 448279e83036SEric Sandeen * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks. 4483f3bd1f3fSMingming Cao * 4484f3bd1f3fSMingming Cao * Journal buffers for data blocks are not included here, as DIO 4485f3bd1f3fSMingming Cao * and fallocate do not need to journal data buffers. 4486f3bd1f3fSMingming Cao */ 4487f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) 4488f3bd1f3fSMingming Cao { 4489f3bd1f3fSMingming Cao return ext4_meta_trans_blocks(inode, nrblocks, 1); 4490f3bd1f3fSMingming Cao } 4491f3bd1f3fSMingming Cao 4492a02908f1SMingming Cao /* 4493617ba13bSMingming Cao * The caller must have previously called ext4_reserve_inode_write(). 4494ac27a0ecSDave Kleikamp * Given this, we know that the caller already has write access to iloc->bh. 4495ac27a0ecSDave Kleikamp */ 4496617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle, 4497617ba13bSMingming Cao struct inode *inode, struct ext4_iloc *iloc) 4498ac27a0ecSDave Kleikamp { 4499ac27a0ecSDave Kleikamp int err = 0; 4500ac27a0ecSDave Kleikamp 4501c64db50eSTheodore Ts'o if (IS_I_VERSION(inode)) 450225ec56b5SJean Noel Cordenner inode_inc_iversion(inode); 450325ec56b5SJean Noel Cordenner 4504ac27a0ecSDave Kleikamp /* the do_update_inode consumes one bh->b_count */ 4505ac27a0ecSDave Kleikamp get_bh(iloc->bh); 4506ac27a0ecSDave Kleikamp 4507dab291afSMingming Cao /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ 4508830156c7SFrank Mayhar err = ext4_do_update_inode(handle, inode, iloc); 4509ac27a0ecSDave Kleikamp put_bh(iloc->bh); 4510ac27a0ecSDave Kleikamp return err; 4511ac27a0ecSDave Kleikamp } 4512ac27a0ecSDave Kleikamp 4513ac27a0ecSDave Kleikamp /* 4514ac27a0ecSDave Kleikamp * On success, we end up with an outstanding reference count against 4515ac27a0ecSDave Kleikamp * iloc->bh. This _must_ be cleaned up later.
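 *
 * A typical call sequence (a sketch of the pattern used by
 * ext4_mark_inode_dirty() below, not a separate API):
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err)
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *
 * where ext4_mark_iloc_dirty() drops the reference on iloc->bh.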
4516ac27a0ecSDave Kleikamp */ 4517ac27a0ecSDave Kleikamp 4518ac27a0ecSDave Kleikamp int 4519617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 4520617ba13bSMingming Cao struct ext4_iloc *iloc) 4521ac27a0ecSDave Kleikamp { 45220390131bSFrank Mayhar int err; 45230390131bSFrank Mayhar 4524617ba13bSMingming Cao err = ext4_get_inode_loc(inode, iloc); 4525ac27a0ecSDave Kleikamp if (!err) { 4526ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc->bh, "get_write_access"); 4527617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, iloc->bh); 4528ac27a0ecSDave Kleikamp if (err) { 4529ac27a0ecSDave Kleikamp brelse(iloc->bh); 4530ac27a0ecSDave Kleikamp iloc->bh = NULL; 4531ac27a0ecSDave Kleikamp } 4532ac27a0ecSDave Kleikamp } 4533617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4534ac27a0ecSDave Kleikamp return err; 4535ac27a0ecSDave Kleikamp } 4536ac27a0ecSDave Kleikamp 4537ac27a0ecSDave Kleikamp /* 45386dd4ee7cSKalpak Shah * Expand an inode by new_extra_isize bytes. 45396dd4ee7cSKalpak Shah * Returns 0 on success or negative error number on failure. 45406dd4ee7cSKalpak Shah */ 45411d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode, 45421d03ec98SAneesh Kumar K.V unsigned int new_extra_isize, 45431d03ec98SAneesh Kumar K.V struct ext4_iloc iloc, 45441d03ec98SAneesh Kumar K.V handle_t *handle) 45456dd4ee7cSKalpak Shah { 45466dd4ee7cSKalpak Shah struct ext4_inode *raw_inode; 45476dd4ee7cSKalpak Shah struct ext4_xattr_ibody_header *header; 45486dd4ee7cSKalpak Shah 45496dd4ee7cSKalpak Shah if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 45506dd4ee7cSKalpak Shah return 0; 45516dd4ee7cSKalpak Shah 45526dd4ee7cSKalpak Shah raw_inode = ext4_raw_inode(&iloc); 45536dd4ee7cSKalpak Shah 45546dd4ee7cSKalpak Shah header = IHDR(inode, raw_inode); 45556dd4ee7cSKalpak Shah 45566dd4ee7cSKalpak Shah /* No extended attributes present */ 455719f5fb7aSTheodore Ts'o if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 45586dd4ee7cSKalpak Shah header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 45596dd4ee7cSKalpak Shah memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 45606dd4ee7cSKalpak Shah new_extra_isize); 45616dd4ee7cSKalpak Shah EXT4_I(inode)->i_extra_isize = new_extra_isize; 45626dd4ee7cSKalpak Shah return 0; 45636dd4ee7cSKalpak Shah } 45646dd4ee7cSKalpak Shah 45656dd4ee7cSKalpak Shah /* try to expand with EAs present */ 45666dd4ee7cSKalpak Shah return ext4_expand_extra_isize_ea(inode, new_extra_isize, 45676dd4ee7cSKalpak Shah raw_inode, handle); 45686dd4ee7cSKalpak Shah } 45696dd4ee7cSKalpak Shah 45706dd4ee7cSKalpak Shah /* 4571ac27a0ecSDave Kleikamp * What we do here is to mark the in-core inode as clean with respect to inode 4572ac27a0ecSDave Kleikamp * dirtiness (it may still be data-dirty). 4573ac27a0ecSDave Kleikamp * This means that the in-core inode may be reaped by prune_icache 4574ac27a0ecSDave Kleikamp * without having to perform any I/O. This is a very good thing, 4575ac27a0ecSDave Kleikamp * because *any* task may call prune_icache - even ones which 4576ac27a0ecSDave Kleikamp * have a transaction open against a different journal. 4577ac27a0ecSDave Kleikamp * 4578ac27a0ecSDave Kleikamp * Is this cheating? Not really. Sure, we haven't written the 4579ac27a0ecSDave Kleikamp * inode out, but prune_icache isn't a user-visible syncing function. 4580ac27a0ecSDave Kleikamp * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 4581ac27a0ecSDave Kleikamp * we start and wait on commits. 
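 *
 * A minimal usage sketch (this is the pattern that ext4_dirty_inode()
 * below follows):
 *
 *	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
 *	if (!IS_ERR(handle)) {
 *		ext4_mark_inode_dirty(handle, inode);
 *		ext4_journal_stop(handle);
 *	}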
4582ac27a0ecSDave Kleikamp */ 4583617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 4584ac27a0ecSDave Kleikamp { 4585617ba13bSMingming Cao struct ext4_iloc iloc; 45866dd4ee7cSKalpak Shah struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 45876dd4ee7cSKalpak Shah static unsigned int mnt_count; 45886dd4ee7cSKalpak Shah int err, ret; 4589ac27a0ecSDave Kleikamp 4590ac27a0ecSDave Kleikamp might_sleep(); 45917ff9c073STheodore Ts'o trace_ext4_mark_inode_dirty(inode, _RET_IP_); 4592617ba13bSMingming Cao err = ext4_reserve_inode_write(handle, inode, &iloc); 45930390131bSFrank Mayhar if (ext4_handle_valid(handle) && 45940390131bSFrank Mayhar EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 459519f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { 45966dd4ee7cSKalpak Shah /* 45976dd4ee7cSKalpak Shah * We need extra buffer credits since we may write into EA block 45986dd4ee7cSKalpak Shah * with this same handle. If journal_extend fails, then it will 45996dd4ee7cSKalpak Shah * only result in a minor loss of functionality for that inode. 46006dd4ee7cSKalpak Shah * If this is felt to be critical, then e2fsck should be run to 46016dd4ee7cSKalpak Shah * force a large enough s_min_extra_isize. 46026dd4ee7cSKalpak Shah */ 46036dd4ee7cSKalpak Shah if ((jbd2_journal_extend(handle, 46046dd4ee7cSKalpak Shah EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 46056dd4ee7cSKalpak Shah ret = ext4_expand_extra_isize(inode, 46066dd4ee7cSKalpak Shah sbi->s_want_extra_isize, 46076dd4ee7cSKalpak Shah iloc, handle); 46086dd4ee7cSKalpak Shah if (ret) { 460919f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, 461019f5fb7aSTheodore Ts'o EXT4_STATE_NO_EXPAND); 4611c1bddad9SAneesh Kumar K.V if (mnt_count != 4612c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count)) { 461312062dddSEric Sandeen ext4_warning(inode->i_sb, 46146dd4ee7cSKalpak Shah "Unable to expand inode %lu. Delete" 46156dd4ee7cSKalpak Shah " some EAs or run e2fsck.", 46166dd4ee7cSKalpak Shah inode->i_ino); 4617c1bddad9SAneesh Kumar K.V mnt_count = 4618c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count); 46196dd4ee7cSKalpak Shah } 46206dd4ee7cSKalpak Shah } 46216dd4ee7cSKalpak Shah } 46226dd4ee7cSKalpak Shah } 4623ac27a0ecSDave Kleikamp if (!err) 4624617ba13bSMingming Cao err = ext4_mark_iloc_dirty(handle, inode, &iloc); 4625ac27a0ecSDave Kleikamp return err; 4626ac27a0ecSDave Kleikamp } 4627ac27a0ecSDave Kleikamp 4628ac27a0ecSDave Kleikamp /* 4629617ba13bSMingming Cao * ext4_dirty_inode() is called from __mark_inode_dirty() 4630ac27a0ecSDave Kleikamp * 4631ac27a0ecSDave Kleikamp * We're really interested in the case where a file is being extended. 4632ac27a0ecSDave Kleikamp * i_size has been changed by generic_commit_write() and we thus need 4633ac27a0ecSDave Kleikamp * to include the updated inode in the current transaction. 4634ac27a0ecSDave Kleikamp * 46355dd4056dSChristoph Hellwig * Also, dquot_alloc_block() will always dirty the inode when blocks 4636ac27a0ecSDave Kleikamp * are allocated to the file. 4637ac27a0ecSDave Kleikamp * 4638ac27a0ecSDave Kleikamp * If the inode is marked synchronous, we don't honour that here - doing 4639ac27a0ecSDave Kleikamp * so would cause a commit on atime updates, which we don't bother doing. 4640ac27a0ecSDave Kleikamp * We handle synchronous inodes at the highest possible level. 
4641ac27a0ecSDave Kleikamp */ 4642aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags) 4643ac27a0ecSDave Kleikamp { 4644ac27a0ecSDave Kleikamp handle_t *handle; 4645ac27a0ecSDave Kleikamp 46469924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 4647ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4648ac27a0ecSDave Kleikamp goto out; 4649f3dc272fSCurt Wohlgemuth 4650617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 4651f3dc272fSCurt Wohlgemuth 4652617ba13bSMingming Cao ext4_journal_stop(handle); 4653ac27a0ecSDave Kleikamp out: 4654ac27a0ecSDave Kleikamp return; 4655ac27a0ecSDave Kleikamp } 4656ac27a0ecSDave Kleikamp 4657ac27a0ecSDave Kleikamp #if 0 4658ac27a0ecSDave Kleikamp /* 4659ac27a0ecSDave Kleikamp * Bind an inode's backing buffer_head into this transaction, to prevent 4660ac27a0ecSDave Kleikamp * it from being flushed to disk early. Unlike 4661617ba13bSMingming Cao * ext4_reserve_inode_write, this leaves behind no bh reference and 4662ac27a0ecSDave Kleikamp * returns no iloc structure, so the caller needs to repeat the iloc 4663ac27a0ecSDave Kleikamp * lookup to mark the inode dirty later. 4664ac27a0ecSDave Kleikamp */ 4665617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode) 4666ac27a0ecSDave Kleikamp { 4667617ba13bSMingming Cao struct ext4_iloc iloc; 4668ac27a0ecSDave Kleikamp 4669ac27a0ecSDave Kleikamp int err = 0; 4670ac27a0ecSDave Kleikamp if (handle) { 4671617ba13bSMingming Cao err = ext4_get_inode_loc(inode, &iloc); 4672ac27a0ecSDave Kleikamp if (!err) { 4673ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc.bh, "get_write_access"); 4674dab291afSMingming Cao err = jbd2_journal_get_write_access(handle, iloc.bh); 4675ac27a0ecSDave Kleikamp if (!err) 46760390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, 467773b50c1cSCurt Wohlgemuth NULL, 4678ac27a0ecSDave Kleikamp iloc.bh); 4679ac27a0ecSDave Kleikamp brelse(iloc.bh); 4680ac27a0ecSDave Kleikamp } 4681ac27a0ecSDave Kleikamp } 4682617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4683ac27a0ecSDave Kleikamp return err; 4684ac27a0ecSDave Kleikamp } 4685ac27a0ecSDave Kleikamp #endif 4686ac27a0ecSDave Kleikamp 4687617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val) 4688ac27a0ecSDave Kleikamp { 4689ac27a0ecSDave Kleikamp journal_t *journal; 4690ac27a0ecSDave Kleikamp handle_t *handle; 4691ac27a0ecSDave Kleikamp int err; 4692ac27a0ecSDave Kleikamp 4693ac27a0ecSDave Kleikamp /* 4694ac27a0ecSDave Kleikamp * We have to be very careful here: changing a data block's 4695ac27a0ecSDave Kleikamp * journaling status dynamically is dangerous. If we write a 4696ac27a0ecSDave Kleikamp * data block to the journal, change the status and then delete 4697ac27a0ecSDave Kleikamp * that block, we risk forgetting to revoke the old log record 4698ac27a0ecSDave Kleikamp * from the journal and so a subsequent replay can corrupt data. 4699ac27a0ecSDave Kleikamp * So, first we make sure that the journal is empty and that 4700ac27a0ecSDave Kleikamp * nobody is changing anything. 4701ac27a0ecSDave Kleikamp */ 4702ac27a0ecSDave Kleikamp 4703617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 47040390131bSFrank Mayhar if (!journal) 47050390131bSFrank Mayhar return 0; 4706d699594dSDave Hansen if (is_journal_aborted(journal)) 4707ac27a0ecSDave Kleikamp return -EROFS; 47082aff57b0SYongqiang Yang /* We have to allocate physical blocks for delalloc blocks 47092aff57b0SYongqiang Yang * before flushing journal. 
Otherwise delalloc blocks cannot 47102aff57b0SYongqiang Yang * be allocated any more. Worse, a truncate on delalloc blocks 47112aff57b0SYongqiang Yang * could trigger a BUG by flushing delalloc blocks in the journal. 47122aff57b0SYongqiang Yang * There are no delalloc blocks in non-journal data mode. 47132aff57b0SYongqiang Yang */ 47142aff57b0SYongqiang Yang if (val && test_opt(inode->i_sb, DELALLOC)) { 47152aff57b0SYongqiang Yang err = ext4_alloc_da_blocks(inode); 47162aff57b0SYongqiang Yang if (err < 0) 47172aff57b0SYongqiang Yang return err; 47182aff57b0SYongqiang Yang } 4719ac27a0ecSDave Kleikamp 472017335dccSDmitry Monakhov /* Wait for all existing dio workers */ 472117335dccSDmitry Monakhov ext4_inode_block_unlocked_dio(inode); 472217335dccSDmitry Monakhov inode_dio_wait(inode); 472317335dccSDmitry Monakhov 4724dab291afSMingming Cao jbd2_journal_lock_updates(journal); 4725ac27a0ecSDave Kleikamp 4726ac27a0ecSDave Kleikamp /* 4727ac27a0ecSDave Kleikamp * OK, there are no updates running now, and all cached data is 4728ac27a0ecSDave Kleikamp * synced to disk. We are now in a completely consistent state 4729ac27a0ecSDave Kleikamp * which doesn't have anything in the journal, and we know that 4730ac27a0ecSDave Kleikamp * no filesystem updates are running, so it is safe to modify 4731ac27a0ecSDave Kleikamp * the inode's in-core data-journaling state flag now. 4732ac27a0ecSDave Kleikamp */ 4733ac27a0ecSDave Kleikamp 4734ac27a0ecSDave Kleikamp if (val) 473512e9b892SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 47365872ddaaSYongqiang Yang else { 47375872ddaaSYongqiang Yang jbd2_journal_flush(journal); 473812e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 47395872ddaaSYongqiang Yang } 4740617ba13bSMingming Cao ext4_set_aops(inode); 4741ac27a0ecSDave Kleikamp 4742dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 474317335dccSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode); 4744ac27a0ecSDave Kleikamp 4745ac27a0ecSDave Kleikamp /* Finally we can mark the inode as dirty.
*/ 4746ac27a0ecSDave Kleikamp 47479924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_INODE, 1); 4748ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4749ac27a0ecSDave Kleikamp return PTR_ERR(handle); 4750ac27a0ecSDave Kleikamp 4751617ba13bSMingming Cao err = ext4_mark_inode_dirty(handle, inode); 47520390131bSFrank Mayhar ext4_handle_sync(handle); 4753617ba13bSMingming Cao ext4_journal_stop(handle); 4754617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4755ac27a0ecSDave Kleikamp 4756ac27a0ecSDave Kleikamp return err; 4757ac27a0ecSDave Kleikamp } 47582e9ee850SAneesh Kumar K.V 47592e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 47602e9ee850SAneesh Kumar K.V { 47612e9ee850SAneesh Kumar K.V return !buffer_mapped(bh); 47622e9ee850SAneesh Kumar K.V } 47632e9ee850SAneesh Kumar K.V 4764c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 47652e9ee850SAneesh Kumar K.V { 4766c2ec175cSNick Piggin struct page *page = vmf->page; 47672e9ee850SAneesh Kumar K.V loff_t size; 47682e9ee850SAneesh Kumar K.V unsigned long len; 47699ea7df53SJan Kara int ret; 47702e9ee850SAneesh Kumar K.V struct file *file = vma->vm_file; 47712e9ee850SAneesh Kumar K.V struct inode *inode = file->f_path.dentry->d_inode; 47722e9ee850SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 47739ea7df53SJan Kara handle_t *handle; 47749ea7df53SJan Kara get_block_t *get_block; 47759ea7df53SJan Kara int retries = 0; 47762e9ee850SAneesh Kumar K.V 47778e8ad8a5SJan Kara sb_start_pagefault(inode->i_sb); 4778041bbb6dSTheodore Ts'o file_update_time(vma->vm_file); 47799ea7df53SJan Kara /* Delalloc case is easy... */ 47809ea7df53SJan Kara if (test_opt(inode->i_sb, DELALLOC) && 47819ea7df53SJan Kara !ext4_should_journal_data(inode) && 47829ea7df53SJan Kara !ext4_nonda_switch(inode->i_sb)) { 47839ea7df53SJan Kara do { 47849ea7df53SJan Kara ret = __block_page_mkwrite(vma, vmf, 47859ea7df53SJan Kara ext4_da_get_block_prep); 47869ea7df53SJan Kara } while (ret == -ENOSPC && 47879ea7df53SJan Kara ext4_should_retry_alloc(inode->i_sb, &retries)); 47889ea7df53SJan Kara goto out_ret; 47892e9ee850SAneesh Kumar K.V } 47900e499890SDarrick J. Wong 47910e499890SDarrick J. Wong lock_page(page); 47929ea7df53SJan Kara size = i_size_read(inode); 47939ea7df53SJan Kara /* Page got truncated from under us? */ 47949ea7df53SJan Kara if (page->mapping != mapping || page_offset(page) > size) { 47959ea7df53SJan Kara unlock_page(page); 47969ea7df53SJan Kara ret = VM_FAULT_NOPAGE; 47979ea7df53SJan Kara goto out; 47980e499890SDarrick J. Wong } 47992e9ee850SAneesh Kumar K.V 48002e9ee850SAneesh Kumar K.V if (page->index == size >> PAGE_CACHE_SHIFT) 48012e9ee850SAneesh Kumar K.V len = size & ~PAGE_CACHE_MASK; 48022e9ee850SAneesh Kumar K.V else 48032e9ee850SAneesh Kumar K.V len = PAGE_CACHE_SIZE; 4804a827eaffSAneesh Kumar K.V /* 48059ea7df53SJan Kara * Return if we have all the buffers mapped. 
This avoids the need to do 48069ea7df53SJan Kara * journal_start/journal_stop which can block and take a long time 4807a827eaffSAneesh Kumar K.V */ 48082e9ee850SAneesh Kumar K.V if (page_has_buffers(page)) { 4809f19d5870STao Ma if (!ext4_walk_page_buffers(NULL, page_buffers(page), 4810f19d5870STao Ma 0, len, NULL, 4811a827eaffSAneesh Kumar K.V ext4_bh_unmapped)) { 48129ea7df53SJan Kara /* Wait so that we don't change page under IO */ 48139ea7df53SJan Kara wait_on_page_writeback(page); 48149ea7df53SJan Kara ret = VM_FAULT_LOCKED; 48159ea7df53SJan Kara goto out; 48162e9ee850SAneesh Kumar K.V } 4817a827eaffSAneesh Kumar K.V } 4818a827eaffSAneesh Kumar K.V unlock_page(page); 48199ea7df53SJan Kara /* OK, we need to fill the hole... */ 48209ea7df53SJan Kara if (ext4_should_dioread_nolock(inode)) 48219ea7df53SJan Kara get_block = ext4_get_block_write; 48229ea7df53SJan Kara else 48239ea7df53SJan Kara get_block = ext4_get_block; 48249ea7df53SJan Kara retry_alloc: 48259924a92aSTheodore Ts'o handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 48269924a92aSTheodore Ts'o ext4_writepage_trans_blocks(inode)); 48279ea7df53SJan Kara if (IS_ERR(handle)) { 4828c2ec175cSNick Piggin ret = VM_FAULT_SIGBUS; 48299ea7df53SJan Kara goto out; 48309ea7df53SJan Kara } 48319ea7df53SJan Kara ret = __block_page_mkwrite(vma, vmf, get_block); 48329ea7df53SJan Kara if (!ret && ext4_should_journal_data(inode)) { 4833f19d5870STao Ma if (ext4_walk_page_buffers(handle, page_buffers(page), 0, 48349ea7df53SJan Kara PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { 48359ea7df53SJan Kara unlock_page(page); 48369ea7df53SJan Kara ret = VM_FAULT_SIGBUS; 4837fcbb5515SYongqiang Yang ext4_journal_stop(handle); 48389ea7df53SJan Kara goto out; 48399ea7df53SJan Kara } 48409ea7df53SJan Kara ext4_set_inode_state(inode, EXT4_STATE_JDATA); 48419ea7df53SJan Kara } 48429ea7df53SJan Kara ext4_journal_stop(handle); 48439ea7df53SJan Kara if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 48449ea7df53SJan Kara goto retry_alloc; 48459ea7df53SJan Kara out_ret: 48469ea7df53SJan Kara ret = block_page_mkwrite_return(ret); 48479ea7df53SJan Kara out: 48488e8ad8a5SJan Kara sb_end_pagefault(inode->i_sb); 48492e9ee850SAneesh Kumar K.V return ret; 48502e9ee850SAneesh Kumar K.V } 4851
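/*
 * ext4_change_inode_journal_flag() above is reached from user space via
 * the FS_IOC_SETFLAGS ioctl when FS_JOURNAL_DATA_FL is toggled (this is
 * what `chattr +j` does). A minimal user-space sketch, assuming a path
 * in argv[1] and sufficient privileges; illustrative only, not part of
 * this file:
 */

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	int fd, attr;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Read the current inode flags, then set the data-journaling bit. */
	if (ioctl(fd, FS_IOC_GETFLAGS, &attr) < 0) {
		perror("FS_IOC_GETFLAGS");
		close(fd);
		return 1;
	}
	attr |= FS_JOURNAL_DATA_FL;	/* equivalent of chattr +j */
	if (ioctl(fd, FS_IOC_SETFLAGS, &attr) < 0) {
		perror("FS_IOC_SETFLAGS");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}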