/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01
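
/*
 * Editor's note on the three helpers below (metadata_csum): the inode
 * checksum is a crc32c over the entire on-disk inode, seeded with
 * ei->i_csum_seed.  The stored checksum fields are temporarily zeroed
 * while the checksum is computed and restored afterwards, so the
 * result does not depend on their previous contents.
 */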
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				 int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
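
/*
 * Editor's sketch of the eviction path below: for an inode with
 * i_nlink == 0 we start a transaction sized for a truncate, set i_size
 * to zero and truncate the data blocks, then delete the orphan record
 * and set i_dtime before freeing the inode.  Failures fall through to
 * no_delete, which only clears the in-core inode.
 */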
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in
		 * the journal.  So although mm thinks everything is clean
		 * and ready for reaping, the inode might still have some
		 * pages to write in the running transaction or waiting to
		 * be checkpointed.  Thus calling
		 * jbd2_journal_invalidatepage() (via truncate_inode_pages())
		 * to discard these buffers can cause data loss.  Also, even
		 * if we did not discard these buffers, we would have no way
		 * to find them after the inode is reaped, and thus the user
		 * could see stale data if they try to read them before the
		 * transaction is checkpointed.  So be careful and force
		 * everything to disk here...  We use ei->i_datasync_tid to
		 * store the newest transaction containing the inode's data.
		 *
		 * Note that directories do not have this problem because
		 * they don't use the page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif
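
/*
 * Editor's note on the delayed allocation bookkeeping implemented by
 * the next two helpers: writes reserve data blocks (plus an estimate
 * of the metadata needed to map them) at write time, and those in-core
 * reservations are adjusted once blocks are actually allocated at
 * writeback time.
 */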

/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a block located at @lblock
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
			 "with only %d reserved metadata blocks\n", __func__,
			 inode->i_ino, ei->i_allocated_meta_blocks,
			 ei->i_reserved_meta_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}
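
/*
 * Editor's illustration (not a quote from elsewhere in the tree): a
 * read-only lookup of a single logical block looks like
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * where ret > 0 with EXT4_MAP_MAPPED set in map.m_flags means that
 * map.m_pblk holds the physical block number.
 */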
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise ext4_ind_map_blocks() handles indirect-mapped files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain look up failed (blocks have not been allocated);
 * in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret;
		if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
			/* delayed alloc may be allocated by fallocate and
			 * converted to initialized by direct I/O.
			 * We need to handle the delayed extent here.
			 */
			down_write((&EXT4_I(inode)->i_data_sem));
			goto delayed_mapped;
		}
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Returns if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated
	 * ext4_ext_get_block() returns the create = 0
	 * with buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from the delayed allocation writeout path
	 * we have already reserved fs blocks for allocation;
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non extent files.  So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
			int ret;
delayed_mapped:
			/* delayed allocation blocks have been allocated */
			ret = ext4_es_remove_extent(inode, map->m_lblk,
						    map->m_len);
			if (ret < 0)
				retval = ret;
		}
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
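
/*
 * Editor's note: the buffer_head based helpers below adapt the
 * get_block_t interface to ext4_map_blocks() - the requested range is
 * taken from bh->b_size, and the mapping result is copied back into
 * the buffer_head.
 */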

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
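
/*
 * Editor's note: ext4_getblk() returns a buffer_head for the given
 * logical block, optionally allocating it; ext4_bread() additionally
 * reads the block from disk if it is not already uptodate.
 */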

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	/* ensure we send some value back into *errp */
	*errp = 0;

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh)) {
		*errp = -ENOMEM;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
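
/*
 * Editor's note: apply fn to each buffer of a page that overlaps the
 * byte range [from, to).  If some buffer outside the range is not
 * uptodate, *partial is set so callers know the page as a whole cannot
 * be marked uptodate.
 */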
int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
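
/*
 * Editor's sketch of the write path below: ext4_write_begin() starts a
 * handle sized for a worst-case page write plus one orphan-list block,
 * then prepares the page via __block_write_begin().  On failure, any
 * blocks instantiated beyond i_size are trimmed off again, with the
 * orphan list protecting against a crash in between.
 */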

static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			goto out;
		if (ret == 1) {
			ret = 0;
			goto out;
		}
	}

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}

	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}
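
/*
 * Editor's note on sizes: i_size is the in-core file size, while
 * i_disksize tracks the size recorded in the on-disk inode.  With
 * delayed allocation they can differ, so the write_end paths below
 * update both under the page lock and only mark the inode dirty after
 * dropping it.
 */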

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	if (ext4_has_inline_data(inode))
		copied = ext4_write_inline_data_end(inode, pos, len,
						    copied, page);
	else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
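
/*
 * Editor's note: three write_end variants follow, one per journaling
 * mode - ordered (file data is attached to the transaction's inode
 * list via ext4_jbd2_file_inode() so it is flushed before commit),
 * writeback (no ordering between data and metadata), and journalled
 * (the data itself goes through the journal).
 */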

/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * fewer bytes, we will have blocks allocated
			 * outside inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * fewer bytes, we will have blocks allocated
		 * outside inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
ret : copied; 1102ac27a0ecSDave Kleikamp } 1103ac27a0ecSDave Kleikamp 1104bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file, 1105bfc1af65SNick Piggin struct address_space *mapping, 1106bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1107bfc1af65SNick Piggin struct page *page, void *fsdata) 1108ac27a0ecSDave Kleikamp { 1109617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1110bfc1af65SNick Piggin struct inode *inode = mapping->host; 1111ac27a0ecSDave Kleikamp int ret = 0, ret2; 1112ac27a0ecSDave Kleikamp int partial = 0; 1113bfc1af65SNick Piggin unsigned from, to; 1114cf17fea6SAneesh Kumar K.V loff_t new_i_size; 1115ac27a0ecSDave Kleikamp 11169bffad1eSTheodore Ts'o trace_ext4_journalled_write_end(inode, pos, len, copied); 1117bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1118bfc1af65SNick Piggin to = from + len; 1119bfc1af65SNick Piggin 1120441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1121441c8508SCurt Wohlgemuth 11223fdcfb66STao Ma if (ext4_has_inline_data(inode)) 11233fdcfb66STao Ma copied = ext4_write_inline_data_end(inode, pos, len, 11243fdcfb66STao Ma copied, page); 11253fdcfb66STao Ma else { 1126bfc1af65SNick Piggin if (copied < len) { 1127bfc1af65SNick Piggin if (!PageUptodate(page)) 1128bfc1af65SNick Piggin copied = 0; 1129bfc1af65SNick Piggin page_zero_new_buffers(page, from+copied, to); 1130bfc1af65SNick Piggin } 1131ac27a0ecSDave Kleikamp 1132f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_buffers(page), from, 1133bfc1af65SNick Piggin to, &partial, write_end_fn); 1134ac27a0ecSDave Kleikamp if (!partial) 1135ac27a0ecSDave Kleikamp SetPageUptodate(page); 11363fdcfb66STao Ma } 1137cf17fea6SAneesh Kumar K.V new_i_size = pos + copied; 1138cf17fea6SAneesh Kumar K.V if (new_i_size > inode->i_size) 1139bfc1af65SNick Piggin i_size_write(inode, pos+copied); 114019f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 11412d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1142cf17fea6SAneesh Kumar K.V if (new_i_size > EXT4_I(inode)->i_disksize) { 1143cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_i_size); 1144617ba13bSMingming Cao ret2 = ext4_mark_inode_dirty(handle, inode); 1145ac27a0ecSDave Kleikamp if (!ret) 1146ac27a0ecSDave Kleikamp ret = ret2; 1147ac27a0ecSDave Kleikamp } 1148bfc1af65SNick Piggin 1149cf108bcaSJan Kara unlock_page(page); 1150f8514083SAneesh Kumar K.V page_cache_release(page); 1151ffacfa7aSJan Kara if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1152f8514083SAneesh Kumar K.V /* If we have allocated more blocks and copied 1153f8514083SAneesh Kumar K.V * less, we will have blocks allocated outside 1154f8514083SAneesh Kumar K.V * inode->i_size, so truncate them. 1155f8514083SAneesh Kumar K.V */ 1156f8514083SAneesh Kumar K.V ext4_orphan_add(handle, inode); 1157f8514083SAneesh Kumar K.V 1158617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1159ac27a0ecSDave Kleikamp if (!ret) 1160ac27a0ecSDave Kleikamp ret = ret2; 1161f8514083SAneesh Kumar K.V if (pos + len > inode->i_size) { 1162b9a4207dSJan Kara ext4_truncate_failed_write(inode); 1163f8514083SAneesh Kumar K.V /* 1164ffacfa7aSJan Kara * If truncate failed early, the inode might still be 1165f8514083SAneesh Kumar K.V * on the orphan list; we need to make sure the inode 1166f8514083SAneesh Kumar K.V * is removed from the orphan list in that case.
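* (ext4_ordered_write_end() and ext4_writeback_write_end() above perform the same orphan-list cleanup after a failed truncate.)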
1167f8514083SAneesh Kumar K.V */ 1168f8514083SAneesh Kumar K.V if (inode->i_nlink) 1169f8514083SAneesh Kumar K.V ext4_orphan_del(NULL, inode); 1170f8514083SAneesh Kumar K.V } 1171bfc1af65SNick Piggin 1172bfc1af65SNick Piggin return ret ? ret : copied; 1173ac27a0ecSDave Kleikamp } 1174d2a17637SMingming Cao 11759d0be502STheodore Ts'o /* 11767b415bf6SAditya Kali * Reserve a single cluster located at lblock 11779d0be502STheodore Ts'o */ 117801f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) 1179d2a17637SMingming Cao { 1180030ba6bcSAneesh Kumar K.V int retries = 0; 1181d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 11820637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 11837b415bf6SAditya Kali unsigned int md_needed; 11845dd4056dSChristoph Hellwig int ret; 118503179fe9STheodore Ts'o ext4_lblk_t save_last_lblock; 118603179fe9STheodore Ts'o int save_len; 1187d2a17637SMingming Cao 118860e58e0fSMingming Cao /* 118972b8ab9dSEric Sandeen * We will charge metadata quota at writeout time; this saves 119072b8ab9dSEric Sandeen * us from metadata over-estimation, though we may go over by 119172b8ab9dSEric Sandeen * a small amount in the end. Here we just reserve for data. 119260e58e0fSMingming Cao */ 11937b415bf6SAditya Kali ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); 11945dd4056dSChristoph Hellwig if (ret) 11955dd4056dSChristoph Hellwig return ret; 119603179fe9STheodore Ts'o 119703179fe9STheodore Ts'o /* 119803179fe9STheodore Ts'o * Recalculate the number of metadata blocks to reserve 119903179fe9STheodore Ts'o * in order to allocate nrblocks; 120003179fe9STheodore Ts'o * the worst case is one extent per block. 120103179fe9STheodore Ts'o */ 120203179fe9STheodore Ts'o repeat: 120303179fe9STheodore Ts'o spin_lock(&ei->i_block_reservation_lock); 120403179fe9STheodore Ts'o /* 120503179fe9STheodore Ts'o * ext4_calc_metadata_amount() has side effects, which we have 120603179fe9STheodore Ts'o * to be prepared to undo if we fail to claim space. 120703179fe9STheodore Ts'o */ 120803179fe9STheodore Ts'o save_len = ei->i_da_metadata_calc_len; 120903179fe9STheodore Ts'o save_last_lblock = ei->i_da_metadata_calc_last_lblock; 121003179fe9STheodore Ts'o md_needed = EXT4_NUM_B2C(sbi, 121103179fe9STheodore Ts'o ext4_calc_metadata_amount(inode, lblock)); 121203179fe9STheodore Ts'o trace_ext4_da_reserve_space(inode, md_needed); 121303179fe9STheodore Ts'o 121472b8ab9dSEric Sandeen /* 121572b8ab9dSEric Sandeen * We do still charge estimated metadata to the sb though; 121672b8ab9dSEric Sandeen * we cannot afford to run out of free blocks.
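* Note: the claim below asks for md_needed + 1 clusters, i.e. the metadata estimate plus the single data cluster reserved via dquot_reserve_block() above.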
121772b8ab9dSEric Sandeen */ 1218e7d5f315STheodore Ts'o if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) { 121903179fe9STheodore Ts'o ei->i_da_metadata_calc_len = save_len; 122003179fe9STheodore Ts'o ei->i_da_metadata_calc_last_lblock = save_last_lblock; 122103179fe9STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 1222030ba6bcSAneesh Kumar K.V if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1223030ba6bcSAneesh Kumar K.V yield(); 1224030ba6bcSAneesh Kumar K.V goto repeat; 1225030ba6bcSAneesh Kumar K.V } 122603179fe9STheodore Ts'o dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1227d2a17637SMingming Cao return -ENOSPC; 1228d2a17637SMingming Cao } 12299d0be502STheodore Ts'o ei->i_reserved_data_blocks++; 12300637c6f4STheodore Ts'o ei->i_reserved_meta_blocks += md_needed; 12310637c6f4STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 123239bc680aSDmitry Monakhov 1233d2a17637SMingming Cao return 0; /* success */ 1234d2a17637SMingming Cao } 1235d2a17637SMingming Cao 123612219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1237d2a17637SMingming Cao { 1238d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 12390637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 1240d2a17637SMingming Cao 1241cd213226SMingming Cao if (!to_free) 1242cd213226SMingming Cao return; /* Nothing to release, exit */ 1243cd213226SMingming Cao 1244d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1245cd213226SMingming Cao 12465a58ec87SLi Zefan trace_ext4_da_release_space(inode, to_free); 12470637c6f4STheodore Ts'o if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1248cd213226SMingming Cao /* 12490637c6f4STheodore Ts'o * if there aren't enough reserved blocks, then the 12500637c6f4STheodore Ts'o * counter is messed up somewhere. Since this 12510637c6f4STheodore Ts'o * function is called from invalidate page, it's 12520637c6f4STheodore Ts'o * harmless to return without any action. 1253cd213226SMingming Cao */ 12540637c6f4STheodore Ts'o ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: " 12550637c6f4STheodore Ts'o "ino %lu, to_free %d with only %d reserved " 12561084f252STheodore Ts'o "data blocks", inode->i_ino, to_free, 12570637c6f4STheodore Ts'o ei->i_reserved_data_blocks); 12580637c6f4STheodore Ts'o WARN_ON(1); 12590637c6f4STheodore Ts'o to_free = ei->i_reserved_data_blocks; 12600637c6f4STheodore Ts'o } 12610637c6f4STheodore Ts'o ei->i_reserved_data_blocks -= to_free; 12620637c6f4STheodore Ts'o 12630637c6f4STheodore Ts'o if (ei->i_reserved_data_blocks == 0) { 12640637c6f4STheodore Ts'o /* 12650637c6f4STheodore Ts'o * We can release all of the reserved metadata blocks 12660637c6f4STheodore Ts'o * only when we have written all of the delayed 12670637c6f4STheodore Ts'o * allocation blocks. 12687b415bf6SAditya Kali * Note that in case of bigalloc, i_reserved_meta_blocks, 12697b415bf6SAditya Kali * i_reserved_data_blocks, etc. refer to number of clusters. 
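* (Illustrative numbers: with 4k blocks and a bigalloc cluster ratio of 16, one unit in these counters stands for a 64k cluster on disk.)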
12700637c6f4STheodore Ts'o */ 127157042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 127272b8ab9dSEric Sandeen ei->i_reserved_meta_blocks); 1273ee5f4d9cSTheodore Ts'o ei->i_reserved_meta_blocks = 0; 12749d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 1275cd213226SMingming Cao } 1276cd213226SMingming Cao 127772b8ab9dSEric Sandeen /* update fs dirty data blocks counter */ 127857042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); 1279d2a17637SMingming Cao 1280d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 128160e58e0fSMingming Cao 12827b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); 1283d2a17637SMingming Cao } 1284d2a17637SMingming Cao 1285d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1286d2a17637SMingming Cao unsigned long offset) 1287d2a17637SMingming Cao { 1288d2a17637SMingming Cao int to_release = 0; 1289d2a17637SMingming Cao struct buffer_head *head, *bh; 1290d2a17637SMingming Cao unsigned int curr_off = 0; 12917b415bf6SAditya Kali struct inode *inode = page->mapping->host; 12927b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 12937b415bf6SAditya Kali int num_clusters; 129451865fdaSZheng Liu ext4_fsblk_t lblk; 1295d2a17637SMingming Cao 1296d2a17637SMingming Cao head = page_buffers(page); 1297d2a17637SMingming Cao bh = head; 1298d2a17637SMingming Cao do { 1299d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1300d2a17637SMingming Cao 1301d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1302d2a17637SMingming Cao to_release++; 1303d2a17637SMingming Cao clear_buffer_delay(bh); 1304d2a17637SMingming Cao } 1305d2a17637SMingming Cao curr_off = next_off; 1306d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 13077b415bf6SAditya Kali 130851865fdaSZheng Liu if (to_release) { 130951865fdaSZheng Liu lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 131051865fdaSZheng Liu ext4_es_remove_extent(inode, lblk, to_release); 131151865fdaSZheng Liu } 131251865fdaSZheng Liu 13137b415bf6SAditya Kali /* If we have released all the blocks belonging to a cluster, then we 13147b415bf6SAditya Kali * need to release the reserved space for that cluster. 
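* The loop below visits each cluster in the released range and gives back one unit of reservation only when ext4_find_delalloc_cluster() finds no other delayed-allocated block still in that cluster (or trivially when the cluster ratio is 1).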
*/ 13157b415bf6SAditya Kali num_clusters = EXT4_NUM_B2C(sbi, to_release); 13167b415bf6SAditya Kali while (num_clusters > 0) { 13177b415bf6SAditya Kali lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + 13187b415bf6SAditya Kali ((num_clusters - 1) << sbi->s_cluster_bits); 13197b415bf6SAditya Kali if (sbi->s_cluster_ratio == 1 || 13207d1b1fbcSZheng Liu !ext4_find_delalloc_cluster(inode, lblk)) 13217b415bf6SAditya Kali ext4_da_release_space(inode, 1); 13227b415bf6SAditya Kali 13237b415bf6SAditya Kali num_clusters--; 13247b415bf6SAditya Kali } 1325d2a17637SMingming Cao } 1326ac27a0ecSDave Kleikamp 1327ac27a0ecSDave Kleikamp /* 132864769240SAlex Tomas * Delayed allocation stuff 132964769240SAlex Tomas */ 133064769240SAlex Tomas 133164769240SAlex Tomas /* 133264769240SAlex Tomas * mpage_da_submit_io - walks through an extent of pages and tries to write 1333a1d6cc56SAneesh Kumar K.V * them with the writepage() callback 133464769240SAlex Tomas * 133564769240SAlex Tomas * @mpd->inode: inode 133664769240SAlex Tomas * @mpd->first_page: first page of the extent 133764769240SAlex Tomas * @mpd->next_page: page after the last page of the extent 133864769240SAlex Tomas * 133964769240SAlex Tomas * By the time mpage_da_submit_io() is called we expect all blocks 134064769240SAlex Tomas * to be allocated. This may be wrong if allocation failed. 134164769240SAlex Tomas * 134264769240SAlex Tomas * As pages are already locked by write_cache_pages(), we can't use it 134364769240SAlex Tomas */ 13441de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd, 13451de3e3dfSTheodore Ts'o struct ext4_map_blocks *map) 134664769240SAlex Tomas { 1347791b7f08SAneesh Kumar K.V struct pagevec pvec; 1348791b7f08SAneesh Kumar K.V unsigned long index, end; 1349791b7f08SAneesh Kumar K.V int ret = 0, err, nr_pages, i; 1350791b7f08SAneesh Kumar K.V struct inode *inode = mpd->inode; 1351791b7f08SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 1352cb20d518STheodore Ts'o loff_t size = i_size_read(inode); 13533ecdb3a1STheodore Ts'o unsigned int len, block_start; 13543ecdb3a1STheodore Ts'o struct buffer_head *bh, *page_bufs = NULL; 13551de3e3dfSTheodore Ts'o sector_t pblock = 0, cur_logical = 0; 1356bd2d0210STheodore Ts'o struct ext4_io_submit io_submit; 135764769240SAlex Tomas 135864769240SAlex Tomas BUG_ON(mpd->next_page <= mpd->first_page); 1359bd2d0210STheodore Ts'o memset(&io_submit, 0, sizeof(io_submit)); 1360791b7f08SAneesh Kumar K.V /* 1361791b7f08SAneesh Kumar K.V * We need to start from the first_page to the next_page - 1 1362791b7f08SAneesh Kumar K.V * to make sure we also write the mapped dirty buffer_heads. 13638dc207c0STheodore Ts'o * If we look at mpd->b_blocknr we would only be looking 1364791b7f08SAneesh Kumar K.V * at the currently mapped buffer_heads.
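* (When the map argument is NULL, nothing new was mapped and only the buffers that are already mapped get written out.)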
1365791b7f08SAneesh Kumar K.V */ 136664769240SAlex Tomas index = mpd->first_page; 136764769240SAlex Tomas end = mpd->next_page - 1; 136864769240SAlex Tomas 1369791b7f08SAneesh Kumar K.V pagevec_init(&pvec, 0); 137064769240SAlex Tomas while (index <= end) { 1371791b7f08SAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 137264769240SAlex Tomas if (nr_pages == 0) 137364769240SAlex Tomas break; 137464769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 1375f8bec370SJan Kara int skip_page = 0; 137664769240SAlex Tomas struct page *page = pvec.pages[i]; 137764769240SAlex Tomas 1378791b7f08SAneesh Kumar K.V index = page->index; 1379791b7f08SAneesh Kumar K.V if (index > end) 1380791b7f08SAneesh Kumar K.V break; 1381cb20d518STheodore Ts'o 1382cb20d518STheodore Ts'o if (index == size >> PAGE_CACHE_SHIFT) 1383cb20d518STheodore Ts'o len = size & ~PAGE_CACHE_MASK; 1384cb20d518STheodore Ts'o else 1385cb20d518STheodore Ts'o len = PAGE_CACHE_SIZE; 13861de3e3dfSTheodore Ts'o if (map) { 13871de3e3dfSTheodore Ts'o cur_logical = index << (PAGE_CACHE_SHIFT - 13881de3e3dfSTheodore Ts'o inode->i_blkbits); 13891de3e3dfSTheodore Ts'o pblock = map->m_pblk + (cur_logical - 13901de3e3dfSTheodore Ts'o map->m_lblk); 13911de3e3dfSTheodore Ts'o } 1392791b7f08SAneesh Kumar K.V index++; 1393791b7f08SAneesh Kumar K.V 1394791b7f08SAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1395791b7f08SAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1396791b7f08SAneesh Kumar K.V 13973ecdb3a1STheodore Ts'o bh = page_bufs = page_buffers(page); 13983ecdb3a1STheodore Ts'o block_start = 0; 13993ecdb3a1STheodore Ts'o do { 14001de3e3dfSTheodore Ts'o if (map && (cur_logical >= map->m_lblk) && 14011de3e3dfSTheodore Ts'o (cur_logical <= (map->m_lblk + 14021de3e3dfSTheodore Ts'o (map->m_len - 1)))) { 14031de3e3dfSTheodore Ts'o if (buffer_delay(bh)) { 14041de3e3dfSTheodore Ts'o clear_buffer_delay(bh); 14051de3e3dfSTheodore Ts'o bh->b_blocknr = pblock; 14061de3e3dfSTheodore Ts'o } 14071de3e3dfSTheodore Ts'o if (buffer_unwritten(bh) || 14081de3e3dfSTheodore Ts'o buffer_mapped(bh)) 14091de3e3dfSTheodore Ts'o BUG_ON(bh->b_blocknr != pblock); 14101de3e3dfSTheodore Ts'o if (map->m_flags & EXT4_MAP_UNINIT) 14111de3e3dfSTheodore Ts'o set_buffer_uninit(bh); 14121de3e3dfSTheodore Ts'o clear_buffer_unwritten(bh); 14131de3e3dfSTheodore Ts'o } 14141de3e3dfSTheodore Ts'o 141513a79a47SYongqiang Yang /* 141613a79a47SYongqiang Yang * Skip the page if block allocation was undone 141713a79a47SYongqiang Yang * and the block is dirty 141813a79a47SYongqiang Yang */ 141913a79a47SYongqiang Yang if (ext4_bh_delay_or_unwritten(NULL, bh)) 142097498956STheodore Ts'o skip_page = 1; 14213ecdb3a1STheodore Ts'o bh = bh->b_this_page; 14223ecdb3a1STheodore Ts'o block_start += bh->b_size; 14231de3e3dfSTheodore Ts'o cur_logical++; 14241de3e3dfSTheodore Ts'o pblock++; 14251de3e3dfSTheodore Ts'o } while (bh != page_bufs); 14261de3e3dfSTheodore Ts'o 1427f8bec370SJan Kara if (skip_page) { 1428f8bec370SJan Kara unlock_page(page); 1429f8bec370SJan Kara continue; 1430f8bec370SJan Kara } 1431cb20d518STheodore Ts'o 143297498956STheodore Ts'o clear_page_dirty_for_io(page); 1433fe089c77SJan Kara err = ext4_bio_write_page(&io_submit, page, len, 1434fe089c77SJan Kara mpd->wbc); 1435cb20d518STheodore Ts'o if (!err) 1436a1d6cc56SAneesh Kumar K.V mpd->pages_written++; 143764769240SAlex Tomas /* 143864769240SAlex Tomas * In the error case, we have to continue because 143964769240SAlex Tomas * the remaining pages are still locked 144064769240SAlex Tomas */ 144164769240SAlex Tomas if (ret == 0)
144264769240SAlex Tomas ret = err; 144364769240SAlex Tomas } 144464769240SAlex Tomas pagevec_release(&pvec); 144564769240SAlex Tomas } 1446bd2d0210STheodore Ts'o ext4_io_submit(&io_submit); 144764769240SAlex Tomas return ret; 144864769240SAlex Tomas } 144964769240SAlex Tomas 1450c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 1451c4a0c46eSAneesh Kumar K.V { 1452c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1453c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1454c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1455c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1456c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 145751865fdaSZheng Liu ext4_lblk_t start, last; 1458c4a0c46eSAneesh Kumar K.V 1459c7f5938aSCurt Wohlgemuth index = mpd->first_page; 1460c7f5938aSCurt Wohlgemuth end = mpd->next_page - 1; 146151865fdaSZheng Liu 146251865fdaSZheng Liu start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 146351865fdaSZheng Liu last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); 146451865fdaSZheng Liu ext4_es_remove_extent(inode, start, last - start + 1); 146551865fdaSZheng Liu 146666bea92cSEric Sandeen pagevec_init(&pvec, 0); 1467c4a0c46eSAneesh Kumar K.V while (index <= end) { 1468c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1469c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1470c4a0c46eSAneesh Kumar K.V break; 1471c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1472c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 14739b1d0998SJan Kara if (page->index > end) 1474c4a0c46eSAneesh Kumar K.V break; 1475c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1476c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1477c4a0c46eSAneesh Kumar K.V block_invalidatepage(page, 0); 1478c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 1479c4a0c46eSAneesh Kumar K.V unlock_page(page); 1480c4a0c46eSAneesh Kumar K.V } 14819b1d0998SJan Kara index = pvec.pages[nr_pages - 1]->index + 1; 14829b1d0998SJan Kara pagevec_release(&pvec); 1483c4a0c46eSAneesh Kumar K.V } 1484c4a0c46eSAneesh Kumar K.V return; 1485c4a0c46eSAneesh Kumar K.V } 1486c4a0c46eSAneesh Kumar K.V 1487df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1488df22291fSAneesh Kumar K.V { 1489df22291fSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 149092b97816STheodore Ts'o struct super_block *sb = inode->i_sb; 149192b97816STheodore Ts'o 149292b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", 14935dee5437STheodore Ts'o EXT4_C2B(EXT4_SB(inode->i_sb), 14945dee5437STheodore Ts'o ext4_count_free_clusters(inode->i_sb))); 149592b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Free/Dirty block details"); 149692b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "free_blocks=%lld", 149757042651STheodore Ts'o (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 149857042651STheodore Ts'o percpu_counter_sum(&sbi->s_freeclusters_counter))); 149992b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld", 15007b415bf6SAditya Kali (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 15017b415bf6SAditya Kali percpu_counter_sum(&sbi->s_dirtyclusters_counter))); 150292b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Block reservation details"); 150392b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u", 1504df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks); 150592b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u", 
1506df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_meta_blocks); 1507df22291fSAneesh Kumar K.V return; 1508df22291fSAneesh Kumar K.V } 1509df22291fSAneesh Kumar K.V 1510b920c755STheodore Ts'o /* 15115a87b7a5STheodore Ts'o * mpage_da_map_and_submit - go through the given space, map it 15125a87b7a5STheodore Ts'o * if necessary, and then submit it for I/O 151364769240SAlex Tomas * 15148dc207c0STheodore Ts'o * @mpd - bh describing space 151564769240SAlex Tomas * 151664769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 151764769240SAlex Tomas * 151864769240SAlex Tomas */ 15195a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd) 152064769240SAlex Tomas { 15212ac3b6e0STheodore Ts'o int err, blks, get_blocks_flags; 15221de3e3dfSTheodore Ts'o struct ext4_map_blocks map, *mapp = NULL; 15232fa3cdfbSTheodore Ts'o sector_t next = mpd->b_blocknr; 15242fa3cdfbSTheodore Ts'o unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 15252fa3cdfbSTheodore Ts'o loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 15262fa3cdfbSTheodore Ts'o handle_t *handle = NULL; 152764769240SAlex Tomas 152864769240SAlex Tomas /* 15295a87b7a5STheodore Ts'o * If the blocks are mapped already, or we couldn't accumulate 15305a87b7a5STheodore Ts'o * any blocks, then proceed immediately to the submission stage. 153164769240SAlex Tomas */ 15325a87b7a5STheodore Ts'o if ((mpd->b_size == 0) || 15335a87b7a5STheodore Ts'o ((mpd->b_state & (1 << BH_Mapped)) && 153429fa89d0SAneesh Kumar K.V !(mpd->b_state & (1 << BH_Delay)) && 15355a87b7a5STheodore Ts'o !(mpd->b_state & (1 << BH_Unwritten)))) 15365a87b7a5STheodore Ts'o goto submit_io; 15372fa3cdfbSTheodore Ts'o 15382fa3cdfbSTheodore Ts'o handle = ext4_journal_current_handle(); 15392fa3cdfbSTheodore Ts'o BUG_ON(!handle); 15402fa3cdfbSTheodore Ts'o 154179ffab34SAneesh Kumar K.V /* 154279e83036SEric Sandeen * Call ext4_map_blocks() to allocate any delayed allocation 15432ac3b6e0STheodore Ts'o * blocks, or to convert an uninitialized extent to be 15442ac3b6e0STheodore Ts'o * initialized (in the case where we have written into 15452ac3b6e0STheodore Ts'o * one or more preallocated blocks). 15462ac3b6e0STheodore Ts'o * 15472ac3b6e0STheodore Ts'o * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 15482ac3b6e0STheodore Ts'o * indicate that we are on the delayed allocation path. This 15492ac3b6e0STheodore Ts'o * affects functions in many different parts of the allocation 15502ac3b6e0STheodore Ts'o * call path. This flag exists primarily because we don't 155179e83036SEric Sandeen * want to change *many* call functions, so ext4_map_blocks() 1552f2321097STheodore Ts'o * will set the EXT4_STATE_DELALLOC_RESERVED flag once the 15532ac3b6e0STheodore Ts'o * inode's allocation semaphore is taken. 15542ac3b6e0STheodore Ts'o * 15552ac3b6e0STheodore Ts'o * If the blocks in question were delalloc blocks, set 15562ac3b6e0STheodore Ts'o * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 15572ac3b6e0STheodore Ts'o * variables are updated after the blocks have been allocated.
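* In other words, the flag tells the accounting code that space for these blocks was set aside earlier by ext4_da_reserve_space(), so the reservation is consumed here rather than charged a second time.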
155879ffab34SAneesh Kumar K.V */ 15592ed88685STheodore Ts'o map.m_lblk = next; 15602ed88685STheodore Ts'o map.m_len = max_blocks; 15611296cc85SAneesh Kumar K.V get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 1562744692dcSJiaying Zhang if (ext4_should_dioread_nolock(mpd->inode)) 1563744692dcSJiaying Zhang get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 15642ac3b6e0STheodore Ts'o if (mpd->b_state & (1 << BH_Delay)) 15651296cc85SAneesh Kumar K.V get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 15661296cc85SAneesh Kumar K.V 15672ed88685STheodore Ts'o blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); 15682fa3cdfbSTheodore Ts'o if (blks < 0) { 1569e3570639SEric Sandeen struct super_block *sb = mpd->inode->i_sb; 1570e3570639SEric Sandeen 15712fa3cdfbSTheodore Ts'o err = blks; 1572ed5bde0bSTheodore Ts'o /* 15735a87b7a5STheodore Ts'o * If get block returns EAGAIN or ENOSPC and there 157497498956STheodore Ts'o * appear to be free blocks we will just let 157597498956STheodore Ts'o * mpage_da_submit_io() unlock all of the pages. 1576c4a0c46eSAneesh Kumar K.V */ 1577c4a0c46eSAneesh Kumar K.V if (err == -EAGAIN) 15785a87b7a5STheodore Ts'o goto submit_io; 1579df22291fSAneesh Kumar K.V 15805dee5437STheodore Ts'o if (err == -ENOSPC && ext4_count_free_clusters(sb)) { 1581df22291fSAneesh Kumar K.V mpd->retval = err; 15825a87b7a5STheodore Ts'o goto submit_io; 1583df22291fSAneesh Kumar K.V } 1584df22291fSAneesh Kumar K.V 1585c4a0c46eSAneesh Kumar K.V /* 1586ed5bde0bSTheodore Ts'o * get block failure will cause us to loop in 1587ed5bde0bSTheodore Ts'o * writepages, because a_ops->writepage won't be able 1588ed5bde0bSTheodore Ts'o * to make progress. The page will be redirtied by 1589ed5bde0bSTheodore Ts'o * writepage and writepages will again try to write 1590ed5bde0bSTheodore Ts'o * the same page. 1591c4a0c46eSAneesh Kumar K.V */ 1592e3570639SEric Sandeen if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { 1593e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1594e3570639SEric Sandeen "delayed block allocation failed for inode %lu " 1595e3570639SEric Sandeen "at logical offset %llu with max blocks %zd " 1596e3570639SEric Sandeen "with error %d", mpd->inode->i_ino, 1597c4a0c46eSAneesh Kumar K.V (unsigned long long) next, 15988dc207c0STheodore Ts'o mpd->b_size >> mpd->inode->i_blkbits, err); 1599e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1600e3570639SEric Sandeen "This should not happen!! Data will be lost\n"); 1601e3570639SEric Sandeen if (err == -ENOSPC) 1602df22291fSAneesh Kumar K.V ext4_print_free_blocks(mpd->inode); 1603030ba6bcSAneesh Kumar K.V } 16042fa3cdfbSTheodore Ts'o /* invalidate all the pages */ 1605c7f5938aSCurt Wohlgemuth ext4_da_block_invalidatepages(mpd); 1606e0fd9b90SCurt Wohlgemuth 1607e0fd9b90SCurt Wohlgemuth /* Mark this page range as having been completed */ 1608e0fd9b90SCurt Wohlgemuth mpd->io_done = 1; 16095a87b7a5STheodore Ts'o return; 1610c4a0c46eSAneesh Kumar K.V } 16112fa3cdfbSTheodore Ts'o BUG_ON(blks == 0); 16122fa3cdfbSTheodore Ts'o 16131de3e3dfSTheodore Ts'o mapp = &map; 16142ed88685STheodore Ts'o if (map.m_flags & EXT4_MAP_NEW) { 16152ed88685STheodore Ts'o struct block_device *bdev = mpd->inode->i_sb->s_bdev; 16162ed88685STheodore Ts'o int i; 161764769240SAlex Tomas 16182ed88685STheodore Ts'o for (i = 0; i < map.m_len; i++) 16192ed88685STheodore Ts'o unmap_underlying_metadata(bdev, map.m_pblk + i); 16202fa3cdfbSTheodore Ts'o } 16212fa3cdfbSTheodore Ts'o 16222fa3cdfbSTheodore Ts'o /* 162303f5d8bcSJan Kara * Update on-disk size along with block allocation.
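* Below, disksize is capped at i_size and i_disksize is only moved forward, so a crash cannot expose allocated blocks beyond what userspace has seen.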
16242fa3cdfbSTheodore Ts'o */ 16252fa3cdfbSTheodore Ts'o disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 16262fa3cdfbSTheodore Ts'o if (disksize > i_size_read(mpd->inode)) 16272fa3cdfbSTheodore Ts'o disksize = i_size_read(mpd->inode); 16282fa3cdfbSTheodore Ts'o if (disksize > EXT4_I(mpd->inode)->i_disksize) { 16292fa3cdfbSTheodore Ts'o ext4_update_i_disksize(mpd->inode, disksize); 16305a87b7a5STheodore Ts'o err = ext4_mark_inode_dirty(handle, mpd->inode); 16315a87b7a5STheodore Ts'o if (err) 16325a87b7a5STheodore Ts'o ext4_error(mpd->inode->i_sb, 16335a87b7a5STheodore Ts'o "Failed to mark inode %lu dirty", 16345a87b7a5STheodore Ts'o mpd->inode->i_ino); 16352fa3cdfbSTheodore Ts'o } 16362fa3cdfbSTheodore Ts'o 16375a87b7a5STheodore Ts'o submit_io: 16381de3e3dfSTheodore Ts'o mpage_da_submit_io(mpd, mapp); 16395a87b7a5STheodore Ts'o mpd->io_done = 1; 164064769240SAlex Tomas } 164164769240SAlex Tomas 1642bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 1643bf068ee2SAneesh Kumar K.V (1 << BH_Delay) | (1 << BH_Unwritten)) 164464769240SAlex Tomas 164564769240SAlex Tomas /* 164664769240SAlex Tomas * mpage_add_bh_to_extent - try to add one more block to extent of blocks 164764769240SAlex Tomas * 164864769240SAlex Tomas * @mpd->lbh - extent of blocks 164964769240SAlex Tomas * @logical - logical number of the block in the file 1650b6a8e62fSJan Kara * @b_state - b_state of the buffer head added 165164769240SAlex Tomas * 165264769240SAlex Tomas * The function is used to collect contiguous blocks in the same state 165364769240SAlex Tomas */ 1654b6a8e62fSJan Kara static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical, 16558dc207c0STheodore Ts'o unsigned long b_state) 165664769240SAlex Tomas { 165764769240SAlex Tomas sector_t next; 1658b6a8e62fSJan Kara int blkbits = mpd->inode->i_blkbits; 1659b6a8e62fSJan Kara int nrblocks = mpd->b_size >> blkbits; 166064769240SAlex Tomas 1661c445e3e0SEric Sandeen /* 1662c445e3e0SEric Sandeen * XXX Don't go larger than mballoc is willing to allocate 1663c445e3e0SEric Sandeen * This is a stopgap solution. We eventually need to fold 1664c445e3e0SEric Sandeen * mpage_da_submit_io() into this function and then call 166579e83036SEric Sandeen * ext4_map_blocks() multiple times in a loop 1666c445e3e0SEric Sandeen */ 1667b6a8e62fSJan Kara if (nrblocks >= (8*1024*1024 >> blkbits)) 1668c445e3e0SEric Sandeen goto flush_it; 1669c445e3e0SEric Sandeen 1670525f4ed8SMingming Cao /* check if the reserved journal credits might overflow */ 1671b6a8e62fSJan Kara if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) { 1672525f4ed8SMingming Cao if (nrblocks >= EXT4_MAX_TRANS_DATA) { 1673525f4ed8SMingming Cao /* 1674525f4ed8SMingming Cao * With the non-extent format we are limited by the journal 1675525f4ed8SMingming Cao * credits available. The total credits needed to insert 1676525f4ed8SMingming Cao * nrblocks contiguous blocks depend on 1677525f4ed8SMingming Cao * nrblocks, so limit nrblocks.
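* Extent-mapped inodes never take this branch; the check applies only to indirect-mapped files.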
1678525f4ed8SMingming Cao */ 1679525f4ed8SMingming Cao goto flush_it; 1680525f4ed8SMingming Cao } 1681525f4ed8SMingming Cao } 168264769240SAlex Tomas /* 168364769240SAlex Tomas * First block in the extent 168464769240SAlex Tomas */ 16858dc207c0STheodore Ts'o if (mpd->b_size == 0) { 16868dc207c0STheodore Ts'o mpd->b_blocknr = logical; 1687b6a8e62fSJan Kara mpd->b_size = 1 << blkbits; 16888dc207c0STheodore Ts'o mpd->b_state = b_state & BH_FLAGS; 168964769240SAlex Tomas return; 169064769240SAlex Tomas } 169164769240SAlex Tomas 16928dc207c0STheodore Ts'o next = mpd->b_blocknr + nrblocks; 169364769240SAlex Tomas /* 169464769240SAlex Tomas * Can we merge the block to our big extent? 169564769240SAlex Tomas */ 16968dc207c0STheodore Ts'o if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { 1697b6a8e62fSJan Kara mpd->b_size += 1 << blkbits; 169864769240SAlex Tomas return; 169964769240SAlex Tomas } 170064769240SAlex Tomas 1701525f4ed8SMingming Cao flush_it: 170264769240SAlex Tomas /* 170364769240SAlex Tomas * We couldn't merge the block to our extent, so we 170464769240SAlex Tomas * need to flush the current extent and start a new one 170564769240SAlex Tomas */ 17065a87b7a5STheodore Ts'o mpage_da_map_and_submit(mpd); 1707a1d6cc56SAneesh Kumar K.V return; 170864769240SAlex Tomas } 170964769240SAlex Tomas 1710c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 171129fa89d0SAneesh Kumar K.V { 1712c364b22cSAneesh Kumar K.V return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); 171329fa89d0SAneesh Kumar K.V } 171429fa89d0SAneesh Kumar K.V 171564769240SAlex Tomas /* 17165356f261SAditya Kali * This function grabs code from the very beginning of 17175356f261SAditya Kali * ext4_map_blocks, but assumes that the caller is from delayed write 17185356f261SAditya Kali * time. This function looks up the requested blocks and sets the 17195356f261SAditya Kali * buffer delay bit under the protection of i_data_sem. 17205356f261SAditya Kali */ 17215356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, 17225356f261SAditya Kali struct ext4_map_blocks *map, 17235356f261SAditya Kali struct buffer_head *bh) 17245356f261SAditya Kali { 17255356f261SAditya Kali int retval; 17265356f261SAditya Kali sector_t invalid_block = ~((sector_t) 0xffff); 17275356f261SAditya Kali 17285356f261SAditya Kali if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 17295356f261SAditya Kali invalid_block = ~0; 17305356f261SAditya Kali 17315356f261SAditya Kali map->m_flags = 0; 17325356f261SAditya Kali ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," 17335356f261SAditya Kali "logical block %lu\n", inode->i_ino, map->m_len, 17345356f261SAditya Kali (unsigned long) map->m_lblk); 17355356f261SAditya Kali /* 17365356f261SAditya Kali * Try to see if we can get the block without requesting a new 17375356f261SAditya Kali * file system block. 17385356f261SAditya Kali */ 17395356f261SAditya Kali down_read((&EXT4_I(inode)->i_data_sem)); 17409c3569b5STao Ma if (ext4_has_inline_data(inode)) { 17419c3569b5STao Ma /* 17429c3569b5STao Ma * We will soon create blocks for this page, so let 17439c3569b5STao Ma * us pretend that the blocks aren't allocated yet. 17449c3569b5STao Ma * In case of clusters, we have to handle the work 17459c3569b5STao Ma * of mapping from cluster so that the reserved space 17469c3569b5STao Ma * is calculated properly.
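* Returning 0 here sends us into the retval == 0 path below, which reserves the space and maps the buffer to the invalid_block sentinel.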
17479c3569b5STao Ma */ 17489c3569b5STao Ma if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) && 17499c3569b5STao Ma ext4_find_delalloc_cluster(inode, map->m_lblk)) 17509c3569b5STao Ma map->m_flags |= EXT4_MAP_FROM_CLUSTER; 17519c3569b5STao Ma retval = 0; 17529c3569b5STao Ma } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 17535356f261SAditya Kali retval = ext4_ext_map_blocks(NULL, inode, map, 0); 17545356f261SAditya Kali else 17555356f261SAditya Kali retval = ext4_ind_map_blocks(NULL, inode, map, 0); 17565356f261SAditya Kali 17575356f261SAditya Kali if (retval == 0) { 17585356f261SAditya Kali /* 17595356f261SAditya Kali * XXX: __block_prepare_write() unmaps the passed block, 17605356f261SAditya Kali * is it OK? 17615356f261SAditya Kali */ 17625356f261SAditya Kali /* If the block was allocated from a previously allocated cluster, 17635356f261SAditya Kali * then we don't need to reserve it again. */ 17645356f261SAditya Kali if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { 17655356f261SAditya Kali retval = ext4_da_reserve_space(inode, iblock); 17665356f261SAditya Kali if (retval) 17675356f261SAditya Kali /* not enough space to reserve */ 17685356f261SAditya Kali goto out_unlock; 17695356f261SAditya Kali } 17705356f261SAditya Kali 177151865fdaSZheng Liu retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len); 177251865fdaSZheng Liu if (retval) 177351865fdaSZheng Liu goto out_unlock; 177451865fdaSZheng Liu 17755356f261SAditya Kali /* Clear the EXT4_MAP_FROM_CLUSTER flag since its purpose is served 17765356f261SAditya Kali * and it should not appear on the bh->b_state. 17775356f261SAditya Kali */ 17785356f261SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 17795356f261SAditya Kali 17805356f261SAditya Kali map_bh(bh, inode->i_sb, invalid_block); 17815356f261SAditya Kali set_buffer_new(bh); 17825356f261SAditya Kali set_buffer_delay(bh); 17835356f261SAditya Kali } 17845356f261SAditya Kali 17855356f261SAditya Kali out_unlock: 17865356f261SAditya Kali up_read((&EXT4_I(inode)->i_data_sem)); 17875356f261SAditya Kali 17885356f261SAditya Kali return retval; 17895356f261SAditya Kali } 17905356f261SAditya Kali 17915356f261SAditya Kali /* 1792b920c755STheodore Ts'o * This is a special get_blocks_t callback which is used by 1793b920c755STheodore Ts'o * ext4_da_write_begin(). It will either return a mapped block or 1794b920c755STheodore Ts'o * reserve space for a single block. 179529fa89d0SAneesh Kumar K.V * 179629fa89d0SAneesh Kumar K.V * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 179729fa89d0SAneesh Kumar K.V * We also have b_blocknr = -1 and b_bdev initialized properly 179829fa89d0SAneesh Kumar K.V * 179929fa89d0SAneesh Kumar K.V * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 180029fa89d0SAneesh Kumar K.V * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev 180129fa89d0SAneesh Kumar K.V * initialized properly.
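* A rough sketch of the expected use (per the note above, ext4_da_write_begin() is the caller): __block_write_begin(page, pos, len, ext4_da_get_block_prep).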
180264769240SAlex Tomas */ 18039c3569b5STao Ma int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 18042ed88685STheodore Ts'o struct buffer_head *bh, int create) 180564769240SAlex Tomas { 18062ed88685STheodore Ts'o struct ext4_map_blocks map; 180764769240SAlex Tomas int ret = 0; 180864769240SAlex Tomas 180964769240SAlex Tomas BUG_ON(create == 0); 18102ed88685STheodore Ts'o BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 18112ed88685STheodore Ts'o 18122ed88685STheodore Ts'o map.m_lblk = iblock; 18132ed88685STheodore Ts'o map.m_len = 1; 181464769240SAlex Tomas 181564769240SAlex Tomas /* 181664769240SAlex Tomas * First, we need to know whether the block is already allocated; 181764769240SAlex Tomas * preallocated blocks are unmapped but should be treated 181864769240SAlex Tomas * the same as allocated blocks. 181964769240SAlex Tomas */ 18205356f261SAditya Kali ret = ext4_da_map_blocks(inode, iblock, &map, bh); 18215356f261SAditya Kali if (ret <= 0) 18222ed88685STheodore Ts'o return ret; 182364769240SAlex Tomas 18242ed88685STheodore Ts'o map_bh(bh, inode->i_sb, map.m_pblk); 18252ed88685STheodore Ts'o bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 18262ed88685STheodore Ts'o 18272ed88685STheodore Ts'o if (buffer_unwritten(bh)) { 18282ed88685STheodore Ts'o /* A delayed write to unwritten bh should be marked 18292ed88685STheodore Ts'o * new and mapped. Mapped ensures that we don't do 18302ed88685STheodore Ts'o * get_block multiple times when we write to the same 18312ed88685STheodore Ts'o * offset and new ensures that we do a proper zero out 18322ed88685STheodore Ts'o * for partial writes. 18332ed88685STheodore Ts'o */ 18342ed88685STheodore Ts'o set_buffer_new(bh); 1835c8205636STheodore Ts'o set_buffer_mapped(bh); 18362ed88685STheodore Ts'o } 18372ed88685STheodore Ts'o return 0; 183864769240SAlex Tomas } 183961628a3fSMingming Cao 184062e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh) 184162e086beSAneesh Kumar K.V { 184262e086beSAneesh Kumar K.V get_bh(bh); 184362e086beSAneesh Kumar K.V return 0; 184462e086beSAneesh Kumar K.V } 184562e086beSAneesh Kumar K.V 184662e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh) 184762e086beSAneesh Kumar K.V { 184862e086beSAneesh Kumar K.V put_bh(bh); 184962e086beSAneesh Kumar K.V return 0; 185062e086beSAneesh Kumar K.V } 185162e086beSAneesh Kumar K.V 185262e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page, 185362e086beSAneesh Kumar K.V unsigned int len) 185462e086beSAneesh Kumar K.V { 185562e086beSAneesh Kumar K.V struct address_space *mapping = page->mapping; 185662e086beSAneesh Kumar K.V struct inode *inode = mapping->host; 18573fdcfb66STao Ma struct buffer_head *page_bufs = NULL; 185862e086beSAneesh Kumar K.V handle_t *handle = NULL; 18593fdcfb66STao Ma int ret = 0, err = 0; 18603fdcfb66STao Ma int inline_data = ext4_has_inline_data(inode); 18613fdcfb66STao Ma struct buffer_head *inode_bh = NULL; 186262e086beSAneesh Kumar K.V 1863cb20d518STheodore Ts'o ClearPageChecked(page); 18643fdcfb66STao Ma 18653fdcfb66STao Ma if (inline_data) { 18663fdcfb66STao Ma BUG_ON(page->index != 0); 18673fdcfb66STao Ma BUG_ON(len > ext4_get_max_inline_size(inode)); 18683fdcfb66STao Ma inode_bh = ext4_journalled_write_inline_data(inode, len, page); 18693fdcfb66STao Ma if (inode_bh == NULL) 18703fdcfb66STao Ma goto out; 18713fdcfb66STao Ma } else { 187262e086beSAneesh Kumar K.V page_bufs = page_buffers(page); 18733fdcfb66STao Ma if (!page_bufs) {
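/* A non-inline data page is expected to have buffers attached here; anything else is a bug. */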
18743fdcfb66STao Ma BUG(); 18753fdcfb66STao Ma goto out; 18763fdcfb66STao Ma } 18773fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 18783fdcfb66STao Ma NULL, bget_one); 18793fdcfb66STao Ma } 188062e086beSAneesh Kumar K.V /* As soon as we unlock the page, it can go away, but we have 188162e086beSAneesh Kumar K.V * references to buffers so we are safe */ 188262e086beSAneesh Kumar K.V unlock_page(page); 188362e086beSAneesh Kumar K.V 188462e086beSAneesh Kumar K.V handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 188562e086beSAneesh Kumar K.V if (IS_ERR(handle)) { 188662e086beSAneesh Kumar K.V ret = PTR_ERR(handle); 188762e086beSAneesh Kumar K.V goto out; 188862e086beSAneesh Kumar K.V } 188962e086beSAneesh Kumar K.V 1890441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1891441c8508SCurt Wohlgemuth 18923fdcfb66STao Ma if (inline_data) { 18933fdcfb66STao Ma ret = ext4_journal_get_write_access(handle, inode_bh); 18943fdcfb66STao Ma 18953fdcfb66STao Ma err = ext4_handle_dirty_metadata(handle, inode, inode_bh); 18963fdcfb66STao Ma 18973fdcfb66STao Ma } else { 1898f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 189962e086beSAneesh Kumar K.V do_journal_get_write_access); 190062e086beSAneesh Kumar K.V 1901f19d5870STao Ma err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 190262e086beSAneesh Kumar K.V write_end_fn); 19033fdcfb66STao Ma } 190462e086beSAneesh Kumar K.V if (ret == 0) 190562e086beSAneesh Kumar K.V ret = err; 19062d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 190762e086beSAneesh Kumar K.V err = ext4_journal_stop(handle); 190862e086beSAneesh Kumar K.V if (!ret) 190962e086beSAneesh Kumar K.V ret = err; 191062e086beSAneesh Kumar K.V 19113fdcfb66STao Ma if (!ext4_has_inline_data(inode)) 19123fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 19133fdcfb66STao Ma NULL, bput_one); 191419f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 191562e086beSAneesh Kumar K.V out: 19163fdcfb66STao Ma brelse(inode_bh); 191762e086beSAneesh Kumar K.V return ret; 191862e086beSAneesh Kumar K.V } 191962e086beSAneesh Kumar K.V 192061628a3fSMingming Cao /* 192143ce1d23SAneesh Kumar K.V * Note that we don't need to start a transaction unless we're journaling data 192243ce1d23SAneesh Kumar K.V * because we should have holes filled from ext4_page_mkwrite(). We don't even 192343ce1d23SAneesh Kumar K.V * need to add the inode to the transaction's list in ordered mode because if 192443ce1d23SAneesh Kumar K.V * we are writing back data added by write(), the inode is already there and if 192543ce1d23SAneesh Kumar K.V * we are writing back data modified via mmap(), no one guarantees in which 192643ce1d23SAneesh Kumar K.V * transaction the data will hit the disk. In case we are journaling data, we 192743ce1d23SAneesh Kumar K.V * cannot start a transaction directly because transaction start ranks above page 192843ce1d23SAneesh Kumar K.V * lock so we have to do some magic. 192943ce1d23SAneesh Kumar K.V * 1930b920c755STheodore Ts'o * This function can get called via...
1931b920c755STheodore Ts'o * - ext4_da_writepages after taking page lock (have journal handle) 1932b920c755STheodore Ts'o * - journal_submit_inode_data_buffers (no journal handle) 1933f6463b0dSArtem Bityutskiy * - shrink_page_list via the kswapd/direct reclaim (no journal handle) 1934b920c755STheodore Ts'o * - grab_page_cache when doing write_begin (have journal handle) 193543ce1d23SAneesh Kumar K.V * 193643ce1d23SAneesh Kumar K.V * We don't do any block allocation in this function. If we have a page with 193743ce1d23SAneesh Kumar K.V * multiple blocks we need to write those buffer_heads that are mapped. This 193843ce1d23SAneesh Kumar K.V * is important for mmap-based writes. So if, with blocksize 1K, we do 193943ce1d23SAneesh Kumar K.V * truncate(f, 1024); 194043ce1d23SAneesh Kumar K.V * a = mmap(f, 0, 4096); 194143ce1d23SAneesh Kumar K.V * a[0] = 'a'; 194243ce1d23SAneesh Kumar K.V * truncate(f, 4096); 194343ce1d23SAneesh Kumar K.V * then the first buffer_head in the page is mapped via the page_mkwrite callback, 194490802ed9SPaul Bolle * but the other buffer_heads would be unmapped but dirty (dirtied via 194543ce1d23SAneesh Kumar K.V * do_wp_page). So writepage should write the first block. If we modify 194643ce1d23SAneesh Kumar K.V * the mmap area beyond 1024 we will again get a page_fault and the 194743ce1d23SAneesh Kumar K.V * page_mkwrite callback will do the block allocation and mark the 194843ce1d23SAneesh Kumar K.V * buffer_heads mapped. 194943ce1d23SAneesh Kumar K.V * 195043ce1d23SAneesh Kumar K.V * We redirty the page if we have any buffer_heads that are either delayed or 195143ce1d23SAneesh Kumar K.V * unwritten in the page. 195243ce1d23SAneesh Kumar K.V * 195343ce1d23SAneesh Kumar K.V * We can get recursively called as shown below. 195443ce1d23SAneesh Kumar K.V * 195543ce1d23SAneesh Kumar K.V * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 195643ce1d23SAneesh Kumar K.V * ext4_writepage() 195743ce1d23SAneesh Kumar K.V * 195843ce1d23SAneesh Kumar K.V * But since we don't do any block allocation we should not deadlock. 195943ce1d23SAneesh Kumar K.V * The page also has the dirty flag cleared, so we don't get a recursive page_lock. 196061628a3fSMingming Cao */ 196143ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page, 196264769240SAlex Tomas struct writeback_control *wbc) 196364769240SAlex Tomas { 1964f8bec370SJan Kara int ret = 0; 196561628a3fSMingming Cao loff_t size; 1966498e5f24STheodore Ts'o unsigned int len; 1967744692dcSJiaying Zhang struct buffer_head *page_bufs = NULL; 196861628a3fSMingming Cao struct inode *inode = page->mapping->host; 196936ade451SJan Kara struct ext4_io_submit io_submit; 197064769240SAlex Tomas 1971a9c667f8SLukas Czerner trace_ext4_writepage(page); 197261628a3fSMingming Cao size = i_size_read(inode); 197361628a3fSMingming Cao if (page->index == size >> PAGE_CACHE_SHIFT) 197461628a3fSMingming Cao len = size & ~PAGE_CACHE_MASK; 197561628a3fSMingming Cao else 197661628a3fSMingming Cao len = PAGE_CACHE_SIZE; 197761628a3fSMingming Cao 1978f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page); 1979*fe386132SJan Kara /* 1980*fe386132SJan Kara * We cannot do block allocation or other extent handling in this 1981*fe386132SJan Kara * function. If there are buffers needing that, we have to redirty 1982*fe386132SJan Kara * the page.
But we may reach here when we do a journal commit via 1983*fe386132SJan Kara * journal_submit_inode_data_buffers() and in that case we must write 1984*fe386132SJan Kara * allocated buffers to achieve data=ordered mode guarantees. 1985*fe386132SJan Kara */ 1986f19d5870STao Ma if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, 1987c364b22cSAneesh Kumar K.V ext4_bh_delay_or_unwritten)) { 1988f8bec370SJan Kara redirty_page_for_writepage(wbc, page); 1989*fe386132SJan Kara if (current->flags & PF_MEMALLOC) { 1990*fe386132SJan Kara /* 1991*fe386132SJan Kara * For memory cleaning there's no point in writing only 1992*fe386132SJan Kara * some buffers. So just bail out. Warn if we came here 1993*fe386132SJan Kara * from direct reclaim. 1994*fe386132SJan Kara */ 1995*fe386132SJan Kara WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) 1996*fe386132SJan Kara == PF_MEMALLOC); 1997f8bec370SJan Kara unlock_page(page); 1998f8bec370SJan Kara return 0; 1999f0e6c985SAneesh Kumar K.V } 2000*fe386132SJan Kara } 200164769240SAlex Tomas 2002cb20d518STheodore Ts'o if (PageChecked(page) && ext4_should_journal_data(inode)) 200343ce1d23SAneesh Kumar K.V /* 200443ce1d23SAneesh Kumar K.V * It's mmapped pagecache. Add buffers and journal it. There 200543ce1d23SAneesh Kumar K.V * doesn't seem much point in redirtying the page here. 200643ce1d23SAneesh Kumar K.V */ 20073f0ca309SWu Fengguang return __ext4_journalled_writepage(page, len); 200843ce1d23SAneesh Kumar K.V 200936ade451SJan Kara memset(&io_submit, 0, sizeof(io_submit)); 201036ade451SJan Kara ret = ext4_bio_write_page(&io_submit, page, len, wbc); 201136ade451SJan Kara ext4_io_submit(&io_submit); 201264769240SAlex Tomas return ret; 201364769240SAlex Tomas } 201464769240SAlex Tomas 201561628a3fSMingming Cao /* 2016525f4ed8SMingming Cao * This is called via ext4_da_writepages() to 201725985edcSLucas De Marchi * calculate the total number of credits to reserve to fit 2018525f4ed8SMingming Cao * a single extent allocation into a single transaction; 2019525f4ed8SMingming Cao * ext4_da_writepages() will loop calling this before 2020525f4ed8SMingming Cao * the block allocation. 202161628a3fSMingming Cao */ 2022525f4ed8SMingming Cao 2023525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode) 2024525f4ed8SMingming Cao { 2025525f4ed8SMingming Cao int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 2026525f4ed8SMingming Cao 2027525f4ed8SMingming Cao /* 2028525f4ed8SMingming Cao * With the non-extent format the journal credits needed to 2029525f4ed8SMingming Cao * insert nrblocks contiguous blocks depend on the 2030525f4ed8SMingming Cao * number of contiguous blocks. So we will limit the 2031525f4ed8SMingming Cao * number of contiguous blocks to a sane value 2032525f4ed8SMingming Cao */ 203312e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && 2034525f4ed8SMingming Cao (max_blocks > EXT4_MAX_TRANS_DATA)) 2035525f4ed8SMingming Cao max_blocks = EXT4_MAX_TRANS_DATA; 2036525f4ed8SMingming Cao 2037525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, max_blocks); 2038525f4ed8SMingming Cao } 203961628a3fSMingming Cao 20408e48dcfbSTheodore Ts'o /* 20418e48dcfbSTheodore Ts'o * write_cache_pages_da - walk the list of dirty pages of the given 20428eb9e5ceSTheodore Ts'o * address space and accumulate pages that need writing, and call 2043168fc022STheodore Ts'o * mpage_da_map_and_submit to map a single contiguous memory region 2044168fc022STheodore Ts'o * and then write them.
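* Returns MPAGE_DA_EXTENT_TAIL when it stops early because an accumulated extent was mapped and submitted, and 0 once the whole range has been scanned.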
20458e48dcfbSTheodore Ts'o */ 20469c3569b5STao Ma static int write_cache_pages_da(handle_t *handle, 20479c3569b5STao Ma struct address_space *mapping, 20488e48dcfbSTheodore Ts'o struct writeback_control *wbc, 204972f84e65SEric Sandeen struct mpage_da_data *mpd, 205072f84e65SEric Sandeen pgoff_t *done_index) 20518e48dcfbSTheodore Ts'o { 20528eb9e5ceSTheodore Ts'o struct buffer_head *bh, *head; 2053168fc022STheodore Ts'o struct inode *inode = mapping->host; 20548e48dcfbSTheodore Ts'o struct pagevec pvec; 20554f01b02cSTheodore Ts'o unsigned int nr_pages; 20564f01b02cSTheodore Ts'o sector_t logical; 20574f01b02cSTheodore Ts'o pgoff_t index, end; 20588e48dcfbSTheodore Ts'o long nr_to_write = wbc->nr_to_write; 20594f01b02cSTheodore Ts'o int i, tag, ret = 0; 20608e48dcfbSTheodore Ts'o 2061168fc022STheodore Ts'o memset(mpd, 0, sizeof(struct mpage_da_data)); 2062168fc022STheodore Ts'o mpd->wbc = wbc; 2063168fc022STheodore Ts'o mpd->inode = inode; 20648e48dcfbSTheodore Ts'o pagevec_init(&pvec, 0); 20658e48dcfbSTheodore Ts'o index = wbc->range_start >> PAGE_CACHE_SHIFT; 20668e48dcfbSTheodore Ts'o end = wbc->range_end >> PAGE_CACHE_SHIFT; 20678e48dcfbSTheodore Ts'o 20686e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 20695b41d924SEric Sandeen tag = PAGECACHE_TAG_TOWRITE; 20705b41d924SEric Sandeen else 20715b41d924SEric Sandeen tag = PAGECACHE_TAG_DIRTY; 20725b41d924SEric Sandeen 207372f84e65SEric Sandeen *done_index = index; 20744f01b02cSTheodore Ts'o while (index <= end) { 20755b41d924SEric Sandeen nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 20768e48dcfbSTheodore Ts'o min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 20778e48dcfbSTheodore Ts'o if (nr_pages == 0) 20784f01b02cSTheodore Ts'o return 0; 20798e48dcfbSTheodore Ts'o 20808e48dcfbSTheodore Ts'o for (i = 0; i < nr_pages; i++) { 20818e48dcfbSTheodore Ts'o struct page *page = pvec.pages[i]; 20828e48dcfbSTheodore Ts'o 20838e48dcfbSTheodore Ts'o /* 20848e48dcfbSTheodore Ts'o * At this point, the page may be truncated or 20858e48dcfbSTheodore Ts'o * invalidated (changing page->mapping to NULL), or 20868e48dcfbSTheodore Ts'o * even swizzled back from swapper_space to tmpfs file 20878e48dcfbSTheodore Ts'o * mapping. However, page->index will not change 20888e48dcfbSTheodore Ts'o * because we have a reference on the page. 
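* The range check below relies on that stable page->index.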
20898e48dcfbSTheodore Ts'o */ 20904f01b02cSTheodore Ts'o if (page->index > end) 20914f01b02cSTheodore Ts'o goto out; 20928e48dcfbSTheodore Ts'o 209372f84e65SEric Sandeen *done_index = page->index + 1; 209472f84e65SEric Sandeen 209578aaced3STheodore Ts'o /* 209678aaced3STheodore Ts'o * If we can't merge this page, and we have 209778aaced3STheodore Ts'o * accumulated a contiguous region, write it 209878aaced3STheodore Ts'o */ 209978aaced3STheodore Ts'o if ((mpd->next_page != page->index) && 210078aaced3STheodore Ts'o (mpd->next_page != mpd->first_page)) { 210178aaced3STheodore Ts'o mpage_da_map_and_submit(mpd); 210278aaced3STheodore Ts'o goto ret_extent_tail; 210378aaced3STheodore Ts'o } 210478aaced3STheodore Ts'o 21058e48dcfbSTheodore Ts'o lock_page(page); 21068e48dcfbSTheodore Ts'o 21078e48dcfbSTheodore Ts'o /* 21084f01b02cSTheodore Ts'o * If the page is no longer dirty, or its 21094f01b02cSTheodore Ts'o * mapping no longer corresponds to the inode we 21104f01b02cSTheodore Ts'o * are writing (which means it has been 21114f01b02cSTheodore Ts'o * truncated or invalidated), or the page is 21124f01b02cSTheodore Ts'o * already under writeback and we are not 21134f01b02cSTheodore Ts'o * doing a data integrity writeback, skip the page 21148e48dcfbSTheodore Ts'o */ 21154f01b02cSTheodore Ts'o if (!PageDirty(page) || 21164f01b02cSTheodore Ts'o (PageWriteback(page) && 21174f01b02cSTheodore Ts'o (wbc->sync_mode == WB_SYNC_NONE)) || 21184f01b02cSTheodore Ts'o unlikely(page->mapping != mapping)) { 21198e48dcfbSTheodore Ts'o unlock_page(page); 21208e48dcfbSTheodore Ts'o continue; 21218e48dcfbSTheodore Ts'o } 21228e48dcfbSTheodore Ts'o 21238e48dcfbSTheodore Ts'o wait_on_page_writeback(page); 21248e48dcfbSTheodore Ts'o BUG_ON(PageWriteback(page)); 21258e48dcfbSTheodore Ts'o 21269c3569b5STao Ma /* 21279c3569b5STao Ma * If we have inline data and arrive here, it means that 21289c3569b5STao Ma * we will soon create the block for the 1st page, so 21299c3569b5STao Ma * we'd better clear the inline data here. 21309c3569b5STao Ma */ 21319c3569b5STao Ma if (ext4_has_inline_data(inode)) { 21329c3569b5STao Ma BUG_ON(ext4_test_inode_state(inode, 21339c3569b5STao Ma EXT4_STATE_MAY_INLINE_DATA)); 21349c3569b5STao Ma ext4_destroy_inline_data(handle, inode); 21359c3569b5STao Ma } 21369c3569b5STao Ma 2137168fc022STheodore Ts'o if (mpd->next_page != page->index) 21388eb9e5ceSTheodore Ts'o mpd->first_page = page->index; 21398eb9e5ceSTheodore Ts'o mpd->next_page = page->index + 1; 21408eb9e5ceSTheodore Ts'o logical = (sector_t) page->index << 21418eb9e5ceSTheodore Ts'o (PAGE_CACHE_SHIFT - inode->i_blkbits); 21428eb9e5ceSTheodore Ts'o 2143f8bec370SJan Kara /* Add all dirty buffers to mpd */ 21448eb9e5ceSTheodore Ts'o head = page_buffers(page); 21458eb9e5ceSTheodore Ts'o bh = head; 21468eb9e5ceSTheodore Ts'o do { 21478eb9e5ceSTheodore Ts'o BUG_ON(buffer_locked(bh)); 21488eb9e5ceSTheodore Ts'o /* 2149f8bec370SJan Kara * We need to try to allocate unmapped blocks 2150f8bec370SJan Kara * in the same page.
Otherwise we won't make 2151f8bec370SJan Kara * progress with the page in ext4_writepage 21528eb9e5ceSTheodore Ts'o */ 21538eb9e5ceSTheodore Ts'o if (ext4_bh_delay_or_unwritten(NULL, bh)) { 21548eb9e5ceSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical, 21558eb9e5ceSTheodore Ts'o bh->b_state); 21564f01b02cSTheodore Ts'o if (mpd->io_done) 21574f01b02cSTheodore Ts'o goto ret_extent_tail; 2158f8bec370SJan Kara } else if (buffer_dirty(bh) && 2159f8bec370SJan Kara buffer_mapped(bh)) { 21608eb9e5ceSTheodore Ts'o /* 2161f8bec370SJan Kara * A mapped dirty buffer. We need to 2162f8bec370SJan Kara * update the b_state because we look 2163f8bec370SJan Kara * at b_state in mpage_da_map_and_submit(). 2164f8bec370SJan Kara * We don't update b_size because if we 2165f8bec370SJan Kara * find an unmapped buffer_head later 2166f8bec370SJan Kara * we need to use the b_state flag of 2167f8bec370SJan Kara * that buffer_head. 21688eb9e5ceSTheodore Ts'o */ 21698eb9e5ceSTheodore Ts'o if (mpd->b_size == 0) 2170f8bec370SJan Kara mpd->b_state = 2171f8bec370SJan Kara bh->b_state & BH_FLAGS; 21728e48dcfbSTheodore Ts'o } 21738eb9e5ceSTheodore Ts'o logical++; 21748eb9e5ceSTheodore Ts'o } while ((bh = bh->b_this_page) != head); 21758e48dcfbSTheodore Ts'o 21768e48dcfbSTheodore Ts'o if (nr_to_write > 0) { 21778e48dcfbSTheodore Ts'o nr_to_write--; 21788e48dcfbSTheodore Ts'o if (nr_to_write == 0 && 21794f01b02cSTheodore Ts'o wbc->sync_mode == WB_SYNC_NONE) 21808e48dcfbSTheodore Ts'o /* 21818e48dcfbSTheodore Ts'o * We stop writing back only if we are 21828e48dcfbSTheodore Ts'o * not doing integrity sync. In case of 21838e48dcfbSTheodore Ts'o * integrity sync we have to keep going 21848e48dcfbSTheodore Ts'o * because someone may be concurrently 21858e48dcfbSTheodore Ts'o * dirtying pages, and we might have 21868e48dcfbSTheodore Ts'o * synced a lot of newly appeared dirty 21878e48dcfbSTheodore Ts'o * pages, but have not synced all of the 21888e48dcfbSTheodore Ts'o * old dirty pages.
21898e48dcfbSTheodore Ts'o 				 */
21904f01b02cSTheodore Ts'o 				goto out;
21918e48dcfbSTheodore Ts'o 			}
21928e48dcfbSTheodore Ts'o 		}
21938e48dcfbSTheodore Ts'o 		pagevec_release(&pvec);
21948e48dcfbSTheodore Ts'o 		cond_resched();
21958e48dcfbSTheodore Ts'o 	}
21964f01b02cSTheodore Ts'o 	return 0;
21974f01b02cSTheodore Ts'o ret_extent_tail:
21984f01b02cSTheodore Ts'o 	ret = MPAGE_DA_EXTENT_TAIL;
21998eb9e5ceSTheodore Ts'o out:
22008eb9e5ceSTheodore Ts'o 	pagevec_release(&pvec);
22018eb9e5ceSTheodore Ts'o 	cond_resched();
22028e48dcfbSTheodore Ts'o 	return ret;
22038e48dcfbSTheodore Ts'o }
22048e48dcfbSTheodore Ts'o 
22058e48dcfbSTheodore Ts'o 
220664769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping,
220764769240SAlex Tomas 			      struct writeback_control *wbc)
220864769240SAlex Tomas {
220922208dedSAneesh Kumar K.V 	pgoff_t index;
221022208dedSAneesh Kumar K.V 	int range_whole = 0;
221161628a3fSMingming Cao 	handle_t *handle = NULL;
2212df22291fSAneesh Kumar K.V 	struct mpage_da_data mpd;
22135e745b04SAneesh Kumar K.V 	struct inode *inode = mapping->host;
2214498e5f24STheodore Ts'o 	int pages_written = 0;
221555138e0bSTheodore Ts'o 	unsigned int max_pages;
22162acf2c26SAneesh Kumar K.V 	int range_cyclic, cycled = 1, io_done = 0;
221755138e0bSTheodore Ts'o 	int needed_blocks, ret = 0;
221855138e0bSTheodore Ts'o 	long desired_nr_to_write, nr_to_writebump = 0;
2219de89de6eSTheodore Ts'o 	loff_t range_start = wbc->range_start;
22205e745b04SAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
222172f84e65SEric Sandeen 	pgoff_t done_index = 0;
22225b41d924SEric Sandeen 	pgoff_t end;
22231bce63d1SShaohua Li 	struct blk_plug plug;
222461628a3fSMingming Cao 
22259bffad1eSTheodore Ts'o 	trace_ext4_da_writepages(inode, wbc);
2226ba80b101STheodore Ts'o 
222761628a3fSMingming Cao 	/*
222861628a3fSMingming Cao 	 * No pages to write? This is mainly a kludge to avoid starting
222961628a3fSMingming Cao 	 * a transaction for special inodes like the journal inode on last iput()
223061628a3fSMingming Cao 	 * because that could violate lock ordering on umount.
223161628a3fSMingming Cao 	 */
2232a1d6cc56SAneesh Kumar K.V 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
223361628a3fSMingming Cao 		return 0;
22342a21e37eSTheodore Ts'o 
22352a21e37eSTheodore Ts'o 	/*
22362a21e37eSTheodore Ts'o 	 * If the filesystem has aborted, it is read-only, so return
22372a21e37eSTheodore Ts'o 	 * right away instead of dumping stack traces later on that
22382a21e37eSTheodore Ts'o 	 * will obscure the real source of the problem.  We test
22394ab2f15bSTheodore Ts'o 	 * EXT4_MF_FS_ABORTED instead of sb->s_flags' MS_RDONLY because
22402a21e37eSTheodore Ts'o 	 * the latter could be true if the filesystem is mounted
22412a21e37eSTheodore Ts'o 	 * read-only, and in that case, ext4_da_writepages should
22422a21e37eSTheodore Ts'o 	 * *never* be called, so if that ever happens, we would want
22432a21e37eSTheodore Ts'o 	 * the stack trace.
22442a21e37eSTheodore Ts'o 	 */
22454ab2f15bSTheodore Ts'o 	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
22462a21e37eSTheodore Ts'o 		return -EROFS;
22472a21e37eSTheodore Ts'o 
224822208dedSAneesh Kumar K.V 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
224922208dedSAneesh Kumar K.V 		range_whole = 1;
225061628a3fSMingming Cao 
22512acf2c26SAneesh Kumar K.V 	range_cyclic = wbc->range_cyclic;
22522acf2c26SAneesh Kumar K.V 	if (wbc->range_cyclic) {
225322208dedSAneesh Kumar K.V 		index = mapping->writeback_index;
22542acf2c26SAneesh Kumar K.V 		if (index)
22552acf2c26SAneesh Kumar K.V 			cycled = 0;
22562acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
22572acf2c26SAneesh Kumar K.V 		wbc->range_end = LLONG_MAX;
22582acf2c26SAneesh Kumar K.V 		wbc->range_cyclic = 0;
22595b41d924SEric Sandeen 		end = -1;
22605b41d924SEric Sandeen 	} else {
226122208dedSAneesh Kumar K.V 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
22625b41d924SEric Sandeen 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
22635b41d924SEric Sandeen 	}
2264a1d6cc56SAneesh Kumar K.V 
226555138e0bSTheodore Ts'o 	/*
226655138e0bSTheodore Ts'o 	 * This works around two forms of stupidity.  The first is in
226755138e0bSTheodore Ts'o 	 * the writeback code, which caps the maximum number of pages
226855138e0bSTheodore Ts'o 	 * written to be 1024 pages.  This is wrong on multiple
226955138e0bSTheodore Ts'o 	 * levels; different architectures have a different page size,
227055138e0bSTheodore Ts'o 	 * which changes the maximum amount of data which gets
227155138e0bSTheodore Ts'o 	 * written.  Secondly, 4 megabytes is way too small.  XFS
227255138e0bSTheodore Ts'o 	 * forces this value to be 16 megabytes by multiplying the
227355138e0bSTheodore Ts'o 	 * nr_to_write parameter by four, and then relies on its
227455138e0bSTheodore Ts'o 	 * allocator to allocate larger extents to make them
227555138e0bSTheodore Ts'o 	 * contiguous.  Unfortunately this brings us to the second
227655138e0bSTheodore Ts'o 	 * stupidity, which is that ext4's mballoc code only allocates
227755138e0bSTheodore Ts'o 	 * at most 2048 blocks.  So we force contiguous writes up to
227855138e0bSTheodore Ts'o 	 * the number of dirty blocks in the inode, or
227955138e0bSTheodore Ts'o 	 * sbi->max_writeback_mb_bump, whichever is smaller.
228055138e0bSTheodore Ts'o 	 */
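/*
 * Editor's note -- a worked example, not part of the original source:
 * assuming the default s_max_writeback_mb_bump of 128 and 4 KiB pages
 * (PAGE_CACHE_SHIFT == 12), the line below computes
 * max_pages = 128 << (20 - 12) = 32768 pages, i.e. up to 128 MiB of
 * dirty pages considered per writeback pass.
 */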
228155138e0bSTheodore Ts'o 	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2282b443e733SEric Sandeen 	if (!range_cyclic && range_whole) {
2283b443e733SEric Sandeen 		if (wbc->nr_to_write == LONG_MAX)
2284b443e733SEric Sandeen 			desired_nr_to_write = wbc->nr_to_write;
228555138e0bSTheodore Ts'o 		else
2286b443e733SEric Sandeen 			desired_nr_to_write = wbc->nr_to_write * 8;
2287b443e733SEric Sandeen 	} else
228855138e0bSTheodore Ts'o 		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
228955138e0bSTheodore Ts'o 							   max_pages);
229055138e0bSTheodore Ts'o 	if (desired_nr_to_write > max_pages)
229155138e0bSTheodore Ts'o 		desired_nr_to_write = max_pages;
229255138e0bSTheodore Ts'o 
229355138e0bSTheodore Ts'o 	if (wbc->nr_to_write < desired_nr_to_write) {
229455138e0bSTheodore Ts'o 		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
229555138e0bSTheodore Ts'o 		wbc->nr_to_write = desired_nr_to_write;
229655138e0bSTheodore Ts'o 	}
229755138e0bSTheodore Ts'o 
22982acf2c26SAneesh Kumar K.V retry:
22996e6938b6SWu Fengguang 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
23005b41d924SEric Sandeen 		tag_pages_for_writeback(mapping, index, end);
23015b41d924SEric Sandeen 
23021bce63d1SShaohua Li 	blk_start_plug(&plug);
230322208dedSAneesh Kumar K.V 	while (!ret && wbc->nr_to_write > 0) {
2304a1d6cc56SAneesh Kumar K.V 
2305a1d6cc56SAneesh Kumar K.V 		/*
2306a1d6cc56SAneesh Kumar K.V 		 * We insert one extent at a time, so we need the
2307a1d6cc56SAneesh Kumar K.V 		 * credits for a single extent allocation.
2308a1d6cc56SAneesh Kumar K.V 		 * Journalled mode is currently not supported
2309a1d6cc56SAneesh Kumar K.V 		 * by delalloc.
2310a1d6cc56SAneesh Kumar K.V 		 */
2311a1d6cc56SAneesh Kumar K.V 		BUG_ON(ext4_should_journal_data(inode));
2312525f4ed8SMingming Cao 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2313a1d6cc56SAneesh Kumar K.V 
231461628a3fSMingming Cao 		/* start a new transaction */
231561628a3fSMingming Cao 		handle = ext4_journal_start(inode, needed_blocks);
231661628a3fSMingming Cao 		if (IS_ERR(handle)) {
231761628a3fSMingming Cao 			ret = PTR_ERR(handle);
23181693918eSTheodore Ts'o 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2319fbe845ddSCurt Wohlgemuth 			       "%ld pages, ino %lu; err %d", __func__,
2320a1d6cc56SAneesh Kumar K.V 				wbc->nr_to_write, inode->i_ino, ret);
23213c1fcb2cSNamjae Jeon 			blk_finish_plug(&plug);
232261628a3fSMingming Cao 			goto out_writepages;
232361628a3fSMingming Cao 		}
2324f63e6005STheodore Ts'o 
2325f63e6005STheodore Ts'o 		/*
23268eb9e5ceSTheodore Ts'o 		 * Now call write_cache_pages_da() to find the next
2327f63e6005STheodore Ts'o 		 * contiguous region of logical blocks that need
23288eb9e5ceSTheodore Ts'o 		 * blocks to be allocated by ext4 and submit them.
2329f63e6005STheodore Ts'o 		 */
23309c3569b5STao Ma 		ret = write_cache_pages_da(handle, mapping,
23319c3569b5STao Ma 					   wbc, &mpd, &done_index);
2332f63e6005STheodore Ts'o 		/*
2333af901ca1SAndré Goddard Rosa 		 * If we have a contiguous extent of pages and we
2334f63e6005STheodore Ts'o 		 * haven't done the I/O yet, map the blocks and submit
2335f63e6005STheodore Ts'o 		 * them for I/O.
2336f63e6005STheodore Ts'o 		 */
2337f63e6005STheodore Ts'o 		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
23385a87b7a5STheodore Ts'o 			mpage_da_map_and_submit(&mpd);
2339f63e6005STheodore Ts'o 			ret = MPAGE_DA_EXTENT_TAIL;
2340f63e6005STheodore Ts'o 		}
2341b3a3ca8cSTheodore Ts'o 		trace_ext4_da_write_pages(inode, &mpd);
2342f63e6005STheodore Ts'o 		wbc->nr_to_write -= mpd.pages_written;
2343df22291fSAneesh Kumar K.V 
234461628a3fSMingming Cao 		ext4_journal_stop(handle);
2345df22291fSAneesh Kumar K.V 
23468f64b32eSEric Sandeen 		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
234722208dedSAneesh Kumar K.V 			/* Commit the transaction, which would
234822208dedSAneesh Kumar K.V 			 * free the blocks released in the transaction,
234922208dedSAneesh Kumar K.V 			 * and try again
235022208dedSAneesh Kumar K.V 			 */
2351df22291fSAneesh Kumar K.V 			jbd2_journal_force_commit_nested(sbi->s_journal);
235222208dedSAneesh Kumar K.V 			ret = 0;
235322208dedSAneesh Kumar K.V 		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
2354a1d6cc56SAneesh Kumar K.V 			/*
23558de49e67SKazuya Mio 			 * Got one extent; now try with the rest of the pages.
23568de49e67SKazuya Mio 			 * If mpd.retval is set to -EIO, the journal is aborted.
23578de49e67SKazuya Mio 			 * So we don't need to write any more.
2358a1d6cc56SAneesh Kumar K.V 			 */
235922208dedSAneesh Kumar K.V 			pages_written += mpd.pages_written;
23608de49e67SKazuya Mio 			ret = mpd.retval;
23612acf2c26SAneesh Kumar K.V 			io_done = 1;
236222208dedSAneesh Kumar K.V 		} else if (wbc->nr_to_write)
236361628a3fSMingming Cao 			/*
236461628a3fSMingming Cao 			 * There is no more writeout needed, or we
236561628a3fSMingming Cao 			 * requested a non-blocking writeout and
236661628a3fSMingming Cao 			 * found the device congested
236761628a3fSMingming Cao 			 */
236861628a3fSMingming Cao 			break;
236961628a3fSMingming Cao 	}
23701bce63d1SShaohua Li 	blk_finish_plug(&plug);
23712acf2c26SAneesh Kumar K.V 	if (!io_done && !cycled) {
23722acf2c26SAneesh Kumar K.V 		cycled = 1;
23732acf2c26SAneesh Kumar K.V 		index = 0;
23742acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
23752acf2c26SAneesh Kumar K.V 		wbc->range_end = mapping->writeback_index - 1;
23762acf2c26SAneesh Kumar K.V 		goto retry;
23772acf2c26SAneesh Kumar K.V 	}
237861628a3fSMingming Cao 
237922208dedSAneesh Kumar K.V 	/* Update index */
23802acf2c26SAneesh Kumar K.V 	wbc->range_cyclic = range_cyclic;
238122208dedSAneesh Kumar K.V 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
238222208dedSAneesh Kumar K.V 		/*
238322208dedSAneesh Kumar K.V 		 * set the writeback_index so that range_cyclic
238422208dedSAneesh Kumar K.V 		 * mode will write it back later
238522208dedSAneesh Kumar K.V 		 */
238672f84e65SEric Sandeen 		mapping->writeback_index = done_index;
2387a1d6cc56SAneesh Kumar K.V 
238861628a3fSMingming Cao out_writepages:
238922208dedSAneesh Kumar K.V 	wbc->nr_to_write -= nr_to_writebump;
2390de89de6eSTheodore Ts'o 	wbc->range_start = range_start;
23919bffad1eSTheodore Ts'o 	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
239261628a3fSMingming Cao 	return ret;
239364769240SAlex Tomas }
239464769240SAlex Tomas 
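/*
 * Editor's note -- a condensed sketch, not part of the original file, of
 * the per-iteration pattern ext4_da_writepages() above follows; the names
 * match the code, the shape is simplified:
 *
 *	while (!ret && wbc->nr_to_write > 0) {
 *		handle = ext4_journal_start(inode, credits for one extent);
 *		write_cache_pages_da(...);	// gather one contiguous extent
 *		mpage_da_map_and_submit(...);	// allocate blocks, submit I/O
 *		ext4_journal_stop(handle);
 *	}
 *
 * Starting one transaction per extent keeps the journal credits needed at
 * any one time small and predictable.
 */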
239579f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb)
239679f0be8dSAneesh Kumar K.V {
239779f0be8dSAneesh Kumar K.V 	s64 free_blocks, dirty_blocks;
239879f0be8dSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(sb);
239979f0be8dSAneesh Kumar K.V 
240079f0be8dSAneesh Kumar K.V 	/*
240179f0be8dSAneesh Kumar K.V 	 * Switch to non-delalloc mode if we are running low
240279f0be8dSAneesh Kumar K.V 	 * on free blocks.  The free block accounting via percpu
2403179f7ebfSEric Dumazet 	 * counters can get slightly wrong with percpu_counter_batch getting
240479f0be8dSAneesh Kumar K.V 	 * accumulated on each CPU without updating global counters.
240579f0be8dSAneesh Kumar K.V 	 * Delalloc needs an accurate free block accounting, so switch
240679f0be8dSAneesh Kumar K.V 	 * to non-delalloc when we are near the error range.
240779f0be8dSAneesh Kumar K.V 	 */
240857042651STheodore Ts'o 	free_blocks = EXT4_C2B(sbi,
240957042651STheodore Ts'o 		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
241057042651STheodore Ts'o 	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
241100d4e736STheodore Ts'o 	/*
241200d4e736STheodore Ts'o 	 * Start pushing delalloc when 1/2 of free blocks are dirty.
241300d4e736STheodore Ts'o 	 */
241400d4e736STheodore Ts'o 	if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
241500d4e736STheodore Ts'o 	    !writeback_in_progress(sb->s_bdi) &&
241600d4e736STheodore Ts'o 	    down_read_trylock(&sb->s_umount)) {
241700d4e736STheodore Ts'o 		writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
241800d4e736STheodore Ts'o 		up_read(&sb->s_umount);
241900d4e736STheodore Ts'o 	}
242000d4e736STheodore Ts'o 
242179f0be8dSAneesh Kumar K.V 	if (2 * free_blocks < 3 * dirty_blocks ||
2422df55c99dSTheodore Ts'o 	    free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
242379f0be8dSAneesh Kumar K.V 		/*
2424c8afb446SEric Sandeen 		 * The free block count is less than 150% of the dirty blocks,
2425c8afb446SEric Sandeen 		 * or the free blocks are below the watermark.
242679f0be8dSAneesh Kumar K.V 		 */
242779f0be8dSAneesh Kumar K.V 		return 1;
242879f0be8dSAneesh Kumar K.V 	}
242979f0be8dSAneesh Kumar K.V 	return 0;
243079f0be8dSAneesh Kumar K.V }
243179f0be8dSAneesh Kumar K.V 
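/*
 * Editor's note -- worked numbers, not part of the original source: the
 * test "2 * free_blocks < 3 * dirty_blocks" above is simply
 * free < 1.5 * dirty.  With free_blocks = 1200 and dirty_blocks = 1000,
 * 2400 < 3000 holds, so new writes fall back to non-delalloc; with
 * free_blocks = 1600 delalloc stays enabled unless
 * 1600 < 1000 + EXT4_FREECLUSTERS_WATERMARK is true.
 */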
243264769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
243364769240SAlex Tomas 			       loff_t pos, unsigned len, unsigned flags,
243464769240SAlex Tomas 			       struct page **pagep, void **fsdata)
243564769240SAlex Tomas {
243672b8ab9dSEric Sandeen 	int ret, retries = 0;
243764769240SAlex Tomas 	struct page *page;
243864769240SAlex Tomas 	pgoff_t index;
243964769240SAlex Tomas 	struct inode *inode = mapping->host;
244064769240SAlex Tomas 	handle_t *handle;
244164769240SAlex Tomas 
244264769240SAlex Tomas 	index = pos >> PAGE_CACHE_SHIFT;
244379f0be8dSAneesh Kumar K.V 
244479f0be8dSAneesh Kumar K.V 	if (ext4_nonda_switch(inode->i_sb)) {
244579f0be8dSAneesh Kumar K.V 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
244679f0be8dSAneesh Kumar K.V 		return ext4_write_begin(file, mapping, pos,
244779f0be8dSAneesh Kumar K.V 					len, flags, pagep, fsdata);
244879f0be8dSAneesh Kumar K.V 	}
244979f0be8dSAneesh Kumar K.V 	*fsdata = (void *)0;
24509bffad1eSTheodore Ts'o 	trace_ext4_da_write_begin(inode, pos, len, flags);
24519c3569b5STao Ma 
24529c3569b5STao Ma 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
24539c3569b5STao Ma 		ret = ext4_da_write_inline_data_begin(mapping, inode,
24549c3569b5STao Ma 						      pos, len, flags,
24559c3569b5STao Ma 						      pagep, fsdata);
24569c3569b5STao Ma 		if (ret < 0)
24579c3569b5STao Ma 			goto out;
24589c3569b5STao Ma 		if (ret == 1) {
24599c3569b5STao Ma 			ret = 0;
24609c3569b5STao Ma 			goto out;
24619c3569b5STao Ma 		}
24629c3569b5STao Ma 	}
24639c3569b5STao Ma 
2464d2a17637SMingming Cao retry:
246564769240SAlex Tomas 	/*
246664769240SAlex Tomas 	 * With delayed allocation, we don't log the i_disksize update
246764769240SAlex Tomas 	 * if there is delayed block allocation.  But we still need
246864769240SAlex Tomas 	 * to journal the i_disksize update if we write to the end
246964769240SAlex Tomas 	 * of a file that has an already mapped buffer.
247064769240SAlex Tomas 	 */
247164769240SAlex Tomas 	handle = ext4_journal_start(inode, 1);
247264769240SAlex Tomas 	if (IS_ERR(handle)) {
247364769240SAlex Tomas 		ret = PTR_ERR(handle);
247464769240SAlex Tomas 		goto out;
247564769240SAlex Tomas 	}
2476ebd3610bSJan Kara 	/* We cannot recurse into the filesystem as the transaction is already
2477ebd3610bSJan Kara 	 * started */
2478ebd3610bSJan Kara 	flags |= AOP_FLAG_NOFS;
247964769240SAlex Tomas 
248054566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
2481d5a0d4f7SEric Sandeen 	if (!page) {
2482d5a0d4f7SEric Sandeen 		ext4_journal_stop(handle);
2483d5a0d4f7SEric Sandeen 		ret = -ENOMEM;
2484d5a0d4f7SEric Sandeen 		goto out;
2485d5a0d4f7SEric Sandeen 	}
248664769240SAlex Tomas 	*pagep = page;
248764769240SAlex Tomas 
24886e1db88dSChristoph Hellwig 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
248964769240SAlex Tomas 	if (ret < 0) {
249064769240SAlex Tomas 		unlock_page(page);
249164769240SAlex Tomas 		ext4_journal_stop(handle);
249264769240SAlex Tomas 		page_cache_release(page);
2493ae4d5372SAneesh Kumar K.V 		/*
2494ae4d5372SAneesh Kumar K.V 		 * block_write_begin may have instantiated a few blocks
2495ae4d5372SAneesh Kumar K.V 		 * outside i_size.  Trim these off again. Don't need
2496ae4d5372SAneesh Kumar K.V 		 * i_size_read because we hold i_mutex.
2497ae4d5372SAneesh Kumar K.V 		 */
2498ae4d5372SAneesh Kumar K.V 		if (pos + len > inode->i_size)
2499b9a4207dSJan Kara 			ext4_truncate_failed_write(inode);
250064769240SAlex Tomas 	}
250164769240SAlex Tomas 
2502d2a17637SMingming Cao 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2503d2a17637SMingming Cao 		goto retry;
250464769240SAlex Tomas out:
250564769240SAlex Tomas 	return ret;
250664769240SAlex Tomas }
250764769240SAlex Tomas 
2508632eaeabSMingming Cao /*
2509632eaeabSMingming Cao  * Check if we should update i_disksize
2510632eaeabSMingming Cao  * when writing to the end of the file but not requiring block allocation
2511632eaeabSMingming Cao  */
2512632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page,
2513632eaeabSMingming Cao 					    unsigned long offset)
2514632eaeabSMingming Cao {
2515632eaeabSMingming Cao 	struct buffer_head *bh;
2516632eaeabSMingming Cao 	struct inode *inode = page->mapping->host;
2517632eaeabSMingming Cao 	unsigned int idx;
2518632eaeabSMingming Cao 	int i;
2519632eaeabSMingming Cao 
2520632eaeabSMingming Cao 	bh = page_buffers(page);
2521632eaeabSMingming Cao 	idx = offset >> inode->i_blkbits;
2522632eaeabSMingming Cao 
2523632eaeabSMingming Cao 	for (i = 0; i < idx; i++)
2524632eaeabSMingming Cao 		bh = bh->b_this_page;
2525632eaeabSMingming Cao 
252629fa89d0SAneesh Kumar K.V 	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2527632eaeabSMingming Cao 		return 0;
2528632eaeabSMingming Cao 	return 1;
2529632eaeabSMingming Cao }
2530632eaeabSMingming Cao 
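/*
 * Editor's note -- an illustrative walk-through, not part of the original
 * file: with 1 KiB blocks on a 4 KiB page, an offset of 2600 gives
 * idx = 2600 >> 10 = 2, so the loop in the helper above steps to the third
 * buffer_head; i_disksize is then only updated if that buffer is already
 * mapped and neither delayed nor unwritten, i.e. no allocation is pending.
 */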
253164769240SAlex Tomas static int ext4_da_write_end(struct file *file,
253264769240SAlex Tomas 			     struct address_space *mapping,
253364769240SAlex Tomas 			     loff_t pos, unsigned len, unsigned copied,
253464769240SAlex Tomas 			     struct page *page, void *fsdata)
253564769240SAlex Tomas {
253664769240SAlex Tomas 	struct inode *inode = mapping->host;
253764769240SAlex Tomas 	int ret = 0, ret2;
253864769240SAlex Tomas 	handle_t *handle = ext4_journal_current_handle();
253964769240SAlex Tomas 	loff_t new_i_size;
2540632eaeabSMingming Cao 	unsigned long start, end;
254179f0be8dSAneesh Kumar K.V 	int write_mode = (int)(unsigned long)fsdata;
254279f0be8dSAneesh Kumar K.V 
254379f0be8dSAneesh Kumar K.V 	if (write_mode == FALL_BACK_TO_NONDELALLOC) {
25443d2b1582SLukas Czerner 		switch (ext4_inode_journal_mode(inode)) {
25453d2b1582SLukas Czerner 		case EXT4_INODE_ORDERED_DATA_MODE:
254679f0be8dSAneesh Kumar K.V 			return ext4_ordered_write_end(file, mapping, pos,
254779f0be8dSAneesh Kumar K.V 						      len, copied, page, fsdata);
25483d2b1582SLukas Czerner 		case EXT4_INODE_WRITEBACK_DATA_MODE:
254979f0be8dSAneesh Kumar K.V 			return ext4_writeback_write_end(file, mapping, pos,
255079f0be8dSAneesh Kumar K.V 							len, copied, page, fsdata);
25513d2b1582SLukas Czerner 		default:
255279f0be8dSAneesh Kumar K.V 			BUG();
255379f0be8dSAneesh Kumar K.V 		}
255479f0be8dSAneesh Kumar K.V 	}
2555632eaeabSMingming Cao 
25569bffad1eSTheodore Ts'o 	trace_ext4_da_write_end(inode, pos, len, copied);
2557632eaeabSMingming Cao 	start = pos & (PAGE_CACHE_SIZE - 1);
2558632eaeabSMingming Cao 	end = start + copied - 1;
255964769240SAlex Tomas 
256064769240SAlex Tomas 	/*
256164769240SAlex Tomas 	 * generic_write_end() will run mark_inode_dirty() if i_size
256264769240SAlex Tomas 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
256364769240SAlex Tomas 	 * into that.
256464769240SAlex Tomas 	 */
256564769240SAlex Tomas 	new_i_size = pos + copied;
2566ea51d132SAndrea Arcangeli 	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
25679c3569b5STao Ma 		if (ext4_has_inline_data(inode) ||
25689c3569b5STao Ma 		    ext4_da_should_update_i_disksize(page, end)) {
2569632eaeabSMingming Cao 			down_write(&EXT4_I(inode)->i_data_sem);
2570f3b59291STheodore Ts'o 			if (new_i_size > EXT4_I(inode)->i_disksize)
257164769240SAlex Tomas 				EXT4_I(inode)->i_disksize = new_i_size;
2572632eaeabSMingming Cao 			up_write(&EXT4_I(inode)->i_data_sem);
2573cf17fea6SAneesh Kumar K.V 			/* We need to mark the inode dirty even if
2574cf17fea6SAneesh Kumar K.V 			 * new_i_size is less than inode->i_size
2575cf17fea6SAneesh Kumar K.V 			 * but greater than i_disksize. (hint: delalloc)
2576cf17fea6SAneesh Kumar K.V 			 */
2577cf17fea6SAneesh Kumar K.V 			ext4_mark_inode_dirty(handle, inode);
2578632eaeabSMingming Cao 		}
2579632eaeabSMingming Cao 	}
25809c3569b5STao Ma 
25819c3569b5STao Ma 	if (write_mode != CONVERT_INLINE_DATA &&
25829c3569b5STao Ma 	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
25839c3569b5STao Ma 	    ext4_has_inline_data(inode))
25849c3569b5STao Ma 		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
25859c3569b5STao Ma 						     page);
25869c3569b5STao Ma 	else
258764769240SAlex Tomas 		ret2 = generic_write_end(file, mapping, pos, len, copied,
258864769240SAlex Tomas 					 page, fsdata);
25899c3569b5STao Ma 
259064769240SAlex Tomas 	copied = ret2;
259164769240SAlex Tomas 	if (ret2 < 0)
259264769240SAlex Tomas 		ret = ret2;
259364769240SAlex Tomas 	ret2 = ext4_journal_stop(handle);
259464769240SAlex Tomas 	if (!ret)
259564769240SAlex Tomas 		ret = ret2;
259664769240SAlex Tomas 
259764769240SAlex Tomas 	return ret ? ret : copied;
259864769240SAlex Tomas }
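/*
 * Editor's note -- an illustrative scenario, not part of the original
 * source: after a 1 KiB delalloc append at offset 10240, i_size becomes
 * 11264 immediately, while i_disksize normally catches up only when the
 * delayed blocks are allocated at writeback time.  The update in
 * ext4_da_write_end() above covers the case where the tail page is already
 * mapped, so no allocation (and hence no later i_disksize update) would
 * otherwise happen.
 */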
259964769240SAlex Tomas 
260064769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
260164769240SAlex Tomas {
260264769240SAlex Tomas 	/*
260364769240SAlex Tomas 	 * Drop reserved blocks
260464769240SAlex Tomas 	 */
260564769240SAlex Tomas 	BUG_ON(!PageLocked(page));
260664769240SAlex Tomas 	if (!page_has_buffers(page))
260764769240SAlex Tomas 		goto out;
260864769240SAlex Tomas 
2609d2a17637SMingming Cao 	ext4_da_page_release_reservation(page, offset);
261064769240SAlex Tomas 
261164769240SAlex Tomas out:
261264769240SAlex Tomas 	ext4_invalidatepage(page, offset);
261364769240SAlex Tomas 
261464769240SAlex Tomas 	return;
261564769240SAlex Tomas }
261664769240SAlex Tomas 
2617ccd2506bSTheodore Ts'o /*
2618ccd2506bSTheodore Ts'o  * Force all delayed allocation blocks to be allocated for a given inode.
2619ccd2506bSTheodore Ts'o  */
2620ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode)
2621ccd2506bSTheodore Ts'o {
2622fb40ba0dSTheodore Ts'o 	trace_ext4_alloc_da_blocks(inode);
2623fb40ba0dSTheodore Ts'o 
2624ccd2506bSTheodore Ts'o 	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2625ccd2506bSTheodore Ts'o 	    !EXT4_I(inode)->i_reserved_meta_blocks)
2626ccd2506bSTheodore Ts'o 		return 0;
2627ccd2506bSTheodore Ts'o 
2628ccd2506bSTheodore Ts'o 	/*
2629ccd2506bSTheodore Ts'o 	 * We do something simple for now.  The filemap_flush() will
2630ccd2506bSTheodore Ts'o 	 * also start triggering a write of the data blocks, which is
2631ccd2506bSTheodore Ts'o 	 * not strictly speaking necessary (and for users of
2632ccd2506bSTheodore Ts'o 	 * laptop_mode, not even desirable).  However, to do otherwise
2633ccd2506bSTheodore Ts'o 	 * would require replicating code paths in:
2634ccd2506bSTheodore Ts'o 	 *
2635ccd2506bSTheodore Ts'o 	 * ext4_da_writepages() ->
2636ccd2506bSTheodore Ts'o 	 *    write_cache_pages() ---> (via passed in callback function)
2637ccd2506bSTheodore Ts'o 	 *        __mpage_da_writepage() -->
2638ccd2506bSTheodore Ts'o 	 *           mpage_add_bh_to_extent()
2639ccd2506bSTheodore Ts'o 	 *           mpage_da_map_blocks()
2640ccd2506bSTheodore Ts'o 	 *
2641ccd2506bSTheodore Ts'o 	 * The problem is that write_cache_pages(), located in
2642ccd2506bSTheodore Ts'o 	 * mm/page-writeback.c, marks pages clean in preparation for
2643ccd2506bSTheodore Ts'o 	 * doing I/O, which is not desirable if we're not planning on
2644ccd2506bSTheodore Ts'o 	 * doing I/O at all.
2645ccd2506bSTheodore Ts'o 	 *
2646ccd2506bSTheodore Ts'o 	 * We could call write_cache_pages(), and then redirty all of
2647380cf090SWu Fengguang 	 * the pages by calling redirty_page_for_writepage() but that
2648ccd2506bSTheodore Ts'o 	 * would be ugly in the extreme.  So instead we would need to
2649ccd2506bSTheodore Ts'o 	 * replicate parts of the code in the above functions,
265025985edcSLucas De Marchi 	 * simplifying them because we wouldn't actually intend to
2651ccd2506bSTheodore Ts'o 	 * write out the pages, but rather only collect contiguous
2652ccd2506bSTheodore Ts'o 	 * logical block extents, call the multi-block allocator, and
2653ccd2506bSTheodore Ts'o 	 * then update the buffer heads with the block allocations.
2654ccd2506bSTheodore Ts'o 	 *
2655ccd2506bSTheodore Ts'o 	 * For now, though, we'll cheat by calling filemap_flush(),
2656ccd2506bSTheodore Ts'o 	 * which will map the blocks, and start the I/O, but not
2657ccd2506bSTheodore Ts'o 	 * actually wait for the I/O to complete.
2658ccd2506bSTheodore Ts'o 	 */
2659ccd2506bSTheodore Ts'o 	return filemap_flush(inode->i_mapping);
2660ccd2506bSTheodore Ts'o }
266164769240SAlex Tomas 
266264769240SAlex Tomas /*
2663ac27a0ecSDave Kleikamp  * bmap() is special.  It gets used by applications such as lilo and by
2664ac27a0ecSDave Kleikamp  * the swapper to find the on-disk block of a specific piece of data.
2665ac27a0ecSDave Kleikamp  *
2666ac27a0ecSDave Kleikamp  * Naturally, this is dangerous if the block concerned is still in the
2667617ba13bSMingming Cao  * journal.  If somebody makes a swapfile on an ext4 data-journaling
2668ac27a0ecSDave Kleikamp  * filesystem and enables swap, then they may get a nasty shock when the
2669ac27a0ecSDave Kleikamp  * data getting swapped to that swapfile suddenly gets overwritten by
2670ac27a0ecSDave Kleikamp  * the original zeros written out previously to the journal and
2671ac27a0ecSDave Kleikamp  * awaiting writeback in the kernel's buffer cache.
2672ac27a0ecSDave Kleikamp  *
2673ac27a0ecSDave Kleikamp  * So, if we see any bmap calls here on a modified, data-journaled file,
2674ac27a0ecSDave Kleikamp  * take extra steps to flush any blocks which might be in the cache.
2675ac27a0ecSDave Kleikamp  */
2676617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2677ac27a0ecSDave Kleikamp {
2678ac27a0ecSDave Kleikamp 	struct inode *inode = mapping->host;
2679ac27a0ecSDave Kleikamp 	journal_t *journal;
2680ac27a0ecSDave Kleikamp 	int err;
2681ac27a0ecSDave Kleikamp 
268246c7f254STao Ma 	/*
268346c7f254STao Ma 	 * We can get here for an inline file via the FIBMAP ioctl
268446c7f254STao Ma 	 */
268546c7f254STao Ma 	if (ext4_has_inline_data(inode))
268646c7f254STao Ma 		return 0;
268746c7f254STao Ma 
268864769240SAlex Tomas 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
268964769240SAlex Tomas 	    test_opt(inode->i_sb, DELALLOC)) {
269064769240SAlex Tomas 		/*
269164769240SAlex Tomas 		 * With delalloc we want to sync the file
269264769240SAlex Tomas 		 * so that we can make sure we allocate
269364769240SAlex Tomas 		 * blocks for the file
269464769240SAlex Tomas 		 */
269564769240SAlex Tomas 		filemap_write_and_wait(mapping);
269664769240SAlex Tomas 	}
269764769240SAlex Tomas 
269819f5fb7aSTheodore Ts'o 	if (EXT4_JOURNAL(inode) &&
269919f5fb7aSTheodore Ts'o 	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2700ac27a0ecSDave Kleikamp 		/*
2701ac27a0ecSDave Kleikamp 		 * This is a REALLY heavyweight approach, but the use of
2702ac27a0ecSDave Kleikamp 		 * bmap on dirty files is expected to be extremely rare:
2703ac27a0ecSDave Kleikamp 		 * only if we run lilo or swapon on a freshly made file
2704ac27a0ecSDave Kleikamp 		 * do we expect this to happen.
2705ac27a0ecSDave Kleikamp 		 *
2706ac27a0ecSDave Kleikamp 		 * (bmap requires CAP_SYS_RAWIO so this does not
2707ac27a0ecSDave Kleikamp 		 * represent an unprivileged user DOS attack --- we'd be
2708ac27a0ecSDave Kleikamp 		 * in trouble if mortal users could trigger this path at
2709ac27a0ecSDave Kleikamp 		 * will.)
2710ac27a0ecSDave Kleikamp 		 *
2711617ba13bSMingming Cao 		 * NB. EXT4_STATE_JDATA is not set on files other than
2712ac27a0ecSDave Kleikamp 		 * regular files.  If somebody wants to bmap a directory
2713ac27a0ecSDave Kleikamp 		 * or symlink and gets confused because the buffer
2714ac27a0ecSDave Kleikamp 		 * hasn't yet been flushed to disk, they deserve
2715ac27a0ecSDave Kleikamp 		 * everything they get.
2716ac27a0ecSDave Kleikamp */ 2717ac27a0ecSDave Kleikamp 271819f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 2719617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2720dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2721dab291afSMingming Cao err = jbd2_journal_flush(journal); 2722dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2723ac27a0ecSDave Kleikamp 2724ac27a0ecSDave Kleikamp if (err) 2725ac27a0ecSDave Kleikamp return 0; 2726ac27a0ecSDave Kleikamp } 2727ac27a0ecSDave Kleikamp 2728617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block); 2729ac27a0ecSDave Kleikamp } 2730ac27a0ecSDave Kleikamp 2731617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page) 2732ac27a0ecSDave Kleikamp { 273346c7f254STao Ma int ret = -EAGAIN; 273446c7f254STao Ma struct inode *inode = page->mapping->host; 273546c7f254STao Ma 27360562e0baSJiaying Zhang trace_ext4_readpage(page); 273746c7f254STao Ma 273846c7f254STao Ma if (ext4_has_inline_data(inode)) 273946c7f254STao Ma ret = ext4_readpage_inline(inode, page); 274046c7f254STao Ma 274146c7f254STao Ma if (ret == -EAGAIN) 2742617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block); 274346c7f254STao Ma 274446c7f254STao Ma return ret; 2745ac27a0ecSDave Kleikamp } 2746ac27a0ecSDave Kleikamp 2747ac27a0ecSDave Kleikamp static int 2748617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping, 2749ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages) 2750ac27a0ecSDave Kleikamp { 275146c7f254STao Ma struct inode *inode = mapping->host; 275246c7f254STao Ma 275346c7f254STao Ma /* If the file has inline data, no need to do readpages. */ 275446c7f254STao Ma if (ext4_has_inline_data(inode)) 275546c7f254STao Ma return 0; 275646c7f254STao Ma 2757617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2758ac27a0ecSDave Kleikamp } 2759ac27a0ecSDave Kleikamp 2760617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset) 2761ac27a0ecSDave Kleikamp { 27620562e0baSJiaying Zhang trace_ext4_invalidatepage(page, offset); 27630562e0baSJiaying Zhang 27644520fb3cSJan Kara /* No journalling happens on data buffers when this function is used */ 27654520fb3cSJan Kara WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page))); 27664520fb3cSJan Kara 27674520fb3cSJan Kara block_invalidatepage(page, offset); 27684520fb3cSJan Kara } 27694520fb3cSJan Kara 277053e87268SJan Kara static int __ext4_journalled_invalidatepage(struct page *page, 27714520fb3cSJan Kara unsigned long offset) 27724520fb3cSJan Kara { 27734520fb3cSJan Kara journal_t *journal = EXT4_JOURNAL(page->mapping->host); 27744520fb3cSJan Kara 27754520fb3cSJan Kara trace_ext4_journalled_invalidatepage(page, offset); 27764520fb3cSJan Kara 2777744692dcSJiaying Zhang /* 2778ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying 2779ac27a0ecSDave Kleikamp */ 2780ac27a0ecSDave Kleikamp if (offset == 0) 2781ac27a0ecSDave Kleikamp ClearPageChecked(page); 2782ac27a0ecSDave Kleikamp 278353e87268SJan Kara return jbd2_journal_invalidatepage(journal, page, offset); 278453e87268SJan Kara } 278553e87268SJan Kara 278653e87268SJan Kara /* Wrapper for aops... 
 */
278753e87268SJan Kara static void ext4_journalled_invalidatepage(struct page *page,
278853e87268SJan Kara 					   unsigned long offset)
278953e87268SJan Kara {
279053e87268SJan Kara 	WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
2791ac27a0ecSDave Kleikamp }
2792ac27a0ecSDave Kleikamp 
2793617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait)
2794ac27a0ecSDave Kleikamp {
2795617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2796ac27a0ecSDave Kleikamp 
27970562e0baSJiaying Zhang 	trace_ext4_releasepage(page);
27980562e0baSJiaying Zhang 
2799ac27a0ecSDave Kleikamp 	WARN_ON(PageChecked(page));
2800ac27a0ecSDave Kleikamp 	if (!page_has_buffers(page))
2801ac27a0ecSDave Kleikamp 		return 0;
28020390131bSFrank Mayhar 	if (journal)
2803dab291afSMingming Cao 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
28040390131bSFrank Mayhar 	else
28050390131bSFrank Mayhar 		return try_to_free_buffers(page);
2806ac27a0ecSDave Kleikamp }
2807ac27a0ecSDave Kleikamp 
2808ac27a0ecSDave Kleikamp /*
28092ed88685STheodore Ts'o  * ext4_get_block_write() is used when preparing for a DIO write or buffer write.
28102ed88685STheodore Ts'o  * We allocate an uninitialized extent if blocks haven't been allocated.
28112ed88685STheodore Ts'o  * The extent will be converted to initialized after the IO is complete.
28122ed88685STheodore Ts'o  */
2813f19d5870STao Ma int ext4_get_block_write(struct inode *inode, sector_t iblock,
28144c0425ffSMingming Cao 			 struct buffer_head *bh_result, int create)
28154c0425ffSMingming Cao {
2816c7064ef1SJiaying Zhang 	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
28178d5d02e6SMingming Cao 		   inode->i_ino, create);
28182ed88685STheodore Ts'o 	return _ext4_get_block(inode, iblock, bh_result,
28192ed88685STheodore Ts'o 			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
28204c0425ffSMingming Cao }
28214c0425ffSMingming Cao 
2822729f52c6SZheng Liu static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
28238b0f165fSAnatol Pomozov 			   struct buffer_head *bh_result, int create)
2824729f52c6SZheng Liu {
28258b0f165fSAnatol Pomozov 	ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
28268b0f165fSAnatol Pomozov 		   inode->i_ino, create);
28278b0f165fSAnatol Pomozov 	return _ext4_get_block(inode, iblock, bh_result,
28288b0f165fSAnatol Pomozov 			       EXT4_GET_BLOCKS_NO_LOCK);
2829729f52c6SZheng Liu }
2830729f52c6SZheng Liu 
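/*
 * Editor's note -- a summary, not part of the original file, of how the two
 * helpers above differ only in the flags they pass to _ext4_get_block():
 *
 *	ext4_get_block_write()        -> EXT4_GET_BLOCKS_IO_CREATE_EXT
 *	                                 (allocate as uninitialized extents)
 *	ext4_get_block_write_nolock() -> EXT4_GET_BLOCKS_NO_LOCK
 *	                                 (overwrite DIO path; the caller
 *	                                  already holds i_data_sem, see below)
 */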
28314c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2832552ef802SChristoph Hellwig 			    ssize_t size, void *private, int ret,
2833552ef802SChristoph Hellwig 			    bool is_async)
28344c0425ffSMingming Cao {
283572c5052dSChristoph Hellwig 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
28364c0425ffSMingming Cao 	ext4_io_end_t *io_end = iocb->private;
28374c0425ffSMingming Cao 
28384b70df18SMingming 	/* if not async direct IO or a DIO with a 0 byte write, just return */
28394b70df18SMingming 	if (!io_end || !size)
2840552ef802SChristoph Hellwig 		goto out;
28414b70df18SMingming 
28428d5d02e6SMingming Cao 	ext_debug("ext4_end_io_dio(): io_end 0x%p "
2843ace36ad4SJoe Perches 		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
28448d5d02e6SMingming Cao 		  iocb->private, io_end->inode->i_ino, iocb, offset,
28458d5d02e6SMingming Cao 		  size);
28468d5d02e6SMingming Cao 
2847b5a7e970STheodore Ts'o 	iocb->private = NULL;
2848b5a7e970STheodore Ts'o 
28498d5d02e6SMingming Cao 	/* if not aio dio with unwritten extents, just free io and return */
2850bd2d0210STheodore Ts'o 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
28518d5d02e6SMingming Cao 		ext4_free_io_end(io_end);
28525b3ff237Sjiayingz@google.com (Jiaying Zhang) out:
28535b3ff237Sjiayingz@google.com (Jiaying Zhang) 		if (is_async)
28545b3ff237Sjiayingz@google.com (Jiaying Zhang) 			aio_complete(iocb, ret, 0);
285572c5052dSChristoph Hellwig 		inode_dio_done(inode);
28565b3ff237Sjiayingz@google.com (Jiaying Zhang) 		return;
28578d5d02e6SMingming Cao 	}
28588d5d02e6SMingming Cao 
28594c0425ffSMingming Cao 	io_end->offset = offset;
28604c0425ffSMingming Cao 	io_end->size = size;
28615b3ff237Sjiayingz@google.com (Jiaying Zhang) 	if (is_async) {
28625b3ff237Sjiayingz@google.com (Jiaying Zhang) 		io_end->iocb = iocb;
28635b3ff237Sjiayingz@google.com (Jiaying Zhang) 		io_end->result = ret;
28645b3ff237Sjiayingz@google.com (Jiaying Zhang) 	}
28654c0425ffSMingming Cao 
286628a535f9SDmitry Monakhov 	ext4_add_complete_io(io_end);
28674c0425ffSMingming Cao }
2868c7064ef1SJiaying Zhang 
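/*
 * Editor's note -- a simplified sketch, not part of the original file, of
 * the async DIO completion flow implemented by ext4_end_io_dio() above,
 * assuming the block layer invokes the callback once per iocb:
 *
 *	submit:   iocb->private = io_end (flagged EXT4_IO_END_DIRECT)
 *	complete: no io_end, or a 0 byte write
 *	              -> aio_complete() / inode_dio_done() directly
 *	          unwritten extents involved
 *	              -> ext4_add_complete_io(); the extent conversion is
 *	                 finished later from a workqueue
 */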
28694c0425ffSMingming Cao /*
28704c0425ffSMingming Cao  * For ext4 extent files, ext4 will do direct-io writes to holes,
28714c0425ffSMingming Cao  * preallocated extents, and writes that extend the file; there is no need
28724c0425ffSMingming Cao  * to fall back to buffered IO.
28734c0425ffSMingming Cao  *
2874b595076aSUwe Kleine-König  * For holes, we fallocate those blocks and mark them as uninitialized.
287569c499d1STheodore Ts'o  * If those blocks were preallocated, we make sure they are split, but
2876b595076aSUwe Kleine-König  * still keep the range to write as uninitialized.
28774c0425ffSMingming Cao  *
287869c499d1STheodore Ts'o  * The unwritten extents will be converted to written when DIO is completed.
28798d5d02e6SMingming Cao  * For async direct IO, since the IO may still be pending when we return, we
288025985edcSLucas De Marchi  * set up an end_io call back function, which will do the conversion
28818d5d02e6SMingming Cao  * when the async direct IO is completed.
28824c0425ffSMingming Cao  *
28834c0425ffSMingming Cao  * If the O_DIRECT write will extend the file then add this inode to the
28844c0425ffSMingming Cao  * orphan list.  So recovery will truncate it back to the original size
28854c0425ffSMingming Cao  * if the machine crashes during the write.
28864c0425ffSMingming Cao  *
28874c0425ffSMingming Cao  */
28884c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
28894c0425ffSMingming Cao 				  const struct iovec *iov, loff_t offset,
28904c0425ffSMingming Cao 				  unsigned long nr_segs)
28914c0425ffSMingming Cao {
28924c0425ffSMingming Cao 	struct file *file = iocb->ki_filp;
28934c0425ffSMingming Cao 	struct inode *inode = file->f_mapping->host;
28944c0425ffSMingming Cao 	ssize_t ret;
28954c0425ffSMingming Cao 	size_t count = iov_length(iov, nr_segs);
2896729f52c6SZheng Liu 	int overwrite = 0;
28978b0f165fSAnatol Pomozov 	get_block_t *get_block_func = NULL;
28988b0f165fSAnatol Pomozov 	int dio_flags = 0;
289969c499d1STheodore Ts'o 	loff_t final_size = offset + count;
290069c499d1STheodore Ts'o 
290169c499d1STheodore Ts'o 	/* Use the old path for reads and writes beyond i_size. */
290269c499d1STheodore Ts'o 	if (rw != WRITE || final_size > inode->i_size)
290369c499d1STheodore Ts'o 		return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
2904729f52c6SZheng Liu 
29054bd809dbSZheng Liu 	BUG_ON(iocb->private == NULL);
29064bd809dbSZheng Liu 
29074bd809dbSZheng Liu 	/* If we do an overwrite dio, i_mutex locking can be released */
29084bd809dbSZheng Liu 	overwrite = *((int *)iocb->private);
29094bd809dbSZheng Liu 
29104bd809dbSZheng Liu 	if (overwrite) {
29111f555cfaSDmitry Monakhov 		atomic_inc(&inode->i_dio_count);
29124bd809dbSZheng Liu 		down_read(&EXT4_I(inode)->i_data_sem);
29134bd809dbSZheng Liu 		mutex_unlock(&inode->i_mutex);
29144bd809dbSZheng Liu 	}
29154bd809dbSZheng Liu 
29164c0425ffSMingming Cao 	/*
29178d5d02e6SMingming Cao 	 * We could direct write to holes and fallocate.
29188d5d02e6SMingming Cao 	 *
291969c499d1STheodore Ts'o 	 * Allocated blocks to fill the hole are marked as
292069c499d1STheodore Ts'o 	 * uninitialized to prevent a parallel buffered read from exposing
292169c499d1STheodore Ts'o 	 * the stale data before the DIO completes the data IO.
29228d5d02e6SMingming Cao 	 *
292369c499d1STheodore Ts'o 	 * As to previously fallocated extents, ext4 get_block will
292469c499d1STheodore Ts'o 	 * just simply mark the buffer mapped but still keep the
292569c499d1STheodore Ts'o 	 * extents uninitialized.
29264c0425ffSMingming Cao 	 *
292769c499d1STheodore Ts'o 	 * For the non-AIO case, we will convert those unwritten extents
29288d5d02e6SMingming Cao 	 * to written after returning from blockdev_direct_IO.
29294c0425ffSMingming Cao 	 *
293069c499d1STheodore Ts'o 	 * For async DIO, the conversion needs to be deferred until the
293169c499d1STheodore Ts'o 	 * IO is completed.  The ext4 end_io callback function will be
293269c499d1STheodore Ts'o 	 * called to take care of the conversion work.  Here for the async
293369c499d1STheodore Ts'o 	 * case, we allocate an io_end structure to hook to the iocb.
29344c0425ffSMingming Cao 	 */
29358d5d02e6SMingming Cao 	iocb->private = NULL;
2936f45ee3a1SDmitry Monakhov 	ext4_inode_aio_set(inode, NULL);
29378d5d02e6SMingming Cao 	if (!is_sync_kiocb(iocb)) {
293869c499d1STheodore Ts'o 		ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
29394bd809dbSZheng Liu 		if (!io_end) {
29404bd809dbSZheng Liu 			ret = -ENOMEM;
29414bd809dbSZheng Liu 			goto retake_lock;
29424bd809dbSZheng Liu 		}
2943266991b1SJeff Moyer 		io_end->flag |= EXT4_IO_END_DIRECT;
2944266991b1SJeff Moyer 		iocb->private = io_end;
29458d5d02e6SMingming Cao 		/*
294669c499d1STheodore Ts'o 		 * we save the io structure for the current async direct
294769c499d1STheodore Ts'o 		 * IO, so that later ext4_map_blocks() could flag the
294869c499d1STheodore Ts'o 		 * io structure whether there are unwritten extents that
294969c499d1STheodore Ts'o 		 * need to be converted when the IO is completed.
29508d5d02e6SMingming Cao 		 */
2951f45ee3a1SDmitry Monakhov 		ext4_inode_aio_set(inode, io_end);
29528d5d02e6SMingming Cao 	}
29538d5d02e6SMingming Cao 
29548b0f165fSAnatol Pomozov 	if (overwrite) {
29558b0f165fSAnatol Pomozov 		get_block_func = ext4_get_block_write_nolock;
29568b0f165fSAnatol Pomozov 	} else {
29578b0f165fSAnatol Pomozov 		get_block_func = ext4_get_block_write;
29588b0f165fSAnatol Pomozov 		dio_flags = DIO_LOCKING;
29598b0f165fSAnatol Pomozov 	}
2960729f52c6SZheng Liu 	ret = __blockdev_direct_IO(rw, iocb, inode,
2961729f52c6SZheng Liu 				   inode->i_sb->s_bdev, iov,
2962729f52c6SZheng Liu 				   offset, nr_segs,
29638b0f165fSAnatol Pomozov 				   get_block_func,
2964729f52c6SZheng Liu 				   ext4_end_io_dio,
2965729f52c6SZheng Liu 				   NULL,
29668b0f165fSAnatol Pomozov 				   dio_flags);
29678b0f165fSAnatol Pomozov 
29688d5d02e6SMingming Cao 	if (iocb->private)
2969f45ee3a1SDmitry Monakhov 		ext4_inode_aio_set(inode, NULL);
29708d5d02e6SMingming Cao 	/*
297169c499d1STheodore Ts'o 	 * The io_end structure takes a reference to the inode; that
297269c499d1STheodore Ts'o 	 * structure needs to be destroyed and the reference to the
297369c499d1STheodore Ts'o 	 * inode needs to be dropped when the IO is complete, even for
297469c499d1STheodore Ts'o 	 * a 0 byte write or a failure.
29758d5d02e6SMingming Cao 	 *
297669c499d1STheodore Ts'o 	 * In the successful AIO DIO case, the io_end structure will
297769c499d1STheodore Ts'o 	 * be destroyed and the reference to the inode will be dropped
29788d5d02e6SMingming Cao 	 * after the end_io call back function is called.
29798d5d02e6SMingming Cao 	 *
298069c499d1STheodore Ts'o 	 * In the case of a 0 byte write or an error, since VFS
298169c499d1STheodore Ts'o 	 * direct IO won't invoke the end_io call back function, we
298269c499d1STheodore Ts'o 	 * need to free the end_io structure here.
29838d5d02e6SMingming Cao 	 */
29848d5d02e6SMingming Cao 	if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
29858d5d02e6SMingming Cao 		ext4_free_io_end(iocb->private);
29868d5d02e6SMingming Cao 		iocb->private = NULL;
2987729f52c6SZheng Liu 	} else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
29885f524950SMingming 						EXT4_STATE_DIO_UNWRITTEN)) {
2989109f5565SMingming 		int err;
29908d5d02e6SMingming Cao 		/*
29918d5d02e6SMingming Cao 		 * for the non-AIO case, since the IO is already
299225985edcSLucas De Marchi 		 * completed, we could do the conversion right here
29938d5d02e6SMingming Cao 		 */
2994109f5565SMingming 		err = ext4_convert_unwritten_extents(inode,
29958d5d02e6SMingming Cao 						     offset, ret);
2996109f5565SMingming 		if (err < 0)
2997109f5565SMingming 			ret = err;
299819f5fb7aSTheodore Ts'o 		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
2999109f5565SMingming 	}
30004bd809dbSZheng Liu 
30014bd809dbSZheng Liu retake_lock:
30024bd809dbSZheng Liu 	/* take i_mutex locking again if we did an overwrite dio */
30034bd809dbSZheng Liu 	if (overwrite) {
30041f555cfaSDmitry Monakhov 		inode_dio_done(inode);
30054bd809dbSZheng Liu 		up_read(&EXT4_I(inode)->i_data_sem);
30064bd809dbSZheng Liu 		mutex_lock(&inode->i_mutex);
30074bd809dbSZheng Liu 	}
30084bd809dbSZheng Liu 
30094c0425ffSMingming Cao 	return ret;
30104c0425ffSMingming Cao }
30118d5d02e6SMingming Cao 
30124c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
30134c0425ffSMingming Cao 			      const struct iovec *iov, loff_t offset,
30144c0425ffSMingming Cao 			      unsigned long nr_segs)
30154c0425ffSMingming Cao {
30164c0425ffSMingming Cao 	struct file *file = iocb->ki_filp;
30174c0425ffSMingming Cao 	struct inode *inode = file->f_mapping->host;
30180562e0baSJiaying Zhang 	ssize_t ret;
30194c0425ffSMingming Cao 
302084ebd795STheodore Ts'o 	/*
302184ebd795STheodore Ts'o 	 * If we are doing data journalling we don't support O_DIRECT
302284ebd795STheodore Ts'o 	 */
302384ebd795STheodore Ts'o 	if (ext4_should_journal_data(inode))
302484ebd795STheodore Ts'o 		return 0;
302584ebd795STheodore Ts'o 
302646c7f254STao Ma 	/* Let buffer I/O handle the inline data case. */
302746c7f254STao Ma 	if (ext4_has_inline_data(inode))
302846c7f254STao Ma 		return 0;
302946c7f254STao Ma 
30300562e0baSJiaying Zhang 	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
303112e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
30320562e0baSJiaying Zhang 		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
30330562e0baSJiaying Zhang 	else
30340562e0baSJiaying Zhang 		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
30350562e0baSJiaying Zhang 	trace_ext4_direct_IO_exit(inode, offset,
30360562e0baSJiaying Zhang 				  iov_length(iov, nr_segs), rw, ret);
30370562e0baSJiaying Zhang 	return ret;
30384c0425ffSMingming Cao }
30394c0425ffSMingming Cao 
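/*
 * Editor's note -- a compact summary, not part of the original file, of how
 * ext4_direct_IO() above dispatches; returning 0 makes the VFS fall back to
 * buffered I/O:
 *
 *	data journalling or inline data -> return 0 (buffered path)
 *	extent-mapped inode             -> ext4_ext_direct_IO()
 *	indirect-mapped inode           -> ext4_ind_direct_IO()
 */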
3040ac27a0ecSDave Kleikamp /*
3041617ba13bSMingming Cao  * Pages can be marked dirty completely asynchronously from ext4's journalling
3042ac27a0ecSDave Kleikamp  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3043ac27a0ecSDave Kleikamp  * much here because ->set_page_dirty is called under VFS locks.  The page is
3044ac27a0ecSDave Kleikamp  * not necessarily locked.
3045ac27a0ecSDave Kleikamp  *
3046ac27a0ecSDave Kleikamp  * We cannot just dirty the page and leave attached buffers clean, because the
3047ac27a0ecSDave Kleikamp  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3048ac27a0ecSDave Kleikamp  * or jbddirty because all the journalling code will explode.
3049ac27a0ecSDave Kleikamp  *
3050ac27a0ecSDave Kleikamp  * So what we do is to mark the page "pending dirty" and next time writepage
3051ac27a0ecSDave Kleikamp  * is called, propagate that into the buffers appropriately.
3052ac27a0ecSDave Kleikamp  */
3053617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page)
3054ac27a0ecSDave Kleikamp {
3055ac27a0ecSDave Kleikamp 	SetPageChecked(page);
3056ac27a0ecSDave Kleikamp 	return __set_page_dirty_nobuffers(page);
3057ac27a0ecSDave Kleikamp }
3058ac27a0ecSDave Kleikamp 
3059617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = {
3060617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3061617ba13bSMingming Cao 	.readpages		= ext4_readpages,
306243ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3063bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3064bfc1af65SNick Piggin 	.write_end		= ext4_ordered_write_end,
3065617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3066617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3067617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
3068617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
3069ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
30708ab22b9aSHisashi Hifumi 	.is_partially_uptodate	= block_is_partially_uptodate,
3071aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3072ac27a0ecSDave Kleikamp };
3073ac27a0ecSDave Kleikamp 
3074617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = {
3075617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3076617ba13bSMingming Cao 	.readpages		= ext4_readpages,
307743ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3078bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3079bfc1af65SNick Piggin 	.write_end		= ext4_writeback_write_end,
3080617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3081617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3082617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
3083617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
3084ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
30858ab22b9aSHisashi Hifumi 	.is_partially_uptodate	= block_is_partially_uptodate,
3086aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3087ac27a0ecSDave Kleikamp };
3088ac27a0ecSDave Kleikamp 
3089617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = {
3090617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3091617ba13bSMingming Cao 	.readpages		= ext4_readpages,
309243ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3093bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3094bfc1af65SNick Piggin 	.write_end		= ext4_journalled_write_end,
3095617ba13bSMingming Cao 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3096617ba13bSMingming Cao 	.bmap			= ext4_bmap,
30974520fb3cSJan Kara 	.invalidatepage		= ext4_journalled_invalidatepage,
3098617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
309984ebd795STheodore Ts'o 	.direct_IO		= ext4_direct_IO,
31008ab22b9aSHisashi Hifumi 	.is_partially_uptodate	= block_is_partially_uptodate,
3101aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3102ac27a0ecSDave Kleikamp };
3103ac27a0ecSDave Kleikamp 
310464769240SAlex Tomas static const struct address_space_operations ext4_da_aops = {
310564769240SAlex Tomas 	.readpage		= ext4_readpage,
310664769240SAlex Tomas 	.readpages		= ext4_readpages,
310743ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
310864769240SAlex Tomas 	.writepages		= ext4_da_writepages,
310964769240SAlex Tomas 	.write_begin		= ext4_da_write_begin,
311064769240SAlex Tomas 	.write_end		= ext4_da_write_end,
311164769240SAlex Tomas 	.bmap			= ext4_bmap,
311264769240SAlex Tomas 	.invalidatepage		= ext4_da_invalidatepage,
311364769240SAlex Tomas 	.releasepage		= ext4_releasepage,
311464769240SAlex Tomas 	.direct_IO		= ext4_direct_IO,
311564769240SAlex Tomas 	.migratepage		= buffer_migrate_page,
31168ab22b9aSHisashi Hifumi 	.is_partially_uptodate	= block_is_partially_uptodate,
3117aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
311864769240SAlex Tomas };
311964769240SAlex Tomas 
3120617ba13bSMingming Cao void ext4_set_aops(struct inode *inode)
3121ac27a0ecSDave Kleikamp {
31223d2b1582SLukas Czerner 	switch (ext4_inode_journal_mode(inode)) {
31233d2b1582SLukas Czerner 	case EXT4_INODE_ORDERED_DATA_MODE:
31243d2b1582SLukas Czerner 		if (test_opt(inode->i_sb, DELALLOC))
3125cd1aac32SAneesh Kumar K.V 			inode->i_mapping->a_ops = &ext4_da_aops;
3126ac27a0ecSDave Kleikamp 		else
31273d2b1582SLukas Czerner 			inode->i_mapping->a_ops = &ext4_ordered_aops;
31283d2b1582SLukas Czerner 		break;
31293d2b1582SLukas Czerner 	case EXT4_INODE_WRITEBACK_DATA_MODE:
31303d2b1582SLukas Czerner 		if (test_opt(inode->i_sb, DELALLOC))
31313d2b1582SLukas Czerner 			inode->i_mapping->a_ops = &ext4_da_aops;
31323d2b1582SLukas Czerner 		else
31333d2b1582SLukas Czerner 			inode->i_mapping->a_ops = &ext4_writeback_aops;
31343d2b1582SLukas Czerner 		break;
31353d2b1582SLukas Czerner 	case EXT4_INODE_JOURNAL_DATA_MODE:
3136617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_journalled_aops;
31373d2b1582SLukas Czerner 		break;
31383d2b1582SLukas Czerner 	default:
31393d2b1582SLukas Czerner 		BUG();
31403d2b1582SLukas Czerner 	}
3141ac27a0ecSDave Kleikamp }
3142ac27a0ecSDave Kleikamp 
31434e96b2dbSAllison Henderson 
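/*
 * Editor's note -- a summary table, not part of the original file, of the
 * mapping ext4_set_aops() above implements:
 *
 *	ordered   + delalloc  -> ext4_da_aops
 *	ordered               -> ext4_ordered_aops
 *	writeback + delalloc  -> ext4_da_aops
 *	writeback             -> ext4_writeback_aops
 *	data journalling      -> ext4_journalled_aops (delalloc unsupported)
 */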
31444e96b2dbSAllison Henderson /*
31454e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers()
31464e96b2dbSAllison Henderson  * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
31474e96b2dbSAllison Henderson  * This function finds and locks the page containing the offset
31484e96b2dbSAllison Henderson  * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
31494e96b2dbSAllison Henderson  * Calling functions that already have the page locked should call
31504e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers_no_lock directly.
31514e96b2dbSAllison Henderson  */
31524e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle,
31534e96b2dbSAllison Henderson 		struct address_space *mapping, loff_t from,
31544e96b2dbSAllison Henderson 		loff_t length, int flags)
31554e96b2dbSAllison Henderson {
31564e96b2dbSAllison Henderson 	struct inode *inode = mapping->host;
31574e96b2dbSAllison Henderson 	struct page *page;
31584e96b2dbSAllison Henderson 	int err = 0;
31594e96b2dbSAllison Henderson 
31604e96b2dbSAllison Henderson 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
31614e96b2dbSAllison Henderson 				   mapping_gfp_mask(mapping) & ~__GFP_FS);
31624e96b2dbSAllison Henderson 	if (!page)
31635129d05fSYongqiang Yang 		return -ENOMEM;
31644e96b2dbSAllison Henderson 
31654e96b2dbSAllison Henderson 	err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
31664e96b2dbSAllison Henderson 							from, length, flags);
31674e96b2dbSAllison Henderson 
31684e96b2dbSAllison Henderson 	unlock_page(page);
31694e96b2dbSAllison Henderson 	page_cache_release(page);
31704e96b2dbSAllison Henderson 	return err;
31714e96b2dbSAllison Henderson }
31724e96b2dbSAllison Henderson 
31734e96b2dbSAllison Henderson /*
31744e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers_no_lock()
31754e96b2dbSAllison Henderson  * Zeros a page range of length 'length' starting from offset 'from'.
31764e96b2dbSAllison Henderson  * Buffer heads that correspond to the block aligned regions of the
31774e96b2dbSAllison Henderson  * zeroed range will be unmapped.  Non-block-aligned regions
31784e96b2dbSAllison Henderson  * will have the corresponding buffer head mapped if needed so that
31794e96b2dbSAllison Henderson  * that region of the page can be updated with the partial zero out.
31804e96b2dbSAllison Henderson  *
31814e96b2dbSAllison Henderson  * This function assumes that the page has already been locked.
31824e96b2dbSAllison Henderson  * The range to be discarded must be contained within the given page.
31834e96b2dbSAllison Henderson  * If the specified range exceeds the end of the page it will be shortened
31844e96b2dbSAllison Henderson  * to the end of the page that corresponds to 'from'.  This function is
31854e96b2dbSAllison Henderson  * appropriate for updating a page and its buffer heads to be unmapped and
31864e96b2dbSAllison Henderson  * zeroed for blocks that have been either released, or are going to be
31874e96b2dbSAllison Henderson  * released.
31884e96b2dbSAllison Henderson  *
31894e96b2dbSAllison Henderson  * handle: The journal handle
31904e96b2dbSAllison Henderson  * inode:  The file's inode
31914e96b2dbSAllison Henderson  * page:   A locked page that contains the offset "from"
31924907cb7bSAnatol Pomozov  * from:   The starting byte offset (from the beginning of the file)
31934e96b2dbSAllison Henderson  *         to begin discarding
31944e96b2dbSAllison Henderson  * len:    The length in bytes to discard
31954e96b2dbSAllison Henderson  * flags:  Optional flags that may be used:
31964e96b2dbSAllison Henderson  *
31974e96b2dbSAllison Henderson  * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
31984e96b2dbSAllison Henderson  *        Only zero the regions of the page whose buffer heads
31994e96b2dbSAllison Henderson  *        have already been unmapped.  This flag is appropriate
32004907cb7bSAnatol Pomozov  *        for updating the contents of a page whose blocks may
32014e96b2dbSAllison Henderson  *        have already been released, and we only want to zero
32024e96b2dbSAllison Henderson  *        out the regions that correspond to those released blocks.
32034e96b2dbSAllison Henderson  *
32044907cb7bSAnatol Pomozov  * Returns zero on success or negative on failure.
32054e96b2dbSAllison Henderson  */
32065f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
32074e96b2dbSAllison Henderson 		struct inode *inode, struct page *page, loff_t from,
32084e96b2dbSAllison Henderson 		loff_t length, int flags)
32094e96b2dbSAllison Henderson {
32104e96b2dbSAllison Henderson 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
32114e96b2dbSAllison Henderson 	unsigned int offset = from & (PAGE_CACHE_SIZE-1);
32124e96b2dbSAllison Henderson 	unsigned int blocksize, max, pos;
32134e96b2dbSAllison Henderson 	ext4_lblk_t iblock;
32144e96b2dbSAllison Henderson 	struct buffer_head *bh;
32154e96b2dbSAllison Henderson 	int err = 0;
32164e96b2dbSAllison Henderson 
32174e96b2dbSAllison Henderson 	blocksize = inode->i_sb->s_blocksize;
32184e96b2dbSAllison Henderson 	max = PAGE_CACHE_SIZE - offset;
32194e96b2dbSAllison Henderson 
32204e96b2dbSAllison Henderson 	if (index != page->index)
32214e96b2dbSAllison Henderson 		return -EINVAL;
32224e96b2dbSAllison Henderson 
32234e96b2dbSAllison Henderson 	/*
32244e96b2dbSAllison Henderson 	 * Correct the length if it does not fall between
32254e96b2dbSAllison Henderson 	 * 'from' and the end of the page
32264e96b2dbSAllison Henderson 	 */
32274e96b2dbSAllison Henderson 	if (length > max || length < 0)
32284e96b2dbSAllison Henderson 		length = max;
32294e96b2dbSAllison Henderson 
32304e96b2dbSAllison Henderson 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
32314e96b2dbSAllison Henderson 
3232093e6e36SYongqiang Yang 	if (!page_has_buffers(page))
32334e96b2dbSAllison Henderson 		create_empty_buffers(page, blocksize, 0);
32344e96b2dbSAllison Henderson 
32354e96b2dbSAllison Henderson 	/* Find the buffer that contains "offset" */
32364e96b2dbSAllison Henderson 	bh = page_buffers(page);
32374e96b2dbSAllison Henderson 	pos = blocksize;
32384e96b2dbSAllison Henderson 	while (offset >= pos) {
32394e96b2dbSAllison Henderson 		bh = bh->b_this_page;
32404e96b2dbSAllison Henderson 		iblock++;
32414e96b2dbSAllison Henderson 		pos += blocksize;
32424e96b2dbSAllison Henderson 	}
32434e96b2dbSAllison Henderson 
32444e96b2dbSAllison Henderson 	pos = offset;
32454e96b2dbSAllison Henderson 	while (pos < offset + length) {
3246e260daf2SYongqiang Yang 		unsigned int end_of_block, range_to_discard;
3247e260daf2SYongqiang Yang 
32484e96b2dbSAllison Henderson 		err = 0;
32494e96b2dbSAllison Henderson 
32504e96b2dbSAllison Henderson 		/* The length of space left to zero and unmap */
32514e96b2dbSAllison Henderson 		range_to_discard = offset + length - pos;
32524e96b2dbSAllison Henderson 
32534e96b2dbSAllison Henderson 		/* The length of space until the end of the block */
32544e96b2dbSAllison Henderson 		end_of_block = blocksize - (pos & (blocksize-1));
32554e96b2dbSAllison Henderson 
32564e96b2dbSAllison Henderson 		/*
32574e96b2dbSAllison Henderson 		 * Do not unmap or zero past end of block
32584e96b2dbSAllison Henderson 		 * for this buffer head
32594e96b2dbSAllison Henderson 		 */
32604e96b2dbSAllison Henderson 		if (range_to_discard > end_of_block)
32614e96b2dbSAllison Henderson 			range_to_discard = end_of_block;
32624e96b2dbSAllison Henderson 
32634e96b2dbSAllison Henderson 
32644e96b2dbSAllison Henderson 		/*
32654e96b2dbSAllison Henderson 		 * Skip this buffer head if we are only zeroing unmapped
32664e96b2dbSAllison Henderson 		 * regions of the page
32674e96b2dbSAllison Henderson 		 */
32684e96b2dbSAllison Henderson 		if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
32694e96b2dbSAllison Henderson buffer_mapped(bh))
32704e96b2dbSAllison Henderson goto next;
32714e96b2dbSAllison Henderson
32724e96b2dbSAllison Henderson /* If the range is block aligned, unmap */
32734e96b2dbSAllison Henderson if (range_to_discard == blocksize) {
32744e96b2dbSAllison Henderson clear_buffer_dirty(bh);
32754e96b2dbSAllison Henderson bh->b_bdev = NULL;
32764e96b2dbSAllison Henderson clear_buffer_mapped(bh);
32774e96b2dbSAllison Henderson clear_buffer_req(bh);
32784e96b2dbSAllison Henderson clear_buffer_new(bh);
32794e96b2dbSAllison Henderson clear_buffer_delay(bh);
32804e96b2dbSAllison Henderson clear_buffer_unwritten(bh);
32814e96b2dbSAllison Henderson clear_buffer_uptodate(bh);
32824e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard);
32834e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Buffer discarded");
32844e96b2dbSAllison Henderson goto next;
32854e96b2dbSAllison Henderson }
32864e96b2dbSAllison Henderson
32874e96b2dbSAllison Henderson /*
32884e96b2dbSAllison Henderson * If this block is not completely contained in the range
32894e96b2dbSAllison Henderson * to be discarded, then it is not going to be released. Because
32904e96b2dbSAllison Henderson * we need to keep this block, we need to make sure this part
32914e96b2dbSAllison Henderson * of the page is uptodate before we modify it by writing
32924e96b2dbSAllison Henderson * partial zeros on it.
32934e96b2dbSAllison Henderson */
32944e96b2dbSAllison Henderson if (!buffer_mapped(bh)) {
32954e96b2dbSAllison Henderson /*
32964e96b2dbSAllison Henderson * Buffer head must be mapped before we can read
32974e96b2dbSAllison Henderson * from the block
32984e96b2dbSAllison Henderson */
32994e96b2dbSAllison Henderson BUFFER_TRACE(bh, "unmapped");
33004e96b2dbSAllison Henderson ext4_get_block(inode, iblock, bh, 0);
33014e96b2dbSAllison Henderson /* unmapped? It's a hole - nothing to do */
33024e96b2dbSAllison Henderson if (!buffer_mapped(bh)) {
33034e96b2dbSAllison Henderson BUFFER_TRACE(bh, "still unmapped");
33044e96b2dbSAllison Henderson goto next;
33054e96b2dbSAllison Henderson }
33064e96b2dbSAllison Henderson }
33074e96b2dbSAllison Henderson
33084e96b2dbSAllison Henderson /* Ok, it's mapped. Make sure it's up-to-date */
33094e96b2dbSAllison Henderson if (PageUptodate(page))
33104e96b2dbSAllison Henderson set_buffer_uptodate(bh);
33114e96b2dbSAllison Henderson
33124e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) {
33134e96b2dbSAllison Henderson err = -EIO;
33144e96b2dbSAllison Henderson ll_rw_block(READ, 1, &bh);
33154e96b2dbSAllison Henderson wait_on_buffer(bh);
33164e96b2dbSAllison Henderson /* Uhhuh. Read error.
Complain and punt. */
33174e96b2dbSAllison Henderson if (!buffer_uptodate(bh))
33184e96b2dbSAllison Henderson goto next;
33194e96b2dbSAllison Henderson }
33204e96b2dbSAllison Henderson
33214e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) {
33224e96b2dbSAllison Henderson BUFFER_TRACE(bh, "get write access");
33234e96b2dbSAllison Henderson err = ext4_journal_get_write_access(handle, bh);
33244e96b2dbSAllison Henderson if (err)
33254e96b2dbSAllison Henderson goto next;
33264e96b2dbSAllison Henderson }
33274e96b2dbSAllison Henderson
33284e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard);
33294e96b2dbSAllison Henderson
33304e96b2dbSAllison Henderson err = 0;
33314e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) {
33324e96b2dbSAllison Henderson err = ext4_handle_dirty_metadata(handle, inode, bh);
3333decbd919STheodore Ts'o } else
33344e96b2dbSAllison Henderson mark_buffer_dirty(bh);
33354e96b2dbSAllison Henderson
33364e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Partial buffer zeroed");
33374e96b2dbSAllison Henderson next:
33384e96b2dbSAllison Henderson bh = bh->b_this_page;
33394e96b2dbSAllison Henderson iblock++;
33404e96b2dbSAllison Henderson pos += range_to_discard;
33414e96b2dbSAllison Henderson }
33424e96b2dbSAllison Henderson
33434e96b2dbSAllison Henderson return err;
33444e96b2dbSAllison Henderson }
33454e96b2dbSAllison Henderson
334691ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode)
334791ef4cafSDuane Griffin {
334891ef4cafSDuane Griffin if (S_ISREG(inode->i_mode))
334991ef4cafSDuane Griffin return 1;
335091ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode))
335191ef4cafSDuane Griffin return 1;
335291ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode))
335391ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode);
335491ef4cafSDuane Griffin return 0;
335591ef4cafSDuane Griffin }
335691ef4cafSDuane Griffin
3357ac27a0ecSDave Kleikamp /*
3358a4bb6b64SAllison Henderson * ext4_punch_hole: punches a hole in a file by releasing the blocks
3359a4bb6b64SAllison Henderson * associated with the given offset and length
3360a4bb6b64SAllison Henderson *
3361a4bb6b64SAllison Henderson * @file: The file within which the hole will be punched
3362a4bb6b64SAllison Henderson * @offset: The offset where the hole will begin
3363a4bb6b64SAllison Henderson * @length: The length of the hole
3364a4bb6b64SAllison Henderson *
33654907cb7bSAnatol Pomozov * Returns: 0 on success or negative on failure
3366a4bb6b64SAllison Henderson */
3367a4bb6b64SAllison Henderson
3368a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3369a4bb6b64SAllison Henderson {
3370a4bb6b64SAllison Henderson struct inode *inode = file->f_path.dentry->d_inode;
3371a4bb6b64SAllison Henderson if (!S_ISREG(inode->i_mode))
337273355192SAllison Henderson return -EOPNOTSUPP;
3373a4bb6b64SAllison Henderson
33748bad6fc8SZheng Liu if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
33758bad6fc8SZheng Liu return ext4_ind_punch_hole(file, offset, length);
3376a4bb6b64SAllison Henderson
3377bab08ab9STheodore Ts'o if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3378bab08ab9STheodore Ts'o /* TODO: Add support for bigalloc file systems */
337973355192SAllison Henderson return -EOPNOTSUPP;
3380bab08ab9STheodore Ts'o }
3381bab08ab9STheodore Ts'o
3382aaddea81SZheng Liu trace_ext4_punch_hole(inode, offset, length);
3383aaddea81SZheng Liu
3384a4bb6b64SAllison Henderson return ext4_ext_punch_hole(file, offset, length);
3385a4bb6b64SAllison Henderson }
3386a4bb6b64SAllison Henderson
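/*
 * Illustrative usage (added annotation, not part of the original file):
 * userspace typically reaches ext4_punch_hole() via the fallocate(2)
 * system call, e.g.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len);
 *
 * where FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE,
 * so punching a hole never changes i_size.
 */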
3387a4bb6b64SAllison Henderson /* 3388617ba13bSMingming Cao * ext4_truncate() 3389ac27a0ecSDave Kleikamp * 3390617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3391617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3392ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 3393ac27a0ecSDave Kleikamp * 339442b2aa86SJustin P. Mattock * As we work through the truncate and commit bits of it to the journal there 3395ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3396ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3397ac27a0ecSDave Kleikamp * 3398ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3399ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3400ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3401ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3402ac27a0ecSDave Kleikamp * left-to-right works OK too). 3403ac27a0ecSDave Kleikamp * 3404ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3405ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3406ac27a0ecSDave Kleikamp * 3407ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3408617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3409ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3410617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3411617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3412ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3413617ba13bSMingming Cao * ext4_truncate() run will find them and release them. 
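 *
 * A concrete crash scenario (illustrative restatement of the above): the
 * new i_size is committed, but the machine dies before all blocks beyond
 * it are freed. Journal replay first restores metadata consistency; then
 * ext4_orphan_cleanup() finds this inode on the orphan list and re-invokes
 * ext4_truncate(), which releases the leftover blocks past i_size.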
3414ac27a0ecSDave Kleikamp */ 3415617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3416ac27a0ecSDave Kleikamp { 34170562e0baSJiaying Zhang trace_ext4_truncate_enter(inode); 34180562e0baSJiaying Zhang 341991ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3420ac27a0ecSDave Kleikamp return; 3421ac27a0ecSDave Kleikamp 342212e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3423c8d46e41SJiaying Zhang 34245534fb5bSTheodore Ts'o if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 342519f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 34267d8f9f7dSTheodore Ts'o 3427aef1c851STao Ma if (ext4_has_inline_data(inode)) { 3428aef1c851STao Ma int has_inline = 1; 3429aef1c851STao Ma 3430aef1c851STao Ma ext4_inline_data_truncate(inode, &has_inline); 3431aef1c851STao Ma if (has_inline) 3432aef1c851STao Ma return; 3433aef1c851STao Ma } 3434aef1c851STao Ma 3435ff9893dcSAmir Goldstein if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3436cf108bcaSJan Kara ext4_ext_truncate(inode); 3437ff9893dcSAmir Goldstein else 3438ff9893dcSAmir Goldstein ext4_ind_truncate(inode); 3439a86c6181SAlex Tomas 34400562e0baSJiaying Zhang trace_ext4_truncate_exit(inode); 3441ac27a0ecSDave Kleikamp } 3442ac27a0ecSDave Kleikamp 3443ac27a0ecSDave Kleikamp /* 3444617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3445ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3446ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3447ac27a0ecSDave Kleikamp * inode. 3448ac27a0ecSDave Kleikamp */ 3449617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3450617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3451ac27a0ecSDave Kleikamp { 3452240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3453ac27a0ecSDave Kleikamp struct buffer_head *bh; 3454240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3455240799cdSTheodore Ts'o ext4_fsblk_t block; 3456240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3457ac27a0ecSDave Kleikamp 34583a06d778SAneesh Kumar K.V iloc->bh = NULL; 3459240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3460ac27a0ecSDave Kleikamp return -EIO; 3461ac27a0ecSDave Kleikamp 3462240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3463240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3464240799cdSTheodore Ts'o if (!gdp) 3465240799cdSTheodore Ts'o return -EIO; 3466240799cdSTheodore Ts'o 3467240799cdSTheodore Ts'o /* 3468240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 3469240799cdSTheodore Ts'o */ 347000d09882STao Ma inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 3471240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 3472240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 3473240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3474240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3475240799cdSTheodore Ts'o 3476240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 3477aebf0243SWang Shilong if (unlikely(!bh)) 3478860d21e2STheodore Ts'o return -ENOMEM; 3479ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3480ac27a0ecSDave Kleikamp lock_buffer(bh); 34819c83a923SHidehiro Kawai 34829c83a923SHidehiro Kawai /* 34839c83a923SHidehiro Kawai * If the buffer has the write error flag, 
we have failed 34849c83a923SHidehiro Kawai * to write out another inode in the same block. In this 34859c83a923SHidehiro Kawai * case, we don't have to read the block because we may 34869c83a923SHidehiro Kawai * read the old inode data successfully. 34879c83a923SHidehiro Kawai */ 34889c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 34899c83a923SHidehiro Kawai set_buffer_uptodate(bh); 34909c83a923SHidehiro Kawai 3491ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3492ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3493ac27a0ecSDave Kleikamp unlock_buffer(bh); 3494ac27a0ecSDave Kleikamp goto has_buffer; 3495ac27a0ecSDave Kleikamp } 3496ac27a0ecSDave Kleikamp 3497ac27a0ecSDave Kleikamp /* 3498ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3499ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3500ac27a0ecSDave Kleikamp * block. 3501ac27a0ecSDave Kleikamp */ 3502ac27a0ecSDave Kleikamp if (in_mem) { 3503ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3504240799cdSTheodore Ts'o int i, start; 3505ac27a0ecSDave Kleikamp 3506240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 3507ac27a0ecSDave Kleikamp 3508ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? */ 3509240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3510aebf0243SWang Shilong if (unlikely(!bitmap_bh)) 3511ac27a0ecSDave Kleikamp goto make_io; 3512ac27a0ecSDave Kleikamp 3513ac27a0ecSDave Kleikamp /* 3514ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3515ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3516ac27a0ecSDave Kleikamp * of one, so skip it. 3517ac27a0ecSDave Kleikamp */ 3518ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3519ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3520ac27a0ecSDave Kleikamp goto make_io; 3521ac27a0ecSDave Kleikamp } 3522240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 3523ac27a0ecSDave Kleikamp if (i == inode_offset) 3524ac27a0ecSDave Kleikamp continue; 3525617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3526ac27a0ecSDave Kleikamp break; 3527ac27a0ecSDave Kleikamp } 3528ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3529240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 3530ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3531ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3532ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3533ac27a0ecSDave Kleikamp unlock_buffer(bh); 3534ac27a0ecSDave Kleikamp goto has_buffer; 3535ac27a0ecSDave Kleikamp } 3536ac27a0ecSDave Kleikamp } 3537ac27a0ecSDave Kleikamp 3538ac27a0ecSDave Kleikamp make_io: 3539ac27a0ecSDave Kleikamp /* 3540240799cdSTheodore Ts'o * If we need to do any I/O, try to pre-readahead extra 3541240799cdSTheodore Ts'o * blocks from the inode table. 
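 *
 * For example (illustrative, assuming s_inode_readahead_blks == 32):
 * the start block is rounded down to a 32-block aligned boundary and
 * the readahead window is then clamped to the portion of this group's
 * inode table that actually contains used inodes.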
3542240799cdSTheodore Ts'o */ 3543240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 3544240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 3545240799cdSTheodore Ts'o unsigned num; 3546240799cdSTheodore Ts'o 3547240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 3548b713a5ecSTheodore Ts'o /* s_inode_readahead_blks is always a power of 2 */ 3549240799cdSTheodore Ts'o b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 3550240799cdSTheodore Ts'o if (table > b) 3551240799cdSTheodore Ts'o b = table; 3552240799cdSTheodore Ts'o end = b + EXT4_SB(sb)->s_inode_readahead_blks; 3553240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 3554feb0ab32SDarrick J. Wong if (ext4_has_group_desc_csum(sb)) 3555560671a0SAneesh Kumar K.V num -= ext4_itable_unused_count(sb, gdp); 3556240799cdSTheodore Ts'o table += num / inodes_per_block; 3557240799cdSTheodore Ts'o if (end > table) 3558240799cdSTheodore Ts'o end = table; 3559240799cdSTheodore Ts'o while (b <= end) 3560240799cdSTheodore Ts'o sb_breadahead(sb, b++); 3561240799cdSTheodore Ts'o } 3562240799cdSTheodore Ts'o 3563240799cdSTheodore Ts'o /* 3564ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 3565ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 3566ac27a0ecSDave Kleikamp * Read the block from disk. 3567ac27a0ecSDave Kleikamp */ 35680562e0baSJiaying Zhang trace_ext4_load_inode(inode); 3569ac27a0ecSDave Kleikamp get_bh(bh); 3570ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 357165299a3bSChristoph Hellwig submit_bh(READ | REQ_META | REQ_PRIO, bh); 3572ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3573ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3574c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3575c398eda0STheodore Ts'o "unable to read itable block"); 3576ac27a0ecSDave Kleikamp brelse(bh); 3577ac27a0ecSDave Kleikamp return -EIO; 3578ac27a0ecSDave Kleikamp } 3579ac27a0ecSDave Kleikamp } 3580ac27a0ecSDave Kleikamp has_buffer: 3581ac27a0ecSDave Kleikamp iloc->bh = bh; 3582ac27a0ecSDave Kleikamp return 0; 3583ac27a0ecSDave Kleikamp } 3584ac27a0ecSDave Kleikamp 3585617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 3586ac27a0ecSDave Kleikamp { 3587ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. 
*/ 3588617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 358919f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 3590ac27a0ecSDave Kleikamp } 3591ac27a0ecSDave Kleikamp 3592617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 3593ac27a0ecSDave Kleikamp { 3594617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 3595ac27a0ecSDave Kleikamp 3596ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 3597617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 3598ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 3599617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 3600ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 3601617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 3602ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 3603617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 3604ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 3605617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 3606ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 3607ac27a0ecSDave Kleikamp } 3608ac27a0ecSDave Kleikamp 3609ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 3610ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 3611ff9ddf7eSJan Kara { 361284a8dce2SDmitry Monakhov unsigned int vfs_fl; 361384a8dce2SDmitry Monakhov unsigned long old_fl, new_fl; 3614ff9ddf7eSJan Kara 361584a8dce2SDmitry Monakhov do { 361684a8dce2SDmitry Monakhov vfs_fl = ei->vfs_inode.i_flags; 361784a8dce2SDmitry Monakhov old_fl = ei->i_flags; 361884a8dce2SDmitry Monakhov new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 361984a8dce2SDmitry Monakhov EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| 362084a8dce2SDmitry Monakhov EXT4_DIRSYNC_FL); 362184a8dce2SDmitry Monakhov if (vfs_fl & S_SYNC) 362284a8dce2SDmitry Monakhov new_fl |= EXT4_SYNC_FL; 362384a8dce2SDmitry Monakhov if (vfs_fl & S_APPEND) 362484a8dce2SDmitry Monakhov new_fl |= EXT4_APPEND_FL; 362584a8dce2SDmitry Monakhov if (vfs_fl & S_IMMUTABLE) 362684a8dce2SDmitry Monakhov new_fl |= EXT4_IMMUTABLE_FL; 362784a8dce2SDmitry Monakhov if (vfs_fl & S_NOATIME) 362884a8dce2SDmitry Monakhov new_fl |= EXT4_NOATIME_FL; 362984a8dce2SDmitry Monakhov if (vfs_fl & S_DIRSYNC) 363084a8dce2SDmitry Monakhov new_fl |= EXT4_DIRSYNC_FL; 363184a8dce2SDmitry Monakhov } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); 3632ff9ddf7eSJan Kara } 3633de9a55b8STheodore Ts'o 36340fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 36350fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 36360fc1b451SAneesh Kumar K.V { 36370fc1b451SAneesh Kumar K.V blkcnt_t i_blocks ; 36388180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 36398180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 36400fc1b451SAneesh Kumar K.V 36410fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 36420fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 36430fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 36440fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 36450fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 364607a03824STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 36478180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 36488180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 36498180a562SAneesh Kumar K.V } else { 36500fc1b451SAneesh Kumar K.V return i_blocks; 36518180a562SAneesh Kumar K.V } 
36520fc1b451SAneesh Kumar K.V } else { 36530fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 36540fc1b451SAneesh Kumar K.V } 36550fc1b451SAneesh Kumar K.V } 3656ff9ddf7eSJan Kara 3657152a7b0aSTao Ma static inline void ext4_iget_extra_inode(struct inode *inode, 3658152a7b0aSTao Ma struct ext4_inode *raw_inode, 3659152a7b0aSTao Ma struct ext4_inode_info *ei) 3660152a7b0aSTao Ma { 3661152a7b0aSTao Ma __le32 *magic = (void *)raw_inode + 3662152a7b0aSTao Ma EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 366367cf5b09STao Ma if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 3664152a7b0aSTao Ma ext4_set_inode_state(inode, EXT4_STATE_XATTR); 366567cf5b09STao Ma ext4_find_inline_data_nolock(inode); 3666f19d5870STao Ma } else 3667f19d5870STao Ma EXT4_I(inode)->i_inline_off = 0; 3668152a7b0aSTao Ma } 3669152a7b0aSTao Ma 36701d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 3671ac27a0ecSDave Kleikamp { 3672617ba13bSMingming Cao struct ext4_iloc iloc; 3673617ba13bSMingming Cao struct ext4_inode *raw_inode; 36741d1fe1eeSDavid Howells struct ext4_inode_info *ei; 36751d1fe1eeSDavid Howells struct inode *inode; 3676b436b9beSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 36771d1fe1eeSDavid Howells long ret; 3678ac27a0ecSDave Kleikamp int block; 367908cefc7aSEric W. Biederman uid_t i_uid; 368008cefc7aSEric W. Biederman gid_t i_gid; 3681ac27a0ecSDave Kleikamp 36821d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 36831d1fe1eeSDavid Howells if (!inode) 36841d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 36851d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 36861d1fe1eeSDavid Howells return inode; 36871d1fe1eeSDavid Howells 36881d1fe1eeSDavid Howells ei = EXT4_I(inode); 36897dc57615SPeter Huewe iloc.bh = NULL; 3690ac27a0ecSDave Kleikamp 36911d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 36921d1fe1eeSDavid Howells if (ret < 0) 3693ac27a0ecSDave Kleikamp goto bad_inode; 3694617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 3695814525f4SDarrick J. Wong 3696814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3697814525f4SDarrick J. Wong ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 3698814525f4SDarrick J. Wong if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 3699814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)) { 3700814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)", 3701814525f4SDarrick J. Wong EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize, 3702814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)); 3703814525f4SDarrick J. Wong ret = -EIO; 3704814525f4SDarrick J. Wong goto bad_inode; 3705814525f4SDarrick J. Wong } 3706814525f4SDarrick J. Wong } else 3707814525f4SDarrick J. Wong ei->i_extra_isize = 0; 3708814525f4SDarrick J. Wong 3709814525f4SDarrick J. Wong /* Precompute checksum seed for inode metadata */ 3710814525f4SDarrick J. Wong if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3711814525f4SDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { 3712814525f4SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3713814525f4SDarrick J. Wong __u32 csum; 3714814525f4SDarrick J. Wong __le32 inum = cpu_to_le32(inode->i_ino); 3715814525f4SDarrick J. Wong __le32 gen = raw_inode->i_generation; 3716814525f4SDarrick J. Wong csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 3717814525f4SDarrick J. Wong sizeof(inum)); 3718814525f4SDarrick J. Wong ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 3719814525f4SDarrick J. 
Wong sizeof(gen)); 3720814525f4SDarrick J. Wong } 3721814525f4SDarrick J. Wong 3722814525f4SDarrick J. Wong if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 3723814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "checksum invalid"); 3724814525f4SDarrick J. Wong ret = -EIO; 3725814525f4SDarrick J. Wong goto bad_inode; 3726814525f4SDarrick J. Wong } 3727814525f4SDarrick J. Wong 3728ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 372908cefc7aSEric W. Biederman i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 373008cefc7aSEric W. Biederman i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 3731ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 373208cefc7aSEric W. Biederman i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 373308cefc7aSEric W. Biederman i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 3734ac27a0ecSDave Kleikamp } 373508cefc7aSEric W. Biederman i_uid_write(inode, i_uid); 373608cefc7aSEric W. Biederman i_gid_write(inode, i_gid); 3737bfe86848SMiklos Szeredi set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 3738ac27a0ecSDave Kleikamp 3739353eb83cSTheodore Ts'o ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 374067cf5b09STao Ma ei->i_inline_off = 0; 3741ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 3742ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 3743ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 3744ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes 3745ac27a0ecSDave Kleikamp * the test is that same one that e2fsck uses 3746ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 3747ac27a0ecSDave Kleikamp */ 3748ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 3749ac27a0ecSDave Kleikamp if (inode->i_mode == 0 || 3750617ba13bSMingming Cao !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 3751ac27a0ecSDave Kleikamp /* this inode is deleted */ 37521d1fe1eeSDavid Howells ret = -ESTALE; 3753ac27a0ecSDave Kleikamp goto bad_inode; 3754ac27a0ecSDave Kleikamp } 3755ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 3756ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 3757ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 3758ac27a0ecSDave Kleikamp * the process of deleting those. */ 3759ac27a0ecSDave Kleikamp } 3760ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 37610fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 37627973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 3763a9e81742STheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 3764a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 3765a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 3766a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 3767ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3768a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 3769a9e7f447SDmitry Monakhov ei->i_reserved_quota = 0; 3770a9e7f447SDmitry Monakhov #endif 3771ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 3772ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 3773a4912123STheodore Ts'o ei->i_last_alloc_group = ~0; 3774ac27a0ecSDave Kleikamp /* 3775ac27a0ecSDave Kleikamp * NOTE! 
The in-memory inode i_data array is in little-endian order 3776ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 3777ac27a0ecSDave Kleikamp */ 3778617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 3779ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 3780ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 3781ac27a0ecSDave Kleikamp 3782b436b9beSJan Kara /* 3783b436b9beSJan Kara * Set transaction id's of transactions that have to be committed 3784b436b9beSJan Kara * to finish f[data]sync. We set them to currently running transaction 3785b436b9beSJan Kara * as we cannot be sure that the inode or some of its metadata isn't 3786b436b9beSJan Kara * part of the transaction - the inode could have been reclaimed and 3787b436b9beSJan Kara * now it is reread from disk. 3788b436b9beSJan Kara */ 3789b436b9beSJan Kara if (journal) { 3790b436b9beSJan Kara transaction_t *transaction; 3791b436b9beSJan Kara tid_t tid; 3792b436b9beSJan Kara 3793a931da6aSTheodore Ts'o read_lock(&journal->j_state_lock); 3794b436b9beSJan Kara if (journal->j_running_transaction) 3795b436b9beSJan Kara transaction = journal->j_running_transaction; 3796b436b9beSJan Kara else 3797b436b9beSJan Kara transaction = journal->j_committing_transaction; 3798b436b9beSJan Kara if (transaction) 3799b436b9beSJan Kara tid = transaction->t_tid; 3800b436b9beSJan Kara else 3801b436b9beSJan Kara tid = journal->j_commit_sequence; 3802a931da6aSTheodore Ts'o read_unlock(&journal->j_state_lock); 3803b436b9beSJan Kara ei->i_sync_tid = tid; 3804b436b9beSJan Kara ei->i_datasync_tid = tid; 3805b436b9beSJan Kara } 3806b436b9beSJan Kara 38070040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3808ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 3809ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. */ 3810617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 3811617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 3812ac27a0ecSDave Kleikamp } else { 3813152a7b0aSTao Ma ext4_iget_extra_inode(inode, raw_inode, ei); 3814ac27a0ecSDave Kleikamp } 3815814525f4SDarrick J. 
Wong } 3816ac27a0ecSDave Kleikamp 3817ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 3818ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 3819ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 3820ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 3821ef7f3835SKalpak Shah 382225ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 382325ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 382425ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 382525ec56b5SJean Noel Cordenner inode->i_version |= 382625ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 382725ec56b5SJean Noel Cordenner } 382825ec56b5SJean Noel Cordenner 3829c4b5a614STheodore Ts'o ret = 0; 3830485c26ecSTheodore Ts'o if (ei->i_file_acl && 38311032988cSTheodore Ts'o !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 383224676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 383324676da4STheodore Ts'o ei->i_file_acl); 3834485c26ecSTheodore Ts'o ret = -EIO; 3835485c26ecSTheodore Ts'o goto bad_inode; 3836f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 3837f19d5870STao Ma if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 3838f19d5870STao Ma if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 3839c4b5a614STheodore Ts'o (S_ISLNK(inode->i_mode) && 3840f19d5870STao Ma !ext4_inode_is_fast_symlink(inode)))) 38417a262f7cSAneesh Kumar K.V /* Validate extent which is part of inode */ 38427a262f7cSAneesh Kumar K.V ret = ext4_ext_check_inode(inode); 3843fe2c8191SThiemo Nagel } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 3844fe2c8191SThiemo Nagel (S_ISLNK(inode->i_mode) && 3845fe2c8191SThiemo Nagel !ext4_inode_is_fast_symlink(inode))) { 3846fe2c8191SThiemo Nagel /* Validate block references which are part of inode */ 38471f7d1e77STheodore Ts'o ret = ext4_ind_check_inode(inode); 3848fe2c8191SThiemo Nagel } 3849f19d5870STao Ma } 3850567f3e9aSTheodore Ts'o if (ret) 38517a262f7cSAneesh Kumar K.V goto bad_inode; 38527a262f7cSAneesh Kumar K.V 3853ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 3854617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 3855617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 3856617ba13bSMingming Cao ext4_set_aops(inode); 3857ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 3858617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 3859617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 3860ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 3861e83c1397SDuane Griffin if (ext4_inode_is_fast_symlink(inode)) { 3862617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 3863e83c1397SDuane Griffin nd_terminate_link(ei->i_data, inode->i_size, 3864e83c1397SDuane Griffin sizeof(ei->i_data) - 1); 3865e83c1397SDuane Griffin } else { 3866617ba13bSMingming Cao inode->i_op = &ext4_symlink_inode_operations; 3867617ba13bSMingming Cao ext4_set_aops(inode); 3868ac27a0ecSDave Kleikamp } 3869563bdd61STheodore Ts'o } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 3870563bdd61STheodore Ts'o S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 3871617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 3872ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 3873ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3874ac27a0ecSDave Kleikamp 
old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 3875ac27a0ecSDave Kleikamp else 3876ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3877ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 3878563bdd61STheodore Ts'o } else { 3879563bdd61STheodore Ts'o ret = -EIO; 388024676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 3881563bdd61STheodore Ts'o goto bad_inode; 3882ac27a0ecSDave Kleikamp } 3883ac27a0ecSDave Kleikamp brelse(iloc.bh); 3884617ba13bSMingming Cao ext4_set_inode_flags(inode); 38851d1fe1eeSDavid Howells unlock_new_inode(inode); 38861d1fe1eeSDavid Howells return inode; 3887ac27a0ecSDave Kleikamp 3888ac27a0ecSDave Kleikamp bad_inode: 3889567f3e9aSTheodore Ts'o brelse(iloc.bh); 38901d1fe1eeSDavid Howells iget_failed(inode); 38911d1fe1eeSDavid Howells return ERR_PTR(ret); 3892ac27a0ecSDave Kleikamp } 3893ac27a0ecSDave Kleikamp 38940fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 38950fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 38960fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 38970fc1b451SAneesh Kumar K.V { 38980fc1b451SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 38990fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks; 39000fc1b451SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 39010fc1b451SAneesh Kumar K.V 39020fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) { 39030fc1b451SAneesh Kumar K.V /* 39044907cb7bSAnatol Pomozov * i_blocks can be represented in a 32 bit variable 39050fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 39060fc1b451SAneesh Kumar K.V */ 39078180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39080fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0; 390984a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 3910f287a1a5STheodore Ts'o return 0; 3911f287a1a5STheodore Ts'o } 3912f287a1a5STheodore Ts'o if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) 3913f287a1a5STheodore Ts'o return -EFBIG; 3914f287a1a5STheodore Ts'o 3915f287a1a5STheodore Ts'o if (i_blocks <= 0xffffffffffffULL) { 39160fc1b451SAneesh Kumar K.V /* 39170fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable 39180fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 39190fc1b451SAneesh Kumar K.V */ 39208180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39210fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 392284a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 39230fc1b451SAneesh Kumar K.V } else { 392484a8dce2SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 39258180a562SAneesh Kumar K.V /* i_block is stored in file system block size */ 39268180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9); 39278180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39288180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 39290fc1b451SAneesh Kumar K.V } 3930f287a1a5STheodore Ts'o return 0; 39310fc1b451SAneesh Kumar K.V } 39320fc1b451SAneesh Kumar K.V 3933ac27a0ecSDave Kleikamp /* 3934ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the 3935ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the 3936ac27a0ecSDave Kleikamp * buffer_head in the inode location struct. 
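 * (That is, iloc->bh is released via brelse() here on both the success
 * and the error path, so the caller must not touch it afterwards.)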
3937ac27a0ecSDave Kleikamp *
3938ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh.
3939ac27a0ecSDave Kleikamp */
3940617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle,
3941ac27a0ecSDave Kleikamp struct inode *inode,
3942830156c7SFrank Mayhar struct ext4_iloc *iloc)
3943ac27a0ecSDave Kleikamp {
3944617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
3945617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode);
3946ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh;
3947ac27a0ecSDave Kleikamp int err = 0, rc, block;
3948b71fc079SJan Kara int need_datasync = 0;
394908cefc7aSEric W. Biederman uid_t i_uid;
395008cefc7aSEric W. Biederman gid_t i_gid;
3951ac27a0ecSDave Kleikamp
3952ac27a0ecSDave Kleikamp /* For fields not tracked in the in-memory inode,
3953ac27a0ecSDave Kleikamp * initialise them to zero for new inodes. */
395419f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
3955617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
3956ac27a0ecSDave Kleikamp
3957ff9ddf7eSJan Kara ext4_get_inode_flags(ei);
3958ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode);
395908cefc7aSEric W. Biederman i_uid = i_uid_read(inode);
396008cefc7aSEric W. Biederman i_gid = i_gid_read(inode);
3961ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) {
396208cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
396308cefc7aSEric W. Biederman raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
3964ac27a0ecSDave Kleikamp /*
3965ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels. Otherwise, old inodes get
3966ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact
3967ac27a0ecSDave Kleikamp */
3968ac27a0ecSDave Kleikamp if (!ei->i_dtime) {
3969ac27a0ecSDave Kleikamp raw_inode->i_uid_high =
397008cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_uid));
3971ac27a0ecSDave Kleikamp raw_inode->i_gid_high =
397208cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_gid));
3973ac27a0ecSDave Kleikamp } else {
3974ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0;
3975ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0;
3976ac27a0ecSDave Kleikamp }
3977ac27a0ecSDave Kleikamp } else {
397808cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
397908cefc7aSEric W.
Biederman raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 3980ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 3981ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 3982ac27a0ecSDave Kleikamp } 3983ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 3984ef7f3835SKalpak Shah 3985ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 3986ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 3987ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 3988ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 3989ef7f3835SKalpak Shah 39900fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 39910fc1b451SAneesh Kumar K.V goto out_brelse; 3992ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 3993353eb83cSTheodore Ts'o raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 39949b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 39959b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 3996a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 3997a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 39987973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 3999b71fc079SJan Kara if (ei->i_disksize != ext4_isize(raw_inode)) { 4000a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 4001b71fc079SJan Kara need_datasync = 1; 4002b71fc079SJan Kara } 4003ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 4004ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 4005617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4006617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4007617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 4008617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4009ac27a0ecSDave Kleikamp /* If this is the first large file 4010ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
4011ac27a0ecSDave Kleikamp */
4012617ba13bSMingming Cao err = ext4_journal_get_write_access(handle,
4013617ba13bSMingming Cao EXT4_SB(sb)->s_sbh);
4014ac27a0ecSDave Kleikamp if (err)
4015ac27a0ecSDave Kleikamp goto out_brelse;
4016617ba13bSMingming Cao ext4_update_dynamic_rev(sb);
4017617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb,
4018617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
40190390131bSFrank Mayhar ext4_handle_sync(handle);
4020b50924c2SArtem Bityutskiy err = ext4_handle_dirty_super(handle, sb);
4021ac27a0ecSDave Kleikamp }
4022ac27a0ecSDave Kleikamp }
4023ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4024ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4025ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) {
4026ac27a0ecSDave Kleikamp raw_inode->i_block[0] =
4027ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev));
4028ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0;
4029ac27a0ecSDave Kleikamp } else {
4030ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0;
4031ac27a0ecSDave Kleikamp raw_inode->i_block[1] =
4032ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev));
4033ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0;
4034ac27a0ecSDave Kleikamp }
4035f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) {
4036de9a55b8STheodore Ts'o for (block = 0; block < EXT4_N_BLOCKS; block++)
4037ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block];
4038f19d5870STao Ma }
4039ac27a0ecSDave Kleikamp
404025ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
404125ec56b5SJean Noel Cordenner if (ei->i_extra_isize) {
404225ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
404325ec56b5SJean Noel Cordenner raw_inode->i_version_hi =
404425ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32);
4045ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
404625ec56b5SJean Noel Cordenner }
404725ec56b5SJean Noel Cordenner
4048814525f4SDarrick J. Wong ext4_inode_csum_set(inode, raw_inode, ei);
4049814525f4SDarrick J. Wong
40500390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
405173b50c1cSCurt Wohlgemuth rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4052ac27a0ecSDave Kleikamp if (!err)
4053ac27a0ecSDave Kleikamp err = rc;
405419f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4055ac27a0ecSDave Kleikamp
4056b71fc079SJan Kara ext4_update_inode_fsync_trans(handle, inode, need_datasync);
4057ac27a0ecSDave Kleikamp out_brelse:
4058ac27a0ecSDave Kleikamp brelse(bh);
4059617ba13bSMingming Cao ext4_std_error(inode->i_sb, err);
4060ac27a0ecSDave Kleikamp return err;
4061ac27a0ecSDave Kleikamp }
4062ac27a0ecSDave Kleikamp
4063ac27a0ecSDave Kleikamp /*
4064617ba13bSMingming Cao * ext4_write_inode()
4065ac27a0ecSDave Kleikamp *
4066ac27a0ecSDave Kleikamp * We are called from a few places:
4067ac27a0ecSDave Kleikamp *
4068ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files.
4069ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running
40704907cb7bSAnatol Pomozov * transaction to commit.
4071ac27a0ecSDave Kleikamp *
4072ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such.
4073ac27a0ecSDave Kleikamp * We wait on commit, if told to.
4074ac27a0ecSDave Kleikamp *
4075ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true)
4076ac27a0ecSDave Kleikamp * Here we simply return.
We can't afford to block kswapd on the
4077ac27a0ecSDave Kleikamp * journal commit.
4078ac27a0ecSDave Kleikamp *
4079ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything,
4080ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in
4081617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
4082ac27a0ecSDave Kleikamp * knfsd.
4083ac27a0ecSDave Kleikamp *
4084ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the
4085ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in
4086ac27a0ecSDave Kleikamp * which we are interested.
4087ac27a0ecSDave Kleikamp *
4088ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code:
4089ac27a0ecSDave Kleikamp *
4090ac27a0ecSDave Kleikamp * mark_inode_dirty(inode)
4091ac27a0ecSDave Kleikamp * stuff();
4092ac27a0ecSDave Kleikamp * inode->i_size = expr;
4093ac27a0ecSDave Kleikamp *
4094ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while
4095ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode
4096ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list.
4097ac27a0ecSDave Kleikamp */
4098a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4099ac27a0ecSDave Kleikamp {
410091ac6f43SFrank Mayhar int err;
410091ac6f43SFrank Mayhar
4102ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC)
4103ac27a0ecSDave Kleikamp return 0;
4104ac27a0ecSDave Kleikamp
410591ac6f43SFrank Mayhar if (EXT4_SB(inode->i_sb)->s_journal) {
4106617ba13bSMingming Cao if (ext4_journal_current_handle()) {
4107b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4108ac27a0ecSDave Kleikamp dump_stack();
4109ac27a0ecSDave Kleikamp return -EIO;
4110ac27a0ecSDave Kleikamp }
4111ac27a0ecSDave Kleikamp
4112a9185b41SChristoph Hellwig if (wbc->sync_mode != WB_SYNC_ALL)
4113ac27a0ecSDave Kleikamp return 0;
4114ac27a0ecSDave Kleikamp
411591ac6f43SFrank Mayhar err = ext4_force_commit(inode->i_sb);
411691ac6f43SFrank Mayhar } else {
411791ac6f43SFrank Mayhar struct ext4_iloc iloc;
411891ac6f43SFrank Mayhar
41198b472d73SCurt Wohlgemuth err = __ext4_get_inode_loc(inode, &iloc, 0);
412091ac6f43SFrank Mayhar if (err)
412191ac6f43SFrank Mayhar return err;
4122a9185b41SChristoph Hellwig if (wbc->sync_mode == WB_SYNC_ALL)
4123830156c7SFrank Mayhar sync_dirty_buffer(iloc.bh);
4124830156c7SFrank Mayhar if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4125c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4126c398eda0STheodore Ts'o "IO error syncing inode");
4127830156c7SFrank Mayhar err = -EIO;
4128830156c7SFrank Mayhar }
4129fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh);
413091ac6f43SFrank Mayhar }
413191ac6f43SFrank Mayhar return err;
4132ac27a0ecSDave Kleikamp }
4133ac27a0ecSDave Kleikamp
4134ac27a0ecSDave Kleikamp /*
413553e87268SJan Kara * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
413653e87268SJan Kara * buffers that are attached to a page straddling i_size and are undergoing
413753e87268SJan Kara * commit. In that case we have to wait for commit to finish and try again.
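 * (Illustrative: a page straddling i_size whose buffers belong to the
 * committing transaction returns -EBUSY from the invalidate; we then wait
 * for that transaction's commit and retry.)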
413853e87268SJan Kara */
413953e87268SJan Kara static void ext4_wait_for_tail_page_commit(struct inode *inode)
414053e87268SJan Kara {
414153e87268SJan Kara struct page *page;
414253e87268SJan Kara unsigned offset;
414353e87268SJan Kara journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
414453e87268SJan Kara tid_t commit_tid = 0;
414553e87268SJan Kara int ret;
414653e87268SJan Kara
414753e87268SJan Kara offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
414853e87268SJan Kara /*
414953e87268SJan Kara * All buffers in the last page remain valid? Then there's nothing to
415053e87268SJan Kara * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
415153e87268SJan Kara * blocksize case
415253e87268SJan Kara */
415353e87268SJan Kara if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
415453e87268SJan Kara return;
415553e87268SJan Kara while (1) {
415653e87268SJan Kara page = find_lock_page(inode->i_mapping,
415753e87268SJan Kara inode->i_size >> PAGE_CACHE_SHIFT);
415853e87268SJan Kara if (!page)
415953e87268SJan Kara return;
416053e87268SJan Kara ret = __ext4_journalled_invalidatepage(page, offset);
416153e87268SJan Kara unlock_page(page);
416253e87268SJan Kara page_cache_release(page);
416353e87268SJan Kara if (ret != -EBUSY)
416453e87268SJan Kara return;
416553e87268SJan Kara commit_tid = 0;
416653e87268SJan Kara read_lock(&journal->j_state_lock);
416753e87268SJan Kara if (journal->j_committing_transaction)
416853e87268SJan Kara commit_tid = journal->j_committing_transaction->t_tid;
416953e87268SJan Kara read_unlock(&journal->j_state_lock);
417053e87268SJan Kara if (commit_tid)
417153e87268SJan Kara jbd2_log_wait_commit(journal, commit_tid);
417253e87268SJan Kara }
417353e87268SJan Kara }
417453e87268SJan Kara
417553e87268SJan Kara /*
4176617ba13bSMingming Cao * ext4_setattr()
4177ac27a0ecSDave Kleikamp *
4178ac27a0ecSDave Kleikamp * Called from notify_change.
4179ac27a0ecSDave Kleikamp *
4180ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as
4181ac27a0ecSDave Kleikamp * possible. In particular, we want to make sure that when the VFS
4182ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify
4183ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of
4184ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any
4185ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on
4186ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will
4187ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will
4188ac27a0ecSDave Kleikamp * leave these blocks visible to the user.)
4189ac27a0ecSDave Kleikamp *
4190678aaf48SJan Kara * Another thing we have to assure is that if we are in ordered mode
4191678aaf48SJan Kara * and the inode is still attached to the committing transaction, we must
4192678aaf48SJan Kara * start writeout of all the dirty pages which are being truncated.
4193678aaf48SJan Kara * This way we are sure that all the data written in the previous
4194678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under
4195678aaf48SJan Kara * writeback).
4196678aaf48SJan Kara *
4197678aaf48SJan Kara * Called with inode->i_mutex down.
4198ac27a0ecSDave Kleikamp */ 4199617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr) 4200ac27a0ecSDave Kleikamp { 4201ac27a0ecSDave Kleikamp struct inode *inode = dentry->d_inode; 4202ac27a0ecSDave Kleikamp int error, rc = 0; 42033d287de3SDmitry Monakhov int orphan = 0; 4204ac27a0ecSDave Kleikamp const unsigned int ia_valid = attr->ia_valid; 4205ac27a0ecSDave Kleikamp 4206ac27a0ecSDave Kleikamp error = inode_change_ok(inode, attr); 4207ac27a0ecSDave Kleikamp if (error) 4208ac27a0ecSDave Kleikamp return error; 4209ac27a0ecSDave Kleikamp 421012755627SDmitry Monakhov if (is_quota_modification(inode, attr)) 4211871a2931SChristoph Hellwig dquot_initialize(inode); 421208cefc7aSEric W. Biederman if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 421308cefc7aSEric W. Biederman (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 4214ac27a0ecSDave Kleikamp handle_t *handle; 4215ac27a0ecSDave Kleikamp 4216ac27a0ecSDave Kleikamp /* (user+group)*(old+new) structure, inode write (sb, 4217ac27a0ecSDave Kleikamp * inode block, ? - but truncate inode update has it) */ 42185aca07ebSDmitry Monakhov handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ 4219194074acSDmitry Monakhov EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); 4220ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4221ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4222ac27a0ecSDave Kleikamp goto err_out; 4223ac27a0ecSDave Kleikamp } 4224b43fa828SChristoph Hellwig error = dquot_transfer(inode, attr); 4225ac27a0ecSDave Kleikamp if (error) { 4226617ba13bSMingming Cao ext4_journal_stop(handle); 4227ac27a0ecSDave Kleikamp return error; 4228ac27a0ecSDave Kleikamp } 4229ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4230ac27a0ecSDave Kleikamp * one transaction */ 4231ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4232ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4233ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4234ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4235617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4236617ba13bSMingming Cao ext4_journal_stop(handle); 4237ac27a0ecSDave Kleikamp } 4238ac27a0ecSDave Kleikamp 4239e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4240562c72aaSChristoph Hellwig 424112e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4242e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4243e2b46574SEric Sandeen 42440c095c7fSTheodore Ts'o if (attr->ia_size > sbi->s_bitmap_maxbytes) 42450c095c7fSTheodore Ts'o return -EFBIG; 4246e2b46574SEric Sandeen } 4247e2b46574SEric Sandeen } 4248e2b46574SEric Sandeen 4249ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4250c8d46e41SJiaying Zhang attr->ia_valid & ATTR_SIZE && 4251072bd7eaSTheodore Ts'o (attr->ia_size < inode->i_size)) { 4252ac27a0ecSDave Kleikamp handle_t *handle; 4253ac27a0ecSDave Kleikamp 4254617ba13bSMingming Cao handle = ext4_journal_start(inode, 3); 4255ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4256ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4257ac27a0ecSDave Kleikamp goto err_out; 4258ac27a0ecSDave Kleikamp } 42593d287de3SDmitry Monakhov if (ext4_handle_valid(handle)) { 4260617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 42613d287de3SDmitry Monakhov orphan = 1; 42623d287de3SDmitry Monakhov } 4263617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 4264617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 
4265ac27a0ecSDave Kleikamp if (!error) 4266ac27a0ecSDave Kleikamp error = rc; 4267617ba13bSMingming Cao ext4_journal_stop(handle); 4268678aaf48SJan Kara 4269678aaf48SJan Kara if (ext4_should_order_data(inode)) { 4270678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode, 4271678aaf48SJan Kara attr->ia_size); 4272678aaf48SJan Kara if (error) { 4273678aaf48SJan Kara /* Do as much error cleanup as possible */ 4274678aaf48SJan Kara handle = ext4_journal_start(inode, 3); 4275678aaf48SJan Kara if (IS_ERR(handle)) { 4276678aaf48SJan Kara ext4_orphan_del(NULL, inode); 4277678aaf48SJan Kara goto err_out; 4278678aaf48SJan Kara } 4279678aaf48SJan Kara ext4_orphan_del(handle, inode); 42803d287de3SDmitry Monakhov orphan = 0; 4281678aaf48SJan Kara ext4_journal_stop(handle); 4282678aaf48SJan Kara goto err_out; 4283678aaf48SJan Kara } 4284678aaf48SJan Kara } 4285ac27a0ecSDave Kleikamp } 4286ac27a0ecSDave Kleikamp 4287072bd7eaSTheodore Ts'o if (attr->ia_valid & ATTR_SIZE) { 428853e87268SJan Kara if (attr->ia_size != inode->i_size) { 428953e87268SJan Kara loff_t oldsize = inode->i_size; 429053e87268SJan Kara 429153e87268SJan Kara i_size_write(inode, attr->ia_size); 429253e87268SJan Kara /* 429353e87268SJan Kara * Blocks are going to be removed from the inode. Wait 429453e87268SJan Kara * for dio in flight. Temporarily disable 429553e87268SJan Kara * dioread_nolock to prevent livelock. 429653e87268SJan Kara */ 42971b65007eSDmitry Monakhov if (orphan) { 429853e87268SJan Kara if (!ext4_should_journal_data(inode)) { 42991b65007eSDmitry Monakhov ext4_inode_block_unlocked_dio(inode); 43001c9114f9SDmitry Monakhov inode_dio_wait(inode); 43011b65007eSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode); 430253e87268SJan Kara } else 430353e87268SJan Kara ext4_wait_for_tail_page_commit(inode); 43041b65007eSDmitry Monakhov } 430553e87268SJan Kara /* 430653e87268SJan Kara * Truncate pagecache after we've waited for commit 430753e87268SJan Kara * in data=journal mode to make pages freeable. 430853e87268SJan Kara */ 430953e87268SJan Kara truncate_pagecache(inode, oldsize, inode->i_size); 43101c9114f9SDmitry Monakhov } 4311072bd7eaSTheodore Ts'o ext4_truncate(inode); 4312072bd7eaSTheodore Ts'o } 4313ac27a0ecSDave Kleikamp 43141025774cSChristoph Hellwig if (!rc) { 43151025774cSChristoph Hellwig setattr_copy(inode, attr); 43161025774cSChristoph Hellwig mark_inode_dirty(inode); 43171025774cSChristoph Hellwig } 43181025774cSChristoph Hellwig 43191025774cSChristoph Hellwig /* 43201025774cSChristoph Hellwig * If the call to ext4_truncate failed to get a transaction handle at 43211025774cSChristoph Hellwig * all, we need to clean up the in-core orphan list manually. 
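 * (Illustrative: if ext4_journal_start() failed, e.g. with -ENOSPC, the
 * inode can still be on the in-core orphan list; the check below removes
 * it as long as the inode is still linked.)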

int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
                 struct kstat *stat)
{
        struct inode *inode;
        unsigned long delalloc_blocks;

        inode = dentry->d_inode;
        generic_fillattr(inode, stat);

        /*
         * We can't update i_blocks if the block allocation is delayed;
         * otherwise, if the system crashed before the real block
         * allocation was done, i_blocks would be inconsistent with the
         * on-disk file blocks.
         * We always keep i_blocks updated together with the real
         * allocation, but to avoid confusing userspace, stat returns
         * the block count including the delayed allocation blocks for
         * this file.
         */
        delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
                                   EXT4_I(inode)->i_reserved_data_blocks);

        stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
        return 0;
}
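
/*
 * Worked example (illustrative, not part of the original file) for the
 * conversion in ext4_getattr() above: stat->blocks is in 512-byte
 * sectors, so with a 4KiB block size (s_blocksize_bits == 12) each
 * delayed-allocation block contributes (1 << 12) >> 9 == 8 sectors.
 */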

static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return ext4_ind_trans_blocks(inode, nrblocks, chunk);
        return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks.  In the
 * worst case, the index blocks spread over different block groups.
 *
 * If the data blocks are discontiguous, they may spread over different
 * block groups too.  Even if they are contiguous, with flexbg they
 * could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
        ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
        int gdpblocks;
        int idxblocks;
        int ret = 0;

        /*
         * How many index blocks do we need to touch to modify nrblocks?
         * The "chunk" flag indicates whether the nrblocks are physically
         * contiguous on disk.
         *
         * For Direct IO and fallocate, which call get_block to allocate
         * one single extent at a time, the "chunk" flag can be set.
         */
        idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);

        ret = idxblocks;

        /*
         * Now let's see how many group bitmaps and group descriptors
         * need to be accounted for.
         */
        groups = idxblocks;
        if (chunk)
                groups += 1;
        else
                groups += nrblocks;

        gdpblocks = groups;
        if (groups > ngroups)
                groups = ngroups;
        if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
                gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

        /* bitmaps and block group descriptor blocks */
        ret += groups + gdpblocks;

        /* Blocks for super block, inode, quota and xattr blocks */
        ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

        return ret;
}
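
/*
 * Worked example (illustrative, not part of the original file): for one
 * contiguous chunk (chunk == 1) with idxblocks == 3, we get
 * groups = 3 + 1 = 4, so the reservation is 3 (index blocks)
 * + 4 (bitmaps) + 4 (group descriptor blocks)
 * + EXT4_META_TRANS_BLOCKS(sb), assuming the filesystem has at least
 * four block groups and four group descriptor blocks.
 */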

/*
 * Calculate the total number of credits to reserve so that the
 * modification of a single page fits into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin().
 *
 * We need to consider the worst case, when one new block is
 * allocated per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
        int bpp = ext4_journal_blocks_per_page(inode);
        int ret;

        ret = ext4_meta_trans_blocks(inode, bpp, 0);

        /* Account for data blocks for journalled mode */
        if (ext4_should_journal_data(inode))
                ret += bpp;
        return ret;
}
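
/*
 * Illustrative usage (not part of the original file): callers size
 * their handle with this helper before dirtying a page, as
 * ext4_page_mkwrite() below does:
 *
 *	handle = ext4_journal_start(inode,
 *				    ext4_writepage_trans_blocks(inode));
 */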

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or anything else that calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO and
 * fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
        return ext4_meta_trans_blocks(inode, nrblocks, 1);
}
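
/*
 * Illustrative usage (a sketch of how a fallocate-style caller is
 * expected to size its transaction; details may differ per caller):
 *
 *	credits = ext4_chunk_trans_blocks(inode, max_blocks);
 *	handle = ext4_journal_start(inode, credits);
 */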

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
                         struct inode *inode, struct ext4_iloc *iloc)
{
        int err = 0;

        if (IS_I_VERSION(inode))
                inode_inc_iversion(inode);

        /* the do_update_inode consumes one bh->b_count */
        get_bh(iloc->bh);

        /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
        err = ext4_do_update_inode(handle, inode, iloc);
        put_bh(iloc->bh);
        return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
                         struct ext4_iloc *iloc)
{
        int err;

        err = ext4_get_inode_loc(inode, iloc);
        if (!err) {
                BUFFER_TRACE(iloc->bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, iloc->bh);
                if (err) {
                        brelse(iloc->bh);
                        iloc->bh = NULL;
                }
        }
        ext4_std_error(inode->i_sb, err);
        return err;
}
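
/*
 * Illustrative sketch (not part of the original file) of the usual
 * update pattern built on the two helpers above; this is essentially
 * what ext4_mark_inode_dirty() below boils down to:
 *
 *	struct ext4_iloc iloc;
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... modify the in-core inode here ...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 */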

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
                                   unsigned int new_extra_isize,
                                   struct ext4_iloc iloc,
                                   handle_t *handle)
{
        struct ext4_inode *raw_inode;
        struct ext4_xattr_ibody_header *header;

        if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
                return 0;

        raw_inode = ext4_raw_inode(&iloc);

        header = IHDR(inode, raw_inode);

        /* No extended attributes present */
        if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
            header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
                memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
                       new_extra_isize);
                EXT4_I(inode)->i_extra_isize = new_extra_isize;
                return 0;
        }

        /* try to expand with EAs present */
        return ext4_expand_extra_isize_ea(inode, new_extra_isize,
                                          raw_inode, handle);
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
        struct ext4_iloc iloc;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        static unsigned int mnt_count;
        int err, ret;

        might_sleep();
        trace_ext4_mark_inode_dirty(inode, _RET_IP_);
        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (ext4_handle_valid(handle) &&
            EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
            !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
                /*
                 * We need extra buffer credits since we may write into the EA
                 * block with this same handle.  If journal_extend fails, then
                 * it will only result in a minor loss of functionality for
                 * that inode.  If this is felt to be critical, then e2fsck
                 * should be run to force a large enough s_min_extra_isize.
                 */
                if ((jbd2_journal_extend(handle,
                             EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
                        ret = ext4_expand_extra_isize(inode,
                                                      sbi->s_want_extra_isize,
                                                      iloc, handle);
                        if (ret) {
                                ext4_set_inode_state(inode,
                                                     EXT4_STATE_NO_EXPAND);
                                if (mnt_count !=
                                      le16_to_cpu(sbi->s_es->s_mnt_count)) {
                                        ext4_warning(inode->i_sb,
                                        "Unable to expand inode %lu. Delete"
                                        " some EAs or run e2fsck.",
                                                     inode->i_ino);
                                        mnt_count =
                                          le16_to_cpu(sbi->s_es->s_mnt_count);
                                }
                        }
                }
        }
        if (!err)
                err = ext4_mark_iloc_dirty(handle, inode, &iloc);
        return err;
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
        handle_t *handle;

        handle = ext4_journal_start(inode, 2);
        if (IS_ERR(handle))
                goto out;

        ext4_mark_inode_dirty(handle, inode);

        ext4_journal_stop(handle);
out:
        return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
        struct ext4_iloc iloc;

        int err = 0;
        if (handle) {
                err = ext4_get_inode_loc(inode, &iloc);
                if (!err) {
                        BUFFER_TRACE(iloc.bh, "get_write_access");
                        err = jbd2_journal_get_write_access(handle, iloc.bh);
                        if (!err)
                                err = ext4_handle_dirty_metadata(handle,
                                                                 NULL,
                                                                 iloc.bh);
                        brelse(iloc.bh);
                }
        }
        ext4_std_error(inode->i_sb, err);
        return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
        journal_t *journal;
        handle_t *handle;
        int err;

        /*
         * We have to be very careful here: changing a data block's
         * journaling status dynamically is dangerous.  If we write a
         * data block to the journal, change the status and then delete
         * that block, we risk forgetting to revoke the old log record
         * from the journal and so a subsequent replay can corrupt data.
         * So, first we make sure that the journal is empty and that
         * nobody is changing anything.
         */

        journal = EXT4_JOURNAL(inode);
        if (!journal)
                return 0;
        if (is_journal_aborted(journal))
                return -EROFS;
        /* We have to allocate physical blocks for delalloc blocks
         * before flushing the journal; otherwise delalloc blocks cannot
         * be allocated any more.  Even worse, a truncate on delalloc
         * blocks could trigger a BUG by flushing delalloc blocks in the
         * journal.  There are no delalloc blocks in non-journal data
         * mode.
         */
        if (val && test_opt(inode->i_sb, DELALLOC)) {
                err = ext4_alloc_da_blocks(inode);
                if (err < 0)
                        return err;
        }

        /* Wait for all existing dio workers */
        ext4_inode_block_unlocked_dio(inode);
        inode_dio_wait(inode);

        jbd2_journal_lock_updates(journal);

        /*
         * OK, there are no updates running now, and all cached data is
         * synced to disk.  We are now in a completely consistent state
         * which doesn't have anything in the journal, and we know that
         * no filesystem updates are running, so it is safe to modify
         * the inode's in-core data-journaling state flag now.
         */

        if (val)
                ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
        else {
                jbd2_journal_flush(journal);
                ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
        }
        ext4_set_aops(inode);

        jbd2_journal_unlock_updates(journal);
        ext4_inode_resume_unlocked_dio(inode);

        /* Finally we can mark the inode as dirty. */

        handle = ext4_journal_start(inode, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        err = ext4_mark_inode_dirty(handle, inode);
        ext4_handle_sync(handle);
        ext4_journal_stop(handle);
        ext4_std_error(inode->i_sb, err);

        return err;
}
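
/*
 * Illustrative note (a conceptual sketch, not the literal handler):
 * this function is reached from the EXT4_IOC_SETFLAGS ioctl path when
 * userspace toggles the journal-data flag, roughly:
 *
 *	err = ext4_change_inode_journal_flag(inode,
 *			(flags & EXT4_JOURNAL_DATA_FL) != 0);
 *
 * after the real ioctl handler has done its permission and mount checks.
 */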

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
        return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        loff_t size;
        unsigned long len;
        int ret;
        struct file *file = vma->vm_file;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        handle_t *handle;
        get_block_t *get_block;
        int retries = 0;

        sb_start_pagefault(inode->i_sb);
        file_update_time(vma->vm_file);
        /* Delalloc case is easy... */
        if (test_opt(inode->i_sb, DELALLOC) &&
            !ext4_should_journal_data(inode) &&
            !ext4_nonda_switch(inode->i_sb)) {
                do {
                        ret = __block_page_mkwrite(vma, vmf,
                                                   ext4_da_get_block_prep);
                } while (ret == -ENOSPC &&
                         ext4_should_retry_alloc(inode->i_sb, &retries));
                goto out_ret;
        }

        lock_page(page);
        size = i_size_read(inode);
        /* Page got truncated from under us? */
        if (page->mapping != mapping || page_offset(page) > size) {
                unlock_page(page);
                ret = VM_FAULT_NOPAGE;
                goto out;
        }

        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
        else
                len = PAGE_CACHE_SIZE;
        /*
         * Return if we have all the buffers mapped.  This avoids the need
         * to do journal_start/journal_stop, which can block and take a
         * long time.
         */
        if (page_has_buffers(page)) {
                if (!ext4_walk_page_buffers(NULL, page_buffers(page),
                                            0, len, NULL,
                                            ext4_bh_unmapped)) {
                        /* Wait so that we don't change page under IO */
                        wait_on_page_writeback(page);
                        ret = VM_FAULT_LOCKED;
                        goto out;
                }
        }
        unlock_page(page);
        /* OK, we need to fill the hole... */
        if (ext4_should_dioread_nolock(inode))
                get_block = ext4_get_block_write;
        else
                get_block = ext4_get_block;
retry_alloc:
        handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = VM_FAULT_SIGBUS;
                goto out;
        }
        ret = __block_page_mkwrite(vma, vmf, get_block);
        if (!ret && ext4_should_journal_data(inode)) {
                if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
                          PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
                        unlock_page(page);
                        ret = VM_FAULT_SIGBUS;
                        ext4_journal_stop(handle);
                        goto out;
                }
                ext4_set_inode_state(inode, EXT4_STATE_JDATA);
        }
        ext4_journal_stop(handle);
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry_alloc;
out_ret:
        ret = block_page_mkwrite_return(ret);
out:
        sb_end_pagefault(inode->i_sb);
        return ret;
}
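
/*
 * Illustrative sketch (not part of the original file), assuming the
 * standard wiring in fs/ext4/file.c: ext4_page_mkwrite() is hooked up
 * as the ->page_mkwrite handler, e.g.
 *
 *	static const struct vm_operations_struct ext4_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.page_mkwrite	= ext4_page_mkwrite,
 *	};
 *
 * so it runs when a shared-writable mapping first dirties a page.
 */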