/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}
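/*
 * Note on the checksum layout, as implemented by the helpers below: the
 * low 16 bits of the inode checksum live in i_checksum_lo; when the
 * on-disk inode is large enough that i_checksum_hi fits, the high 16
 * bits are stored there as well, otherwise only the low 16 bits are
 * stored and compared.
 */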
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
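/*
 * data=ordered truncation helper: make jbd2 aware of the truncate so
 * that outstanding ordered data writes are dealt with before the
 * truncating transaction commits.  This is a no-op when the file was
 * never opened for writing; see the comment in the body.
 */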
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in
		 * the journal.  So although mm thinks everything is clean
		 * and ready for reaping, the inode might still have some
		 * pages to write in the running transaction or waiting to
		 * be checkpointed.  Thus calling
		 * jbd2_journal_invalidatepage() (via truncate_inode_pages())
		 * to discard these buffers can cause data loss.  Also, even
		 * if we did not discard these buffers, we would have no way
		 * to find them after the inode is reaped, so a user could
		 * see stale data if they try to read them before the
		 * transaction is checkpointed.  So be careful and force
		 * everything to disk here...  We use ei->i_datasync_tid to
		 * store the newest transaction containing inode's data.
		 *
		 * Note that directories do not have this problem because
		 * they don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	/*
	 * Protect us against freezing - iput() caller didn't have to have
	 * any protection against it
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a block located at @lblock
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
			 "with only %d reserved metadata blocks\n", __func__,
			 inode->i_ino, ei->i_allocated_meta_blocks,
			 ei->i_reserved_meta_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem and allocates
 * blocks, stores the allocated blocks in the result buffer head and
 * marks it mapped.
 *
 * If the file is extent-based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it makes sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been
 * allocated); in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
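/*
 * A minimal lookup-only sketch (create == 0), mirroring what
 * ext4_getblk() below does for a single logical block:
 *
 *	struct ext4_map_blocks map;
 *	int ret;
 *
 *	map.m_lblk = block;
 *	map.m_len = 1;
 *	ret = ext4_map_blocks(handle, inode, &map, 0);
 *	if (ret > 0)
 *		... map.m_pblk holds the physical block and map.m_flags
 *		    the EXT4_MAP_* state of the mapping ...
 */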
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret;
		if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
			/* delayed alloc may be allocated by fallocate and
			 * converted to initialized by direct I/O.
			 * We need to handle the delayed extent here.
			 */
			down_write((&EXT4_I(inode)->i_data_sem));
			goto delayed_mapped;
		}
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0 and the
	 * buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.
	 * We need to clear this flag now that we are committed to
	 * convert all or part of the uninitialized extent to be an
	 * initialized extent.  This is because we need to avoid the
	 * combination of BH_Unwritten and BH_Mapped flags being
	 * simultaneously set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * Allocating new blocks and/or writing to an uninitialized
	 * extent will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from the delayed allocation writeout path
	 * we have already reserved fs blocks for allocation;
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We
		 * don't support fallocate for non-extent files, so we can
		 * update reserve space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
			int ret;
delayed_mapped:
			/* delayed allocation blocks have been allocated */
			ret = ext4_es_remove_extent(inode, map->m_lblk,
						    map->m_len);
			if (ret < 0)
				retval = ret;
		}
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
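/*
 * ext4_get_block() is the classic get_block_t callback; in this file it
 * is handed to __block_write_begin() from ext4_write_begin() below, which
 * maps one page's worth of buffers at a time:
 *
 *	ret = __block_write_begin(page, pos, len, ext4_get_block);
 */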
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	/* ensure we send some value back into *errp */
	*errp = 0;

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh)) {
		*errp = -ENOMEM;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
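/*
 * ext4_walk_page_buffers() applies fn to every buffer_head of the page
 * that overlaps [from, to).  In this file it is used with
 * do_journal_get_write_access() from ext4_write_begin() and with
 * write_end_fn() from ext4_journalled_write_end().
 */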
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do
 * the whole page.  So we won't block on the journal in that case, which
 * is good, because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
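/*
 * ext4_write_begin() below is the ->write_begin callback paired with the
 * write_end variants later in this file.  It reserves
 * ext4_writepage_trans_blocks() + 1 journal credits (the extra block is
 * for the orphan list addition should the write fail partway) and, on
 * failure, truncates any blocks already instantiated beyond i_size.
 */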
static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
				       struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			goto out;
		if (ret == 1) {
			ret = 0;
			goto out;
		}
	}

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}

	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	if (ext4_has_inline_data(inode))
		copied = ext4_write_inline_data_end(inode, pos, len,
						    copied, page);
	else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
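/*
 * The three write_end callbacks below share ext4_generic_write_end() and
 * differ only in how each journaling mode treats the data: ordered first
 * queues the data via ext4_jbd2_file_inode(), writeback does no data
 * journaling at all, and journalled pushes the data buffers through the
 * journal with write_end_fn().
 */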
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * less, we will have blocks allocated outside
			 * inode->i_size, so truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size, so truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
ret : copied; 1104ac27a0ecSDave Kleikamp } 1105ac27a0ecSDave Kleikamp 1106bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file, 1107bfc1af65SNick Piggin struct address_space *mapping, 1108bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1109bfc1af65SNick Piggin struct page *page, void *fsdata) 1110ac27a0ecSDave Kleikamp { 1111617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1112bfc1af65SNick Piggin struct inode *inode = mapping->host; 1113ac27a0ecSDave Kleikamp int ret = 0, ret2; 1114ac27a0ecSDave Kleikamp int partial = 0; 1115bfc1af65SNick Piggin unsigned from, to; 1116cf17fea6SAneesh Kumar K.V loff_t new_i_size; 1117ac27a0ecSDave Kleikamp 11189bffad1eSTheodore Ts'o trace_ext4_journalled_write_end(inode, pos, len, copied); 1119bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1120bfc1af65SNick Piggin to = from + len; 1121bfc1af65SNick Piggin 1122441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1123441c8508SCurt Wohlgemuth 11243fdcfb66STao Ma if (ext4_has_inline_data(inode)) 11253fdcfb66STao Ma copied = ext4_write_inline_data_end(inode, pos, len, 11263fdcfb66STao Ma copied, page); 11273fdcfb66STao Ma else { 1128bfc1af65SNick Piggin if (copied < len) { 1129bfc1af65SNick Piggin if (!PageUptodate(page)) 1130bfc1af65SNick Piggin copied = 0; 1131bfc1af65SNick Piggin page_zero_new_buffers(page, from+copied, to); 1132bfc1af65SNick Piggin } 1133ac27a0ecSDave Kleikamp 1134f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_buffers(page), from, 1135bfc1af65SNick Piggin to, &partial, write_end_fn); 1136ac27a0ecSDave Kleikamp if (!partial) 1137ac27a0ecSDave Kleikamp SetPageUptodate(page); 11383fdcfb66STao Ma } 1139cf17fea6SAneesh Kumar K.V new_i_size = pos + copied; 1140cf17fea6SAneesh Kumar K.V if (new_i_size > inode->i_size) 1141bfc1af65SNick Piggin i_size_write(inode, pos+copied); 114219f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 11432d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1144cf17fea6SAneesh Kumar K.V if (new_i_size > EXT4_I(inode)->i_disksize) { 1145cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_i_size); 1146617ba13bSMingming Cao ret2 = ext4_mark_inode_dirty(handle, inode); 1147ac27a0ecSDave Kleikamp if (!ret) 1148ac27a0ecSDave Kleikamp ret = ret2; 1149ac27a0ecSDave Kleikamp } 1150bfc1af65SNick Piggin 1151cf108bcaSJan Kara unlock_page(page); 1152f8514083SAneesh Kumar K.V page_cache_release(page); 1153ffacfa7aSJan Kara if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1154f8514083SAneesh Kumar K.V /* if we have allocated more blocks and copied 1155f8514083SAneesh Kumar K.V * less. We will have blocks allocated outside 1156f8514083SAneesh Kumar K.V * inode->i_size. So truncate them 1157f8514083SAneesh Kumar K.V */ 1158f8514083SAneesh Kumar K.V ext4_orphan_add(handle, inode); 1159f8514083SAneesh Kumar K.V 1160617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1161ac27a0ecSDave Kleikamp if (!ret) 1162ac27a0ecSDave Kleikamp ret = ret2; 1163f8514083SAneesh Kumar K.V if (pos + len > inode->i_size) { 1164b9a4207dSJan Kara ext4_truncate_failed_write(inode); 1165f8514083SAneesh Kumar K.V /* 1166ffacfa7aSJan Kara * If truncate failed early the inode might still be 1167f8514083SAneesh Kumar K.V * on the orphan list; we need to make sure the inode 1168f8514083SAneesh Kumar K.V * is removed from the orphan list in that case. 
1169f8514083SAneesh Kumar K.V */ 1170f8514083SAneesh Kumar K.V if (inode->i_nlink) 1171f8514083SAneesh Kumar K.V ext4_orphan_del(NULL, inode); 1172f8514083SAneesh Kumar K.V } 1173bfc1af65SNick Piggin 1174bfc1af65SNick Piggin return ret ? ret : copied; 1175ac27a0ecSDave Kleikamp } 1176d2a17637SMingming Cao 11779d0be502STheodore Ts'o /* 11787b415bf6SAditya Kali * Reserve a single cluster located at lblock 11799d0be502STheodore Ts'o */ 118001f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) 1181d2a17637SMingming Cao { 1182030ba6bcSAneesh Kumar K.V int retries = 0; 1183d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 11840637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 11857b415bf6SAditya Kali unsigned int md_needed; 11865dd4056dSChristoph Hellwig int ret; 118703179fe9STheodore Ts'o ext4_lblk_t save_last_lblock; 118803179fe9STheodore Ts'o int save_len; 1189d2a17637SMingming Cao 119060e58e0fSMingming Cao /* 119172b8ab9dSEric Sandeen * We will charge metadata quota at writeout time; this saves 119272b8ab9dSEric Sandeen * us from metadata over-estimation, though we may go over by 119372b8ab9dSEric Sandeen * a small amount in the end. Here we just reserve for data. 119460e58e0fSMingming Cao */ 11957b415bf6SAditya Kali ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1)); 11965dd4056dSChristoph Hellwig if (ret) 11975dd4056dSChristoph Hellwig return ret; 119803179fe9STheodore Ts'o 119903179fe9STheodore Ts'o /* 120003179fe9STheodore Ts'o * recalculate the amount of metadata blocks to reserve 120103179fe9STheodore Ts'o * in order to allocate nrblocks; 120203179fe9STheodore Ts'o * worst case is one extent per block 120303179fe9STheodore Ts'o */ 120403179fe9STheodore Ts'o repeat: 120503179fe9STheodore Ts'o spin_lock(&ei->i_block_reservation_lock); 120603179fe9STheodore Ts'o /* 120703179fe9STheodore Ts'o * ext4_calc_metadata_amount() has side effects, which we have 120803179fe9STheodore Ts'o * to be prepared to undo if we fail to claim space. 120903179fe9STheodore Ts'o */ 121003179fe9STheodore Ts'o save_len = ei->i_da_metadata_calc_len; 121103179fe9STheodore Ts'o save_last_lblock = ei->i_da_metadata_calc_last_lblock; 121203179fe9STheodore Ts'o md_needed = EXT4_NUM_B2C(sbi, 121303179fe9STheodore Ts'o ext4_calc_metadata_amount(inode, lblock)); 121403179fe9STheodore Ts'o trace_ext4_da_reserve_space(inode, md_needed); 121503179fe9STheodore Ts'o 121672b8ab9dSEric Sandeen /* 121772b8ab9dSEric Sandeen * We do still charge estimated metadata to the sb though; 121872b8ab9dSEric Sandeen * we cannot afford to run out of free blocks.
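 *
 * As a rough worked example (illustrative numbers, assuming one
 * cluster per block): reserving a single data block whose mapping
 * may require one new extent-tree block gives md_needed == 1, so
 * the ext4_claim_free_clusters() call below asks for
 * md_needed + 1 == 2 clusters in one atomic step.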
121972b8ab9dSEric Sandeen */ 1220e7d5f315STheodore Ts'o if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) { 122103179fe9STheodore Ts'o ei->i_da_metadata_calc_len = save_len; 122203179fe9STheodore Ts'o ei->i_da_metadata_calc_last_lblock = save_last_lblock; 122303179fe9STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 1224030ba6bcSAneesh Kumar K.V if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1225030ba6bcSAneesh Kumar K.V yield(); 1226030ba6bcSAneesh Kumar K.V goto repeat; 1227030ba6bcSAneesh Kumar K.V } 122803179fe9STheodore Ts'o dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1229d2a17637SMingming Cao return -ENOSPC; 1230d2a17637SMingming Cao } 12319d0be502STheodore Ts'o ei->i_reserved_data_blocks++; 12320637c6f4STheodore Ts'o ei->i_reserved_meta_blocks += md_needed; 12330637c6f4STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 123439bc680aSDmitry Monakhov 1235d2a17637SMingming Cao return 0; /* success */ 1236d2a17637SMingming Cao } 1237d2a17637SMingming Cao 123812219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1239d2a17637SMingming Cao { 1240d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 12410637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 1242d2a17637SMingming Cao 1243cd213226SMingming Cao if (!to_free) 1244cd213226SMingming Cao return; /* Nothing to release, exit */ 1245cd213226SMingming Cao 1246d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1247cd213226SMingming Cao 12485a58ec87SLi Zefan trace_ext4_da_release_space(inode, to_free); 12490637c6f4STheodore Ts'o if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1250cd213226SMingming Cao /* 12510637c6f4STheodore Ts'o * if there aren't enough reserved blocks, then the 12520637c6f4STheodore Ts'o * counter is messed up somewhere. Since this 12530637c6f4STheodore Ts'o * function is called from invalidate page, it's 12540637c6f4STheodore Ts'o * harmless to return without any action. 1255cd213226SMingming Cao */ 12560637c6f4STheodore Ts'o ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: " 12570637c6f4STheodore Ts'o "ino %lu, to_free %d with only %d reserved " 12581084f252STheodore Ts'o "data blocks", inode->i_ino, to_free, 12590637c6f4STheodore Ts'o ei->i_reserved_data_blocks); 12600637c6f4STheodore Ts'o WARN_ON(1); 12610637c6f4STheodore Ts'o to_free = ei->i_reserved_data_blocks; 12620637c6f4STheodore Ts'o } 12630637c6f4STheodore Ts'o ei->i_reserved_data_blocks -= to_free; 12640637c6f4STheodore Ts'o 12650637c6f4STheodore Ts'o if (ei->i_reserved_data_blocks == 0) { 12660637c6f4STheodore Ts'o /* 12670637c6f4STheodore Ts'o * We can release all of the reserved metadata blocks 12680637c6f4STheodore Ts'o * only when we have written all of the delayed 12690637c6f4STheodore Ts'o * allocation blocks. 12707b415bf6SAditya Kali * Note that in case of bigalloc, i_reserved_meta_blocks, 12717b415bf6SAditya Kali * i_reserved_data_blocks, etc. refer to number of clusters. 
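 *
 * For example (illustrative numbers): with bigalloc and
 * s_cluster_ratio == 16, sixteen written-out delalloc blocks that
 * share one cluster consume a single unit of i_reserved_data_blocks,
 * and the EXT4_C2B() conversion below scales that one cluster back
 * to sixteen blocks when the quota reservation is released.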
12720637c6f4STheodore Ts'o */ 127357042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 127472b8ab9dSEric Sandeen ei->i_reserved_meta_blocks); 1275ee5f4d9cSTheodore Ts'o ei->i_reserved_meta_blocks = 0; 12769d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 1277cd213226SMingming Cao } 1278cd213226SMingming Cao 127972b8ab9dSEric Sandeen /* update fs dirty data blocks counter */ 128057042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); 1281d2a17637SMingming Cao 1282d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 128360e58e0fSMingming Cao 12847b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); 1285d2a17637SMingming Cao } 1286d2a17637SMingming Cao 1287d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1288d2a17637SMingming Cao unsigned long offset) 1289d2a17637SMingming Cao { 1290d2a17637SMingming Cao int to_release = 0; 1291d2a17637SMingming Cao struct buffer_head *head, *bh; 1292d2a17637SMingming Cao unsigned int curr_off = 0; 12937b415bf6SAditya Kali struct inode *inode = page->mapping->host; 12947b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 12957b415bf6SAditya Kali int num_clusters; 129651865fdaSZheng Liu ext4_fsblk_t lblk; 1297d2a17637SMingming Cao 1298d2a17637SMingming Cao head = page_buffers(page); 1299d2a17637SMingming Cao bh = head; 1300d2a17637SMingming Cao do { 1301d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1302d2a17637SMingming Cao 1303d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1304d2a17637SMingming Cao to_release++; 1305d2a17637SMingming Cao clear_buffer_delay(bh); 1306d2a17637SMingming Cao } 1307d2a17637SMingming Cao curr_off = next_off; 1308d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 13097b415bf6SAditya Kali 131051865fdaSZheng Liu if (to_release) { 131151865fdaSZheng Liu lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 131251865fdaSZheng Liu ext4_es_remove_extent(inode, lblk, to_release); 131351865fdaSZheng Liu } 131451865fdaSZheng Liu 13157b415bf6SAditya Kali /* If we have released all the blocks belonging to a cluster, then we 13167b415bf6SAditya Kali * need to release the reserved space for that cluster. 
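 *
 * As an illustrative sketch (assuming a cluster ratio of 4): freeing
 * four delayed buffers may touch one or two clusters; the loop below
 * visits each candidate cluster and releases exactly one unit of
 * reservation only when ext4_find_delalloc_cluster() finds no
 * delalloc block still left in that cluster.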
*/ 13177b415bf6SAditya Kali num_clusters = EXT4_NUM_B2C(sbi, to_release); 13187b415bf6SAditya Kali while (num_clusters > 0) { 13197b415bf6SAditya Kali lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + 13207b415bf6SAditya Kali ((num_clusters - 1) << sbi->s_cluster_bits); 13217b415bf6SAditya Kali if (sbi->s_cluster_ratio == 1 || 13227d1b1fbcSZheng Liu !ext4_find_delalloc_cluster(inode, lblk)) 13237b415bf6SAditya Kali ext4_da_release_space(inode, 1); 13247b415bf6SAditya Kali 13257b415bf6SAditya Kali num_clusters--; 13267b415bf6SAditya Kali } 1327d2a17637SMingming Cao } 1328ac27a0ecSDave Kleikamp 1329ac27a0ecSDave Kleikamp /* 133064769240SAlex Tomas * Delayed allocation stuff 133164769240SAlex Tomas */ 133264769240SAlex Tomas 133364769240SAlex Tomas /* 133464769240SAlex Tomas * mpage_da_submit_io - walks through the extent of pages and tries to write 1335a1d6cc56SAneesh Kumar K.V * them with the writepage() callback 133664769240SAlex Tomas * 133764769240SAlex Tomas * @mpd->inode: inode 133864769240SAlex Tomas * @mpd->first_page: first page of the extent 133964769240SAlex Tomas * @mpd->next_page: page after the last page of the extent 134064769240SAlex Tomas * 134164769240SAlex Tomas * By the time mpage_da_submit_io() is called we expect all blocks 134264769240SAlex Tomas * to be allocated. This may be wrong if allocation failed. 134364769240SAlex Tomas * 134464769240SAlex Tomas * As pages are already locked by write_cache_pages(), we can't use it. 134564769240SAlex Tomas */ 13461de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd, 13471de3e3dfSTheodore Ts'o struct ext4_map_blocks *map) 134864769240SAlex Tomas { 1349791b7f08SAneesh Kumar K.V struct pagevec pvec; 1350791b7f08SAneesh Kumar K.V unsigned long index, end; 1351791b7f08SAneesh Kumar K.V int ret = 0, err, nr_pages, i; 1352791b7f08SAneesh Kumar K.V struct inode *inode = mpd->inode; 1353791b7f08SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 1354cb20d518STheodore Ts'o loff_t size = i_size_read(inode); 13553ecdb3a1STheodore Ts'o unsigned int len, block_start; 13563ecdb3a1STheodore Ts'o struct buffer_head *bh, *page_bufs = NULL; 13571de3e3dfSTheodore Ts'o sector_t pblock = 0, cur_logical = 0; 1358bd2d0210STheodore Ts'o struct ext4_io_submit io_submit; 135964769240SAlex Tomas 136064769240SAlex Tomas BUG_ON(mpd->next_page <= mpd->first_page); 1361bd2d0210STheodore Ts'o memset(&io_submit, 0, sizeof(io_submit)); 1362791b7f08SAneesh Kumar K.V /* 1363791b7f08SAneesh Kumar K.V * We need to start from the first_page to the next_page - 1 1364791b7f08SAneesh Kumar K.V * to make sure we also write the mapped dirty buffer_heads. 13658dc207c0STheodore Ts'o * If we look at mpd->b_blocknr we would only be looking 1366791b7f08SAneesh Kumar K.V * at the currently mapped buffer_heads.
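 *
 * For example, an extent covering pages 3..5 has first_page == 3 and
 * next_page == 6, so the pagevec walk below scans from index 3 to
 * end == next_page - 1 == 5, inclusive.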
1367791b7f08SAneesh Kumar K.V */ 136864769240SAlex Tomas index = mpd->first_page; 136964769240SAlex Tomas end = mpd->next_page - 1; 137064769240SAlex Tomas 1371791b7f08SAneesh Kumar K.V pagevec_init(&pvec, 0); 137264769240SAlex Tomas while (index <= end) { 1373791b7f08SAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 137464769240SAlex Tomas if (nr_pages == 0) 137564769240SAlex Tomas break; 137664769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 137797498956STheodore Ts'o int commit_write = 0, skip_page = 0; 137864769240SAlex Tomas struct page *page = pvec.pages[i]; 137964769240SAlex Tomas 1380791b7f08SAneesh Kumar K.V index = page->index; 1381791b7f08SAneesh Kumar K.V if (index > end) 1382791b7f08SAneesh Kumar K.V break; 1383cb20d518STheodore Ts'o 1384cb20d518STheodore Ts'o if (index == size >> PAGE_CACHE_SHIFT) 1385cb20d518STheodore Ts'o len = size & ~PAGE_CACHE_MASK; 1386cb20d518STheodore Ts'o else 1387cb20d518STheodore Ts'o len = PAGE_CACHE_SIZE; 13881de3e3dfSTheodore Ts'o if (map) { 13891de3e3dfSTheodore Ts'o cur_logical = index << (PAGE_CACHE_SHIFT - 13901de3e3dfSTheodore Ts'o inode->i_blkbits); 13911de3e3dfSTheodore Ts'o pblock = map->m_pblk + (cur_logical - 13921de3e3dfSTheodore Ts'o map->m_lblk); 13931de3e3dfSTheodore Ts'o } 1394791b7f08SAneesh Kumar K.V index++; 1395791b7f08SAneesh Kumar K.V 1396791b7f08SAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1397791b7f08SAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1398791b7f08SAneesh Kumar K.V 139922208dedSAneesh Kumar K.V /* 1400cb20d518STheodore Ts'o * If the page does not have buffers (for 1401cb20d518STheodore Ts'o * whatever reason), try to create them using 1402a107e5a3STheodore Ts'o * __block_write_begin. If this fails, 140397498956STheodore Ts'o * skip the page and move on. 
140422208dedSAneesh Kumar K.V */ 1405cb20d518STheodore Ts'o if (!page_has_buffers(page)) { 1406a107e5a3STheodore Ts'o if (__block_write_begin(page, 0, len, 1407cb20d518STheodore Ts'o noalloc_get_block_write)) { 140897498956STheodore Ts'o skip_page: 1409cb20d518STheodore Ts'o unlock_page(page); 1410cb20d518STheodore Ts'o continue; 1411cb20d518STheodore Ts'o } 1412cb20d518STheodore Ts'o commit_write = 1; 1413cb20d518STheodore Ts'o } 14143ecdb3a1STheodore Ts'o 14153ecdb3a1STheodore Ts'o bh = page_bufs = page_buffers(page); 14163ecdb3a1STheodore Ts'o block_start = 0; 14173ecdb3a1STheodore Ts'o do { 14181de3e3dfSTheodore Ts'o if (!bh) 141997498956STheodore Ts'o goto skip_page; 14201de3e3dfSTheodore Ts'o if (map && (cur_logical >= map->m_lblk) && 14211de3e3dfSTheodore Ts'o (cur_logical <= (map->m_lblk + 14221de3e3dfSTheodore Ts'o (map->m_len - 1)))) { 14231de3e3dfSTheodore Ts'o if (buffer_delay(bh)) { 14241de3e3dfSTheodore Ts'o clear_buffer_delay(bh); 14251de3e3dfSTheodore Ts'o bh->b_blocknr = pblock; 14261de3e3dfSTheodore Ts'o } 14271de3e3dfSTheodore Ts'o if (buffer_unwritten(bh) || 14281de3e3dfSTheodore Ts'o buffer_mapped(bh)) 14291de3e3dfSTheodore Ts'o BUG_ON(bh->b_blocknr != pblock); 14301de3e3dfSTheodore Ts'o if (map->m_flags & EXT4_MAP_UNINIT) 14311de3e3dfSTheodore Ts'o set_buffer_uninit(bh); 14321de3e3dfSTheodore Ts'o clear_buffer_unwritten(bh); 14331de3e3dfSTheodore Ts'o } 14341de3e3dfSTheodore Ts'o 143513a79a47SYongqiang Yang /* 143613a79a47SYongqiang Yang * skip page if block allocation undone and 143713a79a47SYongqiang Yang * block is dirty 143813a79a47SYongqiang Yang */ 143913a79a47SYongqiang Yang if (ext4_bh_delay_or_unwritten(NULL, bh)) 144097498956STheodore Ts'o skip_page = 1; 14413ecdb3a1STheodore Ts'o bh = bh->b_this_page; 14423ecdb3a1STheodore Ts'o block_start += bh->b_size; 14431de3e3dfSTheodore Ts'o cur_logical++; 14441de3e3dfSTheodore Ts'o pblock++; 14451de3e3dfSTheodore Ts'o } while (bh != page_bufs); 14461de3e3dfSTheodore Ts'o 144797498956STheodore Ts'o if (skip_page) 144897498956STheodore Ts'o goto skip_page; 1449cb20d518STheodore Ts'o 1450cb20d518STheodore Ts'o if (commit_write) 1451cb20d518STheodore Ts'o /* mark the buffer_heads as dirty & uptodate */ 1452cb20d518STheodore Ts'o block_commit_write(page, 0, len); 1453cb20d518STheodore Ts'o 145497498956STheodore Ts'o clear_page_dirty_for_io(page); 1455*fe089c77SJan Kara err = ext4_bio_write_page(&io_submit, page, len, 1456*fe089c77SJan Kara mpd->wbc); 1457cb20d518STheodore Ts'o if (!err) 1458a1d6cc56SAneesh Kumar K.V mpd->pages_written++; 145964769240SAlex Tomas /* 146064769240SAlex Tomas * In error case, we have to continue because 146164769240SAlex Tomas * remaining pages are still locked 146264769240SAlex Tomas */ 146364769240SAlex Tomas if (ret == 0) 146464769240SAlex Tomas ret = err; 146564769240SAlex Tomas } 146664769240SAlex Tomas pagevec_release(&pvec); 146764769240SAlex Tomas } 1468bd2d0210STheodore Ts'o ext4_io_submit(&io_submit); 146964769240SAlex Tomas return ret; 147064769240SAlex Tomas } 147164769240SAlex Tomas 1472c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 1473c4a0c46eSAneesh Kumar K.V { 1474c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1475c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1476c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1477c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1478c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 147951865fdaSZheng Liu ext4_lblk_t start, last; 1480c4a0c46eSAneesh Kumar K.V 
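/*
 * Illustrative note: with 4K pages and 4K blocks,
 * PAGE_CACHE_SHIFT - i_blkbits == 0, so the shifts below map each
 * page index to exactly one logical block and the extent-status
 * range [start, last] covers one block per page of the extent.
 */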
1481c7f5938aSCurt Wohlgemuth index = mpd->first_page; 1482c7f5938aSCurt Wohlgemuth end = mpd->next_page - 1; 148351865fdaSZheng Liu 148451865fdaSZheng Liu start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 148551865fdaSZheng Liu last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); 148651865fdaSZheng Liu ext4_es_remove_extent(inode, start, last - start + 1); 148751865fdaSZheng Liu 148866bea92cSEric Sandeen pagevec_init(&pvec, 0); 1489c4a0c46eSAneesh Kumar K.V while (index <= end) { 1490c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1491c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1492c4a0c46eSAneesh Kumar K.V break; 1493c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1494c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 14959b1d0998SJan Kara if (page->index > end) 1496c4a0c46eSAneesh Kumar K.V break; 1497c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1498c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1499c4a0c46eSAneesh Kumar K.V block_invalidatepage(page, 0); 1500c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 1501c4a0c46eSAneesh Kumar K.V unlock_page(page); 1502c4a0c46eSAneesh Kumar K.V } 15039b1d0998SJan Kara index = pvec.pages[nr_pages - 1]->index + 1; 15049b1d0998SJan Kara pagevec_release(&pvec); 1505c4a0c46eSAneesh Kumar K.V } 1506c4a0c46eSAneesh Kumar K.V return; 1507c4a0c46eSAneesh Kumar K.V } 1508c4a0c46eSAneesh Kumar K.V 1509df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1510df22291fSAneesh Kumar K.V { 1511df22291fSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 151292b97816STheodore Ts'o struct super_block *sb = inode->i_sb; 151392b97816STheodore Ts'o 151492b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", 15155dee5437STheodore Ts'o EXT4_C2B(EXT4_SB(inode->i_sb), 15165dee5437STheodore Ts'o ext4_count_free_clusters(inode->i_sb))); 151792b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Free/Dirty block details"); 151892b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "free_blocks=%lld", 151957042651STheodore Ts'o (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 152057042651STheodore Ts'o percpu_counter_sum(&sbi->s_freeclusters_counter))); 152192b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld", 15227b415bf6SAditya Kali (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 15237b415bf6SAditya Kali percpu_counter_sum(&sbi->s_dirtyclusters_counter))); 152492b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Block reservation details"); 152592b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u", 1526df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks); 152792b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u", 1528df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_meta_blocks); 1529df22291fSAneesh Kumar K.V return; 1530df22291fSAneesh Kumar K.V } 1531df22291fSAneesh Kumar K.V 1532b920c755STheodore Ts'o /* 15335a87b7a5STheodore Ts'o * mpage_da_map_and_submit - go through given space, map them 15345a87b7a5STheodore Ts'o * if necessary, and then submit them for I/O 153564769240SAlex Tomas * 15368dc207c0STheodore Ts'o * @mpd - bh describing space 153764769240SAlex Tomas * 153864769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 
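 *
 * For instance, an extent whose state is BH_Mapped with neither
 * BH_Delay nor BH_Unwritten set needs no allocation, so the check
 * below jumps straight to submit_io; a delalloc or unwritten extent
 * goes through ext4_map_blocks() first.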
153964769240SAlex Tomas * 154064769240SAlex Tomas */ 15415a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd) 154264769240SAlex Tomas { 15432ac3b6e0STheodore Ts'o int err, blks, get_blocks_flags; 15441de3e3dfSTheodore Ts'o struct ext4_map_blocks map, *mapp = NULL; 15452fa3cdfbSTheodore Ts'o sector_t next = mpd->b_blocknr; 15462fa3cdfbSTheodore Ts'o unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 15472fa3cdfbSTheodore Ts'o loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 15482fa3cdfbSTheodore Ts'o handle_t *handle = NULL; 154964769240SAlex Tomas 155064769240SAlex Tomas /* 15515a87b7a5STheodore Ts'o * If the blocks are mapped already, or we couldn't accumulate 15525a87b7a5STheodore Ts'o * any blocks, then proceed immediately to the submission stage. 155364769240SAlex Tomas */ 15545a87b7a5STheodore Ts'o if ((mpd->b_size == 0) || 15555a87b7a5STheodore Ts'o ((mpd->b_state & (1 << BH_Mapped)) && 155629fa89d0SAneesh Kumar K.V !(mpd->b_state & (1 << BH_Delay)) && 15575a87b7a5STheodore Ts'o !(mpd->b_state & (1 << BH_Unwritten)))) 15585a87b7a5STheodore Ts'o goto submit_io; 15592fa3cdfbSTheodore Ts'o 15602fa3cdfbSTheodore Ts'o handle = ext4_journal_current_handle(); 15612fa3cdfbSTheodore Ts'o BUG_ON(!handle); 15622fa3cdfbSTheodore Ts'o 156379ffab34SAneesh Kumar K.V /* 156479e83036SEric Sandeen * Call ext4_map_blocks() to allocate any delayed allocation 15652ac3b6e0STheodore Ts'o * blocks, or to convert an uninitialized extent to be 15662ac3b6e0STheodore Ts'o * initialized (in the case where we have written into 15672ac3b6e0STheodore Ts'o * one or more preallocated blocks). 15682ac3b6e0STheodore Ts'o * 15692ac3b6e0STheodore Ts'o * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 15702ac3b6e0STheodore Ts'o * indicate that we are on the delayed allocation path. This 15712ac3b6e0STheodore Ts'o * affects functions in many different parts of the allocation 15722ac3b6e0STheodore Ts'o * call path. This flag exists primarily because we don't 157379e83036SEric Sandeen * want to change *many* call functions, so ext4_map_blocks() 1574f2321097STheodore Ts'o * will set the EXT4_STATE_DELALLOC_RESERVED flag once the 15752ac3b6e0STheodore Ts'o * inode's allocation semaphore is taken. 15762ac3b6e0STheodore Ts'o * 15772ac3b6e0STheodore Ts'o * If the blocks in question were delalloc blocks, set 15782ac3b6e0STheodore Ts'o * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 15792ac3b6e0STheodore Ts'o * variables are updated after the blocks have been allocated.
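 *
 * In effect, a typical delalloc writeout below boils down to
 * (a summary of the code that follows, not extra behaviour):
 *
 *	map.m_lblk = next;
 *	map.m_len = max_blocks;
 *	ext4_map_blocks(handle, mpd->inode, &map,
 *			EXT4_GET_BLOCKS_CREATE |
 *			EXT4_GET_BLOCKS_DELALLOC_RESERVE);
 *
 * with EXT4_GET_BLOCKS_IO_CREATE_EXT OR-ed in when dioread_nolock
 * is enabled.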
158079ffab34SAneesh Kumar K.V */ 15812ed88685STheodore Ts'o map.m_lblk = next; 15822ed88685STheodore Ts'o map.m_len = max_blocks; 15831296cc85SAneesh Kumar K.V get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 1584744692dcSJiaying Zhang if (ext4_should_dioread_nolock(mpd->inode)) 1585744692dcSJiaying Zhang get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 15862ac3b6e0STheodore Ts'o if (mpd->b_state & (1 << BH_Delay)) 15871296cc85SAneesh Kumar K.V get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 15881296cc85SAneesh Kumar K.V 15892ed88685STheodore Ts'o blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); 15902fa3cdfbSTheodore Ts'o if (blks < 0) { 1591e3570639SEric Sandeen struct super_block *sb = mpd->inode->i_sb; 1592e3570639SEric Sandeen 15932fa3cdfbSTheodore Ts'o err = blks; 1594ed5bde0bSTheodore Ts'o /* 15955a87b7a5STheodore Ts'o * If get block returns EAGAIN or ENOSPC and there 159697498956STheodore Ts'o * appear to be free blocks we will just let 159797498956STheodore Ts'o * mpage_da_submit_io() unlock all of the pages. 1598c4a0c46eSAneesh Kumar K.V */ 1599c4a0c46eSAneesh Kumar K.V if (err == -EAGAIN) 16005a87b7a5STheodore Ts'o goto submit_io; 1601df22291fSAneesh Kumar K.V 16025dee5437STheodore Ts'o if (err == -ENOSPC && ext4_count_free_clusters(sb)) { 1603df22291fSAneesh Kumar K.V mpd->retval = err; 16045a87b7a5STheodore Ts'o goto submit_io; 1605df22291fSAneesh Kumar K.V } 1606df22291fSAneesh Kumar K.V 1607c4a0c46eSAneesh Kumar K.V /* 1608ed5bde0bSTheodore Ts'o * get block failure will cause us to loop in 1609ed5bde0bSTheodore Ts'o * writepages, because a_ops->writepage won't be able 1610ed5bde0bSTheodore Ts'o * to make progress. The page will be redirtied by 1611ed5bde0bSTheodore Ts'o * writepage and writepages will again try to write 1612ed5bde0bSTheodore Ts'o * the same. 1613c4a0c46eSAneesh Kumar K.V */ 1614e3570639SEric Sandeen if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { 1615e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1616e3570639SEric Sandeen "delayed block allocation failed for inode %lu " 1617e3570639SEric Sandeen "at logical offset %llu with max blocks %zd " 1618e3570639SEric Sandeen "with error %d", mpd->inode->i_ino, 1619c4a0c46eSAneesh Kumar K.V (unsigned long long) next, 16208dc207c0STheodore Ts'o mpd->b_size >> mpd->inode->i_blkbits, err); 1621e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1622e3570639SEric Sandeen "This should not happen!! Data will be lost\n"); 1623e3570639SEric Sandeen if (err == -ENOSPC) 1624df22291fSAneesh Kumar K.V ext4_print_free_blocks(mpd->inode); 1625030ba6bcSAneesh Kumar K.V } 16262fa3cdfbSTheodore Ts'o /* invalidate all the pages */ 1627c7f5938aSCurt Wohlgemuth ext4_da_block_invalidatepages(mpd); 1628e0fd9b90SCurt Wohlgemuth 1629e0fd9b90SCurt Wohlgemuth /* Mark this page range as having been completed */ 1630e0fd9b90SCurt Wohlgemuth mpd->io_done = 1; 16315a87b7a5STheodore Ts'o return; 1632c4a0c46eSAneesh Kumar K.V } 16332fa3cdfbSTheodore Ts'o BUG_ON(blks == 0); 16342fa3cdfbSTheodore Ts'o 16351de3e3dfSTheodore Ts'o mapp = &map; 16362ed88685STheodore Ts'o if (map.m_flags & EXT4_MAP_NEW) { 16372ed88685STheodore Ts'o struct block_device *bdev = mpd->inode->i_sb->s_bdev; 16382ed88685STheodore Ts'o int i; 163964769240SAlex Tomas 16402ed88685STheodore Ts'o for (i = 0; i < map.m_len; i++) 16412ed88685STheodore Ts'o unmap_underlying_metadata(bdev, map.m_pblk + i); 16422fa3cdfbSTheodore Ts'o } 16432fa3cdfbSTheodore Ts'o 16442fa3cdfbSTheodore Ts'o /* 164503f5d8bcSJan Kara * Update on-disk size along with block allocation.
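 *
 * For example (illustrative numbers): with 4K blocks
 * (i_blkbits == 12), next == 10 and blks == 3 give a candidate
 * disksize of (10 + 3) << 12 == 53248 bytes, which is then clamped
 * to i_size and pushed to i_disksize only if it actually grew.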
16462fa3cdfbSTheodore Ts'o */ 16472fa3cdfbSTheodore Ts'o disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 16482fa3cdfbSTheodore Ts'o if (disksize > i_size_read(mpd->inode)) 16492fa3cdfbSTheodore Ts'o disksize = i_size_read(mpd->inode); 16502fa3cdfbSTheodore Ts'o if (disksize > EXT4_I(mpd->inode)->i_disksize) { 16512fa3cdfbSTheodore Ts'o ext4_update_i_disksize(mpd->inode, disksize); 16525a87b7a5STheodore Ts'o err = ext4_mark_inode_dirty(handle, mpd->inode); 16535a87b7a5STheodore Ts'o if (err) 16545a87b7a5STheodore Ts'o ext4_error(mpd->inode->i_sb, 16555a87b7a5STheodore Ts'o "Failed to mark inode %lu dirty", 16565a87b7a5STheodore Ts'o mpd->inode->i_ino); 16572fa3cdfbSTheodore Ts'o } 16582fa3cdfbSTheodore Ts'o 16595a87b7a5STheodore Ts'o submit_io: 16601de3e3dfSTheodore Ts'o mpage_da_submit_io(mpd, mapp); 16615a87b7a5STheodore Ts'o mpd->io_done = 1; 166264769240SAlex Tomas } 166364769240SAlex Tomas 1664bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 1665bf068ee2SAneesh Kumar K.V (1 << BH_Delay) | (1 << BH_Unwritten)) 166664769240SAlex Tomas 166764769240SAlex Tomas /* 166864769240SAlex Tomas * mpage_add_bh_to_extent - try to add one more block to extent of blocks 166964769240SAlex Tomas * 167064769240SAlex Tomas * @mpd->lbh - extent of blocks 167164769240SAlex Tomas * @logical - logical number of the block in the file 167264769240SAlex Tomas * @bh - bh of the block (used to access block's state) 167364769240SAlex Tomas * 167464769240SAlex Tomas * the function is used to collect contiguous blocks in the same state 167564769240SAlex Tomas */ 167664769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 16778dc207c0STheodore Ts'o sector_t logical, size_t b_size, 16788dc207c0STheodore Ts'o unsigned long b_state) 167964769240SAlex Tomas { 168064769240SAlex Tomas sector_t next; 16818dc207c0STheodore Ts'o int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 168264769240SAlex Tomas 1683c445e3e0SEric Sandeen /* 1684c445e3e0SEric Sandeen * XXX Don't go larger than mballoc is willing to allocate 1685c445e3e0SEric Sandeen * This is a stopgap solution. We eventually need to fold 1686c445e3e0SEric Sandeen * mpage_da_submit_io() into this function and then call 168779e83036SEric Sandeen * ext4_map_blocks() multiple times in a loop 1688c445e3e0SEric Sandeen */ 1689c445e3e0SEric Sandeen if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize) 1690c445e3e0SEric Sandeen goto flush_it; 1691c445e3e0SEric Sandeen 1692525f4ed8SMingming Cao /* check if the reserved journal credits might overflow */ 169312e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) { 1694525f4ed8SMingming Cao if (nrblocks >= EXT4_MAX_TRANS_DATA) { 1695525f4ed8SMingming Cao /* 1696525f4ed8SMingming Cao * With non-extent format we are limited by the journal 1697525f4ed8SMingming Cao * credit available. Total credit needed to insert 1698525f4ed8SMingming Cao * nrblocks contiguous blocks is dependent on the 1699525f4ed8SMingming Cao * nrblocks. So limit nrblocks. 1700525f4ed8SMingming Cao */ 1701525f4ed8SMingming Cao goto flush_it; 1702525f4ed8SMingming Cao } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > 1703525f4ed8SMingming Cao EXT4_MAX_TRANS_DATA) { 1704525f4ed8SMingming Cao /* 1705525f4ed8SMingming Cao * Adding the new buffer_head would make it cross the 1706525f4ed8SMingming Cao * allowed limit for which we have journal credit 1707525f4ed8SMingming Cao * reserved.
So limit the new bh->b_size. 1708525f4ed8SMingming Cao */ 1709525f4ed8SMingming Cao b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << 1710525f4ed8SMingming Cao mpd->inode->i_blkbits; 1711525f4ed8SMingming Cao /* we will do mpage_da_submit_io in the next loop */ 1712525f4ed8SMingming Cao } 1713525f4ed8SMingming Cao } 171464769240SAlex Tomas /* 171564769240SAlex Tomas * First block in the extent 171664769240SAlex Tomas */ 17178dc207c0STheodore Ts'o if (mpd->b_size == 0) { 17188dc207c0STheodore Ts'o mpd->b_blocknr = logical; 17198dc207c0STheodore Ts'o mpd->b_size = b_size; 17208dc207c0STheodore Ts'o mpd->b_state = b_state & BH_FLAGS; 172164769240SAlex Tomas return; 172264769240SAlex Tomas } 172364769240SAlex Tomas 17248dc207c0STheodore Ts'o next = mpd->b_blocknr + nrblocks; 172564769240SAlex Tomas /* 172664769240SAlex Tomas * Can we merge the block to our big extent? 172764769240SAlex Tomas */ 17288dc207c0STheodore Ts'o if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { 17298dc207c0STheodore Ts'o mpd->b_size += b_size; 173064769240SAlex Tomas return; 173164769240SAlex Tomas } 173264769240SAlex Tomas 1733525f4ed8SMingming Cao flush_it: 173464769240SAlex Tomas /* 173564769240SAlex Tomas * We couldn't merge the block to our extent, so we 173664769240SAlex Tomas * need to flush the current extent and start a new one 173764769240SAlex Tomas */ 17385a87b7a5STheodore Ts'o mpage_da_map_and_submit(mpd); 1739a1d6cc56SAneesh Kumar K.V return; 174064769240SAlex Tomas } 174164769240SAlex Tomas 1742c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 174329fa89d0SAneesh Kumar K.V { 1744c364b22cSAneesh Kumar K.V return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); 174529fa89d0SAneesh Kumar K.V } 174629fa89d0SAneesh Kumar K.V 174764769240SAlex Tomas /* 17485356f261SAditya Kali * This function grabs code from the very beginning of 17495356f261SAditya Kali * ext4_map_blocks, but assumes that the caller is on the delayed write 17505356f261SAditya Kali * path. This function looks up the requested blocks and sets the 17515356f261SAditya Kali * buffer delay bit under the protection of i_data_sem. 17525356f261SAditya Kali */ 17535356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, 17545356f261SAditya Kali struct ext4_map_blocks *map, 17555356f261SAditya Kali struct buffer_head *bh) 17565356f261SAditya Kali { 17575356f261SAditya Kali int retval; 17585356f261SAditya Kali sector_t invalid_block = ~((sector_t) 0xffff); 17595356f261SAditya Kali 17605356f261SAditya Kali if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 17615356f261SAditya Kali invalid_block = ~0; 17625356f261SAditya Kali 17635356f261SAditya Kali map->m_flags = 0; 17645356f261SAditya Kali ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," 17655356f261SAditya Kali "logical block %lu\n", inode->i_ino, map->m_len, 17665356f261SAditya Kali (unsigned long) map->m_lblk); 17675356f261SAditya Kali /* 17685356f261SAditya Kali * Try to see if we can get the block without requesting a new 17695356f261SAditya Kali * file system block. 17705356f261SAditya Kali */ 17715356f261SAditya Kali down_read((&EXT4_I(inode)->i_data_sem)); 17729c3569b5STao Ma if (ext4_has_inline_data(inode)) { 17739c3569b5STao Ma /* 17749c3569b5STao Ma * We will soon create blocks for this page, and let 17759c3569b5STao Ma * us pretend as if the blocks aren't allocated yet.
17769c3569b5STao Ma * In case of clusters, we have to handle the work 17779c3569b5STao Ma * of mapping from cluster so that the reserved space 17789c3569b5STao Ma * is calculated properly. 17799c3569b5STao Ma */ 17809c3569b5STao Ma if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) && 17819c3569b5STao Ma ext4_find_delalloc_cluster(inode, map->m_lblk)) 17829c3569b5STao Ma map->m_flags |= EXT4_MAP_FROM_CLUSTER; 17839c3569b5STao Ma retval = 0; 17849c3569b5STao Ma } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 17855356f261SAditya Kali retval = ext4_ext_map_blocks(NULL, inode, map, 0); 17865356f261SAditya Kali else 17875356f261SAditya Kali retval = ext4_ind_map_blocks(NULL, inode, map, 0); 17885356f261SAditya Kali 17895356f261SAditya Kali if (retval == 0) { 17905356f261SAditya Kali /* 17915356f261SAditya Kali * XXX: __block_prepare_write() unmaps passed block, 17925356f261SAditya Kali * is it OK? 17935356f261SAditya Kali */ 17945356f261SAditya Kali /* If the block was allocated from a previously allocated cluster, 17955356f261SAditya Kali * then we don't need to reserve it again. */ 17965356f261SAditya Kali if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { 17975356f261SAditya Kali retval = ext4_da_reserve_space(inode, iblock); 17985356f261SAditya Kali if (retval) 17995356f261SAditya Kali /* not enough space to reserve */ 18005356f261SAditya Kali goto out_unlock; 18015356f261SAditya Kali } 18025356f261SAditya Kali 180351865fdaSZheng Liu retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len); 180451865fdaSZheng Liu if (retval) 180551865fdaSZheng Liu goto out_unlock; 180651865fdaSZheng Liu 18075356f261SAditya Kali /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served 18085356f261SAditya Kali * and it should not appear on the bh->b_state. 18095356f261SAditya Kali */ 18105356f261SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; 18115356f261SAditya Kali 18125356f261SAditya Kali map_bh(bh, inode->i_sb, invalid_block); 18135356f261SAditya Kali set_buffer_new(bh); 18145356f261SAditya Kali set_buffer_delay(bh); 18155356f261SAditya Kali } 18165356f261SAditya Kali 18175356f261SAditya Kali out_unlock: 18185356f261SAditya Kali up_read((&EXT4_I(inode)->i_data_sem)); 18195356f261SAditya Kali 18205356f261SAditya Kali return retval; 18215356f261SAditya Kali } 18225356f261SAditya Kali 18235356f261SAditya Kali /* 1824b920c755STheodore Ts'o * This is a special get_blocks_t callback which is used by 1825b920c755STheodore Ts'o * ext4_da_write_begin(). It will either return a mapped block or 1826b920c755STheodore Ts'o * reserve space for a single block. 182729fa89d0SAneesh Kumar K.V * 182829fa89d0SAneesh Kumar K.V * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 182929fa89d0SAneesh Kumar K.V * We also have b_blocknr = -1 and b_bdev initialized properly 183029fa89d0SAneesh Kumar K.V * 183129fa89d0SAneesh Kumar K.V * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 183229fa89d0SAneesh Kumar K.V * We also have b_blocknr = the physical block mapping the unwritten extent and b_bdev 183329fa89d0SAneesh Kumar K.V * initialized properly.
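 *
 * A minimal sketch of the intended call site (the comment above
 * names ext4_da_write_begin(); the exact invocation there may
 * differ):
 *
 *	err = __block_write_begin(page, pos, len,
 *				  ext4_da_get_block_prep);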
183464769240SAlex Tomas */ 18359c3569b5STao Ma int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 18362ed88685STheodore Ts'o struct buffer_head *bh, int create) 183764769240SAlex Tomas { 18382ed88685STheodore Ts'o struct ext4_map_blocks map; 183964769240SAlex Tomas int ret = 0; 184064769240SAlex Tomas 184164769240SAlex Tomas BUG_ON(create == 0); 18422ed88685STheodore Ts'o BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 18432ed88685STheodore Ts'o 18442ed88685STheodore Ts'o map.m_lblk = iblock; 18452ed88685STheodore Ts'o map.m_len = 1; 184664769240SAlex Tomas 184764769240SAlex Tomas /* 184864769240SAlex Tomas * first, we need to know whether the block is allocated already; 184964769240SAlex Tomas * preallocated blocks are unmapped but should be treated 185064769240SAlex Tomas * the same as allocated blocks. 185164769240SAlex Tomas */ 18525356f261SAditya Kali ret = ext4_da_map_blocks(inode, iblock, &map, bh); 18535356f261SAditya Kali if (ret <= 0) 18542ed88685STheodore Ts'o return ret; 185564769240SAlex Tomas 18562ed88685STheodore Ts'o map_bh(bh, inode->i_sb, map.m_pblk); 18572ed88685STheodore Ts'o bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 18582ed88685STheodore Ts'o 18592ed88685STheodore Ts'o if (buffer_unwritten(bh)) { 18602ed88685STheodore Ts'o /* A delayed write to unwritten bh should be marked 18612ed88685STheodore Ts'o * new and mapped. Mapped ensures that we don't do 18622ed88685STheodore Ts'o * get_block multiple times when we write to the same 18632ed88685STheodore Ts'o * offset and new ensures that we do proper zero out 18642ed88685STheodore Ts'o * for partial write. 18652ed88685STheodore Ts'o */ 18662ed88685STheodore Ts'o set_buffer_new(bh); 1867c8205636STheodore Ts'o set_buffer_mapped(bh); 18682ed88685STheodore Ts'o } 18692ed88685STheodore Ts'o return 0; 187064769240SAlex Tomas } 187161628a3fSMingming Cao 1872b920c755STheodore Ts'o /* 187336ade451SJan Kara * This function is used as a standard get_block_t callback function when there 187436ade451SJan Kara * is no desire to allocate any blocks. It is used as a callback function for 187536ade451SJan Kara * block_write_begin(). These functions should only try to map a single block 187636ade451SJan Kara * at a time. 1877b920c755STheodore Ts'o * 1878b920c755STheodore Ts'o * Since this function doesn't do block allocations even if the caller 1879b920c755STheodore Ts'o * requests it by passing in create=1, it is critically important that 1880b920c755STheodore Ts'o * any caller checks to make sure that any buffer heads returned 1881b920c755STheodore Ts'o * by this function are either all already mapped or marked for 188236ade451SJan Kara * delayed allocation before calling ext4_bio_write_page(). Otherwise, 1883206f7ab4SChristoph Hellwig * b_blocknr could be left uninitialized, and the page write functions will 1884206f7ab4SChristoph Hellwig * be taken by surprise.
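 *
 * A typical use, slightly simplified from ext4_writepage() below:
 *
 *	if (__block_write_begin(page, 0, len, noalloc_get_block_write))
 *		goto redirty_page;
 *
 * which maps only blocks that already exist and never allocates.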
1885b920c755STheodore Ts'o */ 1886b920c755STheodore Ts'o static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 1887f0e6c985SAneesh Kumar K.V struct buffer_head *bh_result, int create) 1888f0e6c985SAneesh Kumar K.V { 1889a2dc52b5STheodore Ts'o BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 18902ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result, 0); 189161628a3fSMingming Cao } 189261628a3fSMingming Cao 189362e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh) 189462e086beSAneesh Kumar K.V { 189562e086beSAneesh Kumar K.V get_bh(bh); 189662e086beSAneesh Kumar K.V return 0; 189762e086beSAneesh Kumar K.V } 189862e086beSAneesh Kumar K.V 189962e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh) 190062e086beSAneesh Kumar K.V { 190162e086beSAneesh Kumar K.V put_bh(bh); 190262e086beSAneesh Kumar K.V return 0; 190362e086beSAneesh Kumar K.V } 190462e086beSAneesh Kumar K.V 190562e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page, 190662e086beSAneesh Kumar K.V unsigned int len) 190762e086beSAneesh Kumar K.V { 190862e086beSAneesh Kumar K.V struct address_space *mapping = page->mapping; 190962e086beSAneesh Kumar K.V struct inode *inode = mapping->host; 19103fdcfb66STao Ma struct buffer_head *page_bufs = NULL; 191162e086beSAneesh Kumar K.V handle_t *handle = NULL; 19123fdcfb66STao Ma int ret = 0, err = 0; 19133fdcfb66STao Ma int inline_data = ext4_has_inline_data(inode); 19143fdcfb66STao Ma struct buffer_head *inode_bh = NULL; 191562e086beSAneesh Kumar K.V 1916cb20d518STheodore Ts'o ClearPageChecked(page); 19173fdcfb66STao Ma 19183fdcfb66STao Ma if (inline_data) { 19193fdcfb66STao Ma BUG_ON(page->index != 0); 19203fdcfb66STao Ma BUG_ON(len > ext4_get_max_inline_size(inode)); 19213fdcfb66STao Ma inode_bh = ext4_journalled_write_inline_data(inode, len, page); 19223fdcfb66STao Ma if (inode_bh == NULL) 19233fdcfb66STao Ma goto out; 19243fdcfb66STao Ma } else { 192562e086beSAneesh Kumar K.V page_bufs = page_buffers(page); 19263fdcfb66STao Ma if (!page_bufs) { 19273fdcfb66STao Ma BUG(); 19283fdcfb66STao Ma goto out; 19293fdcfb66STao Ma } 19303fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 19313fdcfb66STao Ma NULL, bget_one); 19323fdcfb66STao Ma } 193362e086beSAneesh Kumar K.V /* As soon as we unlock the page, it can go away, but we have 193462e086beSAneesh Kumar K.V * references to buffers so we are safe */ 193562e086beSAneesh Kumar K.V unlock_page(page); 193662e086beSAneesh Kumar K.V 193762e086beSAneesh Kumar K.V handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 193862e086beSAneesh Kumar K.V if (IS_ERR(handle)) { 193962e086beSAneesh Kumar K.V ret = PTR_ERR(handle); 194062e086beSAneesh Kumar K.V goto out; 194162e086beSAneesh Kumar K.V } 194262e086beSAneesh Kumar K.V 1943441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1944441c8508SCurt Wohlgemuth 19453fdcfb66STao Ma if (inline_data) { 19463fdcfb66STao Ma ret = ext4_journal_get_write_access(handle, inode_bh); 19473fdcfb66STao Ma 19483fdcfb66STao Ma err = ext4_handle_dirty_metadata(handle, inode, inode_bh); 19493fdcfb66STao Ma 19503fdcfb66STao Ma } else { 1951f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 195262e086beSAneesh Kumar K.V do_journal_get_write_access); 195362e086beSAneesh Kumar K.V 1954f19d5870STao Ma err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, 195562e086beSAneesh Kumar K.V write_end_fn); 19563fdcfb66STao Ma } 
195762e086beSAneesh Kumar K.V if (ret == 0) 195862e086beSAneesh Kumar K.V ret = err; 19592d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 196062e086beSAneesh Kumar K.V err = ext4_journal_stop(handle); 196162e086beSAneesh Kumar K.V if (!ret) 196262e086beSAneesh Kumar K.V ret = err; 196362e086beSAneesh Kumar K.V 19643fdcfb66STao Ma if (!ext4_has_inline_data(inode)) 19653fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 19663fdcfb66STao Ma NULL, bput_one); 196719f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 196862e086beSAneesh Kumar K.V out: 19693fdcfb66STao Ma brelse(inode_bh); 197062e086beSAneesh Kumar K.V return ret; 197162e086beSAneesh Kumar K.V } 197262e086beSAneesh Kumar K.V 197361628a3fSMingming Cao /* 197443ce1d23SAneesh Kumar K.V * Note that we don't need to start a transaction unless we're journaling data 197543ce1d23SAneesh Kumar K.V * because we should have holes filled from ext4_page_mkwrite(). We even don't 197643ce1d23SAneesh Kumar K.V * need to file the inode to the transaction's list in ordered mode because if 197743ce1d23SAneesh Kumar K.V * we are writing back data added by write(), the inode is already there and if 197843ce1d23SAneesh Kumar K.V * we are writing back data modified via mmap(), no one guarantees in which 197943ce1d23SAneesh Kumar K.V * transaction the data will hit the disk. In case we are journaling data, we 198043ce1d23SAneesh Kumar K.V * cannot start transaction directly because transaction start ranks above page 198143ce1d23SAneesh Kumar K.V * lock so we have to do some magic. 198243ce1d23SAneesh Kumar K.V * 1983b920c755STheodore Ts'o * This function can get called via... 1984b920c755STheodore Ts'o * - ext4_da_writepages after taking page lock (have journal handle) 1985b920c755STheodore Ts'o * - journal_submit_inode_data_buffers (no journal handle) 1986f6463b0dSArtem Bityutskiy * - shrink_page_list via the kswapd/direct reclaim (no journal handle) 1987b920c755STheodore Ts'o * - grab_page_cache when doing write_begin (have journal handle) 198843ce1d23SAneesh Kumar K.V * 198943ce1d23SAneesh Kumar K.V * We don't do any block allocation in this function. If we have a page with 199043ce1d23SAneesh Kumar K.V * multiple blocks we need to write those buffer_heads that are mapped. This 199143ce1d23SAneesh Kumar K.V * is important for mmap-based writes. So if, with blocksize 1K, we do 199243ce1d23SAneesh Kumar K.V * truncate(f, 1024); 199343ce1d23SAneesh Kumar K.V * a = mmap(f, 0, 4096); 199443ce1d23SAneesh Kumar K.V * a[0] = 'a'; 199543ce1d23SAneesh Kumar K.V * truncate(f, 4096); 199643ce1d23SAneesh Kumar K.V * we have in the page the first buffer_head mapped via the page_mkwrite callback 199790802ed9SPaul Bolle * but the other buffer_heads would be unmapped but dirty (dirtied via 199843ce1d23SAneesh Kumar K.V * do_wp_page). So writepage should write the first block. If we modify 199943ce1d23SAneesh Kumar K.V * the mmap area beyond 1024 we will again get a page_fault and the 200043ce1d23SAneesh Kumar K.V * page_mkwrite callback will do the block allocation and mark the 200143ce1d23SAneesh Kumar K.V * buffer_heads mapped. 200243ce1d23SAneesh Kumar K.V * 200343ce1d23SAneesh Kumar K.V * We redirty the page if we have any buffer_heads that are either delay or 200443ce1d23SAneesh Kumar K.V * unwritten in the page. 200543ce1d23SAneesh Kumar K.V * 200643ce1d23SAneesh Kumar K.V * We can get recursively called as shown below.
200743ce1d23SAneesh Kumar K.V * 200843ce1d23SAneesh Kumar K.V * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 200943ce1d23SAneesh Kumar K.V * ext4_writepage() 201043ce1d23SAneesh Kumar K.V * 201143ce1d23SAneesh Kumar K.V * But since we don't do any block allocation we should not deadlock. 201243ce1d23SAneesh Kumar K.V * The page also has the dirty flag cleared so we don't get recursive page_lock. 201361628a3fSMingming Cao */ 201443ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page, 201564769240SAlex Tomas struct writeback_control *wbc) 201664769240SAlex Tomas { 2017a42afc5fSTheodore Ts'o int ret = 0, commit_write = 0; 201861628a3fSMingming Cao loff_t size; 2019498e5f24STheodore Ts'o unsigned int len; 2020744692dcSJiaying Zhang struct buffer_head *page_bufs = NULL; 202161628a3fSMingming Cao struct inode *inode = page->mapping->host; 202236ade451SJan Kara struct ext4_io_submit io_submit; 202364769240SAlex Tomas 2024a9c667f8SLukas Czerner trace_ext4_writepage(page); 202561628a3fSMingming Cao size = i_size_read(inode); 202661628a3fSMingming Cao if (page->index == size >> PAGE_CACHE_SHIFT) 202761628a3fSMingming Cao len = size & ~PAGE_CACHE_MASK; 202861628a3fSMingming Cao else 202961628a3fSMingming Cao len = PAGE_CACHE_SIZE; 203061628a3fSMingming Cao 2031a42afc5fSTheodore Ts'o /* 2032a42afc5fSTheodore Ts'o * If the page does not have buffers (for whatever reason), 2033a107e5a3STheodore Ts'o * try to create them using __block_write_begin. If this 2034a42afc5fSTheodore Ts'o * fails, redirty the page and move on. 2035a42afc5fSTheodore Ts'o */ 2036b1142e8fSTheodore Ts'o if (!page_has_buffers(page)) { 2037a107e5a3STheodore Ts'o if (__block_write_begin(page, 0, len, 2038a42afc5fSTheodore Ts'o noalloc_get_block_write)) { 2039a42afc5fSTheodore Ts'o redirty_page: 2040a42afc5fSTheodore Ts'o redirty_page_for_writepage(wbc, page); 2041a42afc5fSTheodore Ts'o unlock_page(page); 2042a42afc5fSTheodore Ts'o return 0; 2043a42afc5fSTheodore Ts'o } 2044a42afc5fSTheodore Ts'o commit_write = 1; 2045a42afc5fSTheodore Ts'o } 2046f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page); 2047f19d5870STao Ma if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL, 2048c364b22cSAneesh Kumar K.V ext4_bh_delay_or_unwritten)) { 204961628a3fSMingming Cao /* 2050b1142e8fSTheodore Ts'o * We don't want to do block allocation, so redirty 2051b1142e8fSTheodore Ts'o * the page and return. We may reach here when we do 2052b1142e8fSTheodore Ts'o * a journal commit via journal_submit_inode_data_buffers. 2053966dbde2SMel Gorman * We can also reach here via shrink_page_list but it 2054966dbde2SMel Gorman * should never be for direct reclaim so warn if that 2055966dbde2SMel Gorman * happens 2056f0e6c985SAneesh Kumar K.V */ 2057966dbde2SMel Gorman WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == 2058966dbde2SMel Gorman PF_MEMALLOC); 2059a42afc5fSTheodore Ts'o goto redirty_page; 2060f0e6c985SAneesh Kumar K.V } 2061a42afc5fSTheodore Ts'o if (commit_write) 2062ed9b3e33SAneesh Kumar K.V /* now mark the buffer_heads as dirty and uptodate */ 2063b767e78aSAneesh Kumar K.V block_commit_write(page, 0, len); 206464769240SAlex Tomas 2065cb20d518STheodore Ts'o if (PageChecked(page) && ext4_should_journal_data(inode)) 206643ce1d23SAneesh Kumar K.V /* 206743ce1d23SAneesh Kumar K.V * It's mmapped pagecache. Add buffers and journal it. There 206843ce1d23SAneesh Kumar K.V * doesn't seem much point in redirtying the page here.
206943ce1d23SAneesh Kumar K.V */ 20703f0ca309SWu Fengguang return __ext4_journalled_writepage(page, len); 207143ce1d23SAneesh Kumar K.V 207236ade451SJan Kara memset(&io_submit, 0, sizeof(io_submit)); 207336ade451SJan Kara ret = ext4_bio_write_page(&io_submit, page, len, wbc); 207436ade451SJan Kara ext4_io_submit(&io_submit); 207564769240SAlex Tomas return ret; 207664769240SAlex Tomas } 207764769240SAlex Tomas 207861628a3fSMingming Cao /* 2079525f4ed8SMingming Cao * This is called via ext4_da_writepages() to 208025985edcSLucas De Marchi * calculate the total number of credits to reserve to fit 2081525f4ed8SMingming Cao * a single extent allocation into a single transaction; 2082525f4ed8SMingming Cao * ext4_da_writepages() will loop calling this before 2083525f4ed8SMingming Cao * the block allocation. 208461628a3fSMingming Cao */ 2085525f4ed8SMingming Cao 2086525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode) 2087525f4ed8SMingming Cao { 2088525f4ed8SMingming Cao int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 2089525f4ed8SMingming Cao 2090525f4ed8SMingming Cao /* 2091525f4ed8SMingming Cao * With non-extent format the journal credit needed to 2092525f4ed8SMingming Cao * insert nrblocks contiguous blocks is dependent on the 2093525f4ed8SMingming Cao * number of contiguous blocks. So we will limit the 2094525f4ed8SMingming Cao * number of contiguous blocks to a sane value. 2095525f4ed8SMingming Cao */ 209612e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && 2097525f4ed8SMingming Cao (max_blocks > EXT4_MAX_TRANS_DATA)) 2098525f4ed8SMingming Cao max_blocks = EXT4_MAX_TRANS_DATA; 2099525f4ed8SMingming Cao 2100525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, max_blocks); 2101525f4ed8SMingming Cao } 210261628a3fSMingming Cao 21038e48dcfbSTheodore Ts'o /* 21048e48dcfbSTheodore Ts'o * write_cache_pages_da - walk the list of dirty pages of the given 21058eb9e5ceSTheodore Ts'o * address space and accumulate pages that need writing, and call 2106168fc022STheodore Ts'o * mpage_da_map_and_submit to map a single contiguous memory region 2107168fc022STheodore Ts'o * and then write them.
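 *
 * Note on the page lookup below: a data integrity sync (WB_SYNC_ALL
 * or wbc->tagged_writepages) scans PAGECACHE_TAG_TOWRITE so that
 * pages dirtied after the sweep started are not chased forever,
 * while plain background writeback scans PAGECACHE_TAG_DIRTY.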
21088e48dcfbSTheodore Ts'o */ 21099c3569b5STao Ma static int write_cache_pages_da(handle_t *handle, 21109c3569b5STao Ma struct address_space *mapping, 21118e48dcfbSTheodore Ts'o struct writeback_control *wbc, 211272f84e65SEric Sandeen struct mpage_da_data *mpd, 211372f84e65SEric Sandeen pgoff_t *done_index) 21148e48dcfbSTheodore Ts'o { 21158eb9e5ceSTheodore Ts'o struct buffer_head *bh, *head; 2116168fc022STheodore Ts'o struct inode *inode = mapping->host; 21178e48dcfbSTheodore Ts'o struct pagevec pvec; 21184f01b02cSTheodore Ts'o unsigned int nr_pages; 21194f01b02cSTheodore Ts'o sector_t logical; 21204f01b02cSTheodore Ts'o pgoff_t index, end; 21218e48dcfbSTheodore Ts'o long nr_to_write = wbc->nr_to_write; 21224f01b02cSTheodore Ts'o int i, tag, ret = 0; 21238e48dcfbSTheodore Ts'o 2124168fc022STheodore Ts'o memset(mpd, 0, sizeof(struct mpage_da_data)); 2125168fc022STheodore Ts'o mpd->wbc = wbc; 2126168fc022STheodore Ts'o mpd->inode = inode; 21278e48dcfbSTheodore Ts'o pagevec_init(&pvec, 0); 21288e48dcfbSTheodore Ts'o index = wbc->range_start >> PAGE_CACHE_SHIFT; 21298e48dcfbSTheodore Ts'o end = wbc->range_end >> PAGE_CACHE_SHIFT; 21308e48dcfbSTheodore Ts'o 21316e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 21325b41d924SEric Sandeen tag = PAGECACHE_TAG_TOWRITE; 21335b41d924SEric Sandeen else 21345b41d924SEric Sandeen tag = PAGECACHE_TAG_DIRTY; 21355b41d924SEric Sandeen 213672f84e65SEric Sandeen *done_index = index; 21374f01b02cSTheodore Ts'o while (index <= end) { 21385b41d924SEric Sandeen nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 21398e48dcfbSTheodore Ts'o min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 21408e48dcfbSTheodore Ts'o if (nr_pages == 0) 21414f01b02cSTheodore Ts'o return 0; 21428e48dcfbSTheodore Ts'o 21438e48dcfbSTheodore Ts'o for (i = 0; i < nr_pages; i++) { 21448e48dcfbSTheodore Ts'o struct page *page = pvec.pages[i]; 21458e48dcfbSTheodore Ts'o 21468e48dcfbSTheodore Ts'o /* 21478e48dcfbSTheodore Ts'o * At this point, the page may be truncated or 21488e48dcfbSTheodore Ts'o * invalidated (changing page->mapping to NULL), or 21498e48dcfbSTheodore Ts'o * even swizzled back from swapper_space to tmpfs file 21508e48dcfbSTheodore Ts'o * mapping. However, page->index will not change 21518e48dcfbSTheodore Ts'o * because we have a reference on the page. 
21528e48dcfbSTheodore Ts'o */
21534f01b02cSTheodore Ts'o if (page->index > end)
21544f01b02cSTheodore Ts'o goto out;
21558e48dcfbSTheodore Ts'o
215672f84e65SEric Sandeen *done_index = page->index + 1;
215772f84e65SEric Sandeen
215878aaced3STheodore Ts'o /*
215978aaced3STheodore Ts'o * If we can't merge this page, and we have
216078aaced3STheodore Ts'o * accumulated a contiguous region, write it
216178aaced3STheodore Ts'o */
216278aaced3STheodore Ts'o if ((mpd->next_page != page->index) &&
216378aaced3STheodore Ts'o (mpd->next_page != mpd->first_page)) {
216478aaced3STheodore Ts'o mpage_da_map_and_submit(mpd);
216578aaced3STheodore Ts'o goto ret_extent_tail;
216678aaced3STheodore Ts'o }
216778aaced3STheodore Ts'o
21688e48dcfbSTheodore Ts'o lock_page(page);
21698e48dcfbSTheodore Ts'o
21708e48dcfbSTheodore Ts'o /*
21714f01b02cSTheodore Ts'o * If the page is no longer dirty, or its
21724f01b02cSTheodore Ts'o * mapping no longer corresponds to the inode we
21734f01b02cSTheodore Ts'o * are writing (which means it has been
21744f01b02cSTheodore Ts'o * truncated or invalidated), or the page is
21754f01b02cSTheodore Ts'o * already under writeback and we are not
21764f01b02cSTheodore Ts'o * doing a data integrity writeback, skip the page
21778e48dcfbSTheodore Ts'o */
21784f01b02cSTheodore Ts'o if (!PageDirty(page) ||
21794f01b02cSTheodore Ts'o (PageWriteback(page) &&
21804f01b02cSTheodore Ts'o (wbc->sync_mode == WB_SYNC_NONE)) ||
21814f01b02cSTheodore Ts'o unlikely(page->mapping != mapping)) {
21828e48dcfbSTheodore Ts'o unlock_page(page);
21838e48dcfbSTheodore Ts'o continue;
21848e48dcfbSTheodore Ts'o }
21858e48dcfbSTheodore Ts'o
21868e48dcfbSTheodore Ts'o wait_on_page_writeback(page);
21878e48dcfbSTheodore Ts'o BUG_ON(PageWriteback(page));
21888e48dcfbSTheodore Ts'o
21899c3569b5STao Ma /*
21909c3569b5STao Ma * If we have inline data and arrive here, it means that
21919c3569b5STao Ma * we will soon create the block for the 1st page, so
21929c3569b5STao Ma * we'd better clear the inline data here.
21939c3569b5STao Ma */
21949c3569b5STao Ma if (ext4_has_inline_data(inode)) {
21959c3569b5STao Ma BUG_ON(ext4_test_inode_state(inode,
21969c3569b5STao Ma EXT4_STATE_MAY_INLINE_DATA));
21979c3569b5STao Ma ext4_destroy_inline_data(handle, inode);
21989c3569b5STao Ma }
21999c3569b5STao Ma
2200168fc022STheodore Ts'o if (mpd->next_page != page->index)
22018eb9e5ceSTheodore Ts'o mpd->first_page = page->index;
22028eb9e5ceSTheodore Ts'o mpd->next_page = page->index + 1;
22038eb9e5ceSTheodore Ts'o logical = (sector_t) page->index <<
22048eb9e5ceSTheodore Ts'o (PAGE_CACHE_SHIFT - inode->i_blkbits);
22058eb9e5ceSTheodore Ts'o
22068eb9e5ceSTheodore Ts'o if (!page_has_buffers(page)) {
22074f01b02cSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical,
22084f01b02cSTheodore Ts'o PAGE_CACHE_SIZE,
22098eb9e5ceSTheodore Ts'o (1 << BH_Dirty) | (1 << BH_Uptodate));
22104f01b02cSTheodore Ts'o if (mpd->io_done)
22114f01b02cSTheodore Ts'o goto ret_extent_tail;
22128e48dcfbSTheodore Ts'o } else {
22138eb9e5ceSTheodore Ts'o /*
22144f01b02cSTheodore Ts'o * Page with regular buffer heads,
22154f01b02cSTheodore Ts'o * just add all dirty ones
22168eb9e5ceSTheodore Ts'o */
22178eb9e5ceSTheodore Ts'o head = page_buffers(page);
22188eb9e5ceSTheodore Ts'o bh = head;
22198eb9e5ceSTheodore Ts'o do {
22208eb9e5ceSTheodore Ts'o BUG_ON(buffer_locked(bh));
22218eb9e5ceSTheodore Ts'o /*
22228eb9e5ceSTheodore Ts'o * We need to try to allocate
22238eb9e5ceSTheodore Ts'o * unmapped blocks in the same page.
22248eb9e5ceSTheodore Ts'o * Otherwise we won't make progress 22258eb9e5ceSTheodore Ts'o * with the page in ext4_writepage 22268eb9e5ceSTheodore Ts'o */ 22278eb9e5ceSTheodore Ts'o if (ext4_bh_delay_or_unwritten(NULL, bh)) { 22288eb9e5ceSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical, 22298eb9e5ceSTheodore Ts'o bh->b_size, 22308eb9e5ceSTheodore Ts'o bh->b_state); 22314f01b02cSTheodore Ts'o if (mpd->io_done) 22324f01b02cSTheodore Ts'o goto ret_extent_tail; 22338eb9e5ceSTheodore Ts'o } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 22348eb9e5ceSTheodore Ts'o /* 22354f01b02cSTheodore Ts'o * mapped dirty buffer. We need 22364f01b02cSTheodore Ts'o * to update the b_state 22374f01b02cSTheodore Ts'o * because we look at b_state 22384f01b02cSTheodore Ts'o * in mpage_da_map_blocks. We 22394f01b02cSTheodore Ts'o * don't update b_size because 22404f01b02cSTheodore Ts'o * if we find an unmapped 22414f01b02cSTheodore Ts'o * buffer_head later we need to 22424f01b02cSTheodore Ts'o * use the b_state flag of that 22434f01b02cSTheodore Ts'o * buffer_head. 22448eb9e5ceSTheodore Ts'o */ 22458eb9e5ceSTheodore Ts'o if (mpd->b_size == 0) 22468eb9e5ceSTheodore Ts'o mpd->b_state = bh->b_state & BH_FLAGS; 22478e48dcfbSTheodore Ts'o } 22488eb9e5ceSTheodore Ts'o logical++; 22498eb9e5ceSTheodore Ts'o } while ((bh = bh->b_this_page) != head); 22508e48dcfbSTheodore Ts'o } 22518e48dcfbSTheodore Ts'o 22528e48dcfbSTheodore Ts'o if (nr_to_write > 0) { 22538e48dcfbSTheodore Ts'o nr_to_write--; 22548e48dcfbSTheodore Ts'o if (nr_to_write == 0 && 22554f01b02cSTheodore Ts'o wbc->sync_mode == WB_SYNC_NONE) 22568e48dcfbSTheodore Ts'o /* 22578e48dcfbSTheodore Ts'o * We stop writing back only if we are 22588e48dcfbSTheodore Ts'o * not doing integrity sync. In case of 22598e48dcfbSTheodore Ts'o * integrity sync we have to keep going 22608e48dcfbSTheodore Ts'o * because someone may be concurrently 22618e48dcfbSTheodore Ts'o * dirtying pages, and we might have 22628e48dcfbSTheodore Ts'o * synced a lot of newly appeared dirty 22638e48dcfbSTheodore Ts'o * pages, but have not synced all of the 22648e48dcfbSTheodore Ts'o * old dirty pages. 
22658e48dcfbSTheodore Ts'o */ 22664f01b02cSTheodore Ts'o goto out; 22678e48dcfbSTheodore Ts'o } 22688e48dcfbSTheodore Ts'o } 22698e48dcfbSTheodore Ts'o pagevec_release(&pvec); 22708e48dcfbSTheodore Ts'o cond_resched(); 22718e48dcfbSTheodore Ts'o } 22724f01b02cSTheodore Ts'o return 0; 22734f01b02cSTheodore Ts'o ret_extent_tail: 22744f01b02cSTheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL; 22758eb9e5ceSTheodore Ts'o out: 22768eb9e5ceSTheodore Ts'o pagevec_release(&pvec); 22778eb9e5ceSTheodore Ts'o cond_resched(); 22788e48dcfbSTheodore Ts'o return ret; 22798e48dcfbSTheodore Ts'o } 22808e48dcfbSTheodore Ts'o 22818e48dcfbSTheodore Ts'o 228264769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping, 228364769240SAlex Tomas struct writeback_control *wbc) 228464769240SAlex Tomas { 228522208dedSAneesh Kumar K.V pgoff_t index; 228622208dedSAneesh Kumar K.V int range_whole = 0; 228761628a3fSMingming Cao handle_t *handle = NULL; 2288df22291fSAneesh Kumar K.V struct mpage_da_data mpd; 22895e745b04SAneesh Kumar K.V struct inode *inode = mapping->host; 2290498e5f24STheodore Ts'o int pages_written = 0; 229155138e0bSTheodore Ts'o unsigned int max_pages; 22922acf2c26SAneesh Kumar K.V int range_cyclic, cycled = 1, io_done = 0; 229355138e0bSTheodore Ts'o int needed_blocks, ret = 0; 229455138e0bSTheodore Ts'o long desired_nr_to_write, nr_to_writebump = 0; 2295de89de6eSTheodore Ts'o loff_t range_start = wbc->range_start; 22965e745b04SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 229772f84e65SEric Sandeen pgoff_t done_index = 0; 22985b41d924SEric Sandeen pgoff_t end; 22991bce63d1SShaohua Li struct blk_plug plug; 230061628a3fSMingming Cao 23019bffad1eSTheodore Ts'o trace_ext4_da_writepages(inode, wbc); 2302ba80b101STheodore Ts'o 230361628a3fSMingming Cao /* 230461628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting 230561628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput() 230661628a3fSMingming Cao * because that could violate lock ordering on umount 230761628a3fSMingming Cao */ 2308a1d6cc56SAneesh Kumar K.V if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 230961628a3fSMingming Cao return 0; 23102a21e37eSTheodore Ts'o 23112a21e37eSTheodore Ts'o /* 23122a21e37eSTheodore Ts'o * If the filesystem has aborted, it is read-only, so return 23132a21e37eSTheodore Ts'o * right away instead of dumping stack traces later on that 23142a21e37eSTheodore Ts'o * will obscure the real source of the problem. We test 23154ab2f15bSTheodore Ts'o * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 23162a21e37eSTheodore Ts'o * the latter could be true if the filesystem is mounted 23172a21e37eSTheodore Ts'o * read-only, and in that case, ext4_da_writepages should 23182a21e37eSTheodore Ts'o * *never* be called, so if that ever happens, we would want 23192a21e37eSTheodore Ts'o * the stack trace. 
23202a21e37eSTheodore Ts'o */
23214ab2f15bSTheodore Ts'o if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
23222a21e37eSTheodore Ts'o return -EROFS;
23232a21e37eSTheodore Ts'o
232422208dedSAneesh Kumar K.V if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
232522208dedSAneesh Kumar K.V range_whole = 1;
232661628a3fSMingming Cao
23272acf2c26SAneesh Kumar K.V range_cyclic = wbc->range_cyclic;
23282acf2c26SAneesh Kumar K.V if (wbc->range_cyclic) {
232922208dedSAneesh Kumar K.V index = mapping->writeback_index;
23302acf2c26SAneesh Kumar K.V if (index)
23312acf2c26SAneesh Kumar K.V cycled = 0;
23322acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT;
23332acf2c26SAneesh Kumar K.V wbc->range_end = LLONG_MAX;
23342acf2c26SAneesh Kumar K.V wbc->range_cyclic = 0;
23355b41d924SEric Sandeen end = -1;
23365b41d924SEric Sandeen } else {
233722208dedSAneesh Kumar K.V index = wbc->range_start >> PAGE_CACHE_SHIFT;
23385b41d924SEric Sandeen end = wbc->range_end >> PAGE_CACHE_SHIFT;
23395b41d924SEric Sandeen }
2340a1d6cc56SAneesh Kumar K.V
234155138e0bSTheodore Ts'o /*
234255138e0bSTheodore Ts'o * This works around two forms of stupidity. The first is in
234355138e0bSTheodore Ts'o * the writeback code, which caps the maximum number of pages
234455138e0bSTheodore Ts'o * written to be 1024 pages. This is wrong on multiple
234555138e0bSTheodore Ts'o * levels; different architectures have a different page size,
234655138e0bSTheodore Ts'o * which changes the maximum amount of data which gets
234755138e0bSTheodore Ts'o * written. Secondly, 4 megabytes is way too small. XFS
234855138e0bSTheodore Ts'o * forces this value to be 16 megabytes by multiplying the
234955138e0bSTheodore Ts'o * nr_to_write parameter by four, and then relies on its
235055138e0bSTheodore Ts'o * allocator to allocate larger extents to make them
235155138e0bSTheodore Ts'o * contiguous. Unfortunately this brings us to the second
235255138e0bSTheodore Ts'o * stupidity, which is that ext4's mballoc code only allocates
235355138e0bSTheodore Ts'o * at most 2048 blocks. So we force contiguous writes up to
235455138e0bSTheodore Ts'o * the number of dirty blocks in the inode, or
235555138e0bSTheodore Ts'o * sbi->max_writeback_mb_bump, whichever is smaller.
235655138e0bSTheodore Ts'o */
235755138e0bSTheodore Ts'o max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2358b443e733SEric Sandeen if (!range_cyclic && range_whole) {
2359b443e733SEric Sandeen if (wbc->nr_to_write == LONG_MAX)
2360b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write;
236155138e0bSTheodore Ts'o else
2362b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write * 8;
2363b443e733SEric Sandeen } else
236455138e0bSTheodore Ts'o desired_nr_to_write = ext4_num_dirty_pages(inode, index,
236555138e0bSTheodore Ts'o max_pages);
236655138e0bSTheodore Ts'o if (desired_nr_to_write > max_pages)
236755138e0bSTheodore Ts'o desired_nr_to_write = max_pages;
236855138e0bSTheodore Ts'o
236955138e0bSTheodore Ts'o if (wbc->nr_to_write < desired_nr_to_write) {
237055138e0bSTheodore Ts'o nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
237155138e0bSTheodore Ts'o wbc->nr_to_write = desired_nr_to_write;
237255138e0bSTheodore Ts'o }
237355138e0bSTheodore Ts'o
23742acf2c26SAneesh Kumar K.V retry:
23756e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
23765b41d924SEric Sandeen tag_pages_for_writeback(mapping, index, end);
23775b41d924SEric Sandeen
23781bce63d1SShaohua Li blk_start_plug(&plug);
237922208dedSAneesh Kumar K.V while (!ret && wbc->nr_to_write > 0) {
2380a1d6cc56SAneesh Kumar K.V
2381a1d6cc56SAneesh Kumar K.V /*
2382a1d6cc56SAneesh Kumar K.V * We insert one extent at a time, so we need the
2383a1d6cc56SAneesh Kumar K.V * credits for a single extent allocation.
2384a1d6cc56SAneesh Kumar K.V * Journalled mode is currently not supported
2385a1d6cc56SAneesh Kumar K.V * by delalloc.
2386a1d6cc56SAneesh Kumar K.V */
2387a1d6cc56SAneesh Kumar K.V BUG_ON(ext4_should_journal_data(inode));
2388525f4ed8SMingming Cao needed_blocks = ext4_da_writepages_trans_blocks(inode);
2389a1d6cc56SAneesh Kumar K.V
239061628a3fSMingming Cao /* start a new transaction */
239161628a3fSMingming Cao handle = ext4_journal_start(inode, needed_blocks);
239261628a3fSMingming Cao if (IS_ERR(handle)) {
239361628a3fSMingming Cao ret = PTR_ERR(handle);
23941693918eSTheodore Ts'o ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2395fbe845ddSCurt Wohlgemuth "%ld pages, ino %lu; err %d", __func__,
2396a1d6cc56SAneesh Kumar K.V wbc->nr_to_write, inode->i_ino, ret);
23973c1fcb2cSNamjae Jeon blk_finish_plug(&plug);
239861628a3fSMingming Cao goto out_writepages;
239961628a3fSMingming Cao }
2400f63e6005STheodore Ts'o
2401f63e6005STheodore Ts'o /*
24028eb9e5ceSTheodore Ts'o * Now call write_cache_pages_da() to find the next
2403f63e6005STheodore Ts'o * contiguous region of logical blocks that need
24048eb9e5ceSTheodore Ts'o * blocks to be allocated by ext4 and submit them.
2405f63e6005STheodore Ts'o */
24069c3569b5STao Ma ret = write_cache_pages_da(handle, mapping,
24079c3569b5STao Ma wbc, &mpd, &done_index);
2408f63e6005STheodore Ts'o /*
2409af901ca1SAndré Goddard Rosa * If we have a contiguous extent of pages and we
2410f63e6005STheodore Ts'o * haven't done the I/O yet, map the blocks and submit
2411f63e6005STheodore Ts'o * them for I/O.
2412f63e6005STheodore Ts'o */
2413f63e6005STheodore Ts'o if (!mpd.io_done && mpd.next_page != mpd.first_page) {
24145a87b7a5STheodore Ts'o mpage_da_map_and_submit(&mpd);
2415f63e6005STheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL;
2416f63e6005STheodore Ts'o }
2417b3a3ca8cSTheodore Ts'o trace_ext4_da_write_pages(inode, &mpd);
2418f63e6005STheodore Ts'o wbc->nr_to_write -= mpd.pages_written;
2419df22291fSAneesh Kumar K.V
242061628a3fSMingming Cao ext4_journal_stop(handle);
2421df22291fSAneesh Kumar K.V
24228f64b32eSEric Sandeen if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
242322208dedSAneesh Kumar K.V /* commit the transaction which would
242422208dedSAneesh Kumar K.V * free blocks released in the transaction
242522208dedSAneesh Kumar K.V * and try again
242622208dedSAneesh Kumar K.V */
2427df22291fSAneesh Kumar K.V jbd2_journal_force_commit_nested(sbi->s_journal);
242822208dedSAneesh Kumar K.V ret = 0;
242922208dedSAneesh Kumar K.V } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2430a1d6cc56SAneesh Kumar K.V /*
24318de49e67SKazuya Mio * Got one extent; now try with the rest of the pages.
24328de49e67SKazuya Mio * If mpd.retval is set to -EIO, the journal is aborted,
24338de49e67SKazuya Mio * so we don't need to write any more.
2434a1d6cc56SAneesh Kumar K.V */
243522208dedSAneesh Kumar K.V pages_written += mpd.pages_written;
24368de49e67SKazuya Mio ret = mpd.retval;
24372acf2c26SAneesh Kumar K.V io_done = 1;
243822208dedSAneesh Kumar K.V } else if (wbc->nr_to_write)
243961628a3fSMingming Cao /*
244061628a3fSMingming Cao * There is no more writeout needed,
244161628a3fSMingming Cao * or we requested a nonblocking writeout
244261628a3fSMingming Cao * and found the device congested
244361628a3fSMingming Cao */
244461628a3fSMingming Cao break;
244561628a3fSMingming Cao }
24461bce63d1SShaohua Li blk_finish_plug(&plug);
24472acf2c26SAneesh Kumar K.V if (!io_done && !cycled) {
24482acf2c26SAneesh Kumar K.V cycled = 1;
24492acf2c26SAneesh Kumar K.V index = 0;
24502acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT;
24512acf2c26SAneesh Kumar K.V wbc->range_end = mapping->writeback_index - 1;
24522acf2c26SAneesh Kumar K.V goto retry;
24532acf2c26SAneesh Kumar K.V }
245461628a3fSMingming Cao
245522208dedSAneesh Kumar K.V /* Update index */
24562acf2c26SAneesh Kumar K.V wbc->range_cyclic = range_cyclic;
245722208dedSAneesh Kumar K.V if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
245822208dedSAneesh Kumar K.V /*
245922208dedSAneesh Kumar K.V * set the writeback_index so that range_cyclic
246022208dedSAneesh Kumar K.V * mode will write it back later
246122208dedSAneesh Kumar K.V */
246272f84e65SEric Sandeen mapping->writeback_index = done_index;
2463a1d6cc56SAneesh Kumar K.V
246461628a3fSMingming Cao out_writepages:
246522208dedSAneesh Kumar K.V wbc->nr_to_write -= nr_to_writebump;
2466de89de6eSTheodore Ts'o wbc->range_start = range_start;
24679bffad1eSTheodore Ts'o trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
246861628a3fSMingming Cao return ret;
246964769240SAlex Tomas }
247064769240SAlex Tomas
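/*
 * Illustrative sketch (not part of the original ext4 code): the shape
 * of the two-pass range_cyclic retry used by ext4_da_writepages()
 * above. Pass one scans from mapping->writeback_index to the end of
 * the file; if no I/O was done and the scan did not begin at zero,
 * pass two wraps around to cover the start. write_range() is a
 * hypothetical stand-in for the scan-and-submit loop.
 */
#if 0
	pgoff_t start = mapping->writeback_index;
	pgoff_t end = (pgoff_t)-1;		/* scan to end of file */
	int cycled = (start == 0), io_done = 0;
retry:
	io_done = write_range(mapping, start, end);	/* hypothetical */
	if (!io_done && !cycled) {
		cycled = 1;
		end = start - 1;	/* pass two stops where pass one began */
		start = 0;		/* wrap to the file's start */
		goto retry;
	}
#endif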
247179f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb)
247279f0be8dSAneesh Kumar K.V {
247379f0be8dSAneesh Kumar K.V s64 free_blocks, dirty_blocks;
247479f0be8dSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb);
247579f0be8dSAneesh Kumar K.V
247679f0be8dSAneesh Kumar K.V /*
247779f0be8dSAneesh Kumar K.V * switch to non delalloc mode if we are running low
247879f0be8dSAneesh Kumar K.V * on free blocks. The free block accounting via percpu
2479179f7ebfSEric Dumazet * counters can get slightly wrong with percpu_counter_batch getting
248079f0be8dSAneesh Kumar K.V * accumulated on each CPU without updating global counters.
248179f0be8dSAneesh Kumar K.V * Delalloc needs an accurate free block accounting, so switch
248279f0be8dSAneesh Kumar K.V * to non delalloc when we are near the error range.
248379f0be8dSAneesh Kumar K.V */
248457042651STheodore Ts'o free_blocks = EXT4_C2B(sbi,
248557042651STheodore Ts'o percpu_counter_read_positive(&sbi->s_freeclusters_counter));
248657042651STheodore Ts'o dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
248700d4e736STheodore Ts'o /*
248800d4e736STheodore Ts'o * Start pushing delalloc when 1/2 of free blocks are dirty.
248900d4e736STheodore Ts'o */
249000d4e736STheodore Ts'o if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
249100d4e736STheodore Ts'o !writeback_in_progress(sb->s_bdi) &&
249200d4e736STheodore Ts'o down_read_trylock(&sb->s_umount)) {
249300d4e736STheodore Ts'o writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
249400d4e736STheodore Ts'o up_read(&sb->s_umount);
249500d4e736STheodore Ts'o }
249600d4e736STheodore Ts'o
249779f0be8dSAneesh Kumar K.V if (2 * free_blocks < 3 * dirty_blocks ||
2498df55c99dSTheodore Ts'o free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
249979f0be8dSAneesh Kumar K.V /*
2500c8afb446SEric Sandeen * free block count is less than 150% of dirty blocks,
2501c8afb446SEric Sandeen * or free blocks are less than the watermark
250279f0be8dSAneesh Kumar K.V */
250379f0be8dSAneesh Kumar K.V return 1;
250479f0be8dSAneesh Kumar K.V }
250579f0be8dSAneesh Kumar K.V return 0;
250679f0be8dSAneesh Kumar K.V }
250779f0be8dSAneesh Kumar K.V
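/*
 * Illustrative worked example (not part of the original ext4 code) of
 * the thresholds ext4_nonda_switch() checks above. With, say,
 * free_blocks = 140 and dirty_blocks = 100:
 *
 *   2 * 140 < 3 * 100   (280 < 300)
 *
 * so free space is below 150% of the dirty (delalloc-reserved) blocks
 * and the function returns 1, falling back to non-delalloc writes;
 * background writeback has already been kicked once dirty blocks
 * exceeded half of the free blocks (free < 2 * dirty).
 */
#if 0
	if (2 * free_blocks < 3 * dirty_blocks ||
	    free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK))
		return 1;	/* low on space: use the nonda path */
#endif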
250864769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
250964769240SAlex Tomas loff_t pos, unsigned len, unsigned flags,
251064769240SAlex Tomas struct page **pagep, void **fsdata)
251164769240SAlex Tomas {
251272b8ab9dSEric Sandeen int ret, retries = 0;
251364769240SAlex Tomas struct page *page;
251464769240SAlex Tomas pgoff_t index;
251564769240SAlex Tomas struct inode *inode = mapping->host;
251664769240SAlex Tomas handle_t *handle;
251764769240SAlex Tomas
251864769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT;
251979f0be8dSAneesh Kumar K.V
252079f0be8dSAneesh Kumar K.V if (ext4_nonda_switch(inode->i_sb)) {
252179f0be8dSAneesh Kumar K.V *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
252279f0be8dSAneesh Kumar K.V return ext4_write_begin(file, mapping, pos,
252379f0be8dSAneesh Kumar K.V len, flags, pagep, fsdata);
252479f0be8dSAneesh Kumar K.V }
252579f0be8dSAneesh Kumar K.V *fsdata = (void *)0;
25269bffad1eSTheodore Ts'o trace_ext4_da_write_begin(inode, pos, len, flags);
25279c3569b5STao Ma
25289c3569b5STao Ma if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
25299c3569b5STao Ma ret = ext4_da_write_inline_data_begin(mapping, inode,
25309c3569b5STao Ma pos, len, flags,
25319c3569b5STao Ma pagep, fsdata);
25329c3569b5STao Ma if (ret < 0)
25339c3569b5STao Ma goto out;
25349c3569b5STao Ma if (ret == 1) {
25359c3569b5STao Ma ret = 0;
25369c3569b5STao Ma goto out;
25379c3569b5STao Ma }
25389c3569b5STao Ma }
25399c3569b5STao Ma
2540d2a17637SMingming Cao retry:
254164769240SAlex Tomas /*
254264769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update
254364769240SAlex Tomas * if there is delayed block allocation. But we still need
254464769240SAlex Tomas * to journal the i_disksize update if we write to the end
254564769240SAlex Tomas * of a file which has an already mapped buffer.
254664769240SAlex Tomas */
254764769240SAlex Tomas handle = ext4_journal_start(inode, 1);
254864769240SAlex Tomas if (IS_ERR(handle)) {
254964769240SAlex Tomas ret = PTR_ERR(handle);
255064769240SAlex Tomas goto out;
255164769240SAlex Tomas }
2552ebd3610bSJan Kara /* We cannot recurse into the filesystem as the transaction is already
2553ebd3610bSJan Kara * started */
2554ebd3610bSJan Kara flags |= AOP_FLAG_NOFS;
255564769240SAlex Tomas
255654566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags);
2557d5a0d4f7SEric Sandeen if (!page) {
2558d5a0d4f7SEric Sandeen ext4_journal_stop(handle);
2559d5a0d4f7SEric Sandeen ret = -ENOMEM;
2560d5a0d4f7SEric Sandeen goto out;
2561d5a0d4f7SEric Sandeen }
256264769240SAlex Tomas *pagep = page;
256364769240SAlex Tomas
25646e1db88dSChristoph Hellwig ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
256564769240SAlex Tomas if (ret < 0) {
256664769240SAlex Tomas unlock_page(page);
256764769240SAlex Tomas ext4_journal_stop(handle);
256864769240SAlex Tomas page_cache_release(page);
2569ae4d5372SAneesh Kumar K.V /*
2570ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks
2571ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need
2572ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex.
2573ae4d5372SAneesh Kumar K.V */
2574ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size)
2575b9a4207dSJan Kara ext4_truncate_failed_write(inode);
257664769240SAlex Tomas }
257764769240SAlex Tomas
2578d2a17637SMingming Cao if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2579d2a17637SMingming Cao goto retry;
258064769240SAlex Tomas out:
258164769240SAlex Tomas return ret;
258264769240SAlex Tomas }
258364769240SAlex Tomas
2584632eaeabSMingming Cao /*
2585632eaeabSMingming Cao * Check if we should update i_disksize
2586632eaeabSMingming Cao * when writing to the end of a file that does not require block allocation
2587632eaeabSMingming Cao */
2588632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page,
2589632eaeabSMingming Cao unsigned long offset)
2590632eaeabSMingming Cao {
2591632eaeabSMingming Cao struct buffer_head *bh;
2592632eaeabSMingming Cao struct inode *inode = page->mapping->host;
2593632eaeabSMingming Cao unsigned int idx;
2594632eaeabSMingming Cao int i;
2595632eaeabSMingming Cao
2596632eaeabSMingming Cao bh = page_buffers(page);
2597632eaeabSMingming Cao idx = offset >> inode->i_blkbits;
2598632eaeabSMingming Cao
2599632eaeabSMingming Cao for (i = 0; i < idx; i++)
2600632eaeabSMingming Cao bh = bh->b_this_page;
2601632eaeabSMingming Cao
260229fa89d0SAneesh Kumar K.V if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2603632eaeabSMingming Cao return 0;
2604632eaeabSMingming Cao return 1;
2605632eaeabSMingming Cao }
2606632eaeabSMingming Cao
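/*
 * Illustrative sketch (not part of the original ext4 code): the
 * intended use of ext4_da_should_update_i_disksize() above. After a
 * delalloc write copies data into the page cache, i_disksize may only
 * be pushed forward when the buffer at the end of the written range is
 * already mapped (no delayed or unwritten allocation pending). The
 * variables mirror those in ext4_da_write_end() below.
 */
#if 0
	loff_t new_i_size = pos + copied;
	unsigned long end = (pos & (PAGE_CACHE_SIZE - 1)) + copied - 1;

	if (copied && new_i_size > EXT4_I(inode)->i_disksize &&
	    ext4_da_should_update_i_disksize(page, end)) {
		/* safe to extend the on-disk size under i_data_sem */
	}
#endif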
260764769240SAlex Tomas static int ext4_da_write_end(struct file *file,
260864769240SAlex Tomas struct address_space *mapping,
260964769240SAlex Tomas loff_t pos, unsigned len, unsigned copied,
261064769240SAlex Tomas struct page *page, void *fsdata)
261164769240SAlex Tomas {
261264769240SAlex Tomas struct inode *inode = mapping->host;
261364769240SAlex Tomas int ret = 0, ret2;
261464769240SAlex Tomas handle_t *handle = ext4_journal_current_handle();
261564769240SAlex Tomas loff_t new_i_size;
2616632eaeabSMingming Cao unsigned long start, end;
261779f0be8dSAneesh Kumar K.V int write_mode = (int)(unsigned long)fsdata;
261879f0be8dSAneesh Kumar K.V
261979f0be8dSAneesh Kumar K.V if (write_mode == FALL_BACK_TO_NONDELALLOC) {
26203d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) {
26213d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE:
262279f0be8dSAneesh Kumar K.V return ext4_ordered_write_end(file, mapping, pos,
262379f0be8dSAneesh Kumar K.V len, copied, page, fsdata);
26243d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE:
262579f0be8dSAneesh Kumar K.V return ext4_writeback_write_end(file, mapping, pos,
262679f0be8dSAneesh Kumar K.V len, copied, page, fsdata);
26273d2b1582SLukas Czerner default:
262879f0be8dSAneesh Kumar K.V BUG();
262979f0be8dSAneesh Kumar K.V }
263079f0be8dSAneesh Kumar K.V }
2631632eaeabSMingming Cao
26329bffad1eSTheodore Ts'o trace_ext4_da_write_end(inode, pos, len, copied);
2633632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1);
2634632eaeabSMingming Cao end = start + copied - 1;
263564769240SAlex Tomas
263564769240SAlex Tomas /*
263764769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size
263864769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty
263964769240SAlex Tomas * into that.
264064769240SAlex Tomas */
264164769240SAlex Tomas new_i_size = pos + copied;
2642ea51d132SAndrea Arcangeli if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
26439c3569b5STao Ma if (ext4_has_inline_data(inode) ||
26449c3569b5STao Ma ext4_da_should_update_i_disksize(page, end)) {
2645632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem);
2646f3b59291STheodore Ts'o if (new_i_size > EXT4_I(inode)->i_disksize)
264764769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size;
2648632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem);
2649cf17fea6SAneesh Kumar K.V /* We need to mark the inode dirty even if
2650cf17fea6SAneesh Kumar K.V * new_i_size is less than inode->i_size
2651cf17fea6SAneesh Kumar K.V * but greater than i_disksize. (hint: delalloc)
2652cf17fea6SAneesh Kumar K.V */
2653cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode);
2654632eaeabSMingming Cao }
2655632eaeabSMingming Cao }
26569c3569b5STao Ma
26579c3569b5STao Ma if (write_mode != CONVERT_INLINE_DATA &&
26589c3569b5STao Ma ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
26599c3569b5STao Ma ext4_has_inline_data(inode))
26609c3569b5STao Ma ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
26619c3569b5STao Ma page);
26629c3569b5STao Ma else
266364769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied,
266464769240SAlex Tomas page, fsdata);
26659c3569b5STao Ma
266664769240SAlex Tomas copied = ret2;
266764769240SAlex Tomas if (ret2 < 0)
266864769240SAlex Tomas ret = ret2;
266964769240SAlex Tomas ret2 = ext4_journal_stop(handle);
267064769240SAlex Tomas if (!ret)
267164769240SAlex Tomas ret = ret2;
267264769240SAlex Tomas
267364769240SAlex Tomas return ret ?
ret : copied; 267464769240SAlex Tomas } 267564769240SAlex Tomas 267664769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 267764769240SAlex Tomas { 267864769240SAlex Tomas /* 267964769240SAlex Tomas * Drop reserved blocks 268064769240SAlex Tomas */ 268164769240SAlex Tomas BUG_ON(!PageLocked(page)); 268264769240SAlex Tomas if (!page_has_buffers(page)) 268364769240SAlex Tomas goto out; 268464769240SAlex Tomas 2685d2a17637SMingming Cao ext4_da_page_release_reservation(page, offset); 268664769240SAlex Tomas 268764769240SAlex Tomas out: 268864769240SAlex Tomas ext4_invalidatepage(page, offset); 268964769240SAlex Tomas 269064769240SAlex Tomas return; 269164769240SAlex Tomas } 269264769240SAlex Tomas 2693ccd2506bSTheodore Ts'o /* 2694ccd2506bSTheodore Ts'o * Force all delayed allocation blocks to be allocated for a given inode. 2695ccd2506bSTheodore Ts'o */ 2696ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode) 2697ccd2506bSTheodore Ts'o { 2698fb40ba0dSTheodore Ts'o trace_ext4_alloc_da_blocks(inode); 2699fb40ba0dSTheodore Ts'o 2700ccd2506bSTheodore Ts'o if (!EXT4_I(inode)->i_reserved_data_blocks && 2701ccd2506bSTheodore Ts'o !EXT4_I(inode)->i_reserved_meta_blocks) 2702ccd2506bSTheodore Ts'o return 0; 2703ccd2506bSTheodore Ts'o 2704ccd2506bSTheodore Ts'o /* 2705ccd2506bSTheodore Ts'o * We do something simple for now. The filemap_flush() will 2706ccd2506bSTheodore Ts'o * also start triggering a write of the data blocks, which is 2707ccd2506bSTheodore Ts'o * not strictly speaking necessary (and for users of 2708ccd2506bSTheodore Ts'o * laptop_mode, not even desirable). However, to do otherwise 2709ccd2506bSTheodore Ts'o * would require replicating code paths in: 2710ccd2506bSTheodore Ts'o * 2711ccd2506bSTheodore Ts'o * ext4_da_writepages() -> 2712ccd2506bSTheodore Ts'o * write_cache_pages() ---> (via passed in callback function) 2713ccd2506bSTheodore Ts'o * __mpage_da_writepage() --> 2714ccd2506bSTheodore Ts'o * mpage_add_bh_to_extent() 2715ccd2506bSTheodore Ts'o * mpage_da_map_blocks() 2716ccd2506bSTheodore Ts'o * 2717ccd2506bSTheodore Ts'o * The problem is that write_cache_pages(), located in 2718ccd2506bSTheodore Ts'o * mm/page-writeback.c, marks pages clean in preparation for 2719ccd2506bSTheodore Ts'o * doing I/O, which is not desirable if we're not planning on 2720ccd2506bSTheodore Ts'o * doing I/O at all. 2721ccd2506bSTheodore Ts'o * 2722ccd2506bSTheodore Ts'o * We could call write_cache_pages(), and then redirty all of 2723380cf090SWu Fengguang * the pages by calling redirty_page_for_writepage() but that 2724ccd2506bSTheodore Ts'o * would be ugly in the extreme. So instead we would need to 2725ccd2506bSTheodore Ts'o * replicate parts of the code in the above functions, 272625985edcSLucas De Marchi * simplifying them because we wouldn't actually intend to 2727ccd2506bSTheodore Ts'o * write out the pages, but rather only collect contiguous 2728ccd2506bSTheodore Ts'o * logical block extents, call the multi-block allocator, and 2729ccd2506bSTheodore Ts'o * then update the buffer heads with the block allocations. 2730ccd2506bSTheodore Ts'o * 2731ccd2506bSTheodore Ts'o * For now, though, we'll cheat by calling filemap_flush(), 2732ccd2506bSTheodore Ts'o * which will map the blocks, and start the I/O, but not 2733ccd2506bSTheodore Ts'o * actually wait for the I/O to complete. 
2734ccd2506bSTheodore Ts'o */
2735ccd2506bSTheodore Ts'o return filemap_flush(inode->i_mapping);
2736ccd2506bSTheodore Ts'o }
273764769240SAlex Tomas
273864769240SAlex Tomas /*
2739ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by
2740ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data.
2741ac27a0ecSDave Kleikamp *
2742ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the
2743617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling
2744ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the
2745ac27a0ecSDave Kleikamp * data getting swapped to that swapfile suddenly gets overwritten by
2746ac27a0ecSDave Kleikamp * the original zeros written out previously to the journal and
2747ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache.
2748ac27a0ecSDave Kleikamp *
2749ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file,
2750ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache.
2751ac27a0ecSDave Kleikamp */
2752617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2753ac27a0ecSDave Kleikamp {
2754ac27a0ecSDave Kleikamp struct inode *inode = mapping->host;
2755ac27a0ecSDave Kleikamp journal_t *journal;
2756ac27a0ecSDave Kleikamp int err;
2757ac27a0ecSDave Kleikamp
275846c7f254STao Ma /*
275946c7f254STao Ma * We can get here for an inline file via the FIBMAP ioctl
276046c7f254STao Ma */
276146c7f254STao Ma if (ext4_has_inline_data(inode))
276246c7f254STao Ma return 0;
276346c7f254STao Ma
276464769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
276564769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) {
276664769240SAlex Tomas /*
276764769240SAlex Tomas * With delalloc we want to sync the file
276864769240SAlex Tomas * so that we can make sure we allocate
276964769240SAlex Tomas * blocks for the file
277064769240SAlex Tomas */
277164769240SAlex Tomas filemap_write_and_wait(mapping);
277264769240SAlex Tomas }
277364769240SAlex Tomas
277419f5fb7aSTheodore Ts'o if (EXT4_JOURNAL(inode) &&
277519f5fb7aSTheodore Ts'o ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2776ac27a0ecSDave Kleikamp /*
2777ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of
2778ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare:
2779ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file
2780ac27a0ecSDave Kleikamp * do we expect this to happen.
2781ac27a0ecSDave Kleikamp *
2782ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not
2783ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be
2784ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at
2785ac27a0ecSDave Kleikamp * will.)
2786ac27a0ecSDave Kleikamp *
2787617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than
2788ac27a0ecSDave Kleikamp * regular files. If somebody wants to bmap a directory
2789ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer
2790ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve
2791ac27a0ecSDave Kleikamp * everything they get.
2792ac27a0ecSDave Kleikamp */
2793ac27a0ecSDave Kleikamp
279419f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2795617ba13bSMingming Cao journal = EXT4_JOURNAL(inode);
2796dab291afSMingming Cao jbd2_journal_lock_updates(journal);
2797dab291afSMingming Cao err = jbd2_journal_flush(journal);
2798dab291afSMingming Cao jbd2_journal_unlock_updates(journal);
2799ac27a0ecSDave Kleikamp
2800ac27a0ecSDave Kleikamp if (err)
2801ac27a0ecSDave Kleikamp return 0;
2802ac27a0ecSDave Kleikamp }
2803ac27a0ecSDave Kleikamp
2804617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block);
2805ac27a0ecSDave Kleikamp }
2806ac27a0ecSDave Kleikamp
2807617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page)
2808ac27a0ecSDave Kleikamp {
280946c7f254STao Ma int ret = -EAGAIN;
281046c7f254STao Ma struct inode *inode = page->mapping->host;
281146c7f254STao Ma
28120562e0baSJiaying Zhang trace_ext4_readpage(page);
281346c7f254STao Ma
281446c7f254STao Ma if (ext4_has_inline_data(inode))
281546c7f254STao Ma ret = ext4_readpage_inline(inode, page);
281646c7f254STao Ma
281746c7f254STao Ma if (ret == -EAGAIN)
2818617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block);
281946c7f254STao Ma
282046c7f254STao Ma return ret;
2821ac27a0ecSDave Kleikamp }
2822ac27a0ecSDave Kleikamp
2823ac27a0ecSDave Kleikamp static int
2824617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping,
2825ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages)
2826ac27a0ecSDave Kleikamp {
282746c7f254STao Ma struct inode *inode = mapping->host;
282846c7f254STao Ma
282946c7f254STao Ma /* If the file has inline data, no need to do readpages. */
283046c7f254STao Ma if (ext4_has_inline_data(inode))
283146c7f254STao Ma return 0;
283246c7f254STao Ma
2833617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2834ac27a0ecSDave Kleikamp }
2835ac27a0ecSDave Kleikamp
2836617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset)
2837ac27a0ecSDave Kleikamp {
28380562e0baSJiaying Zhang trace_ext4_invalidatepage(page, offset);
28390562e0baSJiaying Zhang
28404520fb3cSJan Kara /* No journalling happens on data buffers when this function is used */
28414520fb3cSJan Kara WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
28424520fb3cSJan Kara
28434520fb3cSJan Kara block_invalidatepage(page, offset);
28444520fb3cSJan Kara }
28454520fb3cSJan Kara
284653e87268SJan Kara static int __ext4_journalled_invalidatepage(struct page *page,
28474520fb3cSJan Kara unsigned long offset)
28484520fb3cSJan Kara {
28494520fb3cSJan Kara journal_t *journal = EXT4_JOURNAL(page->mapping->host);
28504520fb3cSJan Kara
28514520fb3cSJan Kara trace_ext4_journalled_invalidatepage(page, offset);
28524520fb3cSJan Kara
2853744692dcSJiaying Zhang /*
2854ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying
2855ac27a0ecSDave Kleikamp */
2856ac27a0ecSDave Kleikamp if (offset == 0)
2857ac27a0ecSDave Kleikamp ClearPageChecked(page);
2858ac27a0ecSDave Kleikamp
285953e87268SJan Kara return jbd2_journal_invalidatepage(journal, page, offset);
286053e87268SJan Kara }
286153e87268SJan Kara
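/*
 * Illustrative sketch (not part of the original ext4 code): how the
 * VFS reaches the invalidatepage hooks above during truncation
 * (simplified; the real caller lives in mm/truncate.c). A partial
 * invalidate (offset != 0) keeps the page's "pending dirty" mark; a
 * full invalidate clears it via ClearPageChecked().
 */
#if 0
static void example_invalidate(struct page *page, unsigned long offset)
{
	if (page_has_private(page))
		do_invalidatepage(page, offset); /* -> a_ops->invalidatepage */
}
#endif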
286253e87268SJan Kara /* Wrapper for aops... */
286353e87268SJan Kara static void ext4_journalled_invalidatepage(struct page *page,
286453e87268SJan Kara unsigned long offset)
286553e87268SJan Kara {
286653e87268SJan Kara WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
2867ac27a0ecSDave Kleikamp }
2868ac27a0ecSDave Kleikamp
2869617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait)
2870ac27a0ecSDave Kleikamp {
2871617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2872ac27a0ecSDave Kleikamp
28730562e0baSJiaying Zhang trace_ext4_releasepage(page);
28740562e0baSJiaying Zhang
2875ac27a0ecSDave Kleikamp WARN_ON(PageChecked(page));
2876ac27a0ecSDave Kleikamp if (!page_has_buffers(page))
2877ac27a0ecSDave Kleikamp return 0;
28780390131bSFrank Mayhar if (journal)
2879dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait);
28800390131bSFrank Mayhar else
28810390131bSFrank Mayhar return try_to_free_buffers(page);
2882ac27a0ecSDave Kleikamp }
2883ac27a0ecSDave Kleikamp
2884ac27a0ecSDave Kleikamp /*
28852ed88685STheodore Ts'o * ext4_get_block used when preparing for a DIO write or buffer write.
28862ed88685STheodore Ts'o * We allocate an uninitialized extent if blocks haven't been allocated.
28872ed88685STheodore Ts'o * The extent will be converted to initialized after the IO is complete.
28882ed88685STheodore Ts'o */
2889f19d5870STao Ma int ext4_get_block_write(struct inode *inode, sector_t iblock,
28904c0425ffSMingming Cao struct buffer_head *bh_result, int create)
28914c0425ffSMingming Cao {
2892c7064ef1SJiaying Zhang ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
28938d5d02e6SMingming Cao inode->i_ino, create);
28942ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result,
28952ed88685STheodore Ts'o EXT4_GET_BLOCKS_IO_CREATE_EXT);
28964c0425ffSMingming Cao }
28974c0425ffSMingming Cao
2898729f52c6SZheng Liu static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
28998b0f165fSAnatol Pomozov struct buffer_head *bh_result, int create)
2900729f52c6SZheng Liu {
29018b0f165fSAnatol Pomozov ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
29028b0f165fSAnatol Pomozov inode->i_ino, create);
29038b0f165fSAnatol Pomozov return _ext4_get_block(inode, iblock, bh_result,
29048b0f165fSAnatol Pomozov EXT4_GET_BLOCKS_NO_LOCK);
2905729f52c6SZheng Liu }
2906729f52c6SZheng Liu
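/*
 * Illustrative sketch (not part of the original ext4 code): the
 * get_block_t contract that the two helpers above fulfil. The generic
 * direct-I/O code passes in a zeroed buffer_head; the callback looks
 * up (or, when 'create' is set, allocates) the block at 'iblock' and
 * fills in the mapping. example_get_block() and example_lookup_block()
 * are hypothetical names.
 */
#if 0
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	sector_t phys;

	phys = example_lookup_block(inode, iblock, create);	/* hypothetical */
	if (!phys)
		return 0;	/* a hole: leave bh_result unmapped */

	map_bh(bh_result, inode->i_sb, phys);
	return 0;
}
#endif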
29074c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2908552ef802SChristoph Hellwig ssize_t size, void *private, int ret,
2909552ef802SChristoph Hellwig bool is_async)
29104c0425ffSMingming Cao {
291172c5052dSChristoph Hellwig struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
29124c0425ffSMingming Cao ext4_io_end_t *io_end = iocb->private;
29134c0425ffSMingming Cao
29144b70df18SMingming /* if not async direct IO or dio with 0 bytes write, just return */
29154b70df18SMingming if (!io_end || !size)
2916552ef802SChristoph Hellwig goto out;
29174b70df18SMingming
29188d5d02e6SMingming Cao ext_debug("ext4_end_io_dio(): io_end 0x%p "
2919ace36ad4SJoe Perches "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
29208d5d02e6SMingming Cao iocb->private, io_end->inode->i_ino, iocb, offset,
29218d5d02e6SMingming Cao size);
29228d5d02e6SMingming Cao
2923b5a7e970STheodore Ts'o iocb->private = NULL;
2924b5a7e970STheodore Ts'o
29258d5d02e6SMingming Cao /* if not aio dio with unwritten extents, just free io and return */
2926bd2d0210STheodore Ts'o if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
29278d5d02e6SMingming Cao ext4_free_io_end(io_end);
29285b3ff237Sjiayingz@google.com (Jiaying Zhang) out:
29295b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async)
29305b3ff237Sjiayingz@google.com (Jiaying Zhang) aio_complete(iocb, ret, 0);
293172c5052dSChristoph Hellwig inode_dio_done(inode);
29325b3ff237Sjiayingz@google.com (Jiaying Zhang) return;
29338d5d02e6SMingming Cao }
29348d5d02e6SMingming Cao
29354c0425ffSMingming Cao io_end->offset = offset;
29364c0425ffSMingming Cao io_end->size = size;
29375b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) {
29385b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->iocb = iocb;
29395b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->result = ret;
29405b3ff237Sjiayingz@google.com (Jiaying Zhang) }
29414c0425ffSMingming Cao
294228a535f9SDmitry Monakhov ext4_add_complete_io(io_end);
29434c0425ffSMingming Cao }
2944c7064ef1SJiaying Zhang
29454c0425ffSMingming Cao /*
29464c0425ffSMingming Cao * For ext4 extent files, ext4 will do direct-io write to holes,
29474c0425ffSMingming Cao * preallocated extents, and writes that extend the file, with no need to
29484c0425ffSMingming Cao * fall back to buffered IO.
29494c0425ffSMingming Cao *
2950b595076aSUwe Kleine-König * For holes, we fallocate those blocks, mark them as uninitialized.
295169c499d1STheodore Ts'o * If those blocks were preallocated, we make sure they are split, but
2952b595076aSUwe Kleine-König * still keep the range to write as uninitialized.
29534c0425ffSMingming Cao *
295469c499d1STheodore Ts'o * The unwritten extents will be converted to written when DIO is completed.
29558d5d02e6SMingming Cao * For async direct IO, since the IO may still be pending when we return, we
295625985edcSLucas De Marchi * set up an end_io callback function, which will do the conversion
29578d5d02e6SMingming Cao * when the async direct IO has completed.
29584c0425ffSMingming Cao *
29594c0425ffSMingming Cao * If the O_DIRECT write will extend the file then add this inode to the
29604c0425ffSMingming Cao * orphan list. So recovery will truncate it back to the original size
29614c0425ffSMingming Cao * if the machine crashes during the write.
29624c0425ffSMingming Cao *
29634c0425ffSMingming Cao */
29644c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
29654c0425ffSMingming Cao const struct iovec *iov, loff_t offset,
29664c0425ffSMingming Cao unsigned long nr_segs)
29674c0425ffSMingming Cao {
29684c0425ffSMingming Cao struct file *file = iocb->ki_filp;
29694c0425ffSMingming Cao struct inode *inode = file->f_mapping->host;
29704c0425ffSMingming Cao ssize_t ret;
29714c0425ffSMingming Cao size_t count = iov_length(iov, nr_segs);
2972729f52c6SZheng Liu int overwrite = 0;
29738b0f165fSAnatol Pomozov get_block_t *get_block_func = NULL;
29748b0f165fSAnatol Pomozov int dio_flags = 0;
297569c499d1STheodore Ts'o loff_t final_size = offset + count;
297669c499d1STheodore Ts'o
297769c499d1STheodore Ts'o /* Use the old path for reads and writes beyond i_size.
*/
297869c499d1STheodore Ts'o if (rw != WRITE || final_size > inode->i_size)
297969c499d1STheodore Ts'o return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
2980729f52c6SZheng Liu
29814bd809dbSZheng Liu BUG_ON(iocb->private == NULL);
29824bd809dbSZheng Liu
29834bd809dbSZheng Liu /* If we do an overwrite dio, i_mutex locking can be released */
29844bd809dbSZheng Liu overwrite = *((int *)iocb->private);
29844bd809dbSZheng Liu
29864bd809dbSZheng Liu if (overwrite) {
29871f555cfaSDmitry Monakhov atomic_inc(&inode->i_dio_count);
29884bd809dbSZheng Liu down_read(&EXT4_I(inode)->i_data_sem);
29894bd809dbSZheng Liu mutex_unlock(&inode->i_mutex);
29904bd809dbSZheng Liu }
29914bd809dbSZheng Liu
29924c0425ffSMingming Cao /*
29938d5d02e6SMingming Cao * We could direct write to holes and fallocate.
29948d5d02e6SMingming Cao *
299569c499d1STheodore Ts'o * Allocated blocks to fill the hole are marked as
299669c499d1STheodore Ts'o * uninitialized to prevent a parallel buffered read from exposing
299769c499d1STheodore Ts'o * the stale data before the DIO completes the data IO.
29988d5d02e6SMingming Cao *
299969c499d1STheodore Ts'o * As to previously fallocated extents, ext4 get_block will
300069c499d1STheodore Ts'o * just simply mark the buffer mapped but still keep the
300169c499d1STheodore Ts'o * extents uninitialized.
30024c0425ffSMingming Cao *
300369c499d1STheodore Ts'o * For the non-AIO case, we will convert those unwritten extents
30048d5d02e6SMingming Cao * to written after returning from blockdev_direct_IO.
30054c0425ffSMingming Cao *
300669c499d1STheodore Ts'o * For async DIO, the conversion needs to be deferred when the
300769c499d1STheodore Ts'o * IO is completed. The ext4 end_io callback function will be
300869c499d1STheodore Ts'o * called to take care of the conversion work. Here for the async
300969c499d1STheodore Ts'o * case, we allocate an io_end structure to hook to the iocb.
30104c0425ffSMingming Cao */
30118d5d02e6SMingming Cao iocb->private = NULL;
3012f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, NULL);
30138d5d02e6SMingming Cao if (!is_sync_kiocb(iocb)) {
301469c499d1STheodore Ts'o ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
30154bd809dbSZheng Liu if (!io_end) {
30164bd809dbSZheng Liu ret = -ENOMEM;
30174bd809dbSZheng Liu goto retake_lock;
30184bd809dbSZheng Liu }
3019266991b1SJeff Moyer io_end->flag |= EXT4_IO_END_DIRECT;
3020266991b1SJeff Moyer iocb->private = io_end;
30218d5d02e6SMingming Cao /*
302269c499d1STheodore Ts'o * we save the io structure for the current async direct
302369c499d1STheodore Ts'o * IO, so that later ext4_map_blocks() could flag the
302469c499d1STheodore Ts'o * io structure whether there are unwritten extents that
302569c499d1STheodore Ts'o * need to be converted when the IO is completed.
30268d5d02e6SMingming Cao */
3027f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, io_end);
30288d5d02e6SMingming Cao }
30298d5d02e6SMingming Cao
30308b0f165fSAnatol Pomozov if (overwrite) {
30318b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write_nolock;
30328b0f165fSAnatol Pomozov } else {
30338b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write;
30348b0f165fSAnatol Pomozov dio_flags = DIO_LOCKING;
30358b0f165fSAnatol Pomozov }
3036729f52c6SZheng Liu ret = __blockdev_direct_IO(rw, iocb, inode,
3037729f52c6SZheng Liu inode->i_sb->s_bdev, iov,
3038729f52c6SZheng Liu offset, nr_segs,
30398b0f165fSAnatol Pomozov get_block_func,
3040729f52c6SZheng Liu ext4_end_io_dio,
3041729f52c6SZheng Liu NULL,
30428b0f165fSAnatol Pomozov dio_flags);
30438b0f165fSAnatol Pomozov
30448d5d02e6SMingming Cao if (iocb->private)
3045f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, NULL);
30468d5d02e6SMingming Cao /*
304769c499d1STheodore Ts'o * The io_end structure takes a reference to the inode; that
304869c499d1STheodore Ts'o * structure needs to be destroyed and the reference to the
304969c499d1STheodore Ts'o * inode needs to be dropped when the IO is complete, even for
305069c499d1STheodore Ts'o * a 0 byte write or a failure.
30518d5d02e6SMingming Cao *
305269c499d1STheodore Ts'o * In the successful AIO DIO case, the io_end structure will
305369c499d1STheodore Ts'o * be destroyed and the reference to the inode will be dropped
30548d5d02e6SMingming Cao * after the end_io callback function is called.
30558d5d02e6SMingming Cao *
305669c499d1STheodore Ts'o * In the case of a 0 byte write or an error, since VFS
305769c499d1STheodore Ts'o * direct IO won't invoke the end_io callback function, we
305869c499d1STheodore Ts'o * need to free the end_io structure here.
30598d5d02e6SMingming Cao */
30608d5d02e6SMingming Cao if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
30618d5d02e6SMingming Cao ext4_free_io_end(iocb->private);
30628d5d02e6SMingming Cao iocb->private = NULL;
3063729f52c6SZheng Liu } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
30645f524950SMingming EXT4_STATE_DIO_UNWRITTEN)) {
3065109f5565SMingming int err;
30668d5d02e6SMingming Cao /*
30678d5d02e6SMingming Cao * for the non-AIO case, since the IO is already
306825985edcSLucas De Marchi * completed, we can do the conversion right here
30698d5d02e6SMingming Cao */
3070109f5565SMingming err = ext4_convert_unwritten_extents(inode,
30718d5d02e6SMingming Cao offset, ret);
3072109f5565SMingming if (err < 0)
3073109f5565SMingming ret = err;
307419f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3075109f5565SMingming }
30764bd809dbSZheng Liu
30774bd809dbSZheng Liu retake_lock:
30784bd809dbSZheng Liu /* take i_mutex locking again if we do an overwrite dio */
30794bd809dbSZheng Liu if (overwrite) {
30801f555cfaSDmitry Monakhov inode_dio_done(inode);
30814bd809dbSZheng Liu up_read(&EXT4_I(inode)->i_data_sem);
30824bd809dbSZheng Liu mutex_lock(&inode->i_mutex);
30834bd809dbSZheng Liu }
30844bd809dbSZheng Liu
30854c0425ffSMingming Cao return ret;
30864c0425ffSMingming Cao }
30878d5d02e6SMingming Cao
30884c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
30894c0425ffSMingming Cao const struct iovec *iov, loff_t offset,
30904c0425ffSMingming Cao unsigned long nr_segs)
30914c0425ffSMingming Cao {
30924c0425ffSMingming Cao struct file *file = iocb->ki_filp;
30934c0425ffSMingming Cao struct inode *inode = file->f_mapping->host;
30940562e0baSJiaying Zhang ssize_t ret;
30954c0425ffSMingming Cao
309684ebd795STheodore Ts'o /*
309784ebd795STheodore Ts'o * If we are doing data journalling we don't support O_DIRECT
309884ebd795STheodore Ts'o */
309984ebd795STheodore Ts'o if (ext4_should_journal_data(inode))
310084ebd795STheodore Ts'o return 0;
310184ebd795STheodore Ts'o
310246c7f254STao Ma /* Let buffer I/O handle the inline data case. */
310346c7f254STao Ma if (ext4_has_inline_data(inode))
310446c7f254STao Ma return 0;
310546c7f254STao Ma
31060562e0baSJiaying Zhang trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
310712e9b892SDmitry Monakhov if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
31080562e0baSJiaying Zhang ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
31090562e0baSJiaying Zhang else
31100562e0baSJiaying Zhang ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
31110562e0baSJiaying Zhang trace_ext4_direct_IO_exit(inode, offset,
31120562e0baSJiaying Zhang iov_length(iov, nr_segs), rw, ret);
31130562e0baSJiaying Zhang return ret;
31144c0425ffSMingming Cao }
31154c0425ffSMingming Cao
3116ac27a0ecSDave Kleikamp /*
3117617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling
3118ac27a0ecSDave Kleikamp * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3119ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is
3120ac27a0ecSDave Kleikamp * not necessarily locked.
3121ac27a0ecSDave Kleikamp *
3122ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the
3123ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive".
We cannot just set the buffers dirty 3124ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 3125ac27a0ecSDave Kleikamp * 3126ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 3127ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 3128ac27a0ecSDave Kleikamp */ 3129617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 3130ac27a0ecSDave Kleikamp { 3131ac27a0ecSDave Kleikamp SetPageChecked(page); 3132ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 3133ac27a0ecSDave Kleikamp } 3134ac27a0ecSDave Kleikamp 3135617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = { 3136617ba13bSMingming Cao .readpage = ext4_readpage, 3137617ba13bSMingming Cao .readpages = ext4_readpages, 313843ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3139bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3140bfc1af65SNick Piggin .write_end = ext4_ordered_write_end, 3141617ba13bSMingming Cao .bmap = ext4_bmap, 3142617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3143617ba13bSMingming Cao .releasepage = ext4_releasepage, 3144617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3145ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 31468ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3147aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3148ac27a0ecSDave Kleikamp }; 3149ac27a0ecSDave Kleikamp 3150617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = { 3151617ba13bSMingming Cao .readpage = ext4_readpage, 3152617ba13bSMingming Cao .readpages = ext4_readpages, 315343ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3154bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3155bfc1af65SNick Piggin .write_end = ext4_writeback_write_end, 3156617ba13bSMingming Cao .bmap = ext4_bmap, 3157617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3158617ba13bSMingming Cao .releasepage = ext4_releasepage, 3159617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3160ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 31618ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3162aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3163ac27a0ecSDave Kleikamp }; 3164ac27a0ecSDave Kleikamp 3165617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = { 3166617ba13bSMingming Cao .readpage = ext4_readpage, 3167617ba13bSMingming Cao .readpages = ext4_readpages, 316843ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3169bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3170bfc1af65SNick Piggin .write_end = ext4_journalled_write_end, 3171617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty, 3172617ba13bSMingming Cao .bmap = ext4_bmap, 31734520fb3cSJan Kara .invalidatepage = ext4_journalled_invalidatepage, 3174617ba13bSMingming Cao .releasepage = ext4_releasepage, 317584ebd795STheodore Ts'o .direct_IO = ext4_direct_IO, 31768ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3177aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3178ac27a0ecSDave Kleikamp }; 3179ac27a0ecSDave Kleikamp 318064769240SAlex Tomas static const struct address_space_operations ext4_da_aops = { 318164769240SAlex Tomas .readpage = ext4_readpage, 318264769240SAlex Tomas .readpages = ext4_readpages, 318343ce1d23SAneesh Kumar K.V 
.writepage = ext4_writepage,
318464769240SAlex Tomas .writepages = ext4_da_writepages,
318564769240SAlex Tomas .write_begin = ext4_da_write_begin,
318664769240SAlex Tomas .write_end = ext4_da_write_end,
318764769240SAlex Tomas .bmap = ext4_bmap,
318864769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage,
318964769240SAlex Tomas .releasepage = ext4_releasepage,
319064769240SAlex Tomas .direct_IO = ext4_direct_IO,
319164769240SAlex Tomas .migratepage = buffer_migrate_page,
31928ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate,
3193aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page,
319464769240SAlex Tomas };
319564769240SAlex Tomas
3196617ba13bSMingming Cao void ext4_set_aops(struct inode *inode)
3197ac27a0ecSDave Kleikamp {
31983d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) {
31993d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE:
32003d2b1582SLukas Czerner if (test_opt(inode->i_sb, DELALLOC))
3201cd1aac32SAneesh Kumar K.V inode->i_mapping->a_ops = &ext4_da_aops;
3202ac27a0ecSDave Kleikamp else
32033d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_ordered_aops;
32043d2b1582SLukas Czerner break;
32053d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE:
32063d2b1582SLukas Czerner if (test_opt(inode->i_sb, DELALLOC))
32073d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_da_aops;
32083d2b1582SLukas Czerner else
32093d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_writeback_aops;
32103d2b1582SLukas Czerner break;
32113d2b1582SLukas Czerner case EXT4_INODE_JOURNAL_DATA_MODE:
3212617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops;
32133d2b1582SLukas Czerner break;
32143d2b1582SLukas Czerner default:
32153d2b1582SLukas Czerner BUG();
32163d2b1582SLukas Czerner }
3217ac27a0ecSDave Kleikamp }
3218ac27a0ecSDave Kleikamp
32194e96b2dbSAllison Henderson
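/*
 * Illustrative sketch (not part of the original ext4 code): the
 * journal-mode to aops mapping that ext4_set_aops() above implements,
 * as a hypothetical call site would use it:
 *
 *   ordered   -> ext4_ordered_aops   (ext4_da_aops with delalloc)
 *   writeback -> ext4_writeback_aops (ext4_da_aops with delalloc)
 *   journal   -> ext4_journalled_aops
 */
#if 0
	struct inode *inode = example_iget(sb, ino);	/* hypothetical lookup */

	/* Point inode->i_mapping->a_ops at the table matching the
	 * inode's current journalling mode. */
	ext4_set_aops(inode);
#endif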
32274e96b2dbSAllison Henderson */ 32284e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle, 32294e96b2dbSAllison Henderson struct address_space *mapping, loff_t from, 32304e96b2dbSAllison Henderson loff_t length, int flags) 32314e96b2dbSAllison Henderson { 32324e96b2dbSAllison Henderson struct inode *inode = mapping->host; 32334e96b2dbSAllison Henderson struct page *page; 32344e96b2dbSAllison Henderson int err = 0; 32354e96b2dbSAllison Henderson 32364e96b2dbSAllison Henderson page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 32374e96b2dbSAllison Henderson mapping_gfp_mask(mapping) & ~__GFP_FS); 32384e96b2dbSAllison Henderson if (!page) 32395129d05fSYongqiang Yang return -ENOMEM; 32404e96b2dbSAllison Henderson 32414e96b2dbSAllison Henderson err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page, 32424e96b2dbSAllison Henderson from, length, flags); 32434e96b2dbSAllison Henderson 32444e96b2dbSAllison Henderson unlock_page(page); 32454e96b2dbSAllison Henderson page_cache_release(page); 32464e96b2dbSAllison Henderson return err; 32474e96b2dbSAllison Henderson } 32484e96b2dbSAllison Henderson 32494e96b2dbSAllison Henderson /* 32504e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock() 32514e96b2dbSAllison Henderson * Zeros a page range of length 'length' starting from offset 'from'. 32524e96b2dbSAllison Henderson * Buffer heads that correspond to the block aligned regions of the 32534e96b2dbSAllison Henderson * zeroed range will be unmapped. Non-block-aligned regions 32544e96b2dbSAllison Henderson * will have the corresponding buffer head mapped if needed so that 32554e96b2dbSAllison Henderson * that region of the page can be updated with the partial zero out. 32564e96b2dbSAllison Henderson * 32574e96b2dbSAllison Henderson * This function assumes that the page has already been locked. 32584e96b2dbSAllison Henderson * The range to be discarded must be contained within the given page. 32594e96b2dbSAllison Henderson * If the specified range exceeds the end of the page it will be shortened 32604e96b2dbSAllison Henderson * to the end of the page that corresponds to 'from'. This function is 32614e96b2dbSAllison Henderson * appropriate for updating a page and its buffer heads to be unmapped and 32624e96b2dbSAllison Henderson * zeroed for blocks that have been either released, or are going to be 32634e96b2dbSAllison Henderson * released. 32644e96b2dbSAllison Henderson * 32654e96b2dbSAllison Henderson * handle: The journal handle 32664e96b2dbSAllison Henderson * inode: The file's inode 32674e96b2dbSAllison Henderson * page: A locked page that contains the offset "from" 32684907cb7bSAnatol Pomozov * from: The starting byte offset (from the beginning of the file) 32694e96b2dbSAllison Henderson * to begin discarding 32704e96b2dbSAllison Henderson * length: The number of bytes to discard 32714e96b2dbSAllison Henderson * flags: Optional flags that may be used: 32724e96b2dbSAllison Henderson * 32734e96b2dbSAllison Henderson * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED 32744e96b2dbSAllison Henderson * Only zero the regions of the page whose buffer heads 32754e96b2dbSAllison Henderson * have already been unmapped. This flag is appropriate 32764907cb7bSAnatol Pomozov * for updating the contents of a page whose blocks may 32774e96b2dbSAllison Henderson * have already been released, and we only want to zero 32784e96b2dbSAllison Henderson * out the regions that correspond to those released blocks.
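 *
 * For example, to zero only the regions of a page whose backing blocks
 * were already released, leaving still-mapped blocks untouched
 * (illustrative sketch only):
 *
 *	ext4_discard_partial_page_buffers(handle, mapping, from, length,
 *				EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);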
32794e96b2dbSAllison Henderson * 32804907cb7bSAnatol Pomozov * Returns zero on success or negative on failure. 32814e96b2dbSAllison Henderson */ 32825f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, 32834e96b2dbSAllison Henderson struct inode *inode, struct page *page, loff_t from, 32844e96b2dbSAllison Henderson loff_t length, int flags) 32854e96b2dbSAllison Henderson { 32864e96b2dbSAllison Henderson ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 32874e96b2dbSAllison Henderson unsigned int offset = from & (PAGE_CACHE_SIZE-1); 32884e96b2dbSAllison Henderson unsigned int blocksize, max, pos; 32894e96b2dbSAllison Henderson ext4_lblk_t iblock; 32904e96b2dbSAllison Henderson struct buffer_head *bh; 32914e96b2dbSAllison Henderson int err = 0; 32924e96b2dbSAllison Henderson 32934e96b2dbSAllison Henderson blocksize = inode->i_sb->s_blocksize; 32944e96b2dbSAllison Henderson max = PAGE_CACHE_SIZE - offset; 32954e96b2dbSAllison Henderson 32964e96b2dbSAllison Henderson if (index != page->index) 32974e96b2dbSAllison Henderson return -EINVAL; 32984e96b2dbSAllison Henderson 32994e96b2dbSAllison Henderson /* 33004e96b2dbSAllison Henderson * correct length if it does not fall between 33014e96b2dbSAllison Henderson * 'from' and the end of the page 33024e96b2dbSAllison Henderson */ 33034e96b2dbSAllison Henderson if (length > max || length < 0) 33044e96b2dbSAllison Henderson length = max; 33054e96b2dbSAllison Henderson 33064e96b2dbSAllison Henderson iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 33074e96b2dbSAllison Henderson 3308093e6e36SYongqiang Yang if (!page_has_buffers(page)) 33094e96b2dbSAllison Henderson create_empty_buffers(page, blocksize, 0); 33104e96b2dbSAllison Henderson 33114e96b2dbSAllison Henderson /* Find the buffer that contains "offset" */ 33124e96b2dbSAllison Henderson bh = page_buffers(page); 33134e96b2dbSAllison Henderson pos = blocksize; 33144e96b2dbSAllison Henderson while (offset >= pos) { 33154e96b2dbSAllison Henderson bh = bh->b_this_page; 33164e96b2dbSAllison Henderson iblock++; 33174e96b2dbSAllison Henderson pos += blocksize; 33184e96b2dbSAllison Henderson } 33194e96b2dbSAllison Henderson 33204e96b2dbSAllison Henderson pos = offset; 33214e96b2dbSAllison Henderson while (pos < offset + length) { 3322e260daf2SYongqiang Yang unsigned int end_of_block, range_to_discard; 3323e260daf2SYongqiang Yang 33244e96b2dbSAllison Henderson err = 0; 33254e96b2dbSAllison Henderson 33264e96b2dbSAllison Henderson /* The length of space left to zero and unmap */ 33274e96b2dbSAllison Henderson range_to_discard = offset + length - pos; 33284e96b2dbSAllison Henderson 33294e96b2dbSAllison Henderson /* The length of space until the end of the block */ 33304e96b2dbSAllison Henderson end_of_block = blocksize - (pos & (blocksize-1)); 33314e96b2dbSAllison Henderson 33324e96b2dbSAllison Henderson /* 33334e96b2dbSAllison Henderson * Do not unmap or zero past end of block 33344e96b2dbSAllison Henderson * for this buffer head 33354e96b2dbSAllison Henderson */ 33364e96b2dbSAllison Henderson if (range_to_discard > end_of_block) 33374e96b2dbSAllison Henderson range_to_discard = end_of_block; 33384e96b2dbSAllison Henderson 33394e96b2dbSAllison Henderson 33404e96b2dbSAllison Henderson /* 33414e96b2dbSAllison Henderson * Skip this buffer head if we are only zeroing unmapped 33424e96b2dbSAllison Henderson * regions of the page 33434e96b2dbSAllison Henderson */ 33444e96b2dbSAllison Henderson if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
33454e96b2dbSAllison Henderson buffer_mapped(bh)) 33464e96b2dbSAllison Henderson goto next; 33474e96b2dbSAllison Henderson 33484e96b2dbSAllison Henderson /* If the range is block aligned, unmap */ 33494e96b2dbSAllison Henderson if (range_to_discard == blocksize) { 33504e96b2dbSAllison Henderson clear_buffer_dirty(bh); 33514e96b2dbSAllison Henderson bh->b_bdev = NULL; 33524e96b2dbSAllison Henderson clear_buffer_mapped(bh); 33534e96b2dbSAllison Henderson clear_buffer_req(bh); 33544e96b2dbSAllison Henderson clear_buffer_new(bh); 33554e96b2dbSAllison Henderson clear_buffer_delay(bh); 33564e96b2dbSAllison Henderson clear_buffer_unwritten(bh); 33574e96b2dbSAllison Henderson clear_buffer_uptodate(bh); 33584e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 33594e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Buffer discarded"); 33604e96b2dbSAllison Henderson goto next; 33614e96b2dbSAllison Henderson } 33624e96b2dbSAllison Henderson 33634e96b2dbSAllison Henderson /* 33644e96b2dbSAllison Henderson * If this block is not completely contained in the range 33654e96b2dbSAllison Henderson * to be discarded, then it is not going to be released. Because 33664e96b2dbSAllison Henderson * we need to keep this block, we must make sure this part 33674e96b2dbSAllison Henderson * of the page is uptodate before we modify it by writing 33684e96b2dbSAllison Henderson * partial zeros on it. 33694e96b2dbSAllison Henderson */ 33704e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 33714e96b2dbSAllison Henderson /* 33724e96b2dbSAllison Henderson * Buffer head must be mapped before we can read 33734e96b2dbSAllison Henderson * from the block 33744e96b2dbSAllison Henderson */ 33754e96b2dbSAllison Henderson BUFFER_TRACE(bh, "unmapped"); 33764e96b2dbSAllison Henderson ext4_get_block(inode, iblock, bh, 0); 33774e96b2dbSAllison Henderson /* unmapped? It's a hole - nothing to do */ 33784e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 33794e96b2dbSAllison Henderson BUFFER_TRACE(bh, "still unmapped"); 33804e96b2dbSAllison Henderson goto next; 33814e96b2dbSAllison Henderson } 33824e96b2dbSAllison Henderson } 33834e96b2dbSAllison Henderson 33844e96b2dbSAllison Henderson /* Ok, it's mapped. Make sure it's up-to-date */ 33854e96b2dbSAllison Henderson if (PageUptodate(page)) 33864e96b2dbSAllison Henderson set_buffer_uptodate(bh); 33874e96b2dbSAllison Henderson 33884e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) { 33894e96b2dbSAllison Henderson err = -EIO; 33904e96b2dbSAllison Henderson ll_rw_block(READ, 1, &bh); 33914e96b2dbSAllison Henderson wait_on_buffer(bh); 33924e96b2dbSAllison Henderson /* Uhhuh. Read error.
Complain and punt.*/ 33934e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) 33944e96b2dbSAllison Henderson goto next; 33954e96b2dbSAllison Henderson } 33964e96b2dbSAllison Henderson 33974e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 33984e96b2dbSAllison Henderson BUFFER_TRACE(bh, "get write access"); 33994e96b2dbSAllison Henderson err = ext4_journal_get_write_access(handle, bh); 34004e96b2dbSAllison Henderson if (err) 34014e96b2dbSAllison Henderson goto next; 34024e96b2dbSAllison Henderson } 34034e96b2dbSAllison Henderson 34044e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 34054e96b2dbSAllison Henderson 34064e96b2dbSAllison Henderson err = 0; 34074e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 34084e96b2dbSAllison Henderson err = ext4_handle_dirty_metadata(handle, inode, bh); 3409decbd919STheodore Ts'o } else 34104e96b2dbSAllison Henderson mark_buffer_dirty(bh); 34114e96b2dbSAllison Henderson 34124e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Partial buffer zeroed"); 34134e96b2dbSAllison Henderson next: 34144e96b2dbSAllison Henderson bh = bh->b_this_page; 34154e96b2dbSAllison Henderson iblock++; 34164e96b2dbSAllison Henderson pos += range_to_discard; 34174e96b2dbSAllison Henderson } 34184e96b2dbSAllison Henderson 34194e96b2dbSAllison Henderson return err; 34204e96b2dbSAllison Henderson } 34214e96b2dbSAllison Henderson 342291ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode) 342391ef4cafSDuane Griffin { 342491ef4cafSDuane Griffin if (S_ISREG(inode->i_mode)) 342591ef4cafSDuane Griffin return 1; 342691ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode)) 342791ef4cafSDuane Griffin return 1; 342891ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode)) 342991ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode); 343091ef4cafSDuane Griffin return 0; 343191ef4cafSDuane Griffin } 343291ef4cafSDuane Griffin 3433ac27a0ecSDave Kleikamp /* 3434a4bb6b64SAllison Henderson * ext4_punch_hole: punches a hole in a file by releasing the blocks 3435a4bb6b64SAllison Henderson * associated with the given offset and length 3436a4bb6b64SAllison Henderson * 3437a4bb6b64SAllison Henderson * @file: The file where the hole will be punched 3438a4bb6b64SAllison Henderson * @offset: The offset where the hole will begin 3439a4bb6b64SAllison Henderson * @length: The length of the hole 3440a4bb6b64SAllison Henderson * 34414907cb7bSAnatol Pomozov * Returns: 0 on success or negative on failure 3442a4bb6b64SAllison Henderson */ 3443a4bb6b64SAllison Henderson 3444a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) 3445a4bb6b64SAllison Henderson { 3446a4bb6b64SAllison Henderson struct inode *inode = file->f_path.dentry->d_inode; 3447a4bb6b64SAllison Henderson if (!S_ISREG(inode->i_mode)) 344873355192SAllison Henderson return -EOPNOTSUPP; 3449a4bb6b64SAllison Henderson 34508bad6fc8SZheng Liu if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 34518bad6fc8SZheng Liu return ext4_ind_punch_hole(file, offset, length); 3452a4bb6b64SAllison Henderson 3453bab08ab9STheodore Ts'o if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) { 3454bab08ab9STheodore Ts'o /* TODO: Add support for bigalloc file systems */ 345573355192SAllison Henderson return -EOPNOTSUPP; 3456bab08ab9STheodore Ts'o } 3457bab08ab9STheodore Ts'o 3458aaddea81SZheng Liu trace_ext4_punch_hole(inode, offset, length); 3459aaddea81SZheng Liu 3460a4bb6b64SAllison Henderson return ext4_ext_punch_hole(file, offset, length); 3461a4bb6b64SAllison Henderson } 3462a4bb6b64SAllison Henderson
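/*
 * Note: a hole punch request normally reaches ext4_punch_hole() via the
 * fallocate(2) system call; from userspace (illustrative sketch, not part
 * of this file):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 */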
3463a4bb6b64SAllison Henderson /* 3464617ba13bSMingming Cao * ext4_truncate() 3465ac27a0ecSDave Kleikamp * 3466617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3467617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3468ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 3469ac27a0ecSDave Kleikamp * 347042b2aa86SJustin P. Mattock * As we work through the truncate and commit bits of it to the journal there 3471ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3472ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3473ac27a0ecSDave Kleikamp * 3474ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3475ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3476ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3477ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3478ac27a0ecSDave Kleikamp * left-to-right works OK too). 3479ac27a0ecSDave Kleikamp * 3480ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3481ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3482ac27a0ecSDave Kleikamp * 3483ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3484617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3485ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3486617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3487617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3488ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3489617ba13bSMingming Cao * ext4_truncate() run will find them and release them. 
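 *
 * For example: if a crash interrupts a truncate down to 4096 bytes after
 * the new i_size has been committed but before all blocks past it were
 * freed, journal replay first restores consistent metadata, and the
 * orphan cleanup then calls ext4_truncate() again, releasing whatever
 * blocks beyond the truncation point still remain.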
3490ac27a0ecSDave Kleikamp */ 3491617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3492ac27a0ecSDave Kleikamp { 34930562e0baSJiaying Zhang trace_ext4_truncate_enter(inode); 34940562e0baSJiaying Zhang 349591ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3496ac27a0ecSDave Kleikamp return; 3497ac27a0ecSDave Kleikamp 349812e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3499c8d46e41SJiaying Zhang 35005534fb5bSTheodore Ts'o if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 350119f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 35027d8f9f7dSTheodore Ts'o 3503aef1c851STao Ma if (ext4_has_inline_data(inode)) { 3504aef1c851STao Ma int has_inline = 1; 3505aef1c851STao Ma 3506aef1c851STao Ma ext4_inline_data_truncate(inode, &has_inline); 3507aef1c851STao Ma if (has_inline) 3508aef1c851STao Ma return; 3509aef1c851STao Ma } 3510aef1c851STao Ma 3511ff9893dcSAmir Goldstein if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3512cf108bcaSJan Kara ext4_ext_truncate(inode); 3513ff9893dcSAmir Goldstein else 3514ff9893dcSAmir Goldstein ext4_ind_truncate(inode); 3515a86c6181SAlex Tomas 35160562e0baSJiaying Zhang trace_ext4_truncate_exit(inode); 3517ac27a0ecSDave Kleikamp } 3518ac27a0ecSDave Kleikamp 3519ac27a0ecSDave Kleikamp /* 3520617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3521ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3522ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3523ac27a0ecSDave Kleikamp * inode. 3524ac27a0ecSDave Kleikamp */ 3525617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3526617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3527ac27a0ecSDave Kleikamp { 3528240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3529ac27a0ecSDave Kleikamp struct buffer_head *bh; 3530240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3531240799cdSTheodore Ts'o ext4_fsblk_t block; 3532240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3533ac27a0ecSDave Kleikamp 35343a06d778SAneesh Kumar K.V iloc->bh = NULL; 3535240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3536ac27a0ecSDave Kleikamp return -EIO; 3537ac27a0ecSDave Kleikamp 3538240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3539240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3540240799cdSTheodore Ts'o if (!gdp) 3541240799cdSTheodore Ts'o return -EIO; 3542240799cdSTheodore Ts'o 3543240799cdSTheodore Ts'o /* 3544240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 3545240799cdSTheodore Ts'o */ 354600d09882STao Ma inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 3547240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 3548240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 3549240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3550240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3551240799cdSTheodore Ts'o 3552240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 3553aebf0243SWang Shilong if (unlikely(!bh)) 3554860d21e2STheodore Ts'o return -ENOMEM; 3555ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3556ac27a0ecSDave Kleikamp lock_buffer(bh); 35579c83a923SHidehiro Kawai 35589c83a923SHidehiro Kawai /* 35599c83a923SHidehiro Kawai * If the buffer has the write error flag, 
we have failed 35609c83a923SHidehiro Kawai * to write out another inode in the same block. In this 35619c83a923SHidehiro Kawai * case, we don't re-read the block from disk: even a 35629c83a923SHidehiro Kawai * successful read would only bring back the old inode data. 35639c83a923SHidehiro Kawai */ 35649c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 35659c83a923SHidehiro Kawai set_buffer_uptodate(bh); 35669c83a923SHidehiro Kawai 3567ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3568ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3569ac27a0ecSDave Kleikamp unlock_buffer(bh); 3570ac27a0ecSDave Kleikamp goto has_buffer; 3571ac27a0ecSDave Kleikamp } 3572ac27a0ecSDave Kleikamp 3573ac27a0ecSDave Kleikamp /* 3574ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3575ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3576ac27a0ecSDave Kleikamp * block. 3577ac27a0ecSDave Kleikamp */ 3578ac27a0ecSDave Kleikamp if (in_mem) { 3579ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3580240799cdSTheodore Ts'o int i, start; 3581ac27a0ecSDave Kleikamp 3582240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 3583ac27a0ecSDave Kleikamp 3584ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? */ 3585240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3586aebf0243SWang Shilong if (unlikely(!bitmap_bh)) 3587ac27a0ecSDave Kleikamp goto make_io; 3588ac27a0ecSDave Kleikamp 3589ac27a0ecSDave Kleikamp /* 3590ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3591ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3592ac27a0ecSDave Kleikamp * of one, so skip it. 3593ac27a0ecSDave Kleikamp */ 3594ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3595ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3596ac27a0ecSDave Kleikamp goto make_io; 3597ac27a0ecSDave Kleikamp } 3598240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 3599ac27a0ecSDave Kleikamp if (i == inode_offset) 3600ac27a0ecSDave Kleikamp continue; 3601617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3602ac27a0ecSDave Kleikamp break; 3603ac27a0ecSDave Kleikamp } 3604ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3605240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 3606ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3607ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3608ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3609ac27a0ecSDave Kleikamp unlock_buffer(bh); 3610ac27a0ecSDave Kleikamp goto has_buffer; 3611ac27a0ecSDave Kleikamp } 3612ac27a0ecSDave Kleikamp } 3613ac27a0ecSDave Kleikamp 3614ac27a0ecSDave Kleikamp make_io: 3615ac27a0ecSDave Kleikamp /* 3616240799cdSTheodore Ts'o * If we need to do any I/O, try to read ahead extra 3617240799cdSTheodore Ts'o * blocks from the inode table.
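 *
 * For example, with the default s_inode_readahead_blks of 32, this
 * reads the 32-block-aligned window of the inode table that contains
 * the wanted block, clipped to the start of the table and to the last
 * block that can hold in-use inodes.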
3618240799cdSTheodore Ts'o */ 3619240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 3620240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 3621240799cdSTheodore Ts'o unsigned num; 3622240799cdSTheodore Ts'o 3623240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 3624b713a5ecSTheodore Ts'o /* s_inode_readahead_blks is always a power of 2 */ 3625240799cdSTheodore Ts'o b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 3626240799cdSTheodore Ts'o if (table > b) 3627240799cdSTheodore Ts'o b = table; 3628240799cdSTheodore Ts'o end = b + EXT4_SB(sb)->s_inode_readahead_blks; 3629240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 3630feb0ab32SDarrick J. Wong if (ext4_has_group_desc_csum(sb)) 3631560671a0SAneesh Kumar K.V num -= ext4_itable_unused_count(sb, gdp); 3632240799cdSTheodore Ts'o table += num / inodes_per_block; 3633240799cdSTheodore Ts'o if (end > table) 3634240799cdSTheodore Ts'o end = table; 3635240799cdSTheodore Ts'o while (b <= end) 3636240799cdSTheodore Ts'o sb_breadahead(sb, b++); 3637240799cdSTheodore Ts'o } 3638240799cdSTheodore Ts'o 3639240799cdSTheodore Ts'o /* 3640ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 3641ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 3642ac27a0ecSDave Kleikamp * Read the block from disk. 3643ac27a0ecSDave Kleikamp */ 36440562e0baSJiaying Zhang trace_ext4_load_inode(inode); 3645ac27a0ecSDave Kleikamp get_bh(bh); 3646ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 364765299a3bSChristoph Hellwig submit_bh(READ | REQ_META | REQ_PRIO, bh); 3648ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3649ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3650c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3651c398eda0STheodore Ts'o "unable to read itable block"); 3652ac27a0ecSDave Kleikamp brelse(bh); 3653ac27a0ecSDave Kleikamp return -EIO; 3654ac27a0ecSDave Kleikamp } 3655ac27a0ecSDave Kleikamp } 3656ac27a0ecSDave Kleikamp has_buffer: 3657ac27a0ecSDave Kleikamp iloc->bh = bh; 3658ac27a0ecSDave Kleikamp return 0; 3659ac27a0ecSDave Kleikamp } 3660ac27a0ecSDave Kleikamp 3661617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 3662ac27a0ecSDave Kleikamp { 3663ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. 
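 * (i.e. the in_mem argument below is true exactly when the
 * EXT4_STATE_XATTR flag is clear).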
*/ 3664617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 366519f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 3666ac27a0ecSDave Kleikamp } 3667ac27a0ecSDave Kleikamp 3668617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 3669ac27a0ecSDave Kleikamp { 3670617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 3671ac27a0ecSDave Kleikamp 3672ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 3673617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 3674ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 3675617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 3676ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 3677617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 3678ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 3679617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 3680ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 3681617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 3682ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 3683ac27a0ecSDave Kleikamp } 3684ac27a0ecSDave Kleikamp 3685ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 3686ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 3687ff9ddf7eSJan Kara { 368884a8dce2SDmitry Monakhov unsigned int vfs_fl; 368984a8dce2SDmitry Monakhov unsigned long old_fl, new_fl; 3690ff9ddf7eSJan Kara 369184a8dce2SDmitry Monakhov do { 369284a8dce2SDmitry Monakhov vfs_fl = ei->vfs_inode.i_flags; 369384a8dce2SDmitry Monakhov old_fl = ei->i_flags; 369484a8dce2SDmitry Monakhov new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 369584a8dce2SDmitry Monakhov EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| 369684a8dce2SDmitry Monakhov EXT4_DIRSYNC_FL); 369784a8dce2SDmitry Monakhov if (vfs_fl & S_SYNC) 369884a8dce2SDmitry Monakhov new_fl |= EXT4_SYNC_FL; 369984a8dce2SDmitry Monakhov if (vfs_fl & S_APPEND) 370084a8dce2SDmitry Monakhov new_fl |= EXT4_APPEND_FL; 370184a8dce2SDmitry Monakhov if (vfs_fl & S_IMMUTABLE) 370284a8dce2SDmitry Monakhov new_fl |= EXT4_IMMUTABLE_FL; 370384a8dce2SDmitry Monakhov if (vfs_fl & S_NOATIME) 370484a8dce2SDmitry Monakhov new_fl |= EXT4_NOATIME_FL; 370584a8dce2SDmitry Monakhov if (vfs_fl & S_DIRSYNC) 370684a8dce2SDmitry Monakhov new_fl |= EXT4_DIRSYNC_FL; 370784a8dce2SDmitry Monakhov } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); 3708ff9ddf7eSJan Kara } 3709de9a55b8STheodore Ts'o 37100fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 37110fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 37120fc1b451SAneesh Kumar K.V { 37130fc1b451SAneesh Kumar K.V blkcnt_t i_blocks ; 37148180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 37158180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 37160fc1b451SAneesh Kumar K.V 37170fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 37180fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 37190fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 37200fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 37210fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 372207a03824STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 37238180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 37248180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 37258180a562SAneesh Kumar K.V } else { 37260fc1b451SAneesh Kumar K.V return i_blocks; 37278180a562SAneesh Kumar K.V } 
37280fc1b451SAneesh Kumar K.V } else { 37290fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 37300fc1b451SAneesh Kumar K.V } 37310fc1b451SAneesh Kumar K.V } 3732ff9ddf7eSJan Kara 3733152a7b0aSTao Ma static inline void ext4_iget_extra_inode(struct inode *inode, 3734152a7b0aSTao Ma struct ext4_inode *raw_inode, 3735152a7b0aSTao Ma struct ext4_inode_info *ei) 3736152a7b0aSTao Ma { 3737152a7b0aSTao Ma __le32 *magic = (void *)raw_inode + 3738152a7b0aSTao Ma EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 373967cf5b09STao Ma if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 3740152a7b0aSTao Ma ext4_set_inode_state(inode, EXT4_STATE_XATTR); 374167cf5b09STao Ma ext4_find_inline_data_nolock(inode); 3742f19d5870STao Ma } else 3743f19d5870STao Ma EXT4_I(inode)->i_inline_off = 0; 3744152a7b0aSTao Ma } 3745152a7b0aSTao Ma 37461d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 3747ac27a0ecSDave Kleikamp { 3748617ba13bSMingming Cao struct ext4_iloc iloc; 3749617ba13bSMingming Cao struct ext4_inode *raw_inode; 37501d1fe1eeSDavid Howells struct ext4_inode_info *ei; 37511d1fe1eeSDavid Howells struct inode *inode; 3752b436b9beSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 37531d1fe1eeSDavid Howells long ret; 3754ac27a0ecSDave Kleikamp int block; 375508cefc7aSEric W. Biederman uid_t i_uid; 375608cefc7aSEric W. Biederman gid_t i_gid; 3757ac27a0ecSDave Kleikamp 37581d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 37591d1fe1eeSDavid Howells if (!inode) 37601d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 37611d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 37621d1fe1eeSDavid Howells return inode; 37631d1fe1eeSDavid Howells 37641d1fe1eeSDavid Howells ei = EXT4_I(inode); 37657dc57615SPeter Huewe iloc.bh = NULL; 3766ac27a0ecSDave Kleikamp 37671d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 37681d1fe1eeSDavid Howells if (ret < 0) 3769ac27a0ecSDave Kleikamp goto bad_inode; 3770617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 3771814525f4SDarrick J. Wong 3772814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3773814525f4SDarrick J. Wong ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 3774814525f4SDarrick J. Wong if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 3775814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)) { 3776814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)", 3777814525f4SDarrick J. Wong EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize, 3778814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)); 3779814525f4SDarrick J. Wong ret = -EIO; 3780814525f4SDarrick J. Wong goto bad_inode; 3781814525f4SDarrick J. Wong } 3782814525f4SDarrick J. Wong } else 3783814525f4SDarrick J. Wong ei->i_extra_isize = 0; 3784814525f4SDarrick J. Wong 3785814525f4SDarrick J. Wong /* Precompute checksum seed for inode metadata */ 3786814525f4SDarrick J. Wong if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3787814525f4SDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { 3788814525f4SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3789814525f4SDarrick J. Wong __u32 csum; 3790814525f4SDarrick J. Wong __le32 inum = cpu_to_le32(inode->i_ino); 3791814525f4SDarrick J. Wong __le32 gen = raw_inode->i_generation; 3792814525f4SDarrick J. Wong csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 3793814525f4SDarrick J. Wong sizeof(inum)); 3794814525f4SDarrick J. Wong ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 3795814525f4SDarrick J. 
Wong sizeof(gen)); 3796814525f4SDarrick J. Wong } 3797814525f4SDarrick J. Wong 3798814525f4SDarrick J. Wong if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 3799814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "checksum invalid"); 3800814525f4SDarrick J. Wong ret = -EIO; 3801814525f4SDarrick J. Wong goto bad_inode; 3802814525f4SDarrick J. Wong } 3803814525f4SDarrick J. Wong 3804ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 380508cefc7aSEric W. Biederman i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 380608cefc7aSEric W. Biederman i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 3807ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 380808cefc7aSEric W. Biederman i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 380908cefc7aSEric W. Biederman i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 3810ac27a0ecSDave Kleikamp } 381108cefc7aSEric W. Biederman i_uid_write(inode, i_uid); 381208cefc7aSEric W. Biederman i_gid_write(inode, i_gid); 3813bfe86848SMiklos Szeredi set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 3814ac27a0ecSDave Kleikamp 3815353eb83cSTheodore Ts'o ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 381667cf5b09STao Ma ei->i_inline_off = 0; 3817ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 3818ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 3819ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 3820ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes; 3821ac27a0ecSDave Kleikamp * the test is the same one that e2fsck uses. 3822ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 3823ac27a0ecSDave Kleikamp */ 3824ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 3825ac27a0ecSDave Kleikamp if (inode->i_mode == 0 || 3826617ba13bSMingming Cao !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 3827ac27a0ecSDave Kleikamp /* this inode is deleted */ 38281d1fe1eeSDavid Howells ret = -ESTALE; 3829ac27a0ecSDave Kleikamp goto bad_inode; 3830ac27a0ecSDave Kleikamp } 3831ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 3832ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 3833ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 3834ac27a0ecSDave Kleikamp * the process of deleting those. */ 3835ac27a0ecSDave Kleikamp } 3836ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 38370fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 38387973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 3839a9e81742STheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 3840a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 3841a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 3842a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 3843ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3844a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 3845a9e7f447SDmitry Monakhov ei->i_reserved_quota = 0; 3846a9e7f447SDmitry Monakhov #endif 3847ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 3848ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 3849a4912123STheodore Ts'o ei->i_last_alloc_group = ~0; 3850ac27a0ecSDave Kleikamp /* 3851ac27a0ecSDave Kleikamp * NOTE!
The in-memory inode i_data array is in little-endian order 3852ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 3853ac27a0ecSDave Kleikamp */ 3854617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 3855ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 3856ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 3857ac27a0ecSDave Kleikamp 3858b436b9beSJan Kara /* 3859b436b9beSJan Kara * Set transaction id's of transactions that have to be committed 3860b436b9beSJan Kara * to finish f[data]sync. We set them to currently running transaction 3861b436b9beSJan Kara * as we cannot be sure that the inode or some of its metadata isn't 3862b436b9beSJan Kara * part of the transaction - the inode could have been reclaimed and 3863b436b9beSJan Kara * now it is reread from disk. 3864b436b9beSJan Kara */ 3865b436b9beSJan Kara if (journal) { 3866b436b9beSJan Kara transaction_t *transaction; 3867b436b9beSJan Kara tid_t tid; 3868b436b9beSJan Kara 3869a931da6aSTheodore Ts'o read_lock(&journal->j_state_lock); 3870b436b9beSJan Kara if (journal->j_running_transaction) 3871b436b9beSJan Kara transaction = journal->j_running_transaction; 3872b436b9beSJan Kara else 3873b436b9beSJan Kara transaction = journal->j_committing_transaction; 3874b436b9beSJan Kara if (transaction) 3875b436b9beSJan Kara tid = transaction->t_tid; 3876b436b9beSJan Kara else 3877b436b9beSJan Kara tid = journal->j_commit_sequence; 3878a931da6aSTheodore Ts'o read_unlock(&journal->j_state_lock); 3879b436b9beSJan Kara ei->i_sync_tid = tid; 3880b436b9beSJan Kara ei->i_datasync_tid = tid; 3881b436b9beSJan Kara } 3882b436b9beSJan Kara 38830040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3884ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 3885ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. */ 3886617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 3887617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 3888ac27a0ecSDave Kleikamp } else { 3889152a7b0aSTao Ma ext4_iget_extra_inode(inode, raw_inode, ei); 3890ac27a0ecSDave Kleikamp } 3891814525f4SDarrick J. 
Wong } 3892ac27a0ecSDave Kleikamp 3893ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 3894ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 3895ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 3896ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 3897ef7f3835SKalpak Shah 389825ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 389925ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 390025ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 390125ec56b5SJean Noel Cordenner inode->i_version |= 390225ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 390325ec56b5SJean Noel Cordenner } 390425ec56b5SJean Noel Cordenner 3905c4b5a614STheodore Ts'o ret = 0; 3906485c26ecSTheodore Ts'o if (ei->i_file_acl && 39071032988cSTheodore Ts'o !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 390824676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 390924676da4STheodore Ts'o ei->i_file_acl); 3910485c26ecSTheodore Ts'o ret = -EIO; 3911485c26ecSTheodore Ts'o goto bad_inode; 3912f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 3913f19d5870STao Ma if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 3914f19d5870STao Ma if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 3915c4b5a614STheodore Ts'o (S_ISLNK(inode->i_mode) && 3916f19d5870STao Ma !ext4_inode_is_fast_symlink(inode)))) 39177a262f7cSAneesh Kumar K.V /* Validate extent which is part of inode */ 39187a262f7cSAneesh Kumar K.V ret = ext4_ext_check_inode(inode); 3919fe2c8191SThiemo Nagel } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 3920fe2c8191SThiemo Nagel (S_ISLNK(inode->i_mode) && 3921fe2c8191SThiemo Nagel !ext4_inode_is_fast_symlink(inode))) { 3922fe2c8191SThiemo Nagel /* Validate block references which are part of inode */ 39231f7d1e77STheodore Ts'o ret = ext4_ind_check_inode(inode); 3924fe2c8191SThiemo Nagel } 3925f19d5870STao Ma } 3926567f3e9aSTheodore Ts'o if (ret) 39277a262f7cSAneesh Kumar K.V goto bad_inode; 39287a262f7cSAneesh Kumar K.V 3929ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 3930617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 3931617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 3932617ba13bSMingming Cao ext4_set_aops(inode); 3933ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 3934617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 3935617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 3936ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 3937e83c1397SDuane Griffin if (ext4_inode_is_fast_symlink(inode)) { 3938617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 3939e83c1397SDuane Griffin nd_terminate_link(ei->i_data, inode->i_size, 3940e83c1397SDuane Griffin sizeof(ei->i_data) - 1); 3941e83c1397SDuane Griffin } else { 3942617ba13bSMingming Cao inode->i_op = &ext4_symlink_inode_operations; 3943617ba13bSMingming Cao ext4_set_aops(inode); 3944ac27a0ecSDave Kleikamp } 3945563bdd61STheodore Ts'o } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 3946563bdd61STheodore Ts'o S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 3947617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 3948ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 3949ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3950ac27a0ecSDave Kleikamp 
old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 3951ac27a0ecSDave Kleikamp else 3952ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3953ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 3954563bdd61STheodore Ts'o } else { 3955563bdd61STheodore Ts'o ret = -EIO; 395624676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 3957563bdd61STheodore Ts'o goto bad_inode; 3958ac27a0ecSDave Kleikamp } 3959ac27a0ecSDave Kleikamp brelse(iloc.bh); 3960617ba13bSMingming Cao ext4_set_inode_flags(inode); 39611d1fe1eeSDavid Howells unlock_new_inode(inode); 39621d1fe1eeSDavid Howells return inode; 3963ac27a0ecSDave Kleikamp 3964ac27a0ecSDave Kleikamp bad_inode: 3965567f3e9aSTheodore Ts'o brelse(iloc.bh); 39661d1fe1eeSDavid Howells iget_failed(inode); 39671d1fe1eeSDavid Howells return ERR_PTR(ret); 3968ac27a0ecSDave Kleikamp } 3969ac27a0ecSDave Kleikamp 39700fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 39710fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 39720fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 39730fc1b451SAneesh Kumar K.V { 39740fc1b451SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 39750fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks; 39760fc1b451SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 39770fc1b451SAneesh Kumar K.V 39780fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) { 39790fc1b451SAneesh Kumar K.V /* 39804907cb7bSAnatol Pomozov * i_blocks can be represented in a 32 bit variable 39810fc1b451SAneesh Kumar K.V * as a multiple of 512 bytes 39820fc1b451SAneesh Kumar K.V */ 39838180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39840fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0; 398584a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 3986f287a1a5STheodore Ts'o return 0; 3987f287a1a5STheodore Ts'o } 3988f287a1a5STheodore Ts'o if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) 3989f287a1a5STheodore Ts'o return -EFBIG; 3990f287a1a5STheodore Ts'o 3991f287a1a5STheodore Ts'o if (i_blocks <= 0xffffffffffffULL) { 39920fc1b451SAneesh Kumar K.V /* 39930fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable 39940fc1b451SAneesh Kumar K.V * as a multiple of 512 bytes 39950fc1b451SAneesh Kumar K.V */ 39968180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 39970fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 399884a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 39990fc1b451SAneesh Kumar K.V } else { 400084a8dce2SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 40018180a562SAneesh Kumar K.V /* i_blocks is stored in units of the file system block size */ 40028180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9); 40038180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 40048180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 40050fc1b451SAneesh Kumar K.V } 4006f287a1a5STheodore Ts'o return 0; 40070fc1b451SAneesh Kumar K.V } 40080fc1b451SAneesh Kumar K.V 4009ac27a0ecSDave Kleikamp /* 4010ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the 4011ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the 4012ac27a0ecSDave Kleikamp * buffer_head in the inode location struct.
4013ac27a0ecSDave Kleikamp * 4014ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh. 4015ac27a0ecSDave Kleikamp */ 4016617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle, 4017ac27a0ecSDave Kleikamp struct inode *inode, 4018830156c7SFrank Mayhar struct ext4_iloc *iloc) 4019ac27a0ecSDave Kleikamp { 4020617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4021617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 4022ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh; 4023ac27a0ecSDave Kleikamp int err = 0, rc, block; 4024b71fc079SJan Kara int need_datasync = 0; 402508cefc7aSEric W. Biederman uid_t i_uid; 402608cefc7aSEric W. Biederman gid_t i_gid; 4027ac27a0ecSDave Kleikamp 4028ac27a0ecSDave Kleikamp /* For fields not tracked in the in-memory inode, 4029ac27a0ecSDave Kleikamp * initialise them to zero for new inodes. */ 403019f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 4031617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4032ac27a0ecSDave Kleikamp 4033ff9ddf7eSJan Kara ext4_get_inode_flags(ei); 4034ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode); 403508cefc7aSEric W. Biederman i_uid = i_uid_read(inode); 403608cefc7aSEric W. Biederman i_gid = i_gid_read(inode); 4037ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 403808cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 403908cefc7aSEric W. Biederman raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 4040ac27a0ecSDave Kleikamp /* 4041ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels. Otherwise, old inodes get 4042ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact 4043ac27a0ecSDave Kleikamp */ 4044ac27a0ecSDave Kleikamp if (!ei->i_dtime) { 4045ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 404608cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_uid)); 4047ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 404808cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_gid)); 4049ac27a0ecSDave Kleikamp } else { 4050ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4051ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4052ac27a0ecSDave Kleikamp } 4053ac27a0ecSDave Kleikamp } else { 405408cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 405508cefc7aSEric W.
Biederman raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 4056ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4057ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4058ac27a0ecSDave Kleikamp } 4059ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4060ef7f3835SKalpak Shah 4061ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4062ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4063ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4064ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4065ef7f3835SKalpak Shah 40660fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 40670fc1b451SAneesh Kumar K.V goto out_brelse; 4068ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4069353eb83cSTheodore Ts'o raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 40709b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 40719b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 4072a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 4073a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 40747973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4075b71fc079SJan Kara if (ei->i_disksize != ext4_isize(raw_inode)) { 4076a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 4077b71fc079SJan Kara need_datasync = 1; 4078b71fc079SJan Kara } 4079ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 4080ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 4081617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4082617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4083617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 4084617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4085ac27a0ecSDave Kleikamp /* If this is the first large file 4086ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
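 * (EXT4_FEATURE_RO_COMPAT_LARGE_FILE; being an RO_COMPAT feature, old
 * kernels that do not understand it can still mount read-only.)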
4087ac27a0ecSDave Kleikamp */ 4088617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, 4089617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 4090ac27a0ecSDave Kleikamp if (err) 4091ac27a0ecSDave Kleikamp goto out_brelse; 4092617ba13bSMingming Cao ext4_update_dynamic_rev(sb); 4093617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb, 4094617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 40950390131bSFrank Mayhar ext4_handle_sync(handle); 4096b50924c2SArtem Bityutskiy err = ext4_handle_dirty_super(handle, sb); 4097ac27a0ecSDave Kleikamp } 4098ac27a0ecSDave Kleikamp } 4099ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4100ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 4101ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) { 4102ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 4103ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev)); 4104ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0; 4105ac27a0ecSDave Kleikamp } else { 4106ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0; 4107ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 4108ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev)); 4109ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0; 4110ac27a0ecSDave Kleikamp } 4111f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 4112de9a55b8STheodore Ts'o for (block = 0; block < EXT4_N_BLOCKS; block++) 4113ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block]; 4114f19d5870STao Ma } 4115ac27a0ecSDave Kleikamp 411625ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 411725ec56b5SJean Noel Cordenner if (ei->i_extra_isize) { 411825ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 411925ec56b5SJean Noel Cordenner raw_inode->i_version_hi = 412025ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32); 4121ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 412225ec56b5SJean Noel Cordenner } 412325ec56b5SJean Noel Cordenner 4124814525f4SDarrick J. Wong ext4_inode_csum_set(inode, raw_inode, ei); 4125814525f4SDarrick J. Wong 41260390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 412773b50c1cSCurt Wohlgemuth rc = ext4_handle_dirty_metadata(handle, NULL, bh); 4128ac27a0ecSDave Kleikamp if (!err) 4129ac27a0ecSDave Kleikamp err = rc; 413019f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_NEW); 4131ac27a0ecSDave Kleikamp 4132b71fc079SJan Kara ext4_update_inode_fsync_trans(handle, inode, need_datasync); 4133ac27a0ecSDave Kleikamp out_brelse: 4134ac27a0ecSDave Kleikamp brelse(bh); 4135617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4136ac27a0ecSDave Kleikamp return err; 4137ac27a0ecSDave Kleikamp } 4138ac27a0ecSDave Kleikamp 4139ac27a0ecSDave Kleikamp /* 4140617ba13bSMingming Cao * ext4_write_inode() 4141ac27a0ecSDave Kleikamp * 4142ac27a0ecSDave Kleikamp * We are called from a few places: 4143ac27a0ecSDave Kleikamp * 4144ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files. 4145ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running 41464907cb7bSAnatol Pomozov * transaction to commit. 4147ac27a0ecSDave Kleikamp * 4148ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such. 4149ac27a0ecSDave Kleikamp * We wait on commit, if told to. 4150ac27a0ecSDave Kleikamp * 4151ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true) 4152ac27a0ecSDave Kleikamp * Here we simply return.
We can't afford to block kswapd on the 4153ac27a0ecSDave Kleikamp * journal commit. 4154ac27a0ecSDave Kleikamp * 4155ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything, 4156ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in 4157617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4158ac27a0ecSDave Kleikamp * knfsd. 4159ac27a0ecSDave Kleikamp * 4160ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the 4161ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in 4162ac27a0ecSDave Kleikamp * which we are interested. 4163ac27a0ecSDave Kleikamp * 4164ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code: 4165ac27a0ecSDave Kleikamp * 4166ac27a0ecSDave Kleikamp * mark_inode_dirty(inode) 4167ac27a0ecSDave Kleikamp * stuff(); 4168ac27a0ecSDave Kleikamp * inode->i_size = expr; 4169ac27a0ecSDave Kleikamp * 4170ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while 4171ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode 4172ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list. 4173ac27a0ecSDave Kleikamp */ 4174a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 4175ac27a0ecSDave Kleikamp { 417691ac6f43SFrank Mayhar int err; 417791ac6f43SFrank Mayhar 4178ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC) 4179ac27a0ecSDave Kleikamp return 0; 4180ac27a0ecSDave Kleikamp 418191ac6f43SFrank Mayhar if (EXT4_SB(inode->i_sb)->s_journal) { 4182617ba13bSMingming Cao if (ext4_journal_current_handle()) { 4183b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4184ac27a0ecSDave Kleikamp dump_stack(); 4185ac27a0ecSDave Kleikamp return -EIO; 4186ac27a0ecSDave Kleikamp } 4187ac27a0ecSDave Kleikamp 4188a9185b41SChristoph Hellwig if (wbc->sync_mode != WB_SYNC_ALL) 4189ac27a0ecSDave Kleikamp return 0; 4190ac27a0ecSDave Kleikamp 419191ac6f43SFrank Mayhar err = ext4_force_commit(inode->i_sb); 419291ac6f43SFrank Mayhar } else { 419391ac6f43SFrank Mayhar struct ext4_iloc iloc; 419491ac6f43SFrank Mayhar 41958b472d73SCurt Wohlgemuth err = __ext4_get_inode_loc(inode, &iloc, 0); 419691ac6f43SFrank Mayhar if (err) 419791ac6f43SFrank Mayhar return err; 4198a9185b41SChristoph Hellwig if (wbc->sync_mode == WB_SYNC_ALL) 4199830156c7SFrank Mayhar sync_dirty_buffer(iloc.bh); 4200830156c7SFrank Mayhar if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 4201c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 4202c398eda0STheodore Ts'o "IO error syncing inode"); 4203830156c7SFrank Mayhar err = -EIO; 4204830156c7SFrank Mayhar } 4205fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 420691ac6f43SFrank Mayhar } 420791ac6f43SFrank Mayhar return err; 4208ac27a0ecSDave Kleikamp } 4209ac27a0ecSDave Kleikamp 4210ac27a0ecSDave Kleikamp /* 421153e87268SJan Kara * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate 421253e87268SJan Kara * buffers that are attached to a page straddling i_size and are undergoing 421353e87268SJan Kara * commit. In that case we have to wait for commit to finish and try again.
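 * (__ext4_journalled_invalidatepage() returns -EBUSY in that case; the
 * loop below then waits for the committing transaction and retries.)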
421453e87268SJan Kara */ 421553e87268SJan Kara static void ext4_wait_for_tail_page_commit(struct inode *inode) 421653e87268SJan Kara { 421753e87268SJan Kara struct page *page; 421853e87268SJan Kara unsigned offset; 421953e87268SJan Kara journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; 422053e87268SJan Kara tid_t commit_tid = 0; 422153e87268SJan Kara int ret; 422253e87268SJan Kara 422353e87268SJan Kara offset = inode->i_size & (PAGE_CACHE_SIZE - 1); 422453e87268SJan Kara /* 422553e87268SJan Kara * All buffers in the last page remain valid? Then there's nothing to 422653e87268SJan Kara * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == 422753e87268SJan Kara * blocksize case 422853e87268SJan Kara */ 422953e87268SJan Kara if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits)) 423053e87268SJan Kara return; 423153e87268SJan Kara while (1) { 423253e87268SJan Kara page = find_lock_page(inode->i_mapping, 423353e87268SJan Kara inode->i_size >> PAGE_CACHE_SHIFT); 423453e87268SJan Kara if (!page) 423553e87268SJan Kara return; 423653e87268SJan Kara ret = __ext4_journalled_invalidatepage(page, offset); 423753e87268SJan Kara unlock_page(page); 423853e87268SJan Kara page_cache_release(page); 423953e87268SJan Kara if (ret != -EBUSY) 424053e87268SJan Kara return; 424153e87268SJan Kara commit_tid = 0; 424253e87268SJan Kara read_lock(&journal->j_state_lock); 424353e87268SJan Kara if (journal->j_committing_transaction) 424453e87268SJan Kara commit_tid = journal->j_committing_transaction->t_tid; 424553e87268SJan Kara read_unlock(&journal->j_state_lock); 424653e87268SJan Kara if (commit_tid) 424753e87268SJan Kara jbd2_log_wait_commit(journal, commit_tid); 424853e87268SJan Kara } 424953e87268SJan Kara } 425053e87268SJan Kara 425153e87268SJan Kara /* 4252617ba13bSMingming Cao * ext4_setattr() 4253ac27a0ecSDave Kleikamp * 4254ac27a0ecSDave Kleikamp * Called from notify_change. 4255ac27a0ecSDave Kleikamp * 4256ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as 4257ac27a0ecSDave Kleikamp * possible. In particular, we want to make sure that when the VFS 4258ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify 4259ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of 4260ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any 4261ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on 4262ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will 4263ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will 4264ac27a0ecSDave Kleikamp * leave these blocks visible to the user.) 4265ac27a0ecSDave Kleikamp * 4266678aaf48SJan Kara * Another thing we have to ensure is that if we are in ordered mode 4267678aaf48SJan Kara * and the inode is still attached to the committing transaction, we must 4268678aaf48SJan Kara * start writeout of all the dirty pages which are being truncated. 4269678aaf48SJan Kara * This way we are sure that all the data written in the previous 4270678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under 4271678aaf48SJan Kara * writeback). 4272678aaf48SJan Kara * 4273678aaf48SJan Kara * Called with inode->i_mutex down.
4274ac27a0ecSDave Kleikamp  */
4275617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4276ac27a0ecSDave Kleikamp {
4277ac27a0ecSDave Kleikamp 	struct inode *inode = dentry->d_inode;
4278ac27a0ecSDave Kleikamp 	int error, rc = 0;
42793d287de3SDmitry Monakhov 	int orphan = 0;
4280ac27a0ecSDave Kleikamp 	const unsigned int ia_valid = attr->ia_valid;
4281ac27a0ecSDave Kleikamp
4282ac27a0ecSDave Kleikamp 	error = inode_change_ok(inode, attr);
4283ac27a0ecSDave Kleikamp 	if (error)
4284ac27a0ecSDave Kleikamp 		return error;
4285ac27a0ecSDave Kleikamp
428612755627SDmitry Monakhov 	if (is_quota_modification(inode, attr))
4287871a2931SChristoph Hellwig 		dquot_initialize(inode);
428808cefc7aSEric W. Biederman 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
428908cefc7aSEric W. Biederman 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4290ac27a0ecSDave Kleikamp 		handle_t *handle;
4291ac27a0ecSDave Kleikamp
4292ac27a0ecSDave Kleikamp 		/* (user+group)*(old+new) structure, inode write (sb,
4293ac27a0ecSDave Kleikamp 		 * inode block, ? - but truncate inode update has it) */
42945aca07ebSDmitry Monakhov 		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
4295194074acSDmitry Monakhov 					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
4296ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4297ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4298ac27a0ecSDave Kleikamp 			goto err_out;
4299ac27a0ecSDave Kleikamp 		}
4300b43fa828SChristoph Hellwig 		error = dquot_transfer(inode, attr);
4301ac27a0ecSDave Kleikamp 		if (error) {
4302617ba13bSMingming Cao 			ext4_journal_stop(handle);
4303ac27a0ecSDave Kleikamp 			return error;
4304ac27a0ecSDave Kleikamp 		}
4305ac27a0ecSDave Kleikamp 		/* Update corresponding info in inode so that everything is in
4306ac27a0ecSDave Kleikamp 		 * one transaction */
4307ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_UID)
4308ac27a0ecSDave Kleikamp 			inode->i_uid = attr->ia_uid;
4309ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_GID)
4310ac27a0ecSDave Kleikamp 			inode->i_gid = attr->ia_gid;
4311617ba13bSMingming Cao 		error = ext4_mark_inode_dirty(handle, inode);
4312617ba13bSMingming Cao 		ext4_journal_stop(handle);
4313ac27a0ecSDave Kleikamp 	}
4314ac27a0ecSDave Kleikamp
4315e2b46574SEric Sandeen 	if (attr->ia_valid & ATTR_SIZE) {
4316562c72aaSChristoph Hellwig
431712e9b892SDmitry Monakhov 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4318e2b46574SEric Sandeen 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4319e2b46574SEric Sandeen
43200c095c7fSTheodore Ts'o 			if (attr->ia_size > sbi->s_bitmap_maxbytes)
43210c095c7fSTheodore Ts'o 				return -EFBIG;
4322e2b46574SEric Sandeen 		}
4323e2b46574SEric Sandeen 	}
4324e2b46574SEric Sandeen
4325ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode) &&
4326c8d46e41SJiaying Zhang 	    attr->ia_valid & ATTR_SIZE &&
4327072bd7eaSTheodore Ts'o 	    (attr->ia_size < inode->i_size)) {
4328ac27a0ecSDave Kleikamp 		handle_t *handle;
4329ac27a0ecSDave Kleikamp
4330617ba13bSMingming Cao 		handle = ext4_journal_start(inode, 3);
4331ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4332ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4333ac27a0ecSDave Kleikamp 			goto err_out;
4334ac27a0ecSDave Kleikamp 		}
43353d287de3SDmitry Monakhov 		if (ext4_handle_valid(handle)) {
4336617ba13bSMingming Cao 			error = ext4_orphan_add(handle, inode);
43373d287de3SDmitry Monakhov 			orphan = 1;
43383d287de3SDmitry Monakhov 		}
4339617ba13bSMingming Cao 		EXT4_I(inode)->i_disksize = attr->ia_size;
4340617ba13bSMingming Cao 		rc = ext4_mark_inode_dirty(handle, inode);
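		/*
		 * (Editor's note: "error" still holds the ext4_orphan_add()
		 * result at this point; it takes precedence, and the
		 * mark-dirty result in "rc" is only reported when no earlier
		 * error is pending.)
		 */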
4341ac27a0ecSDave Kleikamp 		if (!error)
4342ac27a0ecSDave Kleikamp 			error = rc;
4343617ba13bSMingming Cao 		ext4_journal_stop(handle);
4344678aaf48SJan Kara
4345678aaf48SJan Kara 		if (ext4_should_order_data(inode)) {
4346678aaf48SJan Kara 			error = ext4_begin_ordered_truncate(inode,
4347678aaf48SJan Kara 							    attr->ia_size);
4348678aaf48SJan Kara 			if (error) {
4349678aaf48SJan Kara 				/* Do as much error cleanup as possible */
4350678aaf48SJan Kara 				handle = ext4_journal_start(inode, 3);
4351678aaf48SJan Kara 				if (IS_ERR(handle)) {
4352678aaf48SJan Kara 					ext4_orphan_del(NULL, inode);
4353678aaf48SJan Kara 					goto err_out;
4354678aaf48SJan Kara 				}
4355678aaf48SJan Kara 				ext4_orphan_del(handle, inode);
43563d287de3SDmitry Monakhov 				orphan = 0;
4357678aaf48SJan Kara 				ext4_journal_stop(handle);
4358678aaf48SJan Kara 				goto err_out;
4359678aaf48SJan Kara 			}
4360678aaf48SJan Kara 		}
4361ac27a0ecSDave Kleikamp 	}
4362ac27a0ecSDave Kleikamp
4363072bd7eaSTheodore Ts'o 	if (attr->ia_valid & ATTR_SIZE) {
436453e87268SJan Kara 		if (attr->ia_size != inode->i_size) {
436553e87268SJan Kara 			loff_t oldsize = inode->i_size;
436653e87268SJan Kara
436753e87268SJan Kara 			i_size_write(inode, attr->ia_size);
436853e87268SJan Kara 			/*
436953e87268SJan Kara 			 * Blocks are going to be removed from the inode. Wait
437053e87268SJan Kara 			 * for dio in flight. Temporarily disable
437153e87268SJan Kara 			 * dioread_nolock to prevent livelock.
437253e87268SJan Kara 			 */
43731b65007eSDmitry Monakhov 			if (orphan) {
437453e87268SJan Kara 				if (!ext4_should_journal_data(inode)) {
43751b65007eSDmitry Monakhov 					ext4_inode_block_unlocked_dio(inode);
43761c9114f9SDmitry Monakhov 					inode_dio_wait(inode);
43771b65007eSDmitry Monakhov 					ext4_inode_resume_unlocked_dio(inode);
437853e87268SJan Kara 				} else
437953e87268SJan Kara 					ext4_wait_for_tail_page_commit(inode);
43801b65007eSDmitry Monakhov 			}
438153e87268SJan Kara 			/*
438253e87268SJan Kara 			 * Truncate pagecache after we've waited for commit
438353e87268SJan Kara 			 * in data=journal mode to make pages freeable.
438453e87268SJan Kara 			 */
438553e87268SJan Kara 			truncate_pagecache(inode, oldsize, inode->i_size);
43861c9114f9SDmitry Monakhov 		}
4387072bd7eaSTheodore Ts'o 		ext4_truncate(inode);
4388072bd7eaSTheodore Ts'o 	}
4389ac27a0ecSDave Kleikamp
43901025774cSChristoph Hellwig 	if (!rc) {
43911025774cSChristoph Hellwig 		setattr_copy(inode, attr);
43921025774cSChristoph Hellwig 		mark_inode_dirty(inode);
43931025774cSChristoph Hellwig 	}
43941025774cSChristoph Hellwig
43951025774cSChristoph Hellwig 	/*
43961025774cSChristoph Hellwig 	 * If the call to ext4_truncate failed to get a transaction handle at
43971025774cSChristoph Hellwig 	 * all, we need to clean up the in-core orphan list manually.
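	 * (Editor's note: the i_nlink check below presumably keeps an
	 * unlinked inode on the orphan list, where it still belongs until
	 * the final iput() completes the delete.)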
43981025774cSChristoph Hellwig 	 */
43993d287de3SDmitry Monakhov 	if (orphan && inode->i_nlink)
4400617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
4401ac27a0ecSDave Kleikamp
4402ac27a0ecSDave Kleikamp 	if (!rc && (ia_valid & ATTR_MODE))
4403617ba13bSMingming Cao 		rc = ext4_acl_chmod(inode);
4404ac27a0ecSDave Kleikamp
4405ac27a0ecSDave Kleikamp err_out:
4406617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, error);
4407ac27a0ecSDave Kleikamp 	if (!error)
4408ac27a0ecSDave Kleikamp 		error = rc;
4409ac27a0ecSDave Kleikamp 	return error;
4410ac27a0ecSDave Kleikamp }
4411ac27a0ecSDave Kleikamp
44123e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
44133e3398a0SMingming Cao 		 struct kstat *stat)
44143e3398a0SMingming Cao {
44153e3398a0SMingming Cao 	struct inode *inode;
44163e3398a0SMingming Cao 	unsigned long delalloc_blocks;
44173e3398a0SMingming Cao
44183e3398a0SMingming Cao 	inode = dentry->d_inode;
44193e3398a0SMingming Cao 	generic_fillattr(inode, stat);
44203e3398a0SMingming Cao
44213e3398a0SMingming Cao 	/*
44223e3398a0SMingming Cao 	 * We can't update i_blocks if the block allocation is delayed;
44233e3398a0SMingming Cao 	 * otherwise, in the case of a system crash before the real block
44243e3398a0SMingming Cao 	 * allocation is done, we would have i_blocks inconsistent with
44253e3398a0SMingming Cao 	 * the on-disk file blocks.
44263e3398a0SMingming Cao 	 * We always keep i_blocks updated together with the real
44273e3398a0SMingming Cao 	 * allocation. But so as not to confuse userspace, stat
44283e3398a0SMingming Cao 	 * will return the blocks that include the delayed allocation
44293e3398a0SMingming Cao 	 * blocks for this file.
44303e3398a0SMingming Cao 	 */
443196607551STao Ma 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
443296607551STao Ma 				   EXT4_I(inode)->i_reserved_data_blocks);
44333e3398a0SMingming Cao
44343e3398a0SMingming Cao 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
44353e3398a0SMingming Cao 	return 0;
44363e3398a0SMingming Cao }
4437ac27a0ecSDave Kleikamp
4438a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4439a02908f1SMingming Cao {
444012e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
44418bb2b247SAmir Goldstein 		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4442ac51d837STheodore Ts'o 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4443a02908f1SMingming Cao }
4444ac51d837STheodore Ts'o
4445a02908f1SMingming Cao /*
4446a02908f1SMingming Cao  * Account for index blocks, block group bitmaps and block group
4447a02908f1SMingming Cao  * descriptor blocks if we modify data blocks and index blocks. In the
4448a02908f1SMingming Cao  * worst case, the index blocks spread over different block groups.
4449a02908f1SMingming Cao  *
4450a02908f1SMingming Cao  * If data blocks are discontiguous, they may spread over
44514907cb7bSAnatol Pomozov  * different block groups too. Even if they are contiguous, with flexbg
4452a02908f1SMingming Cao  * they could still cross a block group boundary.
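 *
 * (Editor's worked example, illustrative only: for a contiguous chunk
 *  ("Chunk" set) needing idxblocks index blocks, the function below
 *  charges idxblocks credits, plus idxblocks + 1 group bitmaps and the
 *  same number of group descriptor blocks (each capped at the group and
 *  gdb counts), plus EXT4_META_TRANS_BLOCKS() for the superblock, inode,
 *  quota and xattr blocks.)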
4453a02908f1SMingming Cao  *
4454a02908f1SMingming Cao  * Also account for superblock, inode, quota and xattr blocks.
4455a02908f1SMingming Cao  */
44561f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4457a02908f1SMingming Cao {
44588df9675fSTheodore Ts'o 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
44598df9675fSTheodore Ts'o 	int gdpblocks;
4460a02908f1SMingming Cao 	int idxblocks;
4461a02908f1SMingming Cao 	int ret = 0;
4462a02908f1SMingming Cao
4463a02908f1SMingming Cao 	/*
4464a02908f1SMingming Cao 	 * How many index blocks do we need to touch to modify nrblocks?
4465a02908f1SMingming Cao 	 * The "Chunk" flag indicates whether the nrblocks are
4466a02908f1SMingming Cao 	 * physically contiguous on disk.
4467a02908f1SMingming Cao 	 *
4468a02908f1SMingming Cao 	 * Direct IO and fallocate call get_block to allocate
4469a02908f1SMingming Cao 	 * one single extent at a time, so they can set the "Chunk" flag.
4470a02908f1SMingming Cao 	 */
4471a02908f1SMingming Cao 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4472a02908f1SMingming Cao
4473a02908f1SMingming Cao 	ret = idxblocks;
4474a02908f1SMingming Cao
4475a02908f1SMingming Cao 	/*
4476a02908f1SMingming Cao 	 * Now let's see how many group bitmaps and group descriptors we
4477a02908f1SMingming Cao 	 * need to account for.
4478a02908f1SMingming Cao 	 */
4479a02908f1SMingming Cao 	groups = idxblocks;
4480a02908f1SMingming Cao 	if (chunk)
4481a02908f1SMingming Cao 		groups += 1;
4482ac27a0ecSDave Kleikamp 	else
4483a02908f1SMingming Cao 		groups += nrblocks;
4484ac27a0ecSDave Kleikamp
4485a02908f1SMingming Cao 	gdpblocks = groups;
44868df9675fSTheodore Ts'o 	if (groups > ngroups)
44878df9675fSTheodore Ts'o 		groups = ngroups;
4488a02908f1SMingming Cao 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4489a02908f1SMingming Cao 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4490a02908f1SMingming Cao
4491a02908f1SMingming Cao 	/* bitmaps and block group descriptor blocks */
4492a02908f1SMingming Cao 	ret += groups + gdpblocks;
4493a02908f1SMingming Cao
4494a02908f1SMingming Cao 	/* Blocks for super block, inode, quota and xattr blocks */
4495a02908f1SMingming Cao 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4496ac27a0ecSDave Kleikamp
4497ac27a0ecSDave Kleikamp 	return ret;
4498ac27a0ecSDave Kleikamp }
4499ac27a0ecSDave Kleikamp
4500ac27a0ecSDave Kleikamp /*
450125985edcSLucas De Marchi  * Calculate the total number of credits to reserve to fit
4502f3bd1f3fSMingming Cao  * the modification of a single page into a single transaction,
4503f3bd1f3fSMingming Cao  * which may include multiple chunks of block allocations.
4504a02908f1SMingming Cao  *
4505525f4ed8SMingming Cao  * This could be called via ext4_write_begin()
4506a02908f1SMingming Cao  *
4507525f4ed8SMingming Cao  * We need to consider the worst case, when
4508a02908f1SMingming Cao  * there is one new block per extent.
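 *
 * (Editor's illustration, assuming 4k blocks and 4k pages so that
 *  bpp == 1: the reservation below is ext4_meta_trans_blocks(inode, 1, 0),
 *  plus one extra data-block credit in data=journal mode.)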
4509a02908f1SMingming Cao  */
4510a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode)
4511a02908f1SMingming Cao {
4512a02908f1SMingming Cao 	int bpp = ext4_journal_blocks_per_page(inode);
4513a02908f1SMingming Cao 	int ret;
4514a02908f1SMingming Cao
4515a02908f1SMingming Cao 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
4516a02908f1SMingming Cao
4517a02908f1SMingming Cao 	/* Account for data blocks for journalled mode */
4518a02908f1SMingming Cao 	if (ext4_should_journal_data(inode))
4519a02908f1SMingming Cao 		ret += bpp;
4520a02908f1SMingming Cao 	return ret;
4521a02908f1SMingming Cao }
4522f3bd1f3fSMingming Cao
4523f3bd1f3fSMingming Cao /*
4524f3bd1f3fSMingming Cao  * Calculate the journal credits for a chunk of data modification.
4525f3bd1f3fSMingming Cao  *
4526f3bd1f3fSMingming Cao  * This is called from DIO, fallocate or whoever is calling
452779e83036SEric Sandeen  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4528f3bd1f3fSMingming Cao  *
4529f3bd1f3fSMingming Cao  * Journal buffers for data blocks are not included here, as DIO
4530f3bd1f3fSMingming Cao  * and fallocate do not need to journal data buffers.
4531f3bd1f3fSMingming Cao  */
4532f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4533f3bd1f3fSMingming Cao {
4534f3bd1f3fSMingming Cao 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4535f3bd1f3fSMingming Cao }
4536f3bd1f3fSMingming Cao
4537a02908f1SMingming Cao /*
4538617ba13bSMingming Cao  * The caller must have previously called ext4_reserve_inode_write().
4539ac27a0ecSDave Kleikamp  * Given this, we know that the caller already has write access to iloc->bh.
4540ac27a0ecSDave Kleikamp  */
4541617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
4542617ba13bSMingming Cao 			 struct inode *inode, struct ext4_iloc *iloc)
4543ac27a0ecSDave Kleikamp {
4544ac27a0ecSDave Kleikamp 	int err = 0;
4545ac27a0ecSDave Kleikamp
4546c64db50eSTheodore Ts'o 	if (IS_I_VERSION(inode))
454725ec56b5SJean Noel Cordenner 		inode_inc_iversion(inode);
454825ec56b5SJean Noel Cordenner
4549ac27a0ecSDave Kleikamp 	/* the do_update_inode consumes one bh->b_count */
4550ac27a0ecSDave Kleikamp 	get_bh(iloc->bh);
4551ac27a0ecSDave Kleikamp
4552dab291afSMingming Cao 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4553830156c7SFrank Mayhar 	err = ext4_do_update_inode(handle, inode, iloc);
4554ac27a0ecSDave Kleikamp 	put_bh(iloc->bh);
4555ac27a0ecSDave Kleikamp 	return err;
4556ac27a0ecSDave Kleikamp }
4557ac27a0ecSDave Kleikamp
4558ac27a0ecSDave Kleikamp /*
4559ac27a0ecSDave Kleikamp  * On success, we end up with an outstanding reference count against
4560ac27a0ecSDave Kleikamp  * iloc->bh.  This _must_ be cleaned up later.
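 *
 * (Editor's sketch of the usual pairing, mirroring ext4_mark_inode_dirty()
 *  further below:
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err)
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *
 *  where ext4_mark_iloc_dirty() consumes the bh reference taken here.)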
4561ac27a0ecSDave Kleikamp  */
4562ac27a0ecSDave Kleikamp
4563ac27a0ecSDave Kleikamp int
4564617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4565617ba13bSMingming Cao 			 struct ext4_iloc *iloc)
4566ac27a0ecSDave Kleikamp {
45670390131bSFrank Mayhar 	int err;
45680390131bSFrank Mayhar
4569617ba13bSMingming Cao 	err = ext4_get_inode_loc(inode, iloc);
4570ac27a0ecSDave Kleikamp 	if (!err) {
4571ac27a0ecSDave Kleikamp 		BUFFER_TRACE(iloc->bh, "get_write_access");
4572617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, iloc->bh);
4573ac27a0ecSDave Kleikamp 		if (err) {
4574ac27a0ecSDave Kleikamp 			brelse(iloc->bh);
4575ac27a0ecSDave Kleikamp 			iloc->bh = NULL;
4576ac27a0ecSDave Kleikamp 		}
4577ac27a0ecSDave Kleikamp 	}
4578617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4579ac27a0ecSDave Kleikamp 	return err;
4580ac27a0ecSDave Kleikamp }
4581ac27a0ecSDave Kleikamp
4582ac27a0ecSDave Kleikamp /*
45836dd4ee7cSKalpak Shah  * Expand an inode by new_extra_isize bytes.
45846dd4ee7cSKalpak Shah  * Returns 0 on success or a negative error number on failure.
45856dd4ee7cSKalpak Shah  */
45861d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode,
45871d03ec98SAneesh Kumar K.V 				   unsigned int new_extra_isize,
45881d03ec98SAneesh Kumar K.V 				   struct ext4_iloc iloc,
45891d03ec98SAneesh Kumar K.V 				   handle_t *handle)
45906dd4ee7cSKalpak Shah {
45916dd4ee7cSKalpak Shah 	struct ext4_inode *raw_inode;
45926dd4ee7cSKalpak Shah 	struct ext4_xattr_ibody_header *header;
45936dd4ee7cSKalpak Shah
45946dd4ee7cSKalpak Shah 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
45956dd4ee7cSKalpak Shah 		return 0;
45966dd4ee7cSKalpak Shah
45976dd4ee7cSKalpak Shah 	raw_inode = ext4_raw_inode(&iloc);
45986dd4ee7cSKalpak Shah
45996dd4ee7cSKalpak Shah 	header = IHDR(inode, raw_inode);
46006dd4ee7cSKalpak Shah
46016dd4ee7cSKalpak Shah 	/* No extended attributes present */
460219f5fb7aSTheodore Ts'o 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
46036dd4ee7cSKalpak Shah 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
46046dd4ee7cSKalpak Shah 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
46056dd4ee7cSKalpak Shah 		       new_extra_isize);
46066dd4ee7cSKalpak Shah 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
46076dd4ee7cSKalpak Shah 		return 0;
46086dd4ee7cSKalpak Shah 	}
46096dd4ee7cSKalpak Shah
46106dd4ee7cSKalpak Shah 	/* try to expand with EAs present */
46116dd4ee7cSKalpak Shah 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
46126dd4ee7cSKalpak Shah 					  raw_inode, handle);
46136dd4ee7cSKalpak Shah }
46146dd4ee7cSKalpak Shah
46156dd4ee7cSKalpak Shah /*
4616ac27a0ecSDave Kleikamp  * What we do here is to mark the in-core inode as clean with respect to inode
4617ac27a0ecSDave Kleikamp  * dirtiness (it may still be data-dirty).
4618ac27a0ecSDave Kleikamp  * This means that the in-core inode may be reaped by prune_icache
4619ac27a0ecSDave Kleikamp  * without having to perform any I/O.  This is a very good thing,
4620ac27a0ecSDave Kleikamp  * because *any* task may call prune_icache - even ones which
4621ac27a0ecSDave Kleikamp  * have a transaction open against a different journal.
4622ac27a0ecSDave Kleikamp  *
4623ac27a0ecSDave Kleikamp  * Is this cheating?  Not really.  Sure, we haven't written the
4624ac27a0ecSDave Kleikamp  * inode out, but prune_icache isn't a user-visible syncing function.
4625ac27a0ecSDave Kleikamp  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4626ac27a0ecSDave Kleikamp  * we start and wait on commits.
4627ac27a0ecSDave Kleikamp  */
4628617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4629ac27a0ecSDave Kleikamp {
4630617ba13bSMingming Cao 	struct ext4_iloc iloc;
46316dd4ee7cSKalpak Shah 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
46326dd4ee7cSKalpak Shah 	static unsigned int mnt_count;
46336dd4ee7cSKalpak Shah 	int err, ret;
4634ac27a0ecSDave Kleikamp
4635ac27a0ecSDave Kleikamp 	might_sleep();
46367ff9c073STheodore Ts'o 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4637617ba13bSMingming Cao 	err = ext4_reserve_inode_write(handle, inode, &iloc);
46380390131bSFrank Mayhar 	if (ext4_handle_valid(handle) &&
46390390131bSFrank Mayhar 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
464019f5fb7aSTheodore Ts'o 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
46416dd4ee7cSKalpak Shah 		/*
46426dd4ee7cSKalpak Shah 		 * We need extra buffer credits since we may write into the EA
46436dd4ee7cSKalpak Shah 		 * block with this same handle. If journal_extend fails, then
46446dd4ee7cSKalpak Shah 		 * it will only result in a minor loss of functionality for
46456dd4ee7cSKalpak Shah 		 * that inode. If this is felt to be critical, then e2fsck
46466dd4ee7cSKalpak Shah 		 * should be run to force a large enough s_min_extra_isize.
46476dd4ee7cSKalpak Shah 		 */
46486dd4ee7cSKalpak Shah 		if ((jbd2_journal_extend(handle,
46496dd4ee7cSKalpak Shah 					 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
46506dd4ee7cSKalpak Shah 			ret = ext4_expand_extra_isize(inode,
46516dd4ee7cSKalpak Shah 						      sbi->s_want_extra_isize,
46526dd4ee7cSKalpak Shah 						      iloc, handle);
46536dd4ee7cSKalpak Shah 			if (ret) {
465419f5fb7aSTheodore Ts'o 				ext4_set_inode_state(inode,
465519f5fb7aSTheodore Ts'o 						     EXT4_STATE_NO_EXPAND);
4656c1bddad9SAneesh Kumar K.V 				if (mnt_count !=
4657c1bddad9SAneesh Kumar K.V 				    le16_to_cpu(sbi->s_es->s_mnt_count)) {
465812062dddSEric Sandeen 					ext4_warning(inode->i_sb,
46596dd4ee7cSKalpak Shah 					"Unable to expand inode %lu. Delete"
46606dd4ee7cSKalpak Shah 					" some EAs or run e2fsck.",
46616dd4ee7cSKalpak Shah 						inode->i_ino);
4662c1bddad9SAneesh Kumar K.V 					mnt_count =
4663c1bddad9SAneesh Kumar K.V 					  le16_to_cpu(sbi->s_es->s_mnt_count);
46646dd4ee7cSKalpak Shah 				}
46656dd4ee7cSKalpak Shah 			}
46666dd4ee7cSKalpak Shah 		}
46676dd4ee7cSKalpak Shah 	}
4668ac27a0ecSDave Kleikamp 	if (!err)
4669617ba13bSMingming Cao 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4670ac27a0ecSDave Kleikamp 	return err;
4671ac27a0ecSDave Kleikamp }
4672ac27a0ecSDave Kleikamp
4673ac27a0ecSDave Kleikamp /*
4674617ba13bSMingming Cao  * ext4_dirty_inode() is called from __mark_inode_dirty()
4675ac27a0ecSDave Kleikamp  *
4676ac27a0ecSDave Kleikamp  * We're really interested in the case where a file is being extended.
4677ac27a0ecSDave Kleikamp  * i_size has been changed by generic_commit_write() and we thus need
4678ac27a0ecSDave Kleikamp  * to include the updated inode in the current transaction.
4679ac27a0ecSDave Kleikamp  *
46805dd4056dSChristoph Hellwig  * Also, dquot_alloc_block() will always dirty the inode when blocks
4681ac27a0ecSDave Kleikamp  * are allocated to the file.
4682ac27a0ecSDave Kleikamp  *
4683ac27a0ecSDave Kleikamp  * If the inode is marked synchronous, we don't honour that here - doing
4684ac27a0ecSDave Kleikamp  * so would cause a commit on atime updates, which we don't bother doing.
4685ac27a0ecSDave Kleikamp  * We handle synchronous inodes at the highest possible level.
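 *
 * (Editor's note: the return value of ext4_mark_inode_dirty() is ignored
 *  below, presumably because __mark_inode_dirty() callers have no way to
 *  handle the failure anyway.)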
4686ac27a0ecSDave Kleikamp  */
4687aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags)
4688ac27a0ecSDave Kleikamp {
4689ac27a0ecSDave Kleikamp 	handle_t *handle;
4690ac27a0ecSDave Kleikamp
4691617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 2);
4692ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4693ac27a0ecSDave Kleikamp 		goto out;
4694f3dc272fSCurt Wohlgemuth
4695617ba13bSMingming Cao 	ext4_mark_inode_dirty(handle, inode);
4696f3dc272fSCurt Wohlgemuth
4697617ba13bSMingming Cao 	ext4_journal_stop(handle);
4698ac27a0ecSDave Kleikamp out:
4699ac27a0ecSDave Kleikamp 	return;
4700ac27a0ecSDave Kleikamp }
4701ac27a0ecSDave Kleikamp
4702ac27a0ecSDave Kleikamp #if 0
4703ac27a0ecSDave Kleikamp /*
4704ac27a0ecSDave Kleikamp  * Bind an inode's backing buffer_head into this transaction, to prevent
4705ac27a0ecSDave Kleikamp  * it from being flushed to disk early.  Unlike
4706617ba13bSMingming Cao  * ext4_reserve_inode_write, this leaves behind no bh reference and
4707ac27a0ecSDave Kleikamp  * returns no iloc structure, so the caller needs to repeat the iloc
4708ac27a0ecSDave Kleikamp  * lookup to mark the inode dirty later.
4709ac27a0ecSDave Kleikamp  */
4710617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4711ac27a0ecSDave Kleikamp {
4712617ba13bSMingming Cao 	struct ext4_iloc iloc;
4713ac27a0ecSDave Kleikamp
4714ac27a0ecSDave Kleikamp 	int err = 0;
4715ac27a0ecSDave Kleikamp 	if (handle) {
4716617ba13bSMingming Cao 		err = ext4_get_inode_loc(inode, &iloc);
4717ac27a0ecSDave Kleikamp 		if (!err) {
4718ac27a0ecSDave Kleikamp 			BUFFER_TRACE(iloc.bh, "get_write_access");
4719dab291afSMingming Cao 			err = jbd2_journal_get_write_access(handle, iloc.bh);
4720ac27a0ecSDave Kleikamp 			if (!err)
47210390131bSFrank Mayhar 				err = ext4_handle_dirty_metadata(handle,
472273b50c1cSCurt Wohlgemuth 								 NULL,
4723ac27a0ecSDave Kleikamp 								 iloc.bh);
4724ac27a0ecSDave Kleikamp 			brelse(iloc.bh);
4725ac27a0ecSDave Kleikamp 		}
4726ac27a0ecSDave Kleikamp 	}
4727617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4728ac27a0ecSDave Kleikamp 	return err;
4729ac27a0ecSDave Kleikamp }
4730ac27a0ecSDave Kleikamp #endif
4731ac27a0ecSDave Kleikamp
4732617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val)
4733ac27a0ecSDave Kleikamp {
4734ac27a0ecSDave Kleikamp 	journal_t *journal;
4735ac27a0ecSDave Kleikamp 	handle_t *handle;
4736ac27a0ecSDave Kleikamp 	int err;
4737ac27a0ecSDave Kleikamp
4738ac27a0ecSDave Kleikamp 	/*
4739ac27a0ecSDave Kleikamp 	 * We have to be very careful here: changing a data block's
4740ac27a0ecSDave Kleikamp 	 * journaling status dynamically is dangerous.  If we write a
4741ac27a0ecSDave Kleikamp 	 * data block to the journal, change the status and then delete
4742ac27a0ecSDave Kleikamp 	 * that block, we risk forgetting to revoke the old log record
4743ac27a0ecSDave Kleikamp 	 * from the journal and so a subsequent replay can corrupt data.
4744ac27a0ecSDave Kleikamp 	 * So, first we make sure that the journal is empty and that
4745ac27a0ecSDave Kleikamp 	 * nobody is changing anything.
4746ac27a0ecSDave Kleikamp 	 */
4747ac27a0ecSDave Kleikamp
4748617ba13bSMingming Cao 	journal = EXT4_JOURNAL(inode);
47490390131bSFrank Mayhar 	if (!journal)
47500390131bSFrank Mayhar 		return 0;
4751d699594dSDave Hansen 	if (is_journal_aborted(journal))
4752ac27a0ecSDave Kleikamp 		return -EROFS;
47532aff57b0SYongqiang Yang 	/*
47532aff57b0SYongqiang Yang 	 * We have to allocate physical blocks for delalloc blocks
47542aff57b0SYongqiang Yang 	 * before flushing the journal; otherwise delalloc blocks cannot
47552aff57b0SYongqiang Yang 	 * be allocated any more. Even worse, truncate on delalloc blocks
47562aff57b0SYongqiang Yang 	 * could trigger a BUG by flushing delalloc blocks in the journal.
47572aff57b0SYongqiang Yang 	 * There are no delalloc blocks in non-journal data mode.
47582aff57b0SYongqiang Yang 	 */
47592aff57b0SYongqiang Yang 	if (val && test_opt(inode->i_sb, DELALLOC)) {
47602aff57b0SYongqiang Yang 		err = ext4_alloc_da_blocks(inode);
47612aff57b0SYongqiang Yang 		if (err < 0)
47622aff57b0SYongqiang Yang 			return err;
47632aff57b0SYongqiang Yang 	}
4764ac27a0ecSDave Kleikamp
476517335dccSDmitry Monakhov 	/* Wait for all existing dio workers */
476617335dccSDmitry Monakhov 	ext4_inode_block_unlocked_dio(inode);
476717335dccSDmitry Monakhov 	inode_dio_wait(inode);
476817335dccSDmitry Monakhov
4769dab291afSMingming Cao 	jbd2_journal_lock_updates(journal);
4770ac27a0ecSDave Kleikamp
4771ac27a0ecSDave Kleikamp 	/*
4772ac27a0ecSDave Kleikamp 	 * OK, there are no updates running now, and all cached data is
4773ac27a0ecSDave Kleikamp 	 * synced to disk.  We are now in a completely consistent state
4774ac27a0ecSDave Kleikamp 	 * which doesn't have anything in the journal, and we know that
4775ac27a0ecSDave Kleikamp 	 * no filesystem updates are running, so it is safe to modify
4776ac27a0ecSDave Kleikamp 	 * the inode's in-core data-journaling state flag now.
4777ac27a0ecSDave Kleikamp 	 */
4778ac27a0ecSDave Kleikamp
4779ac27a0ecSDave Kleikamp 	if (val)
478012e9b892SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
47815872ddaaSYongqiang Yang 	else {
47825872ddaaSYongqiang Yang 		jbd2_journal_flush(journal);
478312e9b892SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
47845872ddaaSYongqiang Yang 	}
4785617ba13bSMingming Cao 	ext4_set_aops(inode);
4786ac27a0ecSDave Kleikamp
4787dab291afSMingming Cao 	jbd2_journal_unlock_updates(journal);
478817335dccSDmitry Monakhov 	ext4_inode_resume_unlocked_dio(inode);
4789ac27a0ecSDave Kleikamp
4790ac27a0ecSDave Kleikamp 	/* Finally we can mark the inode as dirty. */
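	/*
	 * (Editor's note: ext4_handle_sync() below marks the handle as
	 * synchronous, so the journaling-mode flip is committed to the
	 * journal before this function returns.)
	 */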
4791ac27a0ecSDave Kleikamp
4792617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 1);
4793ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4794ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
4795ac27a0ecSDave Kleikamp
4796617ba13bSMingming Cao 	err = ext4_mark_inode_dirty(handle, inode);
47970390131bSFrank Mayhar 	ext4_handle_sync(handle);
4798617ba13bSMingming Cao 	ext4_journal_stop(handle);
4799617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4800ac27a0ecSDave Kleikamp
4801ac27a0ecSDave Kleikamp 	return err;
4802ac27a0ecSDave Kleikamp }
48032e9ee850SAneesh Kumar K.V
48042e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
48052e9ee850SAneesh Kumar K.V {
48062e9ee850SAneesh Kumar K.V 	return !buffer_mapped(bh);
48072e9ee850SAneesh Kumar K.V }
48082e9ee850SAneesh Kumar K.V
4809c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
48102e9ee850SAneesh Kumar K.V {
4811c2ec175cSNick Piggin 	struct page *page = vmf->page;
48122e9ee850SAneesh Kumar K.V 	loff_t size;
48132e9ee850SAneesh Kumar K.V 	unsigned long len;
48149ea7df53SJan Kara 	int ret;
48152e9ee850SAneesh Kumar K.V 	struct file *file = vma->vm_file;
48162e9ee850SAneesh Kumar K.V 	struct inode *inode = file->f_path.dentry->d_inode;
48172e9ee850SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
48189ea7df53SJan Kara 	handle_t *handle;
48199ea7df53SJan Kara 	get_block_t *get_block;
48209ea7df53SJan Kara 	int retries = 0;
48212e9ee850SAneesh Kumar K.V
48228e8ad8a5SJan Kara 	sb_start_pagefault(inode->i_sb);
4823041bbb6dSTheodore Ts'o 	file_update_time(vma->vm_file);
48249ea7df53SJan Kara 	/* Delalloc case is easy... */
48259ea7df53SJan Kara 	if (test_opt(inode->i_sb, DELALLOC) &&
48269ea7df53SJan Kara 	    !ext4_should_journal_data(inode) &&
48279ea7df53SJan Kara 	    !ext4_nonda_switch(inode->i_sb)) {
48289ea7df53SJan Kara 		do {
48299ea7df53SJan Kara 			ret = __block_page_mkwrite(vma, vmf,
48309ea7df53SJan Kara 						   ext4_da_get_block_prep);
48319ea7df53SJan Kara 		} while (ret == -ENOSPC &&
48329ea7df53SJan Kara 			 ext4_should_retry_alloc(inode->i_sb, &retries));
48339ea7df53SJan Kara 		goto out_ret;
48342e9ee850SAneesh Kumar K.V 	}
48350e499890SDarrick J. Wong
48360e499890SDarrick J. Wong 	lock_page(page);
48379ea7df53SJan Kara 	size = i_size_read(inode);
48389ea7df53SJan Kara 	/* Page got truncated from under us? */
48399ea7df53SJan Kara 	if (page->mapping != mapping || page_offset(page) > size) {
48409ea7df53SJan Kara 		unlock_page(page);
48419ea7df53SJan Kara 		ret = VM_FAULT_NOPAGE;
48429ea7df53SJan Kara 		goto out;
48430e499890SDarrick J. Wong 	}
48442e9ee850SAneesh Kumar K.V
48452e9ee850SAneesh Kumar K.V 	if (page->index == size >> PAGE_CACHE_SHIFT)
48462e9ee850SAneesh Kumar K.V 		len = size & ~PAGE_CACHE_MASK;
48472e9ee850SAneesh Kumar K.V 	else
48482e9ee850SAneesh Kumar K.V 		len = PAGE_CACHE_SIZE;
4849a827eaffSAneesh Kumar K.V 	/*
48509ea7df53SJan Kara 	 * Return if we have all the buffers mapped. This avoids the need to do
48519ea7df53SJan Kara 	 * journal_start/journal_stop which can block and take a long time
4852a827eaffSAneesh Kumar K.V 	 */
48532e9ee850SAneesh Kumar K.V 	if (page_has_buffers(page)) {
4854f19d5870STao Ma 		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
4855f19d5870STao Ma 					    0, len, NULL,
4856a827eaffSAneesh Kumar K.V 					    ext4_bh_unmapped)) {
48579ea7df53SJan Kara 			/* Wait so that we don't change page under IO */
48589ea7df53SJan Kara 			wait_on_page_writeback(page);
48599ea7df53SJan Kara 			ret = VM_FAULT_LOCKED;
48609ea7df53SJan Kara 			goto out;
48612e9ee850SAneesh Kumar K.V 		}
4862a827eaffSAneesh Kumar K.V 	}
4863a827eaffSAneesh Kumar K.V 	unlock_page(page);
48649ea7df53SJan Kara 	/* OK, we need to fill the hole... */
48659ea7df53SJan Kara 	if (ext4_should_dioread_nolock(inode))
48669ea7df53SJan Kara 		get_block = ext4_get_block_write;
48679ea7df53SJan Kara 	else
48689ea7df53SJan Kara 		get_block = ext4_get_block;
48699ea7df53SJan Kara retry_alloc:
48709ea7df53SJan Kara 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
48719ea7df53SJan Kara 	if (IS_ERR(handle)) {
4872c2ec175cSNick Piggin 		ret = VM_FAULT_SIGBUS;
48739ea7df53SJan Kara 		goto out;
48749ea7df53SJan Kara 	}
48759ea7df53SJan Kara 	ret = __block_page_mkwrite(vma, vmf, get_block);
48769ea7df53SJan Kara 	if (!ret && ext4_should_journal_data(inode)) {
4877f19d5870STao Ma 		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
48789ea7df53SJan Kara 			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
48799ea7df53SJan Kara 			unlock_page(page);
48809ea7df53SJan Kara 			ret = VM_FAULT_SIGBUS;
4881fcbb5515SYongqiang Yang 			ext4_journal_stop(handle);
48829ea7df53SJan Kara 			goto out;
48839ea7df53SJan Kara 		}
48849ea7df53SJan Kara 		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
48859ea7df53SJan Kara 	}
48869ea7df53SJan Kara 	ext4_journal_stop(handle);
48879ea7df53SJan Kara 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
48889ea7df53SJan Kara 		goto retry_alloc;
48899ea7df53SJan Kara out_ret:
48909ea7df53SJan Kara 	ret = block_page_mkwrite_return(ret);
48919ea7df53SJan Kara out:
48928e8ad8a5SJan Kara 	sb_end_pagefault(inode->i_sb);
48932e9ee850SAneesh Kumar K.V 	return ret;
48942e9ee850SAneesh Kumar K.V }
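
#if 0
/*
 * Editor's sketch (not part of the original file): how a caller might
 * size a handle for a one-page write, in the same way the
 * ext4_page_mkwrite() slow path above does.  The wrapper name is
 * hypothetical; ext4_writepage_trans_blocks() and ext4_journal_start()
 * are the real helpers used above.
 */
static handle_t *ext4_example_start_page_handle(struct inode *inode)
{
	/* Worst-case credits for modifying a single page */
	int credits = ext4_writepage_trans_blocks(inode);

	/* Returns ERR_PTR() on failure, exactly as checked above */
	return ext4_journal_start(inode, credits);
}
#endif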