/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

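/*
 * Compute the checksum of the on-disk inode.  The checksum fields
 * themselves are zeroed for the duration of the computation (the high
 * 16 bits only exist when the inode is large enough to carry
 * i_checksum_hi) and restored afterwards.
 */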
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}

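/*
 * Return 1 if the inode checksum is valid or checksumming is not in
 * effect (non-Linux creator OS, or the metadata_csum feature is
 * absent); return 0 on a mismatch.  Inodes too small to carry
 * i_checksum_hi are compared on the low 16 bits only.
 */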
static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there's no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus user could see stale data if he tries to
		 * read them before the transaction is checkpointed.  So be
		 * careful and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

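/*
 * Quota hook: report the amount of space this inode has reserved for
 * delayed allocation but not yet allocated on disk.
 */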
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a block located at @lblock
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
			 "with only %d reserved metadata blocks\n", __func__,
			 inode->i_ino, ei->i_allocated_meta_blocks,
			 ei->i_reserved_meta_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

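/*
 * Typical lookup-only use of ext4_map_blocks() (a sketch; error
 * handling elided).  With create == 0 no transaction handle is
 * required, so NULL may be passed:
 *
 *	struct ext4_map_blocks map;
 *
 *	map.m_lblk = lblk;
 *	map.m_len = 1;
 *	if (ext4_map_blocks(NULL, inode, &map, 0) > 0)
 *		... map.m_pblk and map.m_flags describe the mapping ...
 */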
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extents based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the resulting buffer head is unmapped.  If create == 1, it will make
 * sure the buffer head is mapped.
 *
 * It returns 0 if a plain look up failed (blocks have not been
 * allocated); in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret;
		if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
			/* delayed allocation may have been done by fallocate
			 * and converted to initialized by direct I/O;
			 * we need to handle the delayed extent here.
			 */
			down_write((&EXT4_I(inode)->i_data_sem));
			goto delayed_mapped;
		}
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns as for create == 0,
	 * with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * if the caller is from delayed allocation writeout path
	 * we have already reserved fs blocks for allocation
	 * let the underlying get_block() function know to
	 * avoid double accounting
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non extent files.  So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
			int ret;
delayed_mapped:
			/* delayed allocation blocks have been allocated */
			ret = ext4_es_remove_extent(inode, map->m_lblk,
						    map->m_len);
			if (ret < 0)
				retval = ret;
		}
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

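/*
 * Common implementation behind ext4_get_block(): translate the
 * buffer_head based get_block interface into an ext4_map_blocks()
 * call.  For a direct-IO write with no current handle it starts its
 * own transaction, mapping at most DIO_MAX_BLOCKS blocks at a time.
 */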
static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

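/*
 * Map (and, with create != 0, allocate) a single block of the inode
 * and return it as a buffer_head.  A newly allocated block is zeroed
 * and marked uptodate under journal protection before it is returned.
 */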
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	/* ensure we send some value back into *errp */
	*errp = 0;

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

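/*
 * Example caller (a sketch): ext4_write_begin() below journals the
 * buffers it touched in data=journal mode with
 *
 *	ext4_walk_page_buffers(handle, page_buffers(page), from, to,
 *			       NULL, do_journal_get_write_access);
 */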
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);

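/*
 * Set up a page for a write: start a transaction with enough credits
 * (one extra block is reserved for a possible orphan-list addition),
 * try the inline-data path first, then let __block_write_begin()
 * instantiate buffers; in data=journal mode the buffers are also added
 * to the running handle.  Retried on ENOSPC after giving journal
 * commits a chance to free blocks.
 */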
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			goto out;
		if (ret == 1) {
			ret = 0;
			goto out;
		}
	}

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}

	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

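/*
 * Common tail of the write_end paths: copy the written data into the
 * page (or the inline-data area), update i_size and i_disksize while
 * the page is still locked, and only mark the inode dirty once the
 * page lock has been dropped.
 */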
static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	if (ext4_has_inline_data(inode))
		copied = ext4_write_inline_data_end(inode, pos, len,
						    copied, page);
	else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

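/*
 * One write_end callback per journaling mode follows:
 * ext4_ordered_write_end() for data=ordered,
 * ext4_writeback_write_end() for data=writeback, and
 * ext4_journalled_write_end() for data=journal.
 */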
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * less.  We will have blocks allocated outside
			 * inode->i_size.  So truncate them
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less.  We will have blocks allocated outside
		 * inode->i_size.  So truncate them
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

ret : copied; 1107ac27a0ecSDave Kleikamp } 1108ac27a0ecSDave Kleikamp 1109bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file, 1110bfc1af65SNick Piggin struct address_space *mapping, 1111bfc1af65SNick Piggin loff_t pos, unsigned len, unsigned copied, 1112bfc1af65SNick Piggin struct page *page, void *fsdata) 1113ac27a0ecSDave Kleikamp { 1114617ba13bSMingming Cao handle_t *handle = ext4_journal_current_handle(); 1115bfc1af65SNick Piggin struct inode *inode = mapping->host; 1116ac27a0ecSDave Kleikamp int ret = 0, ret2; 1117ac27a0ecSDave Kleikamp int partial = 0; 1118bfc1af65SNick Piggin unsigned from, to; 1119cf17fea6SAneesh Kumar K.V loff_t new_i_size; 1120ac27a0ecSDave Kleikamp 11219bffad1eSTheodore Ts'o trace_ext4_journalled_write_end(inode, pos, len, copied); 1122bfc1af65SNick Piggin from = pos & (PAGE_CACHE_SIZE - 1); 1123bfc1af65SNick Piggin to = from + len; 1124bfc1af65SNick Piggin 1125441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1126441c8508SCurt Wohlgemuth 11273fdcfb66STao Ma if (ext4_has_inline_data(inode)) 11283fdcfb66STao Ma copied = ext4_write_inline_data_end(inode, pos, len, 11293fdcfb66STao Ma copied, page); 11303fdcfb66STao Ma else { 1131bfc1af65SNick Piggin if (copied < len) { 1132bfc1af65SNick Piggin if (!PageUptodate(page)) 1133bfc1af65SNick Piggin copied = 0; 1134bfc1af65SNick Piggin page_zero_new_buffers(page, from+copied, to); 1135bfc1af65SNick Piggin } 1136ac27a0ecSDave Kleikamp 1137f19d5870STao Ma ret = ext4_walk_page_buffers(handle, page_buffers(page), from, 1138bfc1af65SNick Piggin to, &partial, write_end_fn); 1139ac27a0ecSDave Kleikamp if (!partial) 1140ac27a0ecSDave Kleikamp SetPageUptodate(page); 11413fdcfb66STao Ma } 1142cf17fea6SAneesh Kumar K.V new_i_size = pos + copied; 1143cf17fea6SAneesh Kumar K.V if (new_i_size > inode->i_size) 1144bfc1af65SNick Piggin i_size_write(inode, pos+copied); 114519f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 11462d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 1147cf17fea6SAneesh Kumar K.V if (new_i_size > EXT4_I(inode)->i_disksize) { 1148cf17fea6SAneesh Kumar K.V ext4_update_i_disksize(inode, new_i_size); 1149617ba13bSMingming Cao ret2 = ext4_mark_inode_dirty(handle, inode); 1150ac27a0ecSDave Kleikamp if (!ret) 1151ac27a0ecSDave Kleikamp ret = ret2; 1152ac27a0ecSDave Kleikamp } 1153bfc1af65SNick Piggin 1154cf108bcaSJan Kara unlock_page(page); 1155f8514083SAneesh Kumar K.V page_cache_release(page); 1156ffacfa7aSJan Kara if (pos + len > inode->i_size && ext4_can_truncate(inode)) 1157f8514083SAneesh Kumar K.V /* if we have allocated more blocks and copied 1158f8514083SAneesh Kumar K.V * less. We will have blocks allocated outside 1159f8514083SAneesh Kumar K.V * inode->i_size. So truncate them 1160f8514083SAneesh Kumar K.V */ 1161f8514083SAneesh Kumar K.V ext4_orphan_add(handle, inode); 1162f8514083SAneesh Kumar K.V 1163617ba13bSMingming Cao ret2 = ext4_journal_stop(handle); 1164ac27a0ecSDave Kleikamp if (!ret) 1165ac27a0ecSDave Kleikamp ret = ret2; 1166f8514083SAneesh Kumar K.V if (pos + len > inode->i_size) { 1167b9a4207dSJan Kara ext4_truncate_failed_write(inode); 1168f8514083SAneesh Kumar K.V /* 1169ffacfa7aSJan Kara * If truncate failed early the inode might still be 1170f8514083SAneesh Kumar K.V * on the orphan list; we need to make sure the inode 1171f8514083SAneesh Kumar K.V * is removed from the orphan list in that case. 
1172f8514083SAneesh Kumar K.V 		 */
1173f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1174f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1175f8514083SAneesh Kumar K.V 	}
1176bfc1af65SNick Piggin 
1177bfc1af65SNick Piggin 	return ret ? ret : copied;
1178ac27a0ecSDave Kleikamp }
1179d2a17637SMingming Cao 
11809d0be502STheodore Ts'o /*
11817b415bf6SAditya Kali  * Reserve a single cluster located at lblock
11829d0be502STheodore Ts'o  */
118301f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1184d2a17637SMingming Cao {
1185030ba6bcSAneesh Kumar K.V 	int retries = 0;
1186d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
11870637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
11887b415bf6SAditya Kali 	unsigned int md_needed;
11895dd4056dSChristoph Hellwig 	int ret;
119003179fe9STheodore Ts'o 	ext4_lblk_t save_last_lblock;
119103179fe9STheodore Ts'o 	int save_len;
1192d2a17637SMingming Cao 
119360e58e0fSMingming Cao 	/*
119472b8ab9dSEric Sandeen 	 * We will charge metadata quota at writeout time; this saves
119572b8ab9dSEric Sandeen 	 * us from metadata over-estimation, though we may go over by
119672b8ab9dSEric Sandeen 	 * a small amount in the end.  Here we just reserve for data.
119760e58e0fSMingming Cao 	 */
11987b415bf6SAditya Kali 	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
11995dd4056dSChristoph Hellwig 	if (ret)
12005dd4056dSChristoph Hellwig 		return ret;
120103179fe9STheodore Ts'o 
120203179fe9STheodore Ts'o 	/*
120303179fe9STheodore Ts'o 	 * Recalculate the amount of metadata blocks to reserve
120403179fe9STheodore Ts'o 	 * in order to allocate nrblocks; the
120503179fe9STheodore Ts'o 	 * worst case is one extent per block.
120603179fe9STheodore Ts'o 	 */
120703179fe9STheodore Ts'o repeat:
120803179fe9STheodore Ts'o 	spin_lock(&ei->i_block_reservation_lock);
120903179fe9STheodore Ts'o 	/*
121003179fe9STheodore Ts'o 	 * ext4_calc_metadata_amount() has side effects, which we have
121103179fe9STheodore Ts'o 	 * to be prepared to undo if we fail to claim space.
121203179fe9STheodore Ts'o 	 */
121303179fe9STheodore Ts'o 	save_len = ei->i_da_metadata_calc_len;
121403179fe9STheodore Ts'o 	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
121503179fe9STheodore Ts'o 	md_needed = EXT4_NUM_B2C(sbi,
121603179fe9STheodore Ts'o 				 ext4_calc_metadata_amount(inode, lblock));
121703179fe9STheodore Ts'o 	trace_ext4_da_reserve_space(inode, md_needed);
121803179fe9STheodore Ts'o 
121972b8ab9dSEric Sandeen 	/*
122072b8ab9dSEric Sandeen 	 * We do still charge estimated metadata to the sb though;
122172b8ab9dSEric Sandeen 	 * we cannot afford to run out of free blocks.
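	 *
	 * For example, if ext4_calc_metadata_amount() estimates that one new
	 * extent-tree block may be needed for this logical block, md_needed
	 * is 1 (in cluster units) and the claim below asks for
	 * md_needed + 1 == 2 clusters: the estimated metadata plus the data
	 * cluster itself.  (Illustrative numbers only.)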
122272b8ab9dSEric Sandeen */ 1223e7d5f315STheodore Ts'o if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) { 122403179fe9STheodore Ts'o ei->i_da_metadata_calc_len = save_len; 122503179fe9STheodore Ts'o ei->i_da_metadata_calc_last_lblock = save_last_lblock; 122603179fe9STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 1227030ba6bcSAneesh Kumar K.V if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1228030ba6bcSAneesh Kumar K.V yield(); 1229030ba6bcSAneesh Kumar K.V goto repeat; 1230030ba6bcSAneesh Kumar K.V } 123103179fe9STheodore Ts'o dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1232d2a17637SMingming Cao return -ENOSPC; 1233d2a17637SMingming Cao } 12349d0be502STheodore Ts'o ei->i_reserved_data_blocks++; 12350637c6f4STheodore Ts'o ei->i_reserved_meta_blocks += md_needed; 12360637c6f4STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 123739bc680aSDmitry Monakhov 1238d2a17637SMingming Cao return 0; /* success */ 1239d2a17637SMingming Cao } 1240d2a17637SMingming Cao 124112219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1242d2a17637SMingming Cao { 1243d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 12440637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 1245d2a17637SMingming Cao 1246cd213226SMingming Cao if (!to_free) 1247cd213226SMingming Cao return; /* Nothing to release, exit */ 1248cd213226SMingming Cao 1249d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1250cd213226SMingming Cao 12515a58ec87SLi Zefan trace_ext4_da_release_space(inode, to_free); 12520637c6f4STheodore Ts'o if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1253cd213226SMingming Cao /* 12540637c6f4STheodore Ts'o * if there aren't enough reserved blocks, then the 12550637c6f4STheodore Ts'o * counter is messed up somewhere. Since this 12560637c6f4STheodore Ts'o * function is called from invalidate page, it's 12570637c6f4STheodore Ts'o * harmless to return without any action. 1258cd213226SMingming Cao */ 12590637c6f4STheodore Ts'o ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: " 12600637c6f4STheodore Ts'o "ino %lu, to_free %d with only %d reserved " 12611084f252STheodore Ts'o "data blocks", inode->i_ino, to_free, 12620637c6f4STheodore Ts'o ei->i_reserved_data_blocks); 12630637c6f4STheodore Ts'o WARN_ON(1); 12640637c6f4STheodore Ts'o to_free = ei->i_reserved_data_blocks; 12650637c6f4STheodore Ts'o } 12660637c6f4STheodore Ts'o ei->i_reserved_data_blocks -= to_free; 12670637c6f4STheodore Ts'o 12680637c6f4STheodore Ts'o if (ei->i_reserved_data_blocks == 0) { 12690637c6f4STheodore Ts'o /* 12700637c6f4STheodore Ts'o * We can release all of the reserved metadata blocks 12710637c6f4STheodore Ts'o * only when we have written all of the delayed 12720637c6f4STheodore Ts'o * allocation blocks. 12737b415bf6SAditya Kali * Note that in case of bigalloc, i_reserved_meta_blocks, 12747b415bf6SAditya Kali * i_reserved_data_blocks, etc. refer to number of clusters. 
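		 *
		 * For example, on a bigalloc filesystem with
		 * s_cluster_ratio == 16 (sixteen 4k blocks per 64k cluster),
		 * every unit counted here stands for a whole cluster, and
		 * EXT4_C2B(sbi, to_free) below converts the cluster count
		 * back to block units for the quota code.  (Illustrative
		 * cluster ratio.)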
12750637c6f4STheodore Ts'o */ 127657042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 127772b8ab9dSEric Sandeen ei->i_reserved_meta_blocks); 1278ee5f4d9cSTheodore Ts'o ei->i_reserved_meta_blocks = 0; 12799d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 1280cd213226SMingming Cao } 1281cd213226SMingming Cao 128272b8ab9dSEric Sandeen /* update fs dirty data blocks counter */ 128357042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); 1284d2a17637SMingming Cao 1285d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 128660e58e0fSMingming Cao 12877b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); 1288d2a17637SMingming Cao } 1289d2a17637SMingming Cao 1290d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1291d2a17637SMingming Cao unsigned long offset) 1292d2a17637SMingming Cao { 1293d2a17637SMingming Cao int to_release = 0; 1294d2a17637SMingming Cao struct buffer_head *head, *bh; 1295d2a17637SMingming Cao unsigned int curr_off = 0; 12967b415bf6SAditya Kali struct inode *inode = page->mapping->host; 12977b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 12987b415bf6SAditya Kali int num_clusters; 129951865fdaSZheng Liu ext4_fsblk_t lblk; 1300d2a17637SMingming Cao 1301d2a17637SMingming Cao head = page_buffers(page); 1302d2a17637SMingming Cao bh = head; 1303d2a17637SMingming Cao do { 1304d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1305d2a17637SMingming Cao 1306d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1307d2a17637SMingming Cao to_release++; 1308d2a17637SMingming Cao clear_buffer_delay(bh); 1309d2a17637SMingming Cao } 1310d2a17637SMingming Cao curr_off = next_off; 1311d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 13127b415bf6SAditya Kali 131351865fdaSZheng Liu if (to_release) { 131451865fdaSZheng Liu lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 131551865fdaSZheng Liu ext4_es_remove_extent(inode, lblk, to_release); 131651865fdaSZheng Liu } 131751865fdaSZheng Liu 13187b415bf6SAditya Kali /* If we have released all the blocks belonging to a cluster, then we 13197b415bf6SAditya Kali * need to release the reserved space for that cluster. 
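	 *
	 * For example, if the blocks being released were the last delayed
	 * blocks in their cluster, ext4_find_delalloc_cluster() below finds
	 * no other delayed block there and one reserved cluster is given
	 * back; if any delayed block remains in the cluster, the reservation
	 * has to stay.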
 */
13207b415bf6SAditya Kali 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
13217b415bf6SAditya Kali 	while (num_clusters > 0) {
13227b415bf6SAditya Kali 		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
13237b415bf6SAditya Kali 			((num_clusters - 1) << sbi->s_cluster_bits);
13247b415bf6SAditya Kali 		if (sbi->s_cluster_ratio == 1 ||
13257d1b1fbcSZheng Liu 		    !ext4_find_delalloc_cluster(inode, lblk))
13267b415bf6SAditya Kali 			ext4_da_release_space(inode, 1);
13277b415bf6SAditya Kali 
13287b415bf6SAditya Kali 		num_clusters--;
13297b415bf6SAditya Kali 	}
1330d2a17637SMingming Cao }
1331ac27a0ecSDave Kleikamp 
1332ac27a0ecSDave Kleikamp /*
133364769240SAlex Tomas  * Delayed allocation stuff
133464769240SAlex Tomas  */
133564769240SAlex Tomas 
133664769240SAlex Tomas /*
133764769240SAlex Tomas  * mpage_da_submit_io - walks through the extent of pages and tries to
1338a1d6cc56SAneesh Kumar K.V  * write them with the writepage() callback
133964769240SAlex Tomas  *
134064769240SAlex Tomas  * @mpd->inode: inode
134164769240SAlex Tomas  * @mpd->first_page: first page of the extent
134264769240SAlex Tomas  * @mpd->next_page: page after the last page of the extent
134364769240SAlex Tomas  *
134464769240SAlex Tomas  * By the time mpage_da_submit_io() is called we expect all blocks
134564769240SAlex Tomas  * to be allocated.  This may be wrong if allocation failed.
134664769240SAlex Tomas  *
134764769240SAlex Tomas  * As pages are already locked by write_cache_pages(), we can't use it.
134864769240SAlex Tomas  */
13491de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd,
13501de3e3dfSTheodore Ts'o 			      struct ext4_map_blocks *map)
135164769240SAlex Tomas {
1352791b7f08SAneesh Kumar K.V 	struct pagevec pvec;
1353791b7f08SAneesh Kumar K.V 	unsigned long index, end;
1354791b7f08SAneesh Kumar K.V 	int ret = 0, err, nr_pages, i;
1355791b7f08SAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1356791b7f08SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
1357cb20d518STheodore Ts'o 	loff_t size = i_size_read(inode);
13583ecdb3a1STheodore Ts'o 	unsigned int len, block_start;
13593ecdb3a1STheodore Ts'o 	struct buffer_head *bh, *page_bufs = NULL;
1360cb20d518STheodore Ts'o 	int journal_data = ext4_should_journal_data(inode);
13611de3e3dfSTheodore Ts'o 	sector_t pblock = 0, cur_logical = 0;
1362bd2d0210STheodore Ts'o 	struct ext4_io_submit io_submit;
136364769240SAlex Tomas 
136464769240SAlex Tomas 	BUG_ON(mpd->next_page <= mpd->first_page);
1365bd2d0210STheodore Ts'o 	memset(&io_submit, 0, sizeof(io_submit));
1366791b7f08SAneesh Kumar K.V 	/*
1367791b7f08SAneesh Kumar K.V 	 * We need to start from the first_page to the next_page - 1
1368791b7f08SAneesh Kumar K.V 	 * to make sure we also write the mapped dirty buffer_heads.
13698dc207c0STheodore Ts'o 	 * If we look at mpd->b_blocknr we would only be looking
1370791b7f08SAneesh Kumar K.V 	 * at the currently mapped buffer_heads.
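	 *
	 * For example, with mpd->first_page == 2 and mpd->next_page == 5 the
	 * loop below visits pages 2, 3 and 4 (next_page is exclusive),
	 * fetching them with pagevec_lookup() in PAGEVEC_SIZE batches.
	 * (Illustrative page indexes.)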
1371791b7f08SAneesh Kumar K.V */ 137264769240SAlex Tomas index = mpd->first_page; 137364769240SAlex Tomas end = mpd->next_page - 1; 137464769240SAlex Tomas 1375791b7f08SAneesh Kumar K.V pagevec_init(&pvec, 0); 137664769240SAlex Tomas while (index <= end) { 1377791b7f08SAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 137864769240SAlex Tomas if (nr_pages == 0) 137964769240SAlex Tomas break; 138064769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 138197498956STheodore Ts'o int commit_write = 0, skip_page = 0; 138264769240SAlex Tomas struct page *page = pvec.pages[i]; 138364769240SAlex Tomas 1384791b7f08SAneesh Kumar K.V index = page->index; 1385791b7f08SAneesh Kumar K.V if (index > end) 1386791b7f08SAneesh Kumar K.V break; 1387cb20d518STheodore Ts'o 1388cb20d518STheodore Ts'o if (index == size >> PAGE_CACHE_SHIFT) 1389cb20d518STheodore Ts'o len = size & ~PAGE_CACHE_MASK; 1390cb20d518STheodore Ts'o else 1391cb20d518STheodore Ts'o len = PAGE_CACHE_SIZE; 13921de3e3dfSTheodore Ts'o if (map) { 13931de3e3dfSTheodore Ts'o cur_logical = index << (PAGE_CACHE_SHIFT - 13941de3e3dfSTheodore Ts'o inode->i_blkbits); 13951de3e3dfSTheodore Ts'o pblock = map->m_pblk + (cur_logical - 13961de3e3dfSTheodore Ts'o map->m_lblk); 13971de3e3dfSTheodore Ts'o } 1398791b7f08SAneesh Kumar K.V index++; 1399791b7f08SAneesh Kumar K.V 1400791b7f08SAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1401791b7f08SAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1402791b7f08SAneesh Kumar K.V 140322208dedSAneesh Kumar K.V /* 1404cb20d518STheodore Ts'o * If the page does not have buffers (for 1405cb20d518STheodore Ts'o * whatever reason), try to create them using 1406a107e5a3STheodore Ts'o * __block_write_begin. If this fails, 140797498956STheodore Ts'o * skip the page and move on. 
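			 *
			 * For example, on a 4k page with 1k blocks,
			 * __block_write_begin(page, 0, len,
			 * noalloc_get_block_write) attaches the four
			 * buffer_heads and looks up their existing mappings
			 * without allocating anything new.  (Illustrative
			 * geometry.)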
140822208dedSAneesh Kumar K.V */ 1409cb20d518STheodore Ts'o if (!page_has_buffers(page)) { 1410a107e5a3STheodore Ts'o if (__block_write_begin(page, 0, len, 1411cb20d518STheodore Ts'o noalloc_get_block_write)) { 141297498956STheodore Ts'o skip_page: 1413cb20d518STheodore Ts'o unlock_page(page); 1414cb20d518STheodore Ts'o continue; 1415cb20d518STheodore Ts'o } 1416cb20d518STheodore Ts'o commit_write = 1; 1417cb20d518STheodore Ts'o } 14183ecdb3a1STheodore Ts'o 14193ecdb3a1STheodore Ts'o bh = page_bufs = page_buffers(page); 14203ecdb3a1STheodore Ts'o block_start = 0; 14213ecdb3a1STheodore Ts'o do { 14221de3e3dfSTheodore Ts'o if (!bh) 142397498956STheodore Ts'o goto skip_page; 14241de3e3dfSTheodore Ts'o if (map && (cur_logical >= map->m_lblk) && 14251de3e3dfSTheodore Ts'o (cur_logical <= (map->m_lblk + 14261de3e3dfSTheodore Ts'o (map->m_len - 1)))) { 14271de3e3dfSTheodore Ts'o if (buffer_delay(bh)) { 14281de3e3dfSTheodore Ts'o clear_buffer_delay(bh); 14291de3e3dfSTheodore Ts'o bh->b_blocknr = pblock; 14301de3e3dfSTheodore Ts'o } 14311de3e3dfSTheodore Ts'o if (buffer_unwritten(bh) || 14321de3e3dfSTheodore Ts'o buffer_mapped(bh)) 14331de3e3dfSTheodore Ts'o BUG_ON(bh->b_blocknr != pblock); 14341de3e3dfSTheodore Ts'o if (map->m_flags & EXT4_MAP_UNINIT) 14351de3e3dfSTheodore Ts'o set_buffer_uninit(bh); 14361de3e3dfSTheodore Ts'o clear_buffer_unwritten(bh); 14371de3e3dfSTheodore Ts'o } 14381de3e3dfSTheodore Ts'o 143913a79a47SYongqiang Yang /* 144013a79a47SYongqiang Yang * skip page if block allocation undone and 144113a79a47SYongqiang Yang * block is dirty 144213a79a47SYongqiang Yang */ 144313a79a47SYongqiang Yang if (ext4_bh_delay_or_unwritten(NULL, bh)) 144497498956STheodore Ts'o skip_page = 1; 14453ecdb3a1STheodore Ts'o bh = bh->b_this_page; 14463ecdb3a1STheodore Ts'o block_start += bh->b_size; 14471de3e3dfSTheodore Ts'o cur_logical++; 14481de3e3dfSTheodore Ts'o pblock++; 14491de3e3dfSTheodore Ts'o } while (bh != page_bufs); 14501de3e3dfSTheodore Ts'o 145197498956STheodore Ts'o if (skip_page) 145297498956STheodore Ts'o goto skip_page; 1453cb20d518STheodore Ts'o 1454cb20d518STheodore Ts'o if (commit_write) 1455cb20d518STheodore Ts'o /* mark the buffer_heads as dirty & uptodate */ 1456cb20d518STheodore Ts'o block_commit_write(page, 0, len); 1457cb20d518STheodore Ts'o 145897498956STheodore Ts'o clear_page_dirty_for_io(page); 1459bd2d0210STheodore Ts'o /* 1460bd2d0210STheodore Ts'o * Delalloc doesn't support data journalling, 1461bd2d0210STheodore Ts'o * but eventually maybe we'll lift this 1462bd2d0210STheodore Ts'o * restriction. 
1463bd2d0210STheodore Ts'o */ 1464bd2d0210STheodore Ts'o if (unlikely(journal_data && PageChecked(page))) 1465cb20d518STheodore Ts'o err = __ext4_journalled_writepage(page, len); 14661449032bSTheodore Ts'o else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) 1467bd2d0210STheodore Ts'o err = ext4_bio_write_page(&io_submit, page, 1468bd2d0210STheodore Ts'o len, mpd->wbc); 14699dd75f1fSTheodore Ts'o else if (buffer_uninit(page_bufs)) { 14709dd75f1fSTheodore Ts'o ext4_set_bh_endio(page_bufs, inode); 14719dd75f1fSTheodore Ts'o err = block_write_full_page_endio(page, 14729dd75f1fSTheodore Ts'o noalloc_get_block_write, 14739dd75f1fSTheodore Ts'o mpd->wbc, ext4_end_io_buffer_write); 14749dd75f1fSTheodore Ts'o } else 14751449032bSTheodore Ts'o err = block_write_full_page(page, 14761449032bSTheodore Ts'o noalloc_get_block_write, mpd->wbc); 1477cb20d518STheodore Ts'o 1478cb20d518STheodore Ts'o if (!err) 1479a1d6cc56SAneesh Kumar K.V mpd->pages_written++; 148064769240SAlex Tomas /* 148164769240SAlex Tomas * In error case, we have to continue because 148264769240SAlex Tomas * remaining pages are still locked 148364769240SAlex Tomas */ 148464769240SAlex Tomas if (ret == 0) 148564769240SAlex Tomas ret = err; 148664769240SAlex Tomas } 148764769240SAlex Tomas pagevec_release(&pvec); 148864769240SAlex Tomas } 1489bd2d0210STheodore Ts'o ext4_io_submit(&io_submit); 149064769240SAlex Tomas return ret; 149164769240SAlex Tomas } 149264769240SAlex Tomas 1493c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 1494c4a0c46eSAneesh Kumar K.V { 1495c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1496c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1497c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1498c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1499c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 150051865fdaSZheng Liu ext4_lblk_t start, last; 1501c4a0c46eSAneesh Kumar K.V 1502c7f5938aSCurt Wohlgemuth index = mpd->first_page; 1503c7f5938aSCurt Wohlgemuth end = mpd->next_page - 1; 150451865fdaSZheng Liu 150551865fdaSZheng Liu start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 150651865fdaSZheng Liu last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); 150751865fdaSZheng Liu ext4_es_remove_extent(inode, start, last - start + 1); 150851865fdaSZheng Liu 150966bea92cSEric Sandeen pagevec_init(&pvec, 0); 1510c4a0c46eSAneesh Kumar K.V while (index <= end) { 1511c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1512c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1513c4a0c46eSAneesh Kumar K.V break; 1514c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1515c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 15169b1d0998SJan Kara if (page->index > end) 1517c4a0c46eSAneesh Kumar K.V break; 1518c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1519c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1520c4a0c46eSAneesh Kumar K.V block_invalidatepage(page, 0); 1521c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 1522c4a0c46eSAneesh Kumar K.V unlock_page(page); 1523c4a0c46eSAneesh Kumar K.V } 15249b1d0998SJan Kara index = pvec.pages[nr_pages - 1]->index + 1; 15259b1d0998SJan Kara pagevec_release(&pvec); 1526c4a0c46eSAneesh Kumar K.V } 1527c4a0c46eSAneesh Kumar K.V return; 1528c4a0c46eSAneesh Kumar K.V } 1529c4a0c46eSAneesh Kumar K.V 1530df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1531df22291fSAneesh Kumar K.V { 1532df22291fSAneesh Kumar K.V struct 
ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 153392b97816STheodore Ts'o struct super_block *sb = inode->i_sb; 153492b97816STheodore Ts'o 153592b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", 15365dee5437STheodore Ts'o EXT4_C2B(EXT4_SB(inode->i_sb), 15375dee5437STheodore Ts'o ext4_count_free_clusters(inode->i_sb))); 153892b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Free/Dirty block details"); 153992b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "free_blocks=%lld", 154057042651STheodore Ts'o (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 154157042651STheodore Ts'o percpu_counter_sum(&sbi->s_freeclusters_counter))); 154292b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld", 15437b415bf6SAditya Kali (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 15447b415bf6SAditya Kali percpu_counter_sum(&sbi->s_dirtyclusters_counter))); 154592b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Block reservation details"); 154692b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u", 1547df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks); 154892b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u", 1549df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_meta_blocks); 1550df22291fSAneesh Kumar K.V return; 1551df22291fSAneesh Kumar K.V } 1552df22291fSAneesh Kumar K.V 1553b920c755STheodore Ts'o /* 15545a87b7a5STheodore Ts'o * mpage_da_map_and_submit - go through given space, map them 15555a87b7a5STheodore Ts'o * if necessary, and then submit them for I/O 155664769240SAlex Tomas * 15578dc207c0STheodore Ts'o * @mpd - bh describing space 155864769240SAlex Tomas * 155964769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 156064769240SAlex Tomas * 156164769240SAlex Tomas */ 15625a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd) 156364769240SAlex Tomas { 15642ac3b6e0STheodore Ts'o int err, blks, get_blocks_flags; 15651de3e3dfSTheodore Ts'o struct ext4_map_blocks map, *mapp = NULL; 15662fa3cdfbSTheodore Ts'o sector_t next = mpd->b_blocknr; 15672fa3cdfbSTheodore Ts'o unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 15682fa3cdfbSTheodore Ts'o loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 15692fa3cdfbSTheodore Ts'o handle_t *handle = NULL; 157064769240SAlex Tomas 157164769240SAlex Tomas /* 15725a87b7a5STheodore Ts'o * If the blocks are mapped already, or we couldn't accumulate 15735a87b7a5STheodore Ts'o * any blocks, then proceed immediately to the submission stage. 157464769240SAlex Tomas */ 15755a87b7a5STheodore Ts'o if ((mpd->b_size == 0) || 15765a87b7a5STheodore Ts'o ((mpd->b_state & (1 << BH_Mapped)) && 157729fa89d0SAneesh Kumar K.V !(mpd->b_state & (1 << BH_Delay)) && 15785a87b7a5STheodore Ts'o !(mpd->b_state & (1 << BH_Unwritten)))) 15795a87b7a5STheodore Ts'o goto submit_io; 15802fa3cdfbSTheodore Ts'o 15812fa3cdfbSTheodore Ts'o handle = ext4_journal_current_handle(); 15822fa3cdfbSTheodore Ts'o BUG_ON(!handle); 15832fa3cdfbSTheodore Ts'o 158479ffab34SAneesh Kumar K.V /* 158579e83036SEric Sandeen * Call ext4_map_blocks() to allocate any delayed allocation 15862ac3b6e0STheodore Ts'o * blocks, or to convert an uninitialized extent to be 15872ac3b6e0STheodore Ts'o * initialized (in the case where we have written into 15882ac3b6e0STheodore Ts'o * one or more preallocated blocks). 
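	 *
	 * For instance, for a 16-block delalloc extent starting at logical
	 * block 100, the code below effectively does (illustrative values):
	 *
	 *	map.m_lblk = 100;
	 *	map.m_len = 16;
	 *	blks = ext4_map_blocks(handle, mpd->inode, &map,
	 *			EXT4_GET_BLOCKS_CREATE |
	 *			EXT4_GET_BLOCKS_DELALLOC_RESERVE);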
15892ac3b6e0STheodore Ts'o * 15902ac3b6e0STheodore Ts'o * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 15912ac3b6e0STheodore Ts'o * indicate that we are on the delayed allocation path. This 15922ac3b6e0STheodore Ts'o * affects functions in many different parts of the allocation 15932ac3b6e0STheodore Ts'o * call path. This flag exists primarily because we don't 159479e83036SEric Sandeen * want to change *many* call functions, so ext4_map_blocks() 1595f2321097STheodore Ts'o * will set the EXT4_STATE_DELALLOC_RESERVED flag once the 15962ac3b6e0STheodore Ts'o * inode's allocation semaphore is taken. 15972ac3b6e0STheodore Ts'o * 15982ac3b6e0STheodore Ts'o * If the blocks in questions were delalloc blocks, set 15992ac3b6e0STheodore Ts'o * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 16002ac3b6e0STheodore Ts'o * variables are updated after the blocks have been allocated. 160179ffab34SAneesh Kumar K.V */ 16022ed88685STheodore Ts'o map.m_lblk = next; 16032ed88685STheodore Ts'o map.m_len = max_blocks; 16041296cc85SAneesh Kumar K.V get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 1605744692dcSJiaying Zhang if (ext4_should_dioread_nolock(mpd->inode)) 1606744692dcSJiaying Zhang get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 16072ac3b6e0STheodore Ts'o if (mpd->b_state & (1 << BH_Delay)) 16081296cc85SAneesh Kumar K.V get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 16091296cc85SAneesh Kumar K.V 16102ed88685STheodore Ts'o blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); 16112fa3cdfbSTheodore Ts'o if (blks < 0) { 1612e3570639SEric Sandeen struct super_block *sb = mpd->inode->i_sb; 1613e3570639SEric Sandeen 16142fa3cdfbSTheodore Ts'o err = blks; 1615ed5bde0bSTheodore Ts'o /* 16165a87b7a5STheodore Ts'o * If get block returns EAGAIN or ENOSPC and there 161797498956STheodore Ts'o * appears to be free blocks we will just let 161897498956STheodore Ts'o * mpage_da_submit_io() unlock all of the pages. 1619c4a0c46eSAneesh Kumar K.V */ 1620c4a0c46eSAneesh Kumar K.V if (err == -EAGAIN) 16215a87b7a5STheodore Ts'o goto submit_io; 1622df22291fSAneesh Kumar K.V 16235dee5437STheodore Ts'o if (err == -ENOSPC && ext4_count_free_clusters(sb)) { 1624df22291fSAneesh Kumar K.V mpd->retval = err; 16255a87b7a5STheodore Ts'o goto submit_io; 1626df22291fSAneesh Kumar K.V } 1627df22291fSAneesh Kumar K.V 1628c4a0c46eSAneesh Kumar K.V /* 1629ed5bde0bSTheodore Ts'o * get block failure will cause us to loop in 1630ed5bde0bSTheodore Ts'o * writepages, because a_ops->writepage won't be able 1631ed5bde0bSTheodore Ts'o * to make progress. The page will be redirtied by 1632ed5bde0bSTheodore Ts'o * writepage and writepages will again try to write 1633ed5bde0bSTheodore Ts'o * the same. 1634c4a0c46eSAneesh Kumar K.V */ 1635e3570639SEric Sandeen if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { 1636e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1637e3570639SEric Sandeen "delayed block allocation failed for inode %lu " 1638e3570639SEric Sandeen "at logical offset %llu with max blocks %zd " 1639e3570639SEric Sandeen "with error %d", mpd->inode->i_ino, 1640c4a0c46eSAneesh Kumar K.V (unsigned long long) next, 16418dc207c0STheodore Ts'o mpd->b_size >> mpd->inode->i_blkbits, err); 1642e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1643e3570639SEric Sandeen "This should not happen!! 
Data will be lost\n"); 1644e3570639SEric Sandeen if (err == -ENOSPC) 1645df22291fSAneesh Kumar K.V ext4_print_free_blocks(mpd->inode); 1646030ba6bcSAneesh Kumar K.V } 16472fa3cdfbSTheodore Ts'o /* invalidate all the pages */ 1648c7f5938aSCurt Wohlgemuth ext4_da_block_invalidatepages(mpd); 1649e0fd9b90SCurt Wohlgemuth 1650e0fd9b90SCurt Wohlgemuth /* Mark this page range as having been completed */ 1651e0fd9b90SCurt Wohlgemuth mpd->io_done = 1; 16525a87b7a5STheodore Ts'o return; 1653c4a0c46eSAneesh Kumar K.V } 16542fa3cdfbSTheodore Ts'o BUG_ON(blks == 0); 16552fa3cdfbSTheodore Ts'o 16561de3e3dfSTheodore Ts'o mapp = ↦ 16572ed88685STheodore Ts'o if (map.m_flags & EXT4_MAP_NEW) { 16582ed88685STheodore Ts'o struct block_device *bdev = mpd->inode->i_sb->s_bdev; 16592ed88685STheodore Ts'o int i; 166064769240SAlex Tomas 16612ed88685STheodore Ts'o for (i = 0; i < map.m_len; i++) 16622ed88685STheodore Ts'o unmap_underlying_metadata(bdev, map.m_pblk + i); 16632fa3cdfbSTheodore Ts'o } 16642fa3cdfbSTheodore Ts'o 16652fa3cdfbSTheodore Ts'o /* 166603f5d8bcSJan Kara * Update on-disk size along with block allocation. 16672fa3cdfbSTheodore Ts'o */ 16682fa3cdfbSTheodore Ts'o disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 16692fa3cdfbSTheodore Ts'o if (disksize > i_size_read(mpd->inode)) 16702fa3cdfbSTheodore Ts'o disksize = i_size_read(mpd->inode); 16712fa3cdfbSTheodore Ts'o if (disksize > EXT4_I(mpd->inode)->i_disksize) { 16722fa3cdfbSTheodore Ts'o ext4_update_i_disksize(mpd->inode, disksize); 16735a87b7a5STheodore Ts'o err = ext4_mark_inode_dirty(handle, mpd->inode); 16745a87b7a5STheodore Ts'o if (err) 16755a87b7a5STheodore Ts'o ext4_error(mpd->inode->i_sb, 16765a87b7a5STheodore Ts'o "Failed to mark inode %lu dirty", 16775a87b7a5STheodore Ts'o mpd->inode->i_ino); 16782fa3cdfbSTheodore Ts'o } 16792fa3cdfbSTheodore Ts'o 16805a87b7a5STheodore Ts'o submit_io: 16811de3e3dfSTheodore Ts'o mpage_da_submit_io(mpd, mapp); 16825a87b7a5STheodore Ts'o mpd->io_done = 1; 168364769240SAlex Tomas } 168464769240SAlex Tomas 1685bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 1686bf068ee2SAneesh Kumar K.V (1 << BH_Delay) | (1 << BH_Unwritten)) 168764769240SAlex Tomas 168864769240SAlex Tomas /* 168964769240SAlex Tomas * mpage_add_bh_to_extent - try to add one more block to extent of blocks 169064769240SAlex Tomas * 169164769240SAlex Tomas * @mpd->lbh - extent of blocks 169264769240SAlex Tomas * @logical - logical number of the block in the file 169364769240SAlex Tomas * @bh - bh of the block (used to access block's state) 169464769240SAlex Tomas * 169564769240SAlex Tomas * the function is used to collect contig. blocks in same state 169664769240SAlex Tomas */ 169764769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 16988dc207c0STheodore Ts'o sector_t logical, size_t b_size, 16998dc207c0STheodore Ts'o unsigned long b_state) 170064769240SAlex Tomas { 170164769240SAlex Tomas sector_t next; 17028dc207c0STheodore Ts'o int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 170364769240SAlex Tomas 1704c445e3e0SEric Sandeen /* 1705c445e3e0SEric Sandeen * XXX Don't go larger than mballoc is willing to allocate 1706c445e3e0SEric Sandeen * This is a stopgap solution. 
We eventually need to fold
1707c445e3e0SEric Sandeen 	 * mpage_da_submit_io() into this function and then call
170879e83036SEric Sandeen 	 * ext4_map_blocks() multiple times in a loop
1709c445e3e0SEric Sandeen 	 */
1710c445e3e0SEric Sandeen 	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
1711c445e3e0SEric Sandeen 		goto flush_it;
1712c445e3e0SEric Sandeen 
1713525f4ed8SMingming Cao 	/* check if the reserved journal credits might overflow */
171412e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
1715525f4ed8SMingming Cao 		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1716525f4ed8SMingming Cao 			/*
1717525f4ed8SMingming Cao 			 * With non-extent format we are limited by the journal
1718525f4ed8SMingming Cao 			 * credits available.  The total credit needed to insert
1719525f4ed8SMingming Cao 			 * nrblocks contiguous blocks is dependent on the
1720525f4ed8SMingming Cao 			 * nrblocks.  So limit nrblocks.
1721525f4ed8SMingming Cao 			 */
1722525f4ed8SMingming Cao 			goto flush_it;
1723525f4ed8SMingming Cao 		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
1724525f4ed8SMingming Cao 				EXT4_MAX_TRANS_DATA) {
1725525f4ed8SMingming Cao 			/*
1726525f4ed8SMingming Cao 			 * Adding the new buffer_head would make it cross the
1727525f4ed8SMingming Cao 			 * allowed limit for which we have journal credits
1728525f4ed8SMingming Cao 			 * reserved.  So limit the new bh->b_size.
1729525f4ed8SMingming Cao 			 */
1730525f4ed8SMingming Cao 			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
1731525f4ed8SMingming Cao 						mpd->inode->i_blkbits;
1732525f4ed8SMingming Cao 			/* we will do mpage_da_submit_io in the next loop */
1733525f4ed8SMingming Cao 		}
1734525f4ed8SMingming Cao 	}
173564769240SAlex Tomas 	/*
173664769240SAlex Tomas 	 * First block in the extent
173764769240SAlex Tomas 	 */
17388dc207c0STheodore Ts'o 	if (mpd->b_size == 0) {
17398dc207c0STheodore Ts'o 		mpd->b_blocknr = logical;
17408dc207c0STheodore Ts'o 		mpd->b_size = b_size;
17418dc207c0STheodore Ts'o 		mpd->b_state = b_state & BH_FLAGS;
174264769240SAlex Tomas 		return;
174364769240SAlex Tomas 	}
174464769240SAlex Tomas 
17458dc207c0STheodore Ts'o 	next = mpd->b_blocknr + nrblocks;
174664769240SAlex Tomas 	/*
174764769240SAlex Tomas 	 * Can we merge the block into our big extent?
174864769240SAlex Tomas 	 */
17498dc207c0STheodore Ts'o 	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
17508dc207c0STheodore Ts'o 		mpd->b_size += b_size;
175164769240SAlex Tomas 		return;
175264769240SAlex Tomas 	}
175364769240SAlex Tomas 
1754525f4ed8SMingming Cao flush_it:
175564769240SAlex Tomas 	/*
175664769240SAlex Tomas 	 * We couldn't merge the block into our extent, so we
175764769240SAlex Tomas 	 * need to flush the current extent and start a new one
175864769240SAlex Tomas 	 */
17595a87b7a5STheodore Ts'o 	mpage_da_map_and_submit(mpd);
1760a1d6cc56SAneesh Kumar K.V 	return;
176164769240SAlex Tomas }
176264769240SAlex Tomas 
1763c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
176429fa89d0SAneesh Kumar K.V {
1765c364b22cSAneesh Kumar K.V 	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
176629fa89d0SAneesh Kumar K.V }
176729fa89d0SAneesh Kumar K.V 
176864769240SAlex Tomas /*
17695356f261SAditya Kali  * This function grabs code from the very beginning of
17705356f261SAditya Kali  * ext4_map_blocks, but assumes that the caller is from delayed write
17715356f261SAditya Kali  * time.  This function looks up the requested blocks and sets the
17725356f261SAditya Kali  * buffer delay bit under the protection of i_data_sem.
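 *
 * The lookup itself is read-only; for an extent-mapped inode its core is
 * roughly:
 *
 *	down_read(&EXT4_I(inode)->i_data_sem);
 *	retval = ext4_ext_map_blocks(NULL, inode, map, 0);
 *
 * i.e. a NULL handle and no create flags, so nothing is allocated here;
 * a block that is not found merely gets a reservation plus BH_New and
 * BH_Delay.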
17735356f261SAditya Kali */ 17745356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, 17755356f261SAditya Kali struct ext4_map_blocks *map, 17765356f261SAditya Kali struct buffer_head *bh) 17775356f261SAditya Kali { 17785356f261SAditya Kali int retval; 17795356f261SAditya Kali sector_t invalid_block = ~((sector_t) 0xffff); 17805356f261SAditya Kali 17815356f261SAditya Kali if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 17825356f261SAditya Kali invalid_block = ~0; 17835356f261SAditya Kali 17845356f261SAditya Kali map->m_flags = 0; 17855356f261SAditya Kali ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u," 17865356f261SAditya Kali "logical block %lu\n", inode->i_ino, map->m_len, 17875356f261SAditya Kali (unsigned long) map->m_lblk); 17885356f261SAditya Kali /* 17895356f261SAditya Kali * Try to see if we can get the block without requesting a new 17905356f261SAditya Kali * file system block. 17915356f261SAditya Kali */ 17925356f261SAditya Kali down_read((&EXT4_I(inode)->i_data_sem)); 1793*9c3569b5STao Ma if (ext4_has_inline_data(inode)) { 1794*9c3569b5STao Ma /* 1795*9c3569b5STao Ma * We will soon create blocks for this page, and let 1796*9c3569b5STao Ma * us pretend as if the blocks aren't allocated yet. 1797*9c3569b5STao Ma * In case of clusters, we have to handle the work 1798*9c3569b5STao Ma * of mapping from cluster so that the reserved space 1799*9c3569b5STao Ma * is calculated properly. 1800*9c3569b5STao Ma */ 1801*9c3569b5STao Ma if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) && 1802*9c3569b5STao Ma ext4_find_delalloc_cluster(inode, map->m_lblk)) 1803*9c3569b5STao Ma map->m_flags |= EXT4_MAP_FROM_CLUSTER; 1804*9c3569b5STao Ma retval = 0; 1805*9c3569b5STao Ma } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 18065356f261SAditya Kali retval = ext4_ext_map_blocks(NULL, inode, map, 0); 18075356f261SAditya Kali else 18085356f261SAditya Kali retval = ext4_ind_map_blocks(NULL, inode, map, 0); 18095356f261SAditya Kali 18105356f261SAditya Kali if (retval == 0) { 18115356f261SAditya Kali /* 18125356f261SAditya Kali * XXX: __block_prepare_write() unmaps passed block, 18135356f261SAditya Kali * is it OK? 18145356f261SAditya Kali */ 18155356f261SAditya Kali /* If the block was allocated from previously allocated cluster, 18165356f261SAditya Kali * then we dont need to reserve it again. */ 18175356f261SAditya Kali if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { 18185356f261SAditya Kali retval = ext4_da_reserve_space(inode, iblock); 18195356f261SAditya Kali if (retval) 18205356f261SAditya Kali /* not enough space to reserve */ 18215356f261SAditya Kali goto out_unlock; 18225356f261SAditya Kali } 18235356f261SAditya Kali 182451865fdaSZheng Liu retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len); 182551865fdaSZheng Liu if (retval) 182651865fdaSZheng Liu goto out_unlock; 182751865fdaSZheng Liu 18285356f261SAditya Kali /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served 18295356f261SAditya Kali * and it should not appear on the bh->b_state. 
18305356f261SAditya Kali 	 */
18315356f261SAditya Kali 		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
18325356f261SAditya Kali 
18335356f261SAditya Kali 		map_bh(bh, inode->i_sb, invalid_block);
18345356f261SAditya Kali 		set_buffer_new(bh);
18355356f261SAditya Kali 		set_buffer_delay(bh);
18365356f261SAditya Kali 	}
18375356f261SAditya Kali 
18385356f261SAditya Kali out_unlock:
18395356f261SAditya Kali 	up_read((&EXT4_I(inode)->i_data_sem));
18405356f261SAditya Kali 
18415356f261SAditya Kali 	return retval;
18425356f261SAditya Kali }
18435356f261SAditya Kali 
18445356f261SAditya Kali /*
1845b920c755STheodore Ts'o  * This is a special get_blocks_t callback which is used by
1846b920c755STheodore Ts'o  * ext4_da_write_begin().  It will either return a mapped block or
1847b920c755STheodore Ts'o  * reserve space for a single block.
184829fa89d0SAneesh Kumar K.V  *
184929fa89d0SAneesh Kumar K.V  * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
185029fa89d0SAneesh Kumar K.V  * We also have b_blocknr = -1 and b_bdev initialized properly.
185129fa89d0SAneesh Kumar K.V  *
185229fa89d0SAneesh Kumar K.V  * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
185329fa89d0SAneesh Kumar K.V  * We also have b_blocknr = the physical block mapping the unwritten extent
185429fa89d0SAneesh Kumar K.V  * and b_bdev initialized properly.
185564769240SAlex Tomas  */
1856*9c3569b5STao Ma int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
18572ed88685STheodore Ts'o 			   struct buffer_head *bh, int create)
185864769240SAlex Tomas {
18592ed88685STheodore Ts'o 	struct ext4_map_blocks map;
186064769240SAlex Tomas 	int ret = 0;
186164769240SAlex Tomas 
186264769240SAlex Tomas 	BUG_ON(create == 0);
18632ed88685STheodore Ts'o 	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
18642ed88685STheodore Ts'o 
18652ed88685STheodore Ts'o 	map.m_lblk = iblock;
18662ed88685STheodore Ts'o 	map.m_len = 1;
186764769240SAlex Tomas 
186864769240SAlex Tomas 	/*
186964769240SAlex Tomas 	 * First, we need to know whether the block is already allocated;
187064769240SAlex Tomas 	 * preallocated blocks are unmapped but should be treated
187164769240SAlex Tomas 	 * the same as allocated blocks.
187264769240SAlex Tomas 	 */
18735356f261SAditya Kali 	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
18745356f261SAditya Kali 	if (ret <= 0)
18752ed88685STheodore Ts'o 		return ret;
187664769240SAlex Tomas 
18772ed88685STheodore Ts'o 	map_bh(bh, inode->i_sb, map.m_pblk);
18782ed88685STheodore Ts'o 	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
18792ed88685STheodore Ts'o 
18802ed88685STheodore Ts'o 	if (buffer_unwritten(bh)) {
18812ed88685STheodore Ts'o 		/* A delayed write to an unwritten bh should be marked
18822ed88685STheodore Ts'o 		 * new and mapped.  Mapped ensures that we don't do
18832ed88685STheodore Ts'o 		 * get_block multiple times when we write to the same
18842ed88685STheodore Ts'o 		 * offset and new ensures that we do proper zero out
18852ed88685STheodore Ts'o 		 * for partial write.
18862ed88685STheodore Ts'o 		 */
18872ed88685STheodore Ts'o 		set_buffer_new(bh);
1888c8205636STheodore Ts'o 		set_buffer_mapped(bh);
18892ed88685STheodore Ts'o 	}
18902ed88685STheodore Ts'o 	return 0;
189164769240SAlex Tomas }
189261628a3fSMingming Cao 
1893b920c755STheodore Ts'o /*
1894b920c755STheodore Ts'o  * This function is used as a standard get_block_t callback function
1895b920c755STheodore Ts'o  * when there is no desire to allocate any blocks.  It is used as a
1896ebdec241SChristoph Hellwig  * callback function for block_write_begin() and block_write_full_page().
1897206f7ab4SChristoph Hellwig * These functions should only try to map a single block at a time. 1898b920c755STheodore Ts'o * 1899b920c755STheodore Ts'o * Since this function doesn't do block allocations even if the caller 1900b920c755STheodore Ts'o * requests it by passing in create=1, it is critically important that 1901b920c755STheodore Ts'o * any caller checks to make sure that any buffer heads are returned 1902b920c755STheodore Ts'o * by this function are either all already mapped or marked for 1903206f7ab4SChristoph Hellwig * delayed allocation before calling block_write_full_page(). Otherwise, 1904206f7ab4SChristoph Hellwig * b_blocknr could be left unitialized, and the page write functions will 1905206f7ab4SChristoph Hellwig * be taken by surprise. 1906b920c755STheodore Ts'o */ 1907b920c755STheodore Ts'o static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 1908f0e6c985SAneesh Kumar K.V struct buffer_head *bh_result, int create) 1909f0e6c985SAneesh Kumar K.V { 1910a2dc52b5STheodore Ts'o BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 19112ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result, 0); 191261628a3fSMingming Cao } 191361628a3fSMingming Cao 191462e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh) 191562e086beSAneesh Kumar K.V { 191662e086beSAneesh Kumar K.V get_bh(bh); 191762e086beSAneesh Kumar K.V return 0; 191862e086beSAneesh Kumar K.V } 191962e086beSAneesh Kumar K.V 192062e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh) 192162e086beSAneesh Kumar K.V { 192262e086beSAneesh Kumar K.V put_bh(bh); 192362e086beSAneesh Kumar K.V return 0; 192462e086beSAneesh Kumar K.V } 192562e086beSAneesh Kumar K.V 192662e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page, 192762e086beSAneesh Kumar K.V unsigned int len) 192862e086beSAneesh Kumar K.V { 192962e086beSAneesh Kumar K.V struct address_space *mapping = page->mapping; 193062e086beSAneesh Kumar K.V struct inode *inode = mapping->host; 19313fdcfb66STao Ma struct buffer_head *page_bufs = NULL; 193262e086beSAneesh Kumar K.V handle_t *handle = NULL; 19333fdcfb66STao Ma int ret = 0, err = 0; 19343fdcfb66STao Ma int inline_data = ext4_has_inline_data(inode); 19353fdcfb66STao Ma struct buffer_head *inode_bh = NULL; 193662e086beSAneesh Kumar K.V 1937cb20d518STheodore Ts'o ClearPageChecked(page); 19383fdcfb66STao Ma 19393fdcfb66STao Ma if (inline_data) { 19403fdcfb66STao Ma BUG_ON(page->index != 0); 19413fdcfb66STao Ma BUG_ON(len > ext4_get_max_inline_size(inode)); 19423fdcfb66STao Ma inode_bh = ext4_journalled_write_inline_data(inode, len, page); 19433fdcfb66STao Ma if (inode_bh == NULL) 19443fdcfb66STao Ma goto out; 19453fdcfb66STao Ma } else { 194662e086beSAneesh Kumar K.V page_bufs = page_buffers(page); 19473fdcfb66STao Ma if (!page_bufs) { 19483fdcfb66STao Ma BUG(); 19493fdcfb66STao Ma goto out; 19503fdcfb66STao Ma } 19513fdcfb66STao Ma ext4_walk_page_buffers(handle, page_bufs, 0, len, 19523fdcfb66STao Ma NULL, bget_one); 19533fdcfb66STao Ma } 195462e086beSAneesh Kumar K.V /* As soon as we unlock the page, it can go away, but we have 195562e086beSAneesh Kumar K.V * references to buffers so we are safe */ 195662e086beSAneesh Kumar K.V unlock_page(page); 195762e086beSAneesh Kumar K.V 195862e086beSAneesh Kumar K.V handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 195962e086beSAneesh Kumar K.V if (IS_ERR(handle)) { 196062e086beSAneesh Kumar K.V ret = PTR_ERR(handle); 
196162e086beSAneesh Kumar K.V 		goto out;
196262e086beSAneesh Kumar K.V 	}
196362e086beSAneesh Kumar K.V 
1964441c8508SCurt Wohlgemuth 	BUG_ON(!ext4_handle_valid(handle));
1965441c8508SCurt Wohlgemuth 
19663fdcfb66STao Ma 	if (inline_data) {
19673fdcfb66STao Ma 		ret = ext4_journal_get_write_access(handle, inode_bh);
19683fdcfb66STao Ma 
19693fdcfb66STao Ma 		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
19703fdcfb66STao Ma 
19713fdcfb66STao Ma 	} else {
1972f19d5870STao Ma 		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
197362e086beSAneesh Kumar K.V 					     do_journal_get_write_access);
197462e086beSAneesh Kumar K.V 
1975f19d5870STao Ma 		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
197662e086beSAneesh Kumar K.V 					     write_end_fn);
19773fdcfb66STao Ma 	}
197862e086beSAneesh Kumar K.V 	if (ret == 0)
197962e086beSAneesh Kumar K.V 		ret = err;
19802d859db3SJan Kara 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
198162e086beSAneesh Kumar K.V 	err = ext4_journal_stop(handle);
198262e086beSAneesh Kumar K.V 	if (!ret)
198362e086beSAneesh Kumar K.V 		ret = err;
198462e086beSAneesh Kumar K.V 
19853fdcfb66STao Ma 	if (!ext4_has_inline_data(inode))
19863fdcfb66STao Ma 		ext4_walk_page_buffers(handle, page_bufs, 0, len,
19873fdcfb66STao Ma 				       NULL, bput_one);
198819f5fb7aSTheodore Ts'o 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
198962e086beSAneesh Kumar K.V out:
19903fdcfb66STao Ma 	brelse(inode_bh);
199162e086beSAneesh Kumar K.V 	return ret;
199262e086beSAneesh Kumar K.V }
199362e086beSAneesh Kumar K.V 
199461628a3fSMingming Cao /*
199543ce1d23SAneesh Kumar K.V  * Note that we don't need to start a transaction unless we're journaling data
199643ce1d23SAneesh Kumar K.V  * because we should have holes filled from ext4_page_mkwrite().  We don't even
199743ce1d23SAneesh Kumar K.V  * need to add the inode to the transaction's list in ordered mode because if
199843ce1d23SAneesh Kumar K.V  * we are writing back data added by write(), the inode is already there and if
199943ce1d23SAneesh Kumar K.V  * we are writing back data modified via mmap(), no one guarantees in which
200043ce1d23SAneesh Kumar K.V  * transaction the data will hit the disk.  In case we are journaling data, we
200143ce1d23SAneesh Kumar K.V  * cannot start a transaction directly because transaction start ranks above
200243ce1d23SAneesh Kumar K.V  * page lock so we have to do some magic.
200343ce1d23SAneesh Kumar K.V  *
2004b920c755STheodore Ts'o  * This function can get called via...
2005b920c755STheodore Ts'o  *   - ext4_da_writepages after taking page lock (have journal handle)
2006b920c755STheodore Ts'o  *   - journal_submit_inode_data_buffers (no journal handle)
2007f6463b0dSArtem Bityutskiy  *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
2008b920c755STheodore Ts'o  *   - grab_page_cache when doing write_begin (have journal handle)
200943ce1d23SAneesh Kumar K.V  *
201043ce1d23SAneesh Kumar K.V  * We don't do any block allocation in this function.  If we have a page with
201143ce1d23SAneesh Kumar K.V  * multiple blocks we need to write those buffer_heads that are mapped.  This
201243ce1d23SAneesh Kumar K.V  * is important for mmap-based writes.
So if, with blocksize 1K, we do
201343ce1d23SAneesh Kumar K.V  *	truncate(f, 1024);
201443ce1d23SAneesh Kumar K.V  *	a = mmap(f, 0, 4096);
201543ce1d23SAneesh Kumar K.V  *	a[0] = 'a';
201643ce1d23SAneesh Kumar K.V  *	truncate(f, 4096);
201743ce1d23SAneesh Kumar K.V  * we have in the page the first buffer_head mapped via the page_mkwrite
201890802ed9SPaul Bolle  * callback but the other buffer_heads would be unmapped but dirty (dirtied
201943ce1d23SAneesh Kumar K.V  * via do_wp_page).  So writepage should write the first block.  If we modify
202043ce1d23SAneesh Kumar K.V  * the mmap area beyond 1024 we will again get a page_fault and the
202143ce1d23SAneesh Kumar K.V  * page_mkwrite callback will do the block allocation and mark the
202243ce1d23SAneesh Kumar K.V  * buffer_heads mapped.
202343ce1d23SAneesh Kumar K.V  *
202443ce1d23SAneesh Kumar K.V  * We redirty the page if we have any buffer_heads that are either delayed or
202543ce1d23SAneesh Kumar K.V  * unwritten in the page.
202643ce1d23SAneesh Kumar K.V  *
202743ce1d23SAneesh Kumar K.V  * We can get recursively called as shown below.
202843ce1d23SAneesh Kumar K.V  *
202943ce1d23SAneesh Kumar K.V  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
203043ce1d23SAneesh Kumar K.V  *	ext4_writepage()
203143ce1d23SAneesh Kumar K.V  *
203243ce1d23SAneesh Kumar K.V  * But since we don't do any block allocation we should not deadlock.  The page
203343ce1d23SAneesh Kumar K.V  * also has the dirty flag cleared so we don't get a recursive page_lock.
203461628a3fSMingming Cao  */
203543ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page,
203664769240SAlex Tomas 			  struct writeback_control *wbc)
203764769240SAlex Tomas {
2038a42afc5fSTheodore Ts'o 	int ret = 0, commit_write = 0;
203961628a3fSMingming Cao 	loff_t size;
2040498e5f24STheodore Ts'o 	unsigned int len;
2041744692dcSJiaying Zhang 	struct buffer_head *page_bufs = NULL;
204261628a3fSMingming Cao 	struct inode *inode = page->mapping->host;
204364769240SAlex Tomas 
2044a9c667f8SLukas Czerner 	trace_ext4_writepage(page);
204561628a3fSMingming Cao 	size = i_size_read(inode);
204661628a3fSMingming Cao 	if (page->index == size >> PAGE_CACHE_SHIFT)
204761628a3fSMingming Cao 		len = size & ~PAGE_CACHE_MASK;
204861628a3fSMingming Cao 	else
204961628a3fSMingming Cao 		len = PAGE_CACHE_SIZE;
205061628a3fSMingming Cao 
2051a42afc5fSTheodore Ts'o 	/*
2052a42afc5fSTheodore Ts'o 	 * If the page does not have buffers (for whatever reason),
2053a107e5a3STheodore Ts'o 	 * try to create them using __block_write_begin.  If this
2054a42afc5fSTheodore Ts'o 	 * fails, redirty the page and move on.
2055a42afc5fSTheodore Ts'o 	 */
2056b1142e8fSTheodore Ts'o 	if (!page_has_buffers(page)) {
2057a107e5a3STheodore Ts'o 		if (__block_write_begin(page, 0, len,
2058a42afc5fSTheodore Ts'o 					noalloc_get_block_write)) {
2059a42afc5fSTheodore Ts'o 		redirty_page:
2060a42afc5fSTheodore Ts'o 			redirty_page_for_writepage(wbc, page);
2061a42afc5fSTheodore Ts'o 			unlock_page(page);
2062a42afc5fSTheodore Ts'o 			return 0;
2063a42afc5fSTheodore Ts'o 		}
2064a42afc5fSTheodore Ts'o 		commit_write = 1;
2065a42afc5fSTheodore Ts'o 	}
2066f0e6c985SAneesh Kumar K.V 	page_bufs = page_buffers(page);
2067f19d5870STao Ma 	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2068c364b22cSAneesh Kumar K.V 				   ext4_bh_delay_or_unwritten)) {
206961628a3fSMingming Cao 		/*
2070b1142e8fSTheodore Ts'o 		 * We don't want to do block allocation, so redirty
2071b1142e8fSTheodore Ts'o 		 * the page and return.
We may reach here when we do
2072b1142e8fSTheodore Ts'o 		 * a journal commit via journal_submit_inode_data_buffers.
2073966dbde2SMel Gorman 		 * We can also reach here via shrink_page_list but it
2074966dbde2SMel Gorman 		 * should never be for direct reclaim so warn if that
2075966dbde2SMel Gorman 		 * happens.
2076f0e6c985SAneesh Kumar K.V 		 */
2077966dbde2SMel Gorman 		WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
2078966dbde2SMel Gorman 			     PF_MEMALLOC);
2079a42afc5fSTheodore Ts'o 		goto redirty_page;
2080f0e6c985SAneesh Kumar K.V 	}
2081a42afc5fSTheodore Ts'o 	if (commit_write)
2082ed9b3e33SAneesh Kumar K.V 		/* now mark the buffer_heads as dirty and uptodate */
2083b767e78aSAneesh Kumar K.V 		block_commit_write(page, 0, len);
208464769240SAlex Tomas 
2085cb20d518STheodore Ts'o 	if (PageChecked(page) && ext4_should_journal_data(inode))
208643ce1d23SAneesh Kumar K.V 		/*
208743ce1d23SAneesh Kumar K.V 		 * It's mmapped pagecache.  Add buffers and journal it.  There
208843ce1d23SAneesh Kumar K.V 		 * doesn't seem much point in redirtying the page here.
208943ce1d23SAneesh Kumar K.V 		 */
20903f0ca309SWu Fengguang 		return __ext4_journalled_writepage(page, len);
209143ce1d23SAneesh Kumar K.V 
2092a42afc5fSTheodore Ts'o 	if (buffer_uninit(page_bufs)) {
2093744692dcSJiaying Zhang 		ext4_set_bh_endio(page_bufs, inode);
2094744692dcSJiaying Zhang 		ret = block_write_full_page_endio(page, noalloc_get_block_write,
2095744692dcSJiaying Zhang 						  wbc, ext4_end_io_buffer_write);
2096744692dcSJiaying Zhang 	} else
2097b920c755STheodore Ts'o 		ret = block_write_full_page(page, noalloc_get_block_write,
2098f0e6c985SAneesh Kumar K.V 					    wbc);
209964769240SAlex Tomas 
210064769240SAlex Tomas 	return ret;
210164769240SAlex Tomas }
210264769240SAlex Tomas 
210361628a3fSMingming Cao /*
2104525f4ed8SMingming Cao  * This is called via ext4_da_writepages() to
210525985edcSLucas De Marchi  * calculate the total number of credits to reserve to fit
2106525f4ed8SMingming Cao  * a single extent allocation into a single transaction;
2107525f4ed8SMingming Cao  * ext4_da_writepages() will loop calling this before
2108525f4ed8SMingming Cao  * the block allocation.
210961628a3fSMingming Cao  */
2110525f4ed8SMingming Cao 
2111525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode)
2112525f4ed8SMingming Cao {
2113525f4ed8SMingming Cao 	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2114525f4ed8SMingming Cao 
2115525f4ed8SMingming Cao 	/*
2116525f4ed8SMingming Cao 	 * With non-extent format the journal credit needed to
2117525f4ed8SMingming Cao 	 * insert nrblocks contiguous blocks is dependent on the
2118525f4ed8SMingming Cao 	 * number of contiguous blocks.  So we will limit the
2119525f4ed8SMingming Cao 	 * number of contiguous blocks to a sane value.
2120525f4ed8SMingming Cao 	 */
212112e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2122525f4ed8SMingming Cao 	    (max_blocks > EXT4_MAX_TRANS_DATA))
2123525f4ed8SMingming Cao 		max_blocks = EXT4_MAX_TRANS_DATA;
2124525f4ed8SMingming Cao 
2125525f4ed8SMingming Cao 	return ext4_chunk_trans_blocks(inode, max_blocks);
2126525f4ed8SMingming Cao }
212761628a3fSMingming Cao 
21288e48dcfbSTheodore Ts'o /*
21298e48dcfbSTheodore Ts'o  * write_cache_pages_da - walk the list of dirty pages of the given
21308eb9e5ceSTheodore Ts'o  * address space and accumulate pages that need writing, and call
2131168fc022STheodore Ts'o  * mpage_da_map_and_submit to map a single contiguous memory region
2132168fc022STheodore Ts'o  * and then write them.
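 *
 * Roughly, each call boils down to (pseudo-code sketch of the loop below):
 *
 *	for each dirty page in the range:
 *		if the page extends the current extent:
 *			mpage_add_bh_to_extent()   - grow the extent
 *		else:
 *			mpage_da_map_and_submit()  - flush what we have
 *
 * (A sketch of the control flow only, not additional behaviour.)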
21338e48dcfbSTheodore Ts'o */ 2134*9c3569b5STao Ma static int write_cache_pages_da(handle_t *handle, 2135*9c3569b5STao Ma struct address_space *mapping, 21368e48dcfbSTheodore Ts'o struct writeback_control *wbc, 213772f84e65SEric Sandeen struct mpage_da_data *mpd, 213872f84e65SEric Sandeen pgoff_t *done_index) 21398e48dcfbSTheodore Ts'o { 21408eb9e5ceSTheodore Ts'o struct buffer_head *bh, *head; 2141168fc022STheodore Ts'o struct inode *inode = mapping->host; 21428e48dcfbSTheodore Ts'o struct pagevec pvec; 21434f01b02cSTheodore Ts'o unsigned int nr_pages; 21444f01b02cSTheodore Ts'o sector_t logical; 21454f01b02cSTheodore Ts'o pgoff_t index, end; 21468e48dcfbSTheodore Ts'o long nr_to_write = wbc->nr_to_write; 21474f01b02cSTheodore Ts'o int i, tag, ret = 0; 21488e48dcfbSTheodore Ts'o 2149168fc022STheodore Ts'o memset(mpd, 0, sizeof(struct mpage_da_data)); 2150168fc022STheodore Ts'o mpd->wbc = wbc; 2151168fc022STheodore Ts'o mpd->inode = inode; 21528e48dcfbSTheodore Ts'o pagevec_init(&pvec, 0); 21538e48dcfbSTheodore Ts'o index = wbc->range_start >> PAGE_CACHE_SHIFT; 21548e48dcfbSTheodore Ts'o end = wbc->range_end >> PAGE_CACHE_SHIFT; 21558e48dcfbSTheodore Ts'o 21566e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 21575b41d924SEric Sandeen tag = PAGECACHE_TAG_TOWRITE; 21585b41d924SEric Sandeen else 21595b41d924SEric Sandeen tag = PAGECACHE_TAG_DIRTY; 21605b41d924SEric Sandeen 216172f84e65SEric Sandeen *done_index = index; 21624f01b02cSTheodore Ts'o while (index <= end) { 21635b41d924SEric Sandeen nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 21648e48dcfbSTheodore Ts'o min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 21658e48dcfbSTheodore Ts'o if (nr_pages == 0) 21664f01b02cSTheodore Ts'o return 0; 21678e48dcfbSTheodore Ts'o 21688e48dcfbSTheodore Ts'o for (i = 0; i < nr_pages; i++) { 21698e48dcfbSTheodore Ts'o struct page *page = pvec.pages[i]; 21708e48dcfbSTheodore Ts'o 21718e48dcfbSTheodore Ts'o /* 21728e48dcfbSTheodore Ts'o * At this point, the page may be truncated or 21738e48dcfbSTheodore Ts'o * invalidated (changing page->mapping to NULL), or 21748e48dcfbSTheodore Ts'o * even swizzled back from swapper_space to tmpfs file 21758e48dcfbSTheodore Ts'o * mapping. However, page->index will not change 21768e48dcfbSTheodore Ts'o * because we have a reference on the page. 
21778e48dcfbSTheodore Ts'o */ 21784f01b02cSTheodore Ts'o if (page->index > end) 21794f01b02cSTheodore Ts'o goto out; 21808e48dcfbSTheodore Ts'o 218172f84e65SEric Sandeen *done_index = page->index + 1; 218272f84e65SEric Sandeen 218378aaced3STheodore Ts'o /* 218478aaced3STheodore Ts'o * If we can't merge this page, and we have 218578aaced3STheodore Ts'o * accumulated a contiguous region, write it out 218678aaced3STheodore Ts'o */ 218778aaced3STheodore Ts'o if ((mpd->next_page != page->index) && 218878aaced3STheodore Ts'o (mpd->next_page != mpd->first_page)) { 218978aaced3STheodore Ts'o mpage_da_map_and_submit(mpd); 219078aaced3STheodore Ts'o goto ret_extent_tail; 219178aaced3STheodore Ts'o } 219278aaced3STheodore Ts'o 21938e48dcfbSTheodore Ts'o lock_page(page); 21948e48dcfbSTheodore Ts'o 21958e48dcfbSTheodore Ts'o /* 21964f01b02cSTheodore Ts'o * If the page is no longer dirty, or its 21974f01b02cSTheodore Ts'o * mapping no longer corresponds to the inode we 21984f01b02cSTheodore Ts'o * are writing (which means it has been 21994f01b02cSTheodore Ts'o * truncated or invalidated), or the page is 22004f01b02cSTheodore Ts'o * already under writeback and we are not 22014f01b02cSTheodore Ts'o * doing a data integrity writeback, skip the page 22028e48dcfbSTheodore Ts'o */ 22034f01b02cSTheodore Ts'o if (!PageDirty(page) || 22044f01b02cSTheodore Ts'o (PageWriteback(page) && 22054f01b02cSTheodore Ts'o (wbc->sync_mode == WB_SYNC_NONE)) || 22064f01b02cSTheodore Ts'o unlikely(page->mapping != mapping)) { 22078e48dcfbSTheodore Ts'o unlock_page(page); 22088e48dcfbSTheodore Ts'o continue; 22098e48dcfbSTheodore Ts'o } 22108e48dcfbSTheodore Ts'o 22118e48dcfbSTheodore Ts'o wait_on_page_writeback(page); 22128e48dcfbSTheodore Ts'o BUG_ON(PageWriteback(page)); 22138e48dcfbSTheodore Ts'o 2214*9c3569b5STao Ma /* 2215*9c3569b5STao Ma * If we have inline data and arrive here, it means that 2216*9c3569b5STao Ma * we will soon create the block for the 1st page, so 2217*9c3569b5STao Ma * we'd better clear the inline data here. 2218*9c3569b5STao Ma */ 2219*9c3569b5STao Ma if (ext4_has_inline_data(inode)) { 2220*9c3569b5STao Ma BUG_ON(ext4_test_inode_state(inode, 2221*9c3569b5STao Ma EXT4_STATE_MAY_INLINE_DATA)); 2222*9c3569b5STao Ma ext4_destroy_inline_data(handle, inode); 2223*9c3569b5STao Ma } 2224*9c3569b5STao Ma 2225168fc022STheodore Ts'o if (mpd->next_page != page->index) 22268eb9e5ceSTheodore Ts'o mpd->first_page = page->index; 22278eb9e5ceSTheodore Ts'o mpd->next_page = page->index + 1; 22288eb9e5ceSTheodore Ts'o logical = (sector_t) page->index << 22298eb9e5ceSTheodore Ts'o (PAGE_CACHE_SHIFT - inode->i_blkbits); 22308eb9e5ceSTheodore Ts'o 22318eb9e5ceSTheodore Ts'o if (!page_has_buffers(page)) { 22324f01b02cSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical, 22334f01b02cSTheodore Ts'o PAGE_CACHE_SIZE, 22348eb9e5ceSTheodore Ts'o (1 << BH_Dirty) | (1 << BH_Uptodate)); 22354f01b02cSTheodore Ts'o if (mpd->io_done) 22364f01b02cSTheodore Ts'o goto ret_extent_tail; 22378e48dcfbSTheodore Ts'o } else { 22388eb9e5ceSTheodore Ts'o /* 22394f01b02cSTheodore Ts'o * Page with regular buffer heads, 22404f01b02cSTheodore Ts'o * just add all dirty ones 22418eb9e5ceSTheodore Ts'o */ 22428eb9e5ceSTheodore Ts'o head = page_buffers(page); 22438eb9e5ceSTheodore Ts'o bh = head; 22448eb9e5ceSTheodore Ts'o do { 22458eb9e5ceSTheodore Ts'o BUG_ON(buffer_locked(bh)); 22468eb9e5ceSTheodore Ts'o /* 22478eb9e5ceSTheodore Ts'o * We need to try to allocate 22488eb9e5ceSTheodore Ts'o * unmapped blocks in the same page.
22498eb9e5ceSTheodore Ts'o * Otherwise we won't make progress 22508eb9e5ceSTheodore Ts'o * with the page in ext4_writepage 22518eb9e5ceSTheodore Ts'o */ 22528eb9e5ceSTheodore Ts'o if (ext4_bh_delay_or_unwritten(NULL, bh)) { 22538eb9e5ceSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical, 22548eb9e5ceSTheodore Ts'o bh->b_size, 22558eb9e5ceSTheodore Ts'o bh->b_state); 22564f01b02cSTheodore Ts'o if (mpd->io_done) 22574f01b02cSTheodore Ts'o goto ret_extent_tail; 22588eb9e5ceSTheodore Ts'o } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 22598eb9e5ceSTheodore Ts'o /* 22604f01b02cSTheodore Ts'o * mapped dirty buffer. We need 22614f01b02cSTheodore Ts'o * to update the b_state 22624f01b02cSTheodore Ts'o * because we look at b_state 22634f01b02cSTheodore Ts'o * in mpage_da_map_blocks. We 22644f01b02cSTheodore Ts'o * don't update b_size because 22654f01b02cSTheodore Ts'o * if we find an unmapped 22664f01b02cSTheodore Ts'o * buffer_head later we need to 22674f01b02cSTheodore Ts'o * use the b_state flag of that 22684f01b02cSTheodore Ts'o * buffer_head. 22698eb9e5ceSTheodore Ts'o */ 22708eb9e5ceSTheodore Ts'o if (mpd->b_size == 0) 22718eb9e5ceSTheodore Ts'o mpd->b_state = bh->b_state & BH_FLAGS; 22728e48dcfbSTheodore Ts'o } 22738eb9e5ceSTheodore Ts'o logical++; 22748eb9e5ceSTheodore Ts'o } while ((bh = bh->b_this_page) != head); 22758e48dcfbSTheodore Ts'o } 22768e48dcfbSTheodore Ts'o 22778e48dcfbSTheodore Ts'o if (nr_to_write > 0) { 22788e48dcfbSTheodore Ts'o nr_to_write--; 22798e48dcfbSTheodore Ts'o if (nr_to_write == 0 && 22804f01b02cSTheodore Ts'o wbc->sync_mode == WB_SYNC_NONE) 22818e48dcfbSTheodore Ts'o /* 22828e48dcfbSTheodore Ts'o * We stop writing back only if we are 22838e48dcfbSTheodore Ts'o * not doing integrity sync. In case of 22848e48dcfbSTheodore Ts'o * integrity sync we have to keep going 22858e48dcfbSTheodore Ts'o * because someone may be concurrently 22868e48dcfbSTheodore Ts'o * dirtying pages, and we might have 22878e48dcfbSTheodore Ts'o * synced a lot of newly appeared dirty 22888e48dcfbSTheodore Ts'o * pages, but have not synced all of the 22898e48dcfbSTheodore Ts'o * old dirty pages. 
22908e48dcfbSTheodore Ts'o */ 22914f01b02cSTheodore Ts'o goto out; 22928e48dcfbSTheodore Ts'o } 22938e48dcfbSTheodore Ts'o } 22948e48dcfbSTheodore Ts'o pagevec_release(&pvec); 22958e48dcfbSTheodore Ts'o cond_resched(); 22968e48dcfbSTheodore Ts'o } 22974f01b02cSTheodore Ts'o return 0; 22984f01b02cSTheodore Ts'o ret_extent_tail: 22994f01b02cSTheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL; 23008eb9e5ceSTheodore Ts'o out: 23018eb9e5ceSTheodore Ts'o pagevec_release(&pvec); 23028eb9e5ceSTheodore Ts'o cond_resched(); 23038e48dcfbSTheodore Ts'o return ret; 23048e48dcfbSTheodore Ts'o } 23058e48dcfbSTheodore Ts'o 23068e48dcfbSTheodore Ts'o 230764769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping, 230864769240SAlex Tomas struct writeback_control *wbc) 230964769240SAlex Tomas { 231022208dedSAneesh Kumar K.V pgoff_t index; 231122208dedSAneesh Kumar K.V int range_whole = 0; 231261628a3fSMingming Cao handle_t *handle = NULL; 2313df22291fSAneesh Kumar K.V struct mpage_da_data mpd; 23145e745b04SAneesh Kumar K.V struct inode *inode = mapping->host; 2315498e5f24STheodore Ts'o int pages_written = 0; 231655138e0bSTheodore Ts'o unsigned int max_pages; 23172acf2c26SAneesh Kumar K.V int range_cyclic, cycled = 1, io_done = 0; 231855138e0bSTheodore Ts'o int needed_blocks, ret = 0; 231955138e0bSTheodore Ts'o long desired_nr_to_write, nr_to_writebump = 0; 2320de89de6eSTheodore Ts'o loff_t range_start = wbc->range_start; 23215e745b04SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 232272f84e65SEric Sandeen pgoff_t done_index = 0; 23235b41d924SEric Sandeen pgoff_t end; 23241bce63d1SShaohua Li struct blk_plug plug; 232561628a3fSMingming Cao 23269bffad1eSTheodore Ts'o trace_ext4_da_writepages(inode, wbc); 2327ba80b101STheodore Ts'o 232861628a3fSMingming Cao /* 232961628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting 233061628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput() 233161628a3fSMingming Cao * because that could violate lock ordering on umount 233261628a3fSMingming Cao */ 2333a1d6cc56SAneesh Kumar K.V if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 233461628a3fSMingming Cao return 0; 23352a21e37eSTheodore Ts'o 23362a21e37eSTheodore Ts'o /* 23372a21e37eSTheodore Ts'o * If the filesystem has aborted, it is read-only, so return 23382a21e37eSTheodore Ts'o * right away instead of dumping stack traces later on that 23392a21e37eSTheodore Ts'o * will obscure the real source of the problem. We test 23404ab2f15bSTheodore Ts'o * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 23412a21e37eSTheodore Ts'o * the latter could be true if the filesystem is mounted 23422a21e37eSTheodore Ts'o * read-only, and in that case, ext4_da_writepages should 23432a21e37eSTheodore Ts'o * *never* be called, so if that ever happens, we would want 23442a21e37eSTheodore Ts'o * the stack trace. 
23452a21e37eSTheodore Ts'o */ 23464ab2f15bSTheodore Ts'o if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 23472a21e37eSTheodore Ts'o return -EROFS; 23482a21e37eSTheodore Ts'o 234922208dedSAneesh Kumar K.V if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 235022208dedSAneesh Kumar K.V range_whole = 1; 235161628a3fSMingming Cao 23522acf2c26SAneesh Kumar K.V range_cyclic = wbc->range_cyclic; 23532acf2c26SAneesh Kumar K.V if (wbc->range_cyclic) { 235422208dedSAneesh Kumar K.V index = mapping->writeback_index; 23552acf2c26SAneesh Kumar K.V if (index) 23562acf2c26SAneesh Kumar K.V cycled = 0; 23572acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 23582acf2c26SAneesh Kumar K.V wbc->range_end = LLONG_MAX; 23592acf2c26SAneesh Kumar K.V wbc->range_cyclic = 0; 23605b41d924SEric Sandeen end = -1; 23615b41d924SEric Sandeen } else { 236222208dedSAneesh Kumar K.V index = wbc->range_start >> PAGE_CACHE_SHIFT; 23635b41d924SEric Sandeen end = wbc->range_end >> PAGE_CACHE_SHIFT; 23645b41d924SEric Sandeen } 2365a1d6cc56SAneesh Kumar K.V 236655138e0bSTheodore Ts'o /* 236755138e0bSTheodore Ts'o * This works around two forms of stupidity. The first is in 236855138e0bSTheodore Ts'o * the writeback code, which caps the maximum number of pages 236955138e0bSTheodore Ts'o * written at 1024 pages. This is wrong on multiple 237055138e0bSTheodore Ts'o * levels; different architectures have different page sizes, 237155138e0bSTheodore Ts'o * which changes the maximum amount of data which gets 237255138e0bSTheodore Ts'o * written. Secondly, 4 megabytes is way too small. XFS 237355138e0bSTheodore Ts'o * forces this value to be 16 megabytes by multiplying 237455138e0bSTheodore Ts'o * the nr_to_write parameter by four, and then relies on its 237555138e0bSTheodore Ts'o * allocator to allocate larger extents to make them 237655138e0bSTheodore Ts'o * contiguous. Unfortunately this brings us to the second 237755138e0bSTheodore Ts'o * stupidity, which is that ext4's mballoc code only allocates 237855138e0bSTheodore Ts'o * at most 2048 blocks. So we force contiguous writes up to 237955138e0bSTheodore Ts'o * the number of dirty blocks in the inode, or 238055138e0bSTheodore Ts'o * sbi->max_writeback_mb_bump, whichever is smaller.
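 * For example (illustrative numbers only): with 4k pages * (PAGE_CACHE_SHIFT == 12) and a bump of 128 megabytes, the * max_pages computed below works out to 128 << (20 - 12) = 32768 pages.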
238155138e0bSTheodore Ts'o */ 238255138e0bSTheodore Ts'o max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); 2383b443e733SEric Sandeen if (!range_cyclic && range_whole) { 2384b443e733SEric Sandeen if (wbc->nr_to_write == LONG_MAX) 2385b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write; 238655138e0bSTheodore Ts'o else 2387b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write * 8; 2388b443e733SEric Sandeen } else 238955138e0bSTheodore Ts'o desired_nr_to_write = ext4_num_dirty_pages(inode, index, 239055138e0bSTheodore Ts'o max_pages); 239155138e0bSTheodore Ts'o if (desired_nr_to_write > max_pages) 239255138e0bSTheodore Ts'o desired_nr_to_write = max_pages; 239355138e0bSTheodore Ts'o 239455138e0bSTheodore Ts'o if (wbc->nr_to_write < desired_nr_to_write) { 239555138e0bSTheodore Ts'o nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; 239655138e0bSTheodore Ts'o wbc->nr_to_write = desired_nr_to_write; 239755138e0bSTheodore Ts'o } 239855138e0bSTheodore Ts'o 23992acf2c26SAneesh Kumar K.V retry: 24006e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 24015b41d924SEric Sandeen tag_pages_for_writeback(mapping, index, end); 24025b41d924SEric Sandeen 24031bce63d1SShaohua Li blk_start_plug(&plug); 240422208dedSAneesh Kumar K.V while (!ret && wbc->nr_to_write > 0) { 2405a1d6cc56SAneesh Kumar K.V 2406a1d6cc56SAneesh Kumar K.V /* 2407a1d6cc56SAneesh Kumar K.V * we insert one extent at a time, so we need the 2408a1d6cc56SAneesh Kumar K.V * credits for a single extent allocation. 2409a1d6cc56SAneesh Kumar K.V * Journalled mode is currently not supported 2410a1d6cc56SAneesh Kumar K.V * by delalloc 2411a1d6cc56SAneesh Kumar K.V */ 2412a1d6cc56SAneesh Kumar K.V BUG_ON(ext4_should_journal_data(inode)); 2413525f4ed8SMingming Cao needed_blocks = ext4_da_writepages_trans_blocks(inode); 2414a1d6cc56SAneesh Kumar K.V 241561628a3fSMingming Cao /* start a new transaction */ 241661628a3fSMingming Cao handle = ext4_journal_start(inode, needed_blocks); 241761628a3fSMingming Cao if (IS_ERR(handle)) { 241861628a3fSMingming Cao ret = PTR_ERR(handle); 24191693918eSTheodore Ts'o ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2420fbe845ddSCurt Wohlgemuth "%ld pages, ino %lu; err %d", __func__, 2421a1d6cc56SAneesh Kumar K.V wbc->nr_to_write, inode->i_ino, ret); 24223c1fcb2cSNamjae Jeon blk_finish_plug(&plug); 242361628a3fSMingming Cao goto out_writepages; 242461628a3fSMingming Cao } 2425f63e6005STheodore Ts'o 2426f63e6005STheodore Ts'o /* 24278eb9e5ceSTheodore Ts'o * Now call write_cache_pages_da() to find the next 2428f63e6005STheodore Ts'o * contiguous region of logical blocks that need 24298eb9e5ceSTheodore Ts'o * blocks to be allocated by ext4 and submit them. 2430f63e6005STheodore Ts'o */ 2431*9c3569b5STao Ma ret = write_cache_pages_da(handle, mapping, 2432*9c3569b5STao Ma wbc, &mpd, &done_index); 2433f63e6005STheodore Ts'o /* 2434af901ca1SAndré Goddard Rosa * If we have a contiguous extent of pages and we 2435f63e6005STheodore Ts'o * haven't done the I/O yet, map the blocks and submit 2436f63e6005STheodore Ts'o * them for I/O.
2437f63e6005STheodore Ts'o */ 2438f63e6005STheodore Ts'o if (!mpd.io_done && mpd.next_page != mpd.first_page) { 24395a87b7a5STheodore Ts'o mpage_da_map_and_submit(&mpd); 2440f63e6005STheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL; 2441f63e6005STheodore Ts'o } 2442b3a3ca8cSTheodore Ts'o trace_ext4_da_write_pages(inode, &mpd); 2443f63e6005STheodore Ts'o wbc->nr_to_write -= mpd.pages_written; 2444df22291fSAneesh Kumar K.V 244561628a3fSMingming Cao ext4_journal_stop(handle); 2446df22291fSAneesh Kumar K.V 24478f64b32eSEric Sandeen if ((mpd.retval == -ENOSPC) && sbi->s_journal) { 244822208dedSAneesh Kumar K.V /* commit the transaction which would 244922208dedSAneesh Kumar K.V * free blocks released in the transaction 245022208dedSAneesh Kumar K.V * and try again 245122208dedSAneesh Kumar K.V */ 2452df22291fSAneesh Kumar K.V jbd2_journal_force_commit_nested(sbi->s_journal); 245322208dedSAneesh Kumar K.V ret = 0; 245422208dedSAneesh Kumar K.V } else if (ret == MPAGE_DA_EXTENT_TAIL) { 2455a1d6cc56SAneesh Kumar K.V /* 24568de49e67SKazuya Mio * Got one extent; now try with the rest of the pages. 24578de49e67SKazuya Mio * If mpd.retval is set to -EIO, the journal is aborted, 24588de49e67SKazuya Mio * so we don't need to write any more. 2459a1d6cc56SAneesh Kumar K.V */ 246022208dedSAneesh Kumar K.V pages_written += mpd.pages_written; 24618de49e67SKazuya Mio ret = mpd.retval; 24622acf2c26SAneesh Kumar K.V io_done = 1; 246322208dedSAneesh Kumar K.V } else if (wbc->nr_to_write) 246461628a3fSMingming Cao /* 246561628a3fSMingming Cao * There is no more writeout needed, 246661628a3fSMingming Cao * or we requested a nonblocking writeout 246761628a3fSMingming Cao * and found the device congested 246861628a3fSMingming Cao */ 246961628a3fSMingming Cao break; 247061628a3fSMingming Cao } 24711bce63d1SShaohua Li blk_finish_plug(&plug); 24722acf2c26SAneesh Kumar K.V if (!io_done && !cycled) { 24732acf2c26SAneesh Kumar K.V cycled = 1; 24742acf2c26SAneesh Kumar K.V index = 0; 24752acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 24762acf2c26SAneesh Kumar K.V wbc->range_end = mapping->writeback_index - 1; 24772acf2c26SAneesh Kumar K.V goto retry; 24782acf2c26SAneesh Kumar K.V } 247961628a3fSMingming Cao 248022208dedSAneesh Kumar K.V /* Update index */ 24812acf2c26SAneesh Kumar K.V wbc->range_cyclic = range_cyclic; 248222208dedSAneesh Kumar K.V if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 248322208dedSAneesh Kumar K.V /* 248422208dedSAneesh Kumar K.V * set the writeback_index so that range_cyclic 248522208dedSAneesh Kumar K.V * mode will write it back later 248622208dedSAneesh Kumar K.V */ 248772f84e65SEric Sandeen mapping->writeback_index = done_index; 2488a1d6cc56SAneesh Kumar K.V 248961628a3fSMingming Cao out_writepages: 249022208dedSAneesh Kumar K.V wbc->nr_to_write -= nr_to_writebump; 2491de89de6eSTheodore Ts'o wbc->range_start = range_start; 24929bffad1eSTheodore Ts'o trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 249361628a3fSMingming Cao return ret; 249464769240SAlex Tomas } 249564769240SAlex Tomas 249679f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb) 249779f0be8dSAneesh Kumar K.V { 249879f0be8dSAneesh Kumar K.V s64 free_blocks, dirty_blocks; 249979f0be8dSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 250079f0be8dSAneesh Kumar K.V 250179f0be8dSAneesh Kumar K.V /* 250279f0be8dSAneesh Kumar K.V * switch to non-delalloc mode if we are running low 250379f0be8dSAneesh Kumar K.V * on free blocks.
The free block accounting via percpu 2504179f7ebfSEric Dumazet * counters can get slightly wrong, with up to percpu_counter_batch 250579f0be8dSAneesh Kumar K.V * accumulated on each CPU without updating the global counters. 250679f0be8dSAneesh Kumar K.V * Delalloc needs accurate free block accounting. So switch 250779f0be8dSAneesh Kumar K.V * to non-delalloc when we are near the error range. 250879f0be8dSAneesh Kumar K.V */ 250957042651STheodore Ts'o free_blocks = EXT4_C2B(sbi, 251057042651STheodore Ts'o percpu_counter_read_positive(&sbi->s_freeclusters_counter)); 251157042651STheodore Ts'o dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 251200d4e736STheodore Ts'o /* 251300d4e736STheodore Ts'o * Start pushing delalloc when 1/2 of free blocks are dirty. 251400d4e736STheodore Ts'o */ 251500d4e736STheodore Ts'o if (dirty_blocks && (free_blocks < 2 * dirty_blocks) && 251600d4e736STheodore Ts'o !writeback_in_progress(sb->s_bdi) && 251700d4e736STheodore Ts'o down_read_trylock(&sb->s_umount)) { 251800d4e736STheodore Ts'o writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE); 251900d4e736STheodore Ts'o up_read(&sb->s_umount); 252000d4e736STheodore Ts'o } 252100d4e736STheodore Ts'o 252279f0be8dSAneesh Kumar K.V if (2 * free_blocks < 3 * dirty_blocks || 2523df55c99dSTheodore Ts'o free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) { 252479f0be8dSAneesh Kumar K.V /* 2525c8afb446SEric Sandeen * the free block count is less than 150% of the dirty blocks, 2526c8afb446SEric Sandeen * or the free block count is less than the watermark 252779f0be8dSAneesh Kumar K.V */ 252879f0be8dSAneesh Kumar K.V return 1; 252979f0be8dSAneesh Kumar K.V } 253079f0be8dSAneesh Kumar K.V return 0; 253179f0be8dSAneesh Kumar K.V } 253279f0be8dSAneesh Kumar K.V 253364769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 253464769240SAlex Tomas loff_t pos, unsigned len, unsigned flags, 253564769240SAlex Tomas struct page **pagep, void **fsdata) 253664769240SAlex Tomas { 253772b8ab9dSEric Sandeen int ret, retries = 0; 253864769240SAlex Tomas struct page *page; 253964769240SAlex Tomas pgoff_t index; 254064769240SAlex Tomas struct inode *inode = mapping->host; 254164769240SAlex Tomas handle_t *handle; 254264769240SAlex Tomas 254364769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT; 254479f0be8dSAneesh Kumar K.V 254579f0be8dSAneesh Kumar K.V if (ext4_nonda_switch(inode->i_sb)) { 254679f0be8dSAneesh Kumar K.V *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 254779f0be8dSAneesh Kumar K.V return ext4_write_begin(file, mapping, pos, 254879f0be8dSAneesh Kumar K.V len, flags, pagep, fsdata); 254979f0be8dSAneesh Kumar K.V } 255079f0be8dSAneesh Kumar K.V *fsdata = (void *)0; 25519bffad1eSTheodore Ts'o trace_ext4_da_write_begin(inode, pos, len, flags); 2552*9c3569b5STao Ma 2553*9c3569b5STao Ma if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 2554*9c3569b5STao Ma ret = ext4_da_write_inline_data_begin(mapping, inode, 2555*9c3569b5STao Ma pos, len, flags, 2556*9c3569b5STao Ma pagep, fsdata); 2557*9c3569b5STao Ma if (ret < 0) 2558*9c3569b5STao Ma goto out; 2559*9c3569b5STao Ma if (ret == 1) { 2560*9c3569b5STao Ma ret = 0; 2561*9c3569b5STao Ma goto out; 2562*9c3569b5STao Ma } 2563*9c3569b5STao Ma } 2564*9c3569b5STao Ma 2565d2a17637SMingming Cao retry: 256664769240SAlex Tomas /* 256764769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update 256864769240SAlex Tomas * if there is delayed block allocation.
But we still need 256964769240SAlex Tomas * to journal the i_disksize update for writes to the end 257064769240SAlex Tomas * of the file which land in an already mapped buffer. 257164769240SAlex Tomas */ 257264769240SAlex Tomas handle = ext4_journal_start(inode, 1); 257364769240SAlex Tomas if (IS_ERR(handle)) { 257464769240SAlex Tomas ret = PTR_ERR(handle); 257564769240SAlex Tomas goto out; 257664769240SAlex Tomas } 2577ebd3610bSJan Kara /* We cannot recurse into the filesystem as the transaction is already 2578ebd3610bSJan Kara * started */ 2579ebd3610bSJan Kara flags |= AOP_FLAG_NOFS; 258064769240SAlex Tomas 258154566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 2582d5a0d4f7SEric Sandeen if (!page) { 2583d5a0d4f7SEric Sandeen ext4_journal_stop(handle); 2584d5a0d4f7SEric Sandeen ret = -ENOMEM; 2585d5a0d4f7SEric Sandeen goto out; 2586d5a0d4f7SEric Sandeen } 258764769240SAlex Tomas *pagep = page; 258864769240SAlex Tomas 25896e1db88dSChristoph Hellwig ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 259064769240SAlex Tomas if (ret < 0) { 259164769240SAlex Tomas unlock_page(page); 259264769240SAlex Tomas ext4_journal_stop(handle); 259364769240SAlex Tomas page_cache_release(page); 2594ae4d5372SAneesh Kumar K.V /* 2595ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks 2596ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need 2597ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex. 2598ae4d5372SAneesh Kumar K.V */ 2599ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size) 2600b9a4207dSJan Kara ext4_truncate_failed_write(inode); 260164769240SAlex Tomas } 260264769240SAlex Tomas 2603d2a17637SMingming Cao if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 2604d2a17637SMingming Cao goto retry; 260564769240SAlex Tomas out: 260664769240SAlex Tomas return ret; 260764769240SAlex Tomas } 260864769240SAlex Tomas 2609632eaeabSMingming Cao /* 2610632eaeabSMingming Cao * Check if we should update i_disksize 2611632eaeabSMingming Cao * when we write to the end of the file but don't require block allocation 2612632eaeabSMingming Cao */ 2613632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page, 2614632eaeabSMingming Cao unsigned long offset) 2615632eaeabSMingming Cao { 2616632eaeabSMingming Cao struct buffer_head *bh; 2617632eaeabSMingming Cao struct inode *inode = page->mapping->host; 2618632eaeabSMingming Cao unsigned int idx; 2619632eaeabSMingming Cao int i; 2620632eaeabSMingming Cao 2621632eaeabSMingming Cao bh = page_buffers(page); 2622632eaeabSMingming Cao idx = offset >> inode->i_blkbits; 2623632eaeabSMingming Cao 2624632eaeabSMingming Cao for (i = 0; i < idx; i++) 2625632eaeabSMingming Cao bh = bh->b_this_page; 2626632eaeabSMingming Cao 262729fa89d0SAneesh Kumar K.V if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 2628632eaeabSMingming Cao return 0; 2629632eaeabSMingming Cao return 1; 2630632eaeabSMingming Cao } 2631632eaeabSMingming Cao 263264769240SAlex Tomas static int ext4_da_write_end(struct file *file, 263364769240SAlex Tomas struct address_space *mapping, 263464769240SAlex Tomas loff_t pos, unsigned len, unsigned copied, 263564769240SAlex Tomas struct page *page, void *fsdata) 263664769240SAlex Tomas { 263764769240SAlex Tomas struct inode *inode = mapping->host; 263864769240SAlex Tomas int ret = 0, ret2; 263964769240SAlex Tomas handle_t *handle = ext4_journal_current_handle(); 264064769240SAlex Tomas loff_t new_i_size;
2641632eaeabSMingming Cao unsigned long start, end; 264279f0be8dSAneesh Kumar K.V int write_mode = (int)(unsigned long)fsdata; 264379f0be8dSAneesh Kumar K.V 264479f0be8dSAneesh Kumar K.V if (write_mode == FALL_BACK_TO_NONDELALLOC) { 26453d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) { 26463d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE: 264779f0be8dSAneesh Kumar K.V return ext4_ordered_write_end(file, mapping, pos, 264879f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 26493d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE: 265079f0be8dSAneesh Kumar K.V return ext4_writeback_write_end(file, mapping, pos, 265179f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 26523d2b1582SLukas Czerner default: 265379f0be8dSAneesh Kumar K.V BUG(); 265479f0be8dSAneesh Kumar K.V } 265579f0be8dSAneesh Kumar K.V } 2656632eaeabSMingming Cao 26579bffad1eSTheodore Ts'o trace_ext4_da_write_end(inode, pos, len, copied); 2658632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1); 2659632eaeabSMingming Cao end = start + copied - 1; 266064769240SAlex Tomas 266164769240SAlex Tomas /* 266264769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size 266364769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty 266464769240SAlex Tomas * into that. 266564769240SAlex Tomas */ 266664769240SAlex Tomas new_i_size = pos + copied; 2667ea51d132SAndrea Arcangeli if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 2668*9c3569b5STao Ma if (ext4_has_inline_data(inode) || 2669*9c3569b5STao Ma ext4_da_should_update_i_disksize(page, end)) { 2670632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem); 2671f3b59291STheodore Ts'o if (new_i_size > EXT4_I(inode)->i_disksize) 267264769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size; 2673632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem); 2674cf17fea6SAneesh Kumar K.V /* We need to mark the inode dirty even if 2675cf17fea6SAneesh Kumar K.V * new_i_size is less than inode->i_size 2676cf17fea6SAneesh Kumar K.V * but greater than i_disksize. (hint: delalloc) 2677cf17fea6SAneesh Kumar K.V */ 2678cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 2679632eaeabSMingming Cao } 2680632eaeabSMingming Cao } 2681*9c3569b5STao Ma 2682*9c3569b5STao Ma if (write_mode != CONVERT_INLINE_DATA && 2683*9c3569b5STao Ma ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 2684*9c3569b5STao Ma ext4_has_inline_data(inode)) 2685*9c3569b5STao Ma ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, 2686*9c3569b5STao Ma page); 2687*9c3569b5STao Ma else 268864769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied, 268964769240SAlex Tomas page, fsdata); 2690*9c3569b5STao Ma 269164769240SAlex Tomas copied = ret2; 269264769240SAlex Tomas if (ret2 < 0) 269364769240SAlex Tomas ret = ret2; 269464769240SAlex Tomas ret2 = ext4_journal_stop(handle); 269564769240SAlex Tomas if (!ret) 269664769240SAlex Tomas ret = ret2; 269764769240SAlex Tomas 269864769240SAlex Tomas return ret ?
ret : copied; 269964769240SAlex Tomas } 270064769240SAlex Tomas 270164769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 270264769240SAlex Tomas { 270364769240SAlex Tomas /* 270464769240SAlex Tomas * Drop reserved blocks 270564769240SAlex Tomas */ 270664769240SAlex Tomas BUG_ON(!PageLocked(page)); 270764769240SAlex Tomas if (!page_has_buffers(page)) 270864769240SAlex Tomas goto out; 270964769240SAlex Tomas 2710d2a17637SMingming Cao ext4_da_page_release_reservation(page, offset); 271164769240SAlex Tomas 271264769240SAlex Tomas out: 271364769240SAlex Tomas ext4_invalidatepage(page, offset); 271464769240SAlex Tomas 271564769240SAlex Tomas return; 271664769240SAlex Tomas } 271764769240SAlex Tomas 2718ccd2506bSTheodore Ts'o /* 2719ccd2506bSTheodore Ts'o * Force all delayed allocation blocks to be allocated for a given inode. 2720ccd2506bSTheodore Ts'o */ 2721ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode) 2722ccd2506bSTheodore Ts'o { 2723fb40ba0dSTheodore Ts'o trace_ext4_alloc_da_blocks(inode); 2724fb40ba0dSTheodore Ts'o 2725ccd2506bSTheodore Ts'o if (!EXT4_I(inode)->i_reserved_data_blocks && 2726ccd2506bSTheodore Ts'o !EXT4_I(inode)->i_reserved_meta_blocks) 2727ccd2506bSTheodore Ts'o return 0; 2728ccd2506bSTheodore Ts'o 2729ccd2506bSTheodore Ts'o /* 2730ccd2506bSTheodore Ts'o * We do something simple for now. The filemap_flush() will 2731ccd2506bSTheodore Ts'o * also start triggering a write of the data blocks, which is 2732ccd2506bSTheodore Ts'o * not strictly speaking necessary (and for users of 2733ccd2506bSTheodore Ts'o * laptop_mode, not even desirable). However, to do otherwise 2734ccd2506bSTheodore Ts'o * would require replicating code paths in: 2735ccd2506bSTheodore Ts'o * 2736ccd2506bSTheodore Ts'o * ext4_da_writepages() -> 2737ccd2506bSTheodore Ts'o * write_cache_pages() ---> (via passed in callback function) 2738ccd2506bSTheodore Ts'o * __mpage_da_writepage() --> 2739ccd2506bSTheodore Ts'o * mpage_add_bh_to_extent() 2740ccd2506bSTheodore Ts'o * mpage_da_map_blocks() 2741ccd2506bSTheodore Ts'o * 2742ccd2506bSTheodore Ts'o * The problem is that write_cache_pages(), located in 2743ccd2506bSTheodore Ts'o * mm/page-writeback.c, marks pages clean in preparation for 2744ccd2506bSTheodore Ts'o * doing I/O, which is not desirable if we're not planning on 2745ccd2506bSTheodore Ts'o * doing I/O at all. 2746ccd2506bSTheodore Ts'o * 2747ccd2506bSTheodore Ts'o * We could call write_cache_pages(), and then redirty all of 2748380cf090SWu Fengguang * the pages by calling redirty_page_for_writepage() but that 2749ccd2506bSTheodore Ts'o * would be ugly in the extreme. So instead we would need to 2750ccd2506bSTheodore Ts'o * replicate parts of the code in the above functions, 275125985edcSLucas De Marchi * simplifying them because we wouldn't actually intend to 2752ccd2506bSTheodore Ts'o * write out the pages, but rather only collect contiguous 2753ccd2506bSTheodore Ts'o * logical block extents, call the multi-block allocator, and 2754ccd2506bSTheodore Ts'o * then update the buffer heads with the block allocations. 2755ccd2506bSTheodore Ts'o * 2756ccd2506bSTheodore Ts'o * For now, though, we'll cheat by calling filemap_flush(), 2757ccd2506bSTheodore Ts'o * which will map the blocks, and start the I/O, but not 2758ccd2506bSTheodore Ts'o * actually wait for the I/O to complete. 
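 * (Note that filemap_flush() only starts the I/O; a caller that needs * the allocations to be durable on disk must still wait for the I/O, * e.g. via filemap_write_and_wait().)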
2759ccd2506bSTheodore Ts'o */ 2760ccd2506bSTheodore Ts'o return filemap_flush(inode->i_mapping); 2761ccd2506bSTheodore Ts'o } 276264769240SAlex Tomas 276364769240SAlex Tomas /* 2764ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by 2765ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data. 2766ac27a0ecSDave Kleikamp * 2767ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the 2768617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling 2769ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the 2770ac27a0ecSDave Kleikamp * data getting swapped to that swapfile suddenly gets overwritten by 2771ac27a0ecSDave Kleikamp * the original zeros written out previously to the journal and 2772ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache. 2773ac27a0ecSDave Kleikamp * 2774ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file, 2775ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache. 2776ac27a0ecSDave Kleikamp */ 2777617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2778ac27a0ecSDave Kleikamp { 2779ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 2780ac27a0ecSDave Kleikamp journal_t *journal; 2781ac27a0ecSDave Kleikamp int err; 2782ac27a0ecSDave Kleikamp 278346c7f254STao Ma /* 278446c7f254STao Ma * We can get here for an inline file via the FIBMAP ioctl 278546c7f254STao Ma */ 278646c7f254STao Ma if (ext4_has_inline_data(inode)) 278746c7f254STao Ma return 0; 278846c7f254STao Ma 278964769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 279064769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) { 279164769240SAlex Tomas /* 279264769240SAlex Tomas * With delalloc we want to sync the file 279364769240SAlex Tomas * so that we can make sure we allocate 279464769240SAlex Tomas * blocks for the file 279564769240SAlex Tomas */ 279664769240SAlex Tomas filemap_write_and_wait(mapping); 279764769240SAlex Tomas } 279864769240SAlex Tomas 279919f5fb7aSTheodore Ts'o if (EXT4_JOURNAL(inode) && 280019f5fb7aSTheodore Ts'o ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 2801ac27a0ecSDave Kleikamp /* 2802ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of 2803ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare: 2804ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file 2805ac27a0ecSDave Kleikamp * do we expect this to happen. 2806ac27a0ecSDave Kleikamp * 2807ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not 2808ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be 2809ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at 2810ac27a0ecSDave Kleikamp * will.) 2811ac27a0ecSDave Kleikamp * 2812617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than 2813ac27a0ecSDave Kleikamp * regular files. If somebody wants to bmap a directory 2814ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer 2815ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve 2816ac27a0ecSDave Kleikamp * everything they get.
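 * (Hence the sledgehammer below: jbd2_journal_lock_updates() blocks * new handles while jbd2_journal_flush() commits and checkpoints the * whole journal, so the block number we return refers to stable, * on-disk data.)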
2817ac27a0ecSDave Kleikamp */ 2818ac27a0ecSDave Kleikamp 281919f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 2820617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2821dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2822dab291afSMingming Cao err = jbd2_journal_flush(journal); 2823dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2824ac27a0ecSDave Kleikamp 2825ac27a0ecSDave Kleikamp if (err) 2826ac27a0ecSDave Kleikamp return 0; 2827ac27a0ecSDave Kleikamp } 2828ac27a0ecSDave Kleikamp 2829617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block); 2830ac27a0ecSDave Kleikamp } 2831ac27a0ecSDave Kleikamp 2832617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page) 2833ac27a0ecSDave Kleikamp { 283446c7f254STao Ma int ret = -EAGAIN; 283546c7f254STao Ma struct inode *inode = page->mapping->host; 283646c7f254STao Ma 28370562e0baSJiaying Zhang trace_ext4_readpage(page); 283846c7f254STao Ma 283946c7f254STao Ma if (ext4_has_inline_data(inode)) 284046c7f254STao Ma ret = ext4_readpage_inline(inode, page); 284146c7f254STao Ma 284246c7f254STao Ma if (ret == -EAGAIN) 2843617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block); 284446c7f254STao Ma 284546c7f254STao Ma return ret; 2846ac27a0ecSDave Kleikamp } 2847ac27a0ecSDave Kleikamp 2848ac27a0ecSDave Kleikamp static int 2849617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping, 2850ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages) 2851ac27a0ecSDave Kleikamp { 285246c7f254STao Ma struct inode *inode = mapping->host; 285346c7f254STao Ma 285446c7f254STao Ma /* If the file has inline data, no need to do readpages. */ 285546c7f254STao Ma if (ext4_has_inline_data(inode)) 285646c7f254STao Ma return 0; 285746c7f254STao Ma 2858617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2859ac27a0ecSDave Kleikamp } 2860ac27a0ecSDave Kleikamp 2861744692dcSJiaying Zhang static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset) 2862744692dcSJiaying Zhang { 2863744692dcSJiaying Zhang struct buffer_head *head, *bh; 2864744692dcSJiaying Zhang unsigned int curr_off = 0; 2865744692dcSJiaying Zhang 2866744692dcSJiaying Zhang if (!page_has_buffers(page)) 2867744692dcSJiaying Zhang return; 2868744692dcSJiaying Zhang head = bh = page_buffers(page); 2869744692dcSJiaying Zhang do { 2870744692dcSJiaying Zhang if (offset <= curr_off && test_clear_buffer_uninit(bh) 2871744692dcSJiaying Zhang && bh->b_private) { 2872744692dcSJiaying Zhang ext4_free_io_end(bh->b_private); 2873744692dcSJiaying Zhang bh->b_private = NULL; 2874744692dcSJiaying Zhang bh->b_end_io = NULL; 2875744692dcSJiaying Zhang } 2876744692dcSJiaying Zhang curr_off = curr_off + bh->b_size; 2877744692dcSJiaying Zhang bh = bh->b_this_page; 2878744692dcSJiaying Zhang } while (bh != head); 2879744692dcSJiaying Zhang } 2880744692dcSJiaying Zhang 2881617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset) 2882ac27a0ecSDave Kleikamp { 2883617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2884ac27a0ecSDave Kleikamp 28850562e0baSJiaying Zhang trace_ext4_invalidatepage(page, offset); 28860562e0baSJiaying Zhang 2887ac27a0ecSDave Kleikamp /* 2888744692dcSJiaying Zhang * free any io_end structure allocated for buffers to be discarded 2889744692dcSJiaying Zhang */ 2890744692dcSJiaying Zhang if (ext4_should_dioread_nolock(page->mapping->host)) 2891744692dcSJiaying 
Zhang ext4_invalidatepage_free_endio(page, offset); 2892744692dcSJiaying Zhang /* 2893ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying 2894ac27a0ecSDave Kleikamp */ 2895ac27a0ecSDave Kleikamp if (offset == 0) 2896ac27a0ecSDave Kleikamp ClearPageChecked(page); 2897ac27a0ecSDave Kleikamp 28980390131bSFrank Mayhar if (journal) 2899dab291afSMingming Cao jbd2_journal_invalidatepage(journal, page, offset); 29000390131bSFrank Mayhar else 29010390131bSFrank Mayhar block_invalidatepage(page, offset); 2902ac27a0ecSDave Kleikamp } 2903ac27a0ecSDave Kleikamp 2904617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait) 2905ac27a0ecSDave Kleikamp { 2906617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2907ac27a0ecSDave Kleikamp 29080562e0baSJiaying Zhang trace_ext4_releasepage(page); 29090562e0baSJiaying Zhang 2910ac27a0ecSDave Kleikamp WARN_ON(PageChecked(page)); 2911ac27a0ecSDave Kleikamp if (!page_has_buffers(page)) 2912ac27a0ecSDave Kleikamp return 0; 29130390131bSFrank Mayhar if (journal) 2914dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait); 29150390131bSFrank Mayhar else 29160390131bSFrank Mayhar return try_to_free_buffers(page); 2917ac27a0ecSDave Kleikamp } 2918ac27a0ecSDave Kleikamp 2919ac27a0ecSDave Kleikamp /* 29202ed88685STheodore Ts'o * ext4_get_block used when preparing for a DIO write or buffer write. 29212ed88685STheodore Ts'o * We allocate an uninitialized extent if blocks haven't been allocated. 29222ed88685STheodore Ts'o * The extent will be converted to initialized after the IO is complete. 29232ed88685STheodore Ts'o */ 2924f19d5870STao Ma int ext4_get_block_write(struct inode *inode, sector_t iblock, 29254c0425ffSMingming Cao struct buffer_head *bh_result, int create) 29264c0425ffSMingming Cao { 2927c7064ef1SJiaying Zhang ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", 29288d5d02e6SMingming Cao inode->i_ino, create); 29292ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result, 29302ed88685STheodore Ts'o EXT4_GET_BLOCKS_IO_CREATE_EXT); 29314c0425ffSMingming Cao } 29324c0425ffSMingming Cao 2933729f52c6SZheng Liu static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock, 29348b0f165fSAnatol Pomozov struct buffer_head *bh_result, int create) 2935729f52c6SZheng Liu { 29368b0f165fSAnatol Pomozov ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n", 29378b0f165fSAnatol Pomozov inode->i_ino, create); 29388b0f165fSAnatol Pomozov return _ext4_get_block(inode, iblock, bh_result, 29398b0f165fSAnatol Pomozov EXT4_GET_BLOCKS_NO_LOCK); 2940729f52c6SZheng Liu } 2941729f52c6SZheng Liu 29424c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 2943552ef802SChristoph Hellwig ssize_t size, void *private, int ret, 2944552ef802SChristoph Hellwig bool is_async) 29454c0425ffSMingming Cao { 294672c5052dSChristoph Hellwig struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; 29474c0425ffSMingming Cao ext4_io_end_t *io_end = iocb->private; 29484c0425ffSMingming Cao 29494b70df18SMingming /* if not async direct IO or a dio with a 0 byte write, just return */ 29504b70df18SMingming if (!io_end || !size) 2951552ef802SChristoph Hellwig goto out; 29524b70df18SMingming 29538d5d02e6SMingming Cao ext_debug("ext4_end_io_dio(): io_end 0x%p " 2954ace36ad4SJoe Perches "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 29558d5d02e6SMingming Cao iocb->private, io_end->inode->i_ino, iocb, offset,
29568d5d02e6SMingming Cao size); 29578d5d02e6SMingming Cao 2958b5a7e970STheodore Ts'o iocb->private = NULL; 2959b5a7e970STheodore Ts'o 29608d5d02e6SMingming Cao /* if this is not aio dio with unwritten extents, just free the io and return */ 2961bd2d0210STheodore Ts'o if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 29628d5d02e6SMingming Cao ext4_free_io_end(io_end); 29635b3ff237Sjiayingz@google.com (Jiaying Zhang) out: 29645b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) 29655b3ff237Sjiayingz@google.com (Jiaying Zhang) aio_complete(iocb, ret, 0); 296672c5052dSChristoph Hellwig inode_dio_done(inode); 29675b3ff237Sjiayingz@google.com (Jiaying Zhang) return; 29688d5d02e6SMingming Cao } 29698d5d02e6SMingming Cao 29704c0425ffSMingming Cao io_end->offset = offset; 29714c0425ffSMingming Cao io_end->size = size; 29725b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) { 29735b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->iocb = iocb; 29745b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->result = ret; 29755b3ff237Sjiayingz@google.com (Jiaying Zhang) } 29764c0425ffSMingming Cao 297728a535f9SDmitry Monakhov ext4_add_complete_io(io_end); 29784c0425ffSMingming Cao } 2979c7064ef1SJiaying Zhang 2980744692dcSJiaying Zhang static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) 2981744692dcSJiaying Zhang { 2982744692dcSJiaying Zhang ext4_io_end_t *io_end = bh->b_private; 2983744692dcSJiaying Zhang struct inode *inode; 2984744692dcSJiaying Zhang 2985744692dcSJiaying Zhang if (!test_clear_buffer_uninit(bh) || !io_end) 2986744692dcSJiaying Zhang goto out; 2987744692dcSJiaying Zhang 2988744692dcSJiaying Zhang if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) { 298992b97816STheodore Ts'o ext4_msg(io_end->inode->i_sb, KERN_INFO, 299092b97816STheodore Ts'o "sb umounted, discard end_io request for inode %lu", 2991744692dcSJiaying Zhang io_end->inode->i_ino); 2992744692dcSJiaying Zhang ext4_free_io_end(io_end); 2993744692dcSJiaying Zhang goto out; 2994744692dcSJiaying Zhang } 2995744692dcSJiaying Zhang 299632c80b32STao Ma /* 299732c80b32STao Ma * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now, 299832c80b32STao Ma * but being more careful is always safe against future changes.
299932c80b32STao Ma */ 3000744692dcSJiaying Zhang inode = io_end->inode; 30010edeb71dSTao Ma ext4_set_io_unwritten_flag(inode, io_end); 300228a535f9SDmitry Monakhov ext4_add_complete_io(io_end); 3003744692dcSJiaying Zhang out: 3004744692dcSJiaying Zhang bh->b_private = NULL; 3005744692dcSJiaying Zhang bh->b_end_io = NULL; 3006744692dcSJiaying Zhang clear_buffer_uninit(bh); 3007744692dcSJiaying Zhang end_buffer_async_write(bh, uptodate); 3008744692dcSJiaying Zhang } 3009744692dcSJiaying Zhang 3010744692dcSJiaying Zhang static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) 3011744692dcSJiaying Zhang { 3012744692dcSJiaying Zhang ext4_io_end_t *io_end; 3013744692dcSJiaying Zhang struct page *page = bh->b_page; 3014744692dcSJiaying Zhang loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT; 3015744692dcSJiaying Zhang size_t size = bh->b_size; 3016744692dcSJiaying Zhang 3017744692dcSJiaying Zhang retry: 3018744692dcSJiaying Zhang io_end = ext4_init_io_end(inode, GFP_ATOMIC); 3019744692dcSJiaying Zhang if (!io_end) { 30206db26ffcSAndrew Morton pr_warn_ratelimited("%s: allocation fail\n", __func__); 3021744692dcSJiaying Zhang schedule(); 3022744692dcSJiaying Zhang goto retry; 3023744692dcSJiaying Zhang } 3024744692dcSJiaying Zhang io_end->offset = offset; 3025744692dcSJiaying Zhang io_end->size = size; 3026744692dcSJiaying Zhang /* 3027744692dcSJiaying Zhang * We need to hold a reference to the page to make sure it 3028744692dcSJiaying Zhang * doesn't get evicted before ext4_end_io_work() has a chance 3029744692dcSJiaying Zhang * to convert the extent from unwritten to written. 3030744692dcSJiaying Zhang */ 3031744692dcSJiaying Zhang io_end->page = page; 3032744692dcSJiaying Zhang get_page(io_end->page); 3033744692dcSJiaying Zhang 3034744692dcSJiaying Zhang bh->b_private = io_end; 3035744692dcSJiaying Zhang bh->b_end_io = ext4_end_io_buffer_write; 3036744692dcSJiaying Zhang return 0; 3037744692dcSJiaying Zhang } 3038744692dcSJiaying Zhang 30394c0425ffSMingming Cao /* 30404c0425ffSMingming Cao * For ext4 extent files, ext4 will do direct-io writes to holes, 30414c0425ffSMingming Cao * preallocated extents, and writes that extend the file, with no need to 30424c0425ffSMingming Cao * fall back to buffered IO. 30434c0425ffSMingming Cao * 3044b595076aSUwe Kleine-König * For holes, we fallocate those blocks, mark them as uninitialized. 304569c499d1STheodore Ts'o * If those blocks were preallocated, we make sure they are split, but 3046b595076aSUwe Kleine-König * still keep the range to write as uninitialized. 30474c0425ffSMingming Cao * 304869c499d1STheodore Ts'o * The unwritten extents will be converted to written when DIO is completed. 30498d5d02e6SMingming Cao * For async direct IO, since the IO may still be pending when we return, we 305025985edcSLucas De Marchi * set up an end_io callback function, which will do the conversion 30518d5d02e6SMingming Cao * when the async direct IO is completed. 30524c0425ffSMingming Cao * 30534c0425ffSMingming Cao * If the O_DIRECT write will extend the file then add this inode to the 30544c0425ffSMingming Cao * orphan list. So recovery will truncate it back to the original size 30554c0425ffSMingming Cao * if the machine crashes during the write.
30564c0425ffSMingming Cao * 30574c0425ffSMingming Cao */ 30584c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, 30594c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 30604c0425ffSMingming Cao unsigned long nr_segs) 30614c0425ffSMingming Cao { 30624c0425ffSMingming Cao struct file *file = iocb->ki_filp; 30634c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 30644c0425ffSMingming Cao ssize_t ret; 30654c0425ffSMingming Cao size_t count = iov_length(iov, nr_segs); 3066729f52c6SZheng Liu int overwrite = 0; 30678b0f165fSAnatol Pomozov get_block_t *get_block_func = NULL; 30688b0f165fSAnatol Pomozov int dio_flags = 0; 306969c499d1STheodore Ts'o loff_t final_size = offset + count; 307069c499d1STheodore Ts'o 307169c499d1STheodore Ts'o /* Use the old path for reads and writes beyond i_size. */ 307269c499d1STheodore Ts'o if (rw != WRITE || final_size > inode->i_size) 307369c499d1STheodore Ts'o return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3074729f52c6SZheng Liu 30754bd809dbSZheng Liu BUG_ON(iocb->private == NULL); 30764bd809dbSZheng Liu 30774bd809dbSZheng Liu /* If we do an overwrite dio, i_mutex locking can be released */ 30784bd809dbSZheng Liu overwrite = *((int *)iocb->private); 30794bd809dbSZheng Liu 30804bd809dbSZheng Liu if (overwrite) { 30811f555cfaSDmitry Monakhov atomic_inc(&inode->i_dio_count); 30824bd809dbSZheng Liu down_read(&EXT4_I(inode)->i_data_sem); 30834bd809dbSZheng Liu mutex_unlock(&inode->i_mutex); 30844bd809dbSZheng Liu } 30854bd809dbSZheng Liu 30864c0425ffSMingming Cao /* 30878d5d02e6SMingming Cao * We could direct write to holes and fallocate. 30888d5d02e6SMingming Cao * 308969c499d1STheodore Ts'o * Allocated blocks to fill the hole are marked as 309069c499d1STheodore Ts'o * uninitialized to prevent a parallel buffered read from exposing 309169c499d1STheodore Ts'o * the stale data before the DIO completes the data IO. 30928d5d02e6SMingming Cao * 309369c499d1STheodore Ts'o * As to previously fallocated extents, ext4 get_block will 309469c499d1STheodore Ts'o * simply mark the buffer mapped but still keep the 309569c499d1STheodore Ts'o * extents uninitialized. 30964c0425ffSMingming Cao * 309769c499d1STheodore Ts'o * For the non-AIO case, we will convert those unwritten extents 30988d5d02e6SMingming Cao * to written after returning from blockdev_direct_IO. 30994c0425ffSMingming Cao * 310069c499d1STheodore Ts'o * For async DIO, the conversion needs to be deferred until the 310169c499d1STheodore Ts'o * IO is completed. The ext4 end_io callback function will be 310269c499d1STheodore Ts'o * called to take care of the conversion work. Here, for the async 310369c499d1STheodore Ts'o * case, we allocate an io_end structure to hook to the iocb.
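 * (The io_end allocated below is freed either by ext4_end_io_dio() * when the IO involves no unwritten extents, or at the end of this * function if the IO was never queued or wrote nothing.)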
31044c0425ffSMingming Cao */ 31058d5d02e6SMingming Cao iocb->private = NULL; 3106f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, NULL); 31078d5d02e6SMingming Cao if (!is_sync_kiocb(iocb)) { 310869c499d1STheodore Ts'o ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS); 31094bd809dbSZheng Liu if (!io_end) { 31104bd809dbSZheng Liu ret = -ENOMEM; 31114bd809dbSZheng Liu goto retake_lock; 31124bd809dbSZheng Liu } 3113266991b1SJeff Moyer io_end->flag |= EXT4_IO_END_DIRECT; 3114266991b1SJeff Moyer iocb->private = io_end; 31158d5d02e6SMingming Cao /* 311669c499d1STheodore Ts'o * we save the io structure for the current async direct 311769c499d1STheodore Ts'o * IO, so that later ext4_map_blocks() can flag in the 311869c499d1STheodore Ts'o * io structure whether there are unwritten extents that 311969c499d1STheodore Ts'o * need to be converted when the IO is completed. 31208d5d02e6SMingming Cao */ 3121f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, io_end); 31228d5d02e6SMingming Cao } 31238d5d02e6SMingming Cao 31248b0f165fSAnatol Pomozov if (overwrite) { 31258b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write_nolock; 31268b0f165fSAnatol Pomozov } else { 31278b0f165fSAnatol Pomozov get_block_func = ext4_get_block_write; 31288b0f165fSAnatol Pomozov dio_flags = DIO_LOCKING; 31298b0f165fSAnatol Pomozov } 3130729f52c6SZheng Liu ret = __blockdev_direct_IO(rw, iocb, inode, 3131729f52c6SZheng Liu inode->i_sb->s_bdev, iov, 3132729f52c6SZheng Liu offset, nr_segs, 31338b0f165fSAnatol Pomozov get_block_func, 3134729f52c6SZheng Liu ext4_end_io_dio, 3135729f52c6SZheng Liu NULL, 31368b0f165fSAnatol Pomozov dio_flags); 31378b0f165fSAnatol Pomozov 31388d5d02e6SMingming Cao if (iocb->private) 3139f45ee3a1SDmitry Monakhov ext4_inode_aio_set(inode, NULL); 31408d5d02e6SMingming Cao /* 314169c499d1STheodore Ts'o * The io_end structure takes a reference to the inode; that 314269c499d1STheodore Ts'o * structure needs to be destroyed and the reference to the 314369c499d1STheodore Ts'o * inode needs to be dropped when the IO is complete, even for a 0 314469c499d1STheodore Ts'o * byte write, or a failure. 31458d5d02e6SMingming Cao * 314669c499d1STheodore Ts'o * In the successful AIO DIO case, the io_end structure will 314769c499d1STheodore Ts'o * be destroyed and the reference to the inode will be dropped 31488d5d02e6SMingming Cao * after the end_io callback function is called. 31498d5d02e6SMingming Cao * 315069c499d1STheodore Ts'o * In the case of a 0 byte write or an error, since the VFS 315169c499d1STheodore Ts'o * direct IO won't invoke the end_io callback function, we 315269c499d1STheodore Ts'o * need to free the end_io structure here.
31538d5d02e6SMingming Cao */ 31548d5d02e6SMingming Cao if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { 31558d5d02e6SMingming Cao ext4_free_io_end(iocb->private); 31568d5d02e6SMingming Cao iocb->private = NULL; 3157729f52c6SZheng Liu } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode, 31585f524950SMingming EXT4_STATE_DIO_UNWRITTEN)) { 3159109f5565SMingming int err; 31608d5d02e6SMingming Cao /* 31618d5d02e6SMingming Cao * For the non-AIO case, since the IO is already 316225985edcSLucas De Marchi * completed, we can do the conversion right here 31638d5d02e6SMingming Cao */ 3164109f5565SMingming err = ext4_convert_unwritten_extents(inode, 31658d5d02e6SMingming Cao offset, ret); 3166109f5565SMingming if (err < 0) 3167109f5565SMingming ret = err; 316819f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3169109f5565SMingming } 31704bd809dbSZheng Liu 31714bd809dbSZheng Liu retake_lock: 31724bd809dbSZheng Liu /* take i_mutex locking again if we did an overwrite dio */ 31734bd809dbSZheng Liu if (overwrite) { 31741f555cfaSDmitry Monakhov inode_dio_done(inode); 31754bd809dbSZheng Liu up_read(&EXT4_I(inode)->i_data_sem); 31764bd809dbSZheng Liu mutex_lock(&inode->i_mutex); 31774bd809dbSZheng Liu } 31784bd809dbSZheng Liu 31794c0425ffSMingming Cao return ret; 31804c0425ffSMingming Cao } 31818d5d02e6SMingming Cao 31824c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 31834c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 31844c0425ffSMingming Cao unsigned long nr_segs) 31854c0425ffSMingming Cao { 31864c0425ffSMingming Cao struct file *file = iocb->ki_filp; 31874c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 31880562e0baSJiaying Zhang ssize_t ret; 31894c0425ffSMingming Cao 319084ebd795STheodore Ts'o /* 319184ebd795STheodore Ts'o * If we are doing data journalling we don't support O_DIRECT 319284ebd795STheodore Ts'o */ 319384ebd795STheodore Ts'o if (ext4_should_journal_data(inode)) 319484ebd795STheodore Ts'o return 0; 319584ebd795STheodore Ts'o 319646c7f254STao Ma /* Let buffer I/O handle the inline data case. */ 319746c7f254STao Ma if (ext4_has_inline_data(inode)) 319846c7f254STao Ma return 0; 319946c7f254STao Ma 32000562e0baSJiaying Zhang trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); 320112e9b892SDmitry Monakhov if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 32020562e0baSJiaying Zhang ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 32030562e0baSJiaying Zhang else 32040562e0baSJiaying Zhang ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 32050562e0baSJiaying Zhang trace_ext4_direct_IO_exit(inode, offset, 32060562e0baSJiaying Zhang iov_length(iov, nr_segs), rw, ret); 32070562e0baSJiaying Zhang return ret; 32084c0425ffSMingming Cao } 32094c0425ffSMingming Cao 3210ac27a0ecSDave Kleikamp /* 3211617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling 3212ac27a0ecSDave Kleikamp * activity, by filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3213ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is 3214ac27a0ecSDave Kleikamp * not necessarily locked. 3215ac27a0ecSDave Kleikamp * 3216ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the 3217ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive".
We cannot just set the buffers dirty 3218ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 3219ac27a0ecSDave Kleikamp * 3220ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 3221ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 3222ac27a0ecSDave Kleikamp */ 3223617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 3224ac27a0ecSDave Kleikamp { 3225ac27a0ecSDave Kleikamp SetPageChecked(page); 3226ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 3227ac27a0ecSDave Kleikamp } 3228ac27a0ecSDave Kleikamp 3229617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = { 3230617ba13bSMingming Cao .readpage = ext4_readpage, 3231617ba13bSMingming Cao .readpages = ext4_readpages, 323243ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3233bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3234bfc1af65SNick Piggin .write_end = ext4_ordered_write_end, 3235617ba13bSMingming Cao .bmap = ext4_bmap, 3236617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3237617ba13bSMingming Cao .releasepage = ext4_releasepage, 3238617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3239ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 32408ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3241aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3242ac27a0ecSDave Kleikamp }; 3243ac27a0ecSDave Kleikamp 3244617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = { 3245617ba13bSMingming Cao .readpage = ext4_readpage, 3246617ba13bSMingming Cao .readpages = ext4_readpages, 324743ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3248bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3249bfc1af65SNick Piggin .write_end = ext4_writeback_write_end, 3250617ba13bSMingming Cao .bmap = ext4_bmap, 3251617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3252617ba13bSMingming Cao .releasepage = ext4_releasepage, 3253617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3254ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 32558ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3256aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3257ac27a0ecSDave Kleikamp }; 3258ac27a0ecSDave Kleikamp 3259617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = { 3260617ba13bSMingming Cao .readpage = ext4_readpage, 3261617ba13bSMingming Cao .readpages = ext4_readpages, 326243ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3263bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3264bfc1af65SNick Piggin .write_end = ext4_journalled_write_end, 3265617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty, 3266617ba13bSMingming Cao .bmap = ext4_bmap, 3267617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3268617ba13bSMingming Cao .releasepage = ext4_releasepage, 326984ebd795STheodore Ts'o .direct_IO = ext4_direct_IO, 32708ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3271aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3272ac27a0ecSDave Kleikamp }; 3273ac27a0ecSDave Kleikamp 327464769240SAlex Tomas static const struct address_space_operations ext4_da_aops = { 327564769240SAlex Tomas .readpage = ext4_readpage, 327664769240SAlex Tomas .readpages = ext4_readpages, 327743ce1d23SAneesh Kumar K.V .writepage = 
ext4_writepage, 327864769240SAlex Tomas .writepages = ext4_da_writepages, 327964769240SAlex Tomas .write_begin = ext4_da_write_begin, 328064769240SAlex Tomas .write_end = ext4_da_write_end, 328164769240SAlex Tomas .bmap = ext4_bmap, 328264769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage, 328364769240SAlex Tomas .releasepage = ext4_releasepage, 328464769240SAlex Tomas .direct_IO = ext4_direct_IO, 328564769240SAlex Tomas .migratepage = buffer_migrate_page, 32868ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3287aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 328864769240SAlex Tomas }; 328964769240SAlex Tomas 3290617ba13bSMingming Cao void ext4_set_aops(struct inode *inode) 3291ac27a0ecSDave Kleikamp { 32923d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) { 32933d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE: 32943d2b1582SLukas Czerner if (test_opt(inode->i_sb, DELALLOC)) 3295cd1aac32SAneesh Kumar K.V inode->i_mapping->a_ops = &ext4_da_aops; 3296ac27a0ecSDave Kleikamp else 32973d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_ordered_aops; 32983d2b1582SLukas Czerner break; 32993d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE: 33003d2b1582SLukas Czerner if (test_opt(inode->i_sb, DELALLOC)) 33013d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_da_aops; 33023d2b1582SLukas Czerner else 33033d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_writeback_aops; 33043d2b1582SLukas Czerner break; 33053d2b1582SLukas Czerner case EXT4_INODE_JOURNAL_DATA_MODE: 3306617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops; 33073d2b1582SLukas Czerner break; 33083d2b1582SLukas Czerner default: 33093d2b1582SLukas Czerner BUG(); 33103d2b1582SLukas Czerner } 3311ac27a0ecSDave Kleikamp } 3312ac27a0ecSDave Kleikamp 33134e96b2dbSAllison Henderson 33144e96b2dbSAllison Henderson /* 33154e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers() 33164e96b2dbSAllison Henderson * Wrapper function for ext4_discard_partial_page_buffers_no_lock. 33174e96b2dbSAllison Henderson * This function finds and locks the page containing the offset 33184e96b2dbSAllison Henderson * "from" and passes it to ext4_discard_partial_page_buffers_no_lock. 33194e96b2dbSAllison Henderson * Calling functions that already have the page locked should call 33204e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock directly. 
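 *
 * (Editorial aside, not in the original source: a hypothetical unlocked
 * caller, with 'offset' and 'len' assumed for illustration, would look like
 *
 *	err = ext4_discard_partial_page_buffers(handle, inode->i_mapping,
 *						offset, len, 0);
 *
 * while a caller already holding the page lock passes its locked page
 * straight to the _no_lock variant with the same offset, length and flags.)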
33214e96b2dbSAllison Henderson */ 33224e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle, 33234e96b2dbSAllison Henderson struct address_space *mapping, loff_t from, 33244e96b2dbSAllison Henderson loff_t length, int flags) 33254e96b2dbSAllison Henderson { 33264e96b2dbSAllison Henderson struct inode *inode = mapping->host; 33274e96b2dbSAllison Henderson struct page *page; 33284e96b2dbSAllison Henderson int err = 0; 33294e96b2dbSAllison Henderson 33304e96b2dbSAllison Henderson page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 33314e96b2dbSAllison Henderson mapping_gfp_mask(mapping) & ~__GFP_FS); 33324e96b2dbSAllison Henderson if (!page) 33335129d05fSYongqiang Yang return -ENOMEM; 33344e96b2dbSAllison Henderson 33354e96b2dbSAllison Henderson err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page, 33364e96b2dbSAllison Henderson from, length, flags); 33374e96b2dbSAllison Henderson 33384e96b2dbSAllison Henderson unlock_page(page); 33394e96b2dbSAllison Henderson page_cache_release(page); 33404e96b2dbSAllison Henderson return err; 33414e96b2dbSAllison Henderson } 33424e96b2dbSAllison Henderson 33434e96b2dbSAllison Henderson /* 33444e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock() 33454e96b2dbSAllison Henderson * Zeros a page range of length 'length' starting from offset 'from'. 33464e96b2dbSAllison Henderson * Buffer heads that correspond to the block aligned regions of the 33474e96b2dbSAllison Henderson * zeroed range will be unmapped. Non-block-aligned regions 33484e96b2dbSAllison Henderson * will have the corresponding buffer head mapped if needed so that 33494e96b2dbSAllison Henderson * that region of the page can be updated with the partial zero out. 33504e96b2dbSAllison Henderson * 33514e96b2dbSAllison Henderson * This function assumes that the page has already been locked. 33524e96b2dbSAllison Henderson * The range to be discarded must be contained within the given page. 33534e96b2dbSAllison Henderson * If the specified range exceeds the end of the page it will be shortened 33544e96b2dbSAllison Henderson * to the end of the page that corresponds to 'from'. This function is 33554e96b2dbSAllison Henderson * appropriate for updating a page and its buffer heads to be unmapped and 33564e96b2dbSAllison Henderson * zeroed for blocks that have been either released, or are going to be 33574e96b2dbSAllison Henderson * released. 33584e96b2dbSAllison Henderson * 33594e96b2dbSAllison Henderson * handle: The journal handle 33604e96b2dbSAllison Henderson * inode: The file's inode 33614e96b2dbSAllison Henderson * page: A locked page that contains the offset "from" 33624907cb7bSAnatol Pomozov * from: The starting byte offset (from the beginning of the file) 33634e96b2dbSAllison Henderson * to begin discarding 33644e96b2dbSAllison Henderson * len: The length of bytes to discard 33654e96b2dbSAllison Henderson * flags: Optional flags that may be used: 33664e96b2dbSAllison Henderson * 33674e96b2dbSAllison Henderson * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED 33684e96b2dbSAllison Henderson * Only zero the regions of the page whose buffer heads 33694e96b2dbSAllison Henderson * have already been unmapped. This flag is appropriate 33704907cb7bSAnatol Pomozov * for updating the contents of a page whose blocks may 33714e96b2dbSAllison Henderson * have already been released, and we only want to zero 33724e96b2dbSAllison Henderson * out the regions that correspond to those released blocks.
33734e96b2dbSAllison Henderson * 33744907cb7bSAnatol Pomozov * Returns zero on success or negative on failure. 33754e96b2dbSAllison Henderson */ 33765f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, 33774e96b2dbSAllison Henderson struct inode *inode, struct page *page, loff_t from, 33784e96b2dbSAllison Henderson loff_t length, int flags) 33794e96b2dbSAllison Henderson { 33804e96b2dbSAllison Henderson ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 33814e96b2dbSAllison Henderson unsigned int offset = from & (PAGE_CACHE_SIZE-1); 33824e96b2dbSAllison Henderson unsigned int blocksize, max, pos; 33834e96b2dbSAllison Henderson ext4_lblk_t iblock; 33844e96b2dbSAllison Henderson struct buffer_head *bh; 33854e96b2dbSAllison Henderson int err = 0; 33864e96b2dbSAllison Henderson 33874e96b2dbSAllison Henderson blocksize = inode->i_sb->s_blocksize; 33884e96b2dbSAllison Henderson max = PAGE_CACHE_SIZE - offset; 33894e96b2dbSAllison Henderson 33904e96b2dbSAllison Henderson if (index != page->index) 33914e96b2dbSAllison Henderson return -EINVAL; 33924e96b2dbSAllison Henderson 33934e96b2dbSAllison Henderson /* 33944e96b2dbSAllison Henderson * correct the length if it does not fall between 33954e96b2dbSAllison Henderson * 'from' and the end of the page 33964e96b2dbSAllison Henderson */ 33974e96b2dbSAllison Henderson if (length > max || length < 0) 33984e96b2dbSAllison Henderson length = max; 33994e96b2dbSAllison Henderson 34004e96b2dbSAllison Henderson iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 34014e96b2dbSAllison Henderson 3402093e6e36SYongqiang Yang if (!page_has_buffers(page)) 34034e96b2dbSAllison Henderson create_empty_buffers(page, blocksize, 0); 34044e96b2dbSAllison Henderson 34054e96b2dbSAllison Henderson /* Find the buffer that contains "offset" */ 34064e96b2dbSAllison Henderson bh = page_buffers(page); 34074e96b2dbSAllison Henderson pos = blocksize; 34084e96b2dbSAllison Henderson while (offset >= pos) { 34094e96b2dbSAllison Henderson bh = bh->b_this_page; 34104e96b2dbSAllison Henderson iblock++; 34114e96b2dbSAllison Henderson pos += blocksize; 34124e96b2dbSAllison Henderson } 34134e96b2dbSAllison Henderson 34144e96b2dbSAllison Henderson pos = offset; 34154e96b2dbSAllison Henderson while (pos < offset + length) { 3416e260daf2SYongqiang Yang unsigned int end_of_block, range_to_discard; 3417e260daf2SYongqiang Yang 34184e96b2dbSAllison Henderson err = 0; 34194e96b2dbSAllison Henderson 34204e96b2dbSAllison Henderson /* The length of space left to zero and unmap */ 34214e96b2dbSAllison Henderson range_to_discard = offset + length - pos; 34224e96b2dbSAllison Henderson 34234e96b2dbSAllison Henderson /* The length of space until the end of the block */ 34244e96b2dbSAllison Henderson end_of_block = blocksize - (pos & (blocksize-1)); 34254e96b2dbSAllison Henderson 34264e96b2dbSAllison Henderson /* 34274e96b2dbSAllison Henderson * Do not unmap or zero past end of block 34284e96b2dbSAllison Henderson * for this buffer head 34294e96b2dbSAllison Henderson */ 34304e96b2dbSAllison Henderson if (range_to_discard > end_of_block) 34314e96b2dbSAllison Henderson range_to_discard = end_of_block; 34324e96b2dbSAllison Henderson 34334e96b2dbSAllison Henderson 34344e96b2dbSAllison Henderson /* 34354e96b2dbSAllison Henderson * Skip this buffer head if we are only zeroing unmapped 34364e96b2dbSAllison Henderson * regions of the page 34374e96b2dbSAllison Henderson */ 34384e96b2dbSAllison Henderson if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
34394e96b2dbSAllison Henderson buffer_mapped(bh)) 34404e96b2dbSAllison Henderson goto next; 34414e96b2dbSAllison Henderson 34424e96b2dbSAllison Henderson /* If the range is block aligned, unmap */ 34434e96b2dbSAllison Henderson if (range_to_discard == blocksize) { 34444e96b2dbSAllison Henderson clear_buffer_dirty(bh); 34454e96b2dbSAllison Henderson bh->b_bdev = NULL; 34464e96b2dbSAllison Henderson clear_buffer_mapped(bh); 34474e96b2dbSAllison Henderson clear_buffer_req(bh); 34484e96b2dbSAllison Henderson clear_buffer_new(bh); 34494e96b2dbSAllison Henderson clear_buffer_delay(bh); 34504e96b2dbSAllison Henderson clear_buffer_unwritten(bh); 34514e96b2dbSAllison Henderson clear_buffer_uptodate(bh); 34524e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 34534e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Buffer discarded"); 34544e96b2dbSAllison Henderson goto next; 34554e96b2dbSAllison Henderson } 34564e96b2dbSAllison Henderson 34574e96b2dbSAllison Henderson /* 34584e96b2dbSAllison Henderson * If this block is not completely contained in the range 34594e96b2dbSAllison Henderson * to be discarded, then it is not going to be released. Because 34604e96b2dbSAllison Henderson * we need to keep this block, we need to make sure this part 34614e96b2dbSAllison Henderson * of the page is uptodate before we modify it by writing 34624e96b2dbSAllison Henderson * partial zeros on it. 34634e96b2dbSAllison Henderson */ 34644e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 34654e96b2dbSAllison Henderson /* 34664e96b2dbSAllison Henderson * Buffer head must be mapped before we can read 34674e96b2dbSAllison Henderson * from the block 34684e96b2dbSAllison Henderson */ 34694e96b2dbSAllison Henderson BUFFER_TRACE(bh, "unmapped"); 34704e96b2dbSAllison Henderson ext4_get_block(inode, iblock, bh, 0); 34714e96b2dbSAllison Henderson /* unmapped? It's a hole - nothing to do */ 34724e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 34734e96b2dbSAllison Henderson BUFFER_TRACE(bh, "still unmapped"); 34744e96b2dbSAllison Henderson goto next; 34754e96b2dbSAllison Henderson } 34764e96b2dbSAllison Henderson } 34774e96b2dbSAllison Henderson 34784e96b2dbSAllison Henderson /* Ok, it's mapped. Make sure it's up-to-date */ 34794e96b2dbSAllison Henderson if (PageUptodate(page)) 34804e96b2dbSAllison Henderson set_buffer_uptodate(bh); 34814e96b2dbSAllison Henderson 34824e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) { 34834e96b2dbSAllison Henderson err = -EIO; 34844e96b2dbSAllison Henderson ll_rw_block(READ, 1, &bh); 34854e96b2dbSAllison Henderson wait_on_buffer(bh); 34864e96b2dbSAllison Henderson /* Uhhuh. Read error.
Complain and punt.*/ 34874e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) 34884e96b2dbSAllison Henderson goto next; 34894e96b2dbSAllison Henderson } 34904e96b2dbSAllison Henderson 34914e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 34924e96b2dbSAllison Henderson BUFFER_TRACE(bh, "get write access"); 34934e96b2dbSAllison Henderson err = ext4_journal_get_write_access(handle, bh); 34944e96b2dbSAllison Henderson if (err) 34954e96b2dbSAllison Henderson goto next; 34964e96b2dbSAllison Henderson } 34974e96b2dbSAllison Henderson 34984e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 34994e96b2dbSAllison Henderson 35004e96b2dbSAllison Henderson err = 0; 35014e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 35024e96b2dbSAllison Henderson err = ext4_handle_dirty_metadata(handle, inode, bh); 3503decbd919STheodore Ts'o } else 35044e96b2dbSAllison Henderson mark_buffer_dirty(bh); 35054e96b2dbSAllison Henderson 35064e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Partial buffer zeroed"); 35074e96b2dbSAllison Henderson next: 35084e96b2dbSAllison Henderson bh = bh->b_this_page; 35094e96b2dbSAllison Henderson iblock++; 35104e96b2dbSAllison Henderson pos += range_to_discard; 35114e96b2dbSAllison Henderson } 35124e96b2dbSAllison Henderson 35134e96b2dbSAllison Henderson return err; 35144e96b2dbSAllison Henderson } 35154e96b2dbSAllison Henderson 351691ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode) 351791ef4cafSDuane Griffin { 351891ef4cafSDuane Griffin if (S_ISREG(inode->i_mode)) 351991ef4cafSDuane Griffin return 1; 352091ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode)) 352191ef4cafSDuane Griffin return 1; 352291ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode)) 352391ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode); 352491ef4cafSDuane Griffin return 0; 352591ef4cafSDuane Griffin } 352691ef4cafSDuane Griffin 3527ac27a0ecSDave Kleikamp /* 3528a4bb6b64SAllison Henderson * ext4_punch_hole: punches a hole in a file by releasing the blocks 3529a4bb6b64SAllison Henderson * associated with the given offset and length 3530a4bb6b64SAllison Henderson * 3531a4bb6b64SAllison Henderson * @inode: File inode 3532a4bb6b64SAllison Henderson * @offset: The offset where the hole will begin 3533a4bb6b64SAllison Henderson * @len: The length of the hole 3534a4bb6b64SAllison Henderson * 35354907cb7bSAnatol Pomozov * Returns: 0 on success or negative on failure 3536a4bb6b64SAllison Henderson */ 3537a4bb6b64SAllison Henderson 3538a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) 3539a4bb6b64SAllison Henderson { 3540a4bb6b64SAllison Henderson struct inode *inode = file->f_path.dentry->d_inode; 3541a4bb6b64SAllison Henderson if (!S_ISREG(inode->i_mode)) 354273355192SAllison Henderson return -EOPNOTSUPP; 3543a4bb6b64SAllison Henderson 3544a4bb6b64SAllison Henderson if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 3545a4bb6b64SAllison Henderson /* TODO: Add support for non-extent hole punching */ 354673355192SAllison Henderson return -EOPNOTSUPP; 3547a4bb6b64SAllison Henderson } 3548a4bb6b64SAllison Henderson 3549bab08ab9STheodore Ts'o if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) { 3550bab08ab9STheodore Ts'o /* TODO: Add support for bigalloc file systems */ 355173355192SAllison Henderson return -EOPNOTSUPP; 3552bab08ab9STheodore Ts'o } 3553bab08ab9STheodore Ts'o 3554a4bb6b64SAllison Henderson return ext4_ext_punch_hole(file, offset, length); 3555a4bb6b64SAllison Henderson } 3556a4bb6b64SAllison Henderson
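/*
 * Editorial sketch, not part of the original file: ext4_punch_hole() is
 * reached from userspace through the fallocate(2) system call, where
 * FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE. A
 * minimal, hedged illustration in userspace C:
 */
#if 0	/* illustrative userspace example, not kernel code */
#define _GNU_SOURCE
#include <fcntl.h>

static int punch_hole(int fd, off_t offset, off_t len)
{
	/* Expect EOPNOTSUPP on indirect-mapped or bigalloc ext4, per above. */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}
#endif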
3557a4bb6b64SAllison Henderson /* 3558617ba13bSMingming Cao * ext4_truncate() 3559ac27a0ecSDave Kleikamp * 3560617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3561617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3562ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 3563ac27a0ecSDave Kleikamp * 356442b2aa86SJustin P. Mattock * As we work through the truncate and commit bits of it to the journal there 3565ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3566ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3567ac27a0ecSDave Kleikamp * 3568ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3569ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3570ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3571ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3572ac27a0ecSDave Kleikamp * left-to-right works OK too). 3573ac27a0ecSDave Kleikamp * 3574ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3575ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3576ac27a0ecSDave Kleikamp * 3577ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3578617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3579ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3580617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3581617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3582ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3583617ba13bSMingming Cao * ext4_truncate() run will find them and release them. 3584ac27a0ecSDave Kleikamp */ 3585617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3586ac27a0ecSDave Kleikamp { 35870562e0baSJiaying Zhang trace_ext4_truncate_enter(inode); 35880562e0baSJiaying Zhang 358991ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3590ac27a0ecSDave Kleikamp return; 3591ac27a0ecSDave Kleikamp 359212e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3593c8d46e41SJiaying Zhang 35945534fb5bSTheodore Ts'o if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 359519f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 35967d8f9f7dSTheodore Ts'o 3597ff9893dcSAmir Goldstein if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3598cf108bcaSJan Kara ext4_ext_truncate(inode); 3599ff9893dcSAmir Goldstein else 3600ff9893dcSAmir Goldstein ext4_ind_truncate(inode); 3601a86c6181SAlex Tomas 36020562e0baSJiaying Zhang trace_ext4_truncate_exit(inode); 3603ac27a0ecSDave Kleikamp } 3604ac27a0ecSDave Kleikamp 3605ac27a0ecSDave Kleikamp /* 3606617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3607ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3608ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3609ac27a0ecSDave Kleikamp * inode. 
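 *
 * (Editorial aside, not in the original comment: the location worked out
 * below is plain integer arithmetic,
 *
 *	group  = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 *	index  = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
 *	block  = ext4_inode_table(sb, gdp) + index / inodes_per_block;
 *	offset = (index % inodes_per_block) * EXT4_INODE_SIZE(sb);
 *
 * so with, say, 32 inodes per group and 16 per block, inode 40 maps to
 * group 1, index 7, the first inode-table block, at byte 7 * inode_size.)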
3610ac27a0ecSDave Kleikamp */ 3611617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3612617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3613ac27a0ecSDave Kleikamp { 3614240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3615ac27a0ecSDave Kleikamp struct buffer_head *bh; 3616240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3617240799cdSTheodore Ts'o ext4_fsblk_t block; 3618240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3619ac27a0ecSDave Kleikamp 36203a06d778SAneesh Kumar K.V iloc->bh = NULL; 3621240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3622ac27a0ecSDave Kleikamp return -EIO; 3623ac27a0ecSDave Kleikamp 3624240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3625240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3626240799cdSTheodore Ts'o if (!gdp) 3627240799cdSTheodore Ts'o return -EIO; 3628240799cdSTheodore Ts'o 3629240799cdSTheodore Ts'o /* 3630240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 3631240799cdSTheodore Ts'o */ 363200d09882STao Ma inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 3633240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 3634240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 3635240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3636240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3637240799cdSTheodore Ts'o 3638240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 3639ac27a0ecSDave Kleikamp if (!bh) { 3640c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3641c398eda0STheodore Ts'o "unable to read itable block"); 3642ac27a0ecSDave Kleikamp return -EIO; 3643ac27a0ecSDave Kleikamp } 3644ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3645ac27a0ecSDave Kleikamp lock_buffer(bh); 36469c83a923SHidehiro Kawai 36479c83a923SHidehiro Kawai /* 36489c83a923SHidehiro Kawai * If the buffer has the write error flag, we have failed 36499c83a923SHidehiro Kawai * to write out another inode in the same block. In this 36509c83a923SHidehiro Kawai * case, we don't have to read the block because we may 36519c83a923SHidehiro Kawai * read the old inode data successfully. 36529c83a923SHidehiro Kawai */ 36539c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 36549c83a923SHidehiro Kawai set_buffer_uptodate(bh); 36559c83a923SHidehiro Kawai 3656ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3657ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3658ac27a0ecSDave Kleikamp unlock_buffer(bh); 3659ac27a0ecSDave Kleikamp goto has_buffer; 3660ac27a0ecSDave Kleikamp } 3661ac27a0ecSDave Kleikamp 3662ac27a0ecSDave Kleikamp /* 3663ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3664ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3665ac27a0ecSDave Kleikamp * block. 3666ac27a0ecSDave Kleikamp */ 3667ac27a0ecSDave Kleikamp if (in_mem) { 3668ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3669240799cdSTheodore Ts'o int i, start; 3670ac27a0ecSDave Kleikamp 3671240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 3672ac27a0ecSDave Kleikamp 3673ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? 
*/ 3674240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3675ac27a0ecSDave Kleikamp if (!bitmap_bh) 3676ac27a0ecSDave Kleikamp goto make_io; 3677ac27a0ecSDave Kleikamp 3678ac27a0ecSDave Kleikamp /* 3679ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3680ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3681ac27a0ecSDave Kleikamp * of one, so skip it. 3682ac27a0ecSDave Kleikamp */ 3683ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3684ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3685ac27a0ecSDave Kleikamp goto make_io; 3686ac27a0ecSDave Kleikamp } 3687240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 3688ac27a0ecSDave Kleikamp if (i == inode_offset) 3689ac27a0ecSDave Kleikamp continue; 3690617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3691ac27a0ecSDave Kleikamp break; 3692ac27a0ecSDave Kleikamp } 3693ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3694240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 3695ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3696ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3697ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3698ac27a0ecSDave Kleikamp unlock_buffer(bh); 3699ac27a0ecSDave Kleikamp goto has_buffer; 3700ac27a0ecSDave Kleikamp } 3701ac27a0ecSDave Kleikamp } 3702ac27a0ecSDave Kleikamp 3703ac27a0ecSDave Kleikamp make_io: 3704ac27a0ecSDave Kleikamp /* 3705240799cdSTheodore Ts'o * If we need to do any I/O, try to pre-readahead extra 3706240799cdSTheodore Ts'o * blocks from the inode table. 3707240799cdSTheodore Ts'o */ 3708240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 3709240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 3710240799cdSTheodore Ts'o unsigned num; 3711240799cdSTheodore Ts'o 3712240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 3713b713a5ecSTheodore Ts'o /* s_inode_readahead_blks is always a power of 2 */ 3714240799cdSTheodore Ts'o b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 3715240799cdSTheodore Ts'o if (table > b) 3716240799cdSTheodore Ts'o b = table; 3717240799cdSTheodore Ts'o end = b + EXT4_SB(sb)->s_inode_readahead_blks; 3718240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 3719feb0ab32SDarrick J. Wong if (ext4_has_group_desc_csum(sb)) 3720560671a0SAneesh Kumar K.V num -= ext4_itable_unused_count(sb, gdp); 3721240799cdSTheodore Ts'o table += num / inodes_per_block; 3722240799cdSTheodore Ts'o if (end > table) 3723240799cdSTheodore Ts'o end = table; 3724240799cdSTheodore Ts'o while (b <= end) 3725240799cdSTheodore Ts'o sb_breadahead(sb, b++); 3726240799cdSTheodore Ts'o } 3727240799cdSTheodore Ts'o 3728240799cdSTheodore Ts'o /* 3729ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 3730ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 3731ac27a0ecSDave Kleikamp * Read the block from disk. 
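 *
 * (Editorial aside, not in the original comment: because
 * s_inode_readahead_blks is a power of two, the readahead window above
 * is computed as b = block & ~(ra_blks - 1), the aligned window that
 * contains 'block'; e.g. ra_blks = 32 and block = 101 give b = 96. The
 * window is then clamped so it never starts before the inode table nor
 * runs past the blocks that actually hold in-use inodes.)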
3732ac27a0ecSDave Kleikamp */ 37330562e0baSJiaying Zhang trace_ext4_load_inode(inode); 3734ac27a0ecSDave Kleikamp get_bh(bh); 3735ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 373665299a3bSChristoph Hellwig submit_bh(READ | REQ_META | REQ_PRIO, bh); 3737ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3738ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3739c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3740c398eda0STheodore Ts'o "unable to read itable block"); 3741ac27a0ecSDave Kleikamp brelse(bh); 3742ac27a0ecSDave Kleikamp return -EIO; 3743ac27a0ecSDave Kleikamp } 3744ac27a0ecSDave Kleikamp } 3745ac27a0ecSDave Kleikamp has_buffer: 3746ac27a0ecSDave Kleikamp iloc->bh = bh; 3747ac27a0ecSDave Kleikamp return 0; 3748ac27a0ecSDave Kleikamp } 3749ac27a0ecSDave Kleikamp 3750617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 3751ac27a0ecSDave Kleikamp { 3752ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. */ 3753617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 375419f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 3755ac27a0ecSDave Kleikamp } 3756ac27a0ecSDave Kleikamp 3757617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 3758ac27a0ecSDave Kleikamp { 3759617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 3760ac27a0ecSDave Kleikamp 3761ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 3762617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 3763ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 3764617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 3765ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 3766617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 3767ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 3768617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 3769ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 3770617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 3771ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 3772ac27a0ecSDave Kleikamp } 3773ac27a0ecSDave Kleikamp 3774ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 3775ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 3776ff9ddf7eSJan Kara { 377784a8dce2SDmitry Monakhov unsigned int vfs_fl; 377884a8dce2SDmitry Monakhov unsigned long old_fl, new_fl; 3779ff9ddf7eSJan Kara 378084a8dce2SDmitry Monakhov do { 378184a8dce2SDmitry Monakhov vfs_fl = ei->vfs_inode.i_flags; 378284a8dce2SDmitry Monakhov old_fl = ei->i_flags; 378384a8dce2SDmitry Monakhov new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 378484a8dce2SDmitry Monakhov EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| 378584a8dce2SDmitry Monakhov EXT4_DIRSYNC_FL); 378684a8dce2SDmitry Monakhov if (vfs_fl & S_SYNC) 378784a8dce2SDmitry Monakhov new_fl |= EXT4_SYNC_FL; 378884a8dce2SDmitry Monakhov if (vfs_fl & S_APPEND) 378984a8dce2SDmitry Monakhov new_fl |= EXT4_APPEND_FL; 379084a8dce2SDmitry Monakhov if (vfs_fl & S_IMMUTABLE) 379184a8dce2SDmitry Monakhov new_fl |= EXT4_IMMUTABLE_FL; 379284a8dce2SDmitry Monakhov if (vfs_fl & S_NOATIME) 379384a8dce2SDmitry Monakhov new_fl |= EXT4_NOATIME_FL; 379484a8dce2SDmitry Monakhov if (vfs_fl & S_DIRSYNC) 379584a8dce2SDmitry Monakhov new_fl |= EXT4_DIRSYNC_FL; 379684a8dce2SDmitry Monakhov } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); 3797ff9ddf7eSJan Kara } 3798de9a55b8STheodore Ts'o 37990fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 
38000fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 38010fc1b451SAneesh Kumar K.V { 38020fc1b451SAneesh Kumar K.V blkcnt_t i_blocks ; 38038180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 38048180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 38050fc1b451SAneesh Kumar K.V 38060fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 38070fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 38080fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 38090fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 38100fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 381107a03824STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 38128180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 38138180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 38148180a562SAneesh Kumar K.V } else { 38150fc1b451SAneesh Kumar K.V return i_blocks; 38168180a562SAneesh Kumar K.V } 38170fc1b451SAneesh Kumar K.V } else { 38180fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 38190fc1b451SAneesh Kumar K.V } 38200fc1b451SAneesh Kumar K.V } 3821ff9ddf7eSJan Kara 3822152a7b0aSTao Ma static inline void ext4_iget_extra_inode(struct inode *inode, 3823152a7b0aSTao Ma struct ext4_inode *raw_inode, 3824152a7b0aSTao Ma struct ext4_inode_info *ei) 3825152a7b0aSTao Ma { 3826152a7b0aSTao Ma __le32 *magic = (void *)raw_inode + 3827152a7b0aSTao Ma EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; 382867cf5b09STao Ma if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { 3829152a7b0aSTao Ma ext4_set_inode_state(inode, EXT4_STATE_XATTR); 383067cf5b09STao Ma ext4_find_inline_data_nolock(inode); 3831f19d5870STao Ma } else 3832f19d5870STao Ma EXT4_I(inode)->i_inline_off = 0; 3833152a7b0aSTao Ma } 3834152a7b0aSTao Ma 38351d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 3836ac27a0ecSDave Kleikamp { 3837617ba13bSMingming Cao struct ext4_iloc iloc; 3838617ba13bSMingming Cao struct ext4_inode *raw_inode; 38391d1fe1eeSDavid Howells struct ext4_inode_info *ei; 38401d1fe1eeSDavid Howells struct inode *inode; 3841b436b9beSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 38421d1fe1eeSDavid Howells long ret; 3843ac27a0ecSDave Kleikamp int block; 384408cefc7aSEric W. Biederman uid_t i_uid; 384508cefc7aSEric W. Biederman gid_t i_gid; 3846ac27a0ecSDave Kleikamp 38471d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 38481d1fe1eeSDavid Howells if (!inode) 38491d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 38501d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 38511d1fe1eeSDavid Howells return inode; 38521d1fe1eeSDavid Howells 38531d1fe1eeSDavid Howells ei = EXT4_I(inode); 38547dc57615SPeter Huewe iloc.bh = NULL; 3855ac27a0ecSDave Kleikamp 38561d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 38571d1fe1eeSDavid Howells if (ret < 0) 3858ac27a0ecSDave Kleikamp goto bad_inode; 3859617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 3860814525f4SDarrick J. Wong 3861814525f4SDarrick J. Wong if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3862814525f4SDarrick J. Wong ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 3863814525f4SDarrick J. Wong if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 3864814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)) { 3865814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)", 3866814525f4SDarrick J. 
Wong EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize, 3867814525f4SDarrick J. Wong EXT4_INODE_SIZE(inode->i_sb)); 3868814525f4SDarrick J. Wong ret = -EIO; 3869814525f4SDarrick J. Wong goto bad_inode; 3870814525f4SDarrick J. Wong } 3871814525f4SDarrick J. Wong } else 3872814525f4SDarrick J. Wong ei->i_extra_isize = 0; 3873814525f4SDarrick J. Wong 3874814525f4SDarrick J. Wong /* Precompute checksum seed for inode metadata */ 3875814525f4SDarrick J. Wong if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3876814525f4SDarrick J. Wong EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { 3877814525f4SDarrick J. Wong struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3878814525f4SDarrick J. Wong __u32 csum; 3879814525f4SDarrick J. Wong __le32 inum = cpu_to_le32(inode->i_ino); 3880814525f4SDarrick J. Wong __le32 gen = raw_inode->i_generation; 3881814525f4SDarrick J. Wong csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, 3882814525f4SDarrick J. Wong sizeof(inum)); 3883814525f4SDarrick J. Wong ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, 3884814525f4SDarrick J. Wong sizeof(gen)); 3885814525f4SDarrick J. Wong } 3886814525f4SDarrick J. Wong 3887814525f4SDarrick J. Wong if (!ext4_inode_csum_verify(inode, raw_inode, ei)) { 3888814525f4SDarrick J. Wong EXT4_ERROR_INODE(inode, "checksum invalid"); 3889814525f4SDarrick J. Wong ret = -EIO; 3890814525f4SDarrick J. Wong goto bad_inode; 3891814525f4SDarrick J. Wong } 3892814525f4SDarrick J. Wong 3893ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 389408cefc7aSEric W. Biederman i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 389508cefc7aSEric W. Biederman i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 3896ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 389708cefc7aSEric W. Biederman i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 389808cefc7aSEric W. Biederman i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 3899ac27a0ecSDave Kleikamp } 390008cefc7aSEric W. Biederman i_uid_write(inode, i_uid); 390108cefc7aSEric W. Biederman i_gid_write(inode, i_gid); 3902bfe86848SMiklos Szeredi set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 3903ac27a0ecSDave Kleikamp 3904353eb83cSTheodore Ts'o ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 390567cf5b09STao Ma ei->i_inline_off = 0; 3906ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 3907ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 3908ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 3909ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes; 3910ac27a0ecSDave Kleikamp * the test is the same one that e2fsck uses. 3911ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 3912ac27a0ecSDave Kleikamp */ 3913ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 3914ac27a0ecSDave Kleikamp if (inode->i_mode == 0 || 3915617ba13bSMingming Cao !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 3916ac27a0ecSDave Kleikamp /* this inode is deleted */ 39171d1fe1eeSDavid Howells ret = -ESTALE; 3918ac27a0ecSDave Kleikamp goto bad_inode; 3919ac27a0ecSDave Kleikamp } 3920ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 3921ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 3922ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 3923ac27a0ecSDave Kleikamp * the process of deleting those.
*/ 3924ac27a0ecSDave Kleikamp } 3925ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 39260fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 39277973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 3928a9e81742STheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 3929a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 3930a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 3931a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 3932ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3933a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 3934a9e7f447SDmitry Monakhov ei->i_reserved_quota = 0; 3935a9e7f447SDmitry Monakhov #endif 3936ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 3937ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 3938a4912123STheodore Ts'o ei->i_last_alloc_group = ~0; 3939ac27a0ecSDave Kleikamp /* 3940ac27a0ecSDave Kleikamp * NOTE! The in-memory inode i_data array is in little-endian order 3941ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 3942ac27a0ecSDave Kleikamp */ 3943617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 3944ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 3945ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 3946ac27a0ecSDave Kleikamp 3947b436b9beSJan Kara /* 3948b436b9beSJan Kara * Set transaction id's of transactions that have to be committed 3949b436b9beSJan Kara * to finish f[data]sync. We set them to currently running transaction 3950b436b9beSJan Kara * as we cannot be sure that the inode or some of its metadata isn't 3951b436b9beSJan Kara * part of the transaction - the inode could have been reclaimed and 3952b436b9beSJan Kara * now it is reread from disk. 3953b436b9beSJan Kara */ 3954b436b9beSJan Kara if (journal) { 3955b436b9beSJan Kara transaction_t *transaction; 3956b436b9beSJan Kara tid_t tid; 3957b436b9beSJan Kara 3958a931da6aSTheodore Ts'o read_lock(&journal->j_state_lock); 3959b436b9beSJan Kara if (journal->j_running_transaction) 3960b436b9beSJan Kara transaction = journal->j_running_transaction; 3961b436b9beSJan Kara else 3962b436b9beSJan Kara transaction = journal->j_committing_transaction; 3963b436b9beSJan Kara if (transaction) 3964b436b9beSJan Kara tid = transaction->t_tid; 3965b436b9beSJan Kara else 3966b436b9beSJan Kara tid = journal->j_commit_sequence; 3967a931da6aSTheodore Ts'o read_unlock(&journal->j_state_lock); 3968b436b9beSJan Kara ei->i_sync_tid = tid; 3969b436b9beSJan Kara ei->i_datasync_tid = tid; 3970b436b9beSJan Kara } 3971b436b9beSJan Kara 39720040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3973ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 3974ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. */ 3975617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 3976617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 3977ac27a0ecSDave Kleikamp } else { 3978152a7b0aSTao Ma ext4_iget_extra_inode(inode, raw_inode, ei); 3979ac27a0ecSDave Kleikamp } 3980814525f4SDarrick J. 
Wong } 3981ac27a0ecSDave Kleikamp 3982ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 3983ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 3984ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 3985ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 3986ef7f3835SKalpak Shah 398725ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 398825ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 398925ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 399025ec56b5SJean Noel Cordenner inode->i_version |= 399125ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 399225ec56b5SJean Noel Cordenner } 399325ec56b5SJean Noel Cordenner 3994c4b5a614STheodore Ts'o ret = 0; 3995485c26ecSTheodore Ts'o if (ei->i_file_acl && 39961032988cSTheodore Ts'o !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 399724676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 399824676da4STheodore Ts'o ei->i_file_acl); 3999485c26ecSTheodore Ts'o ret = -EIO; 4000485c26ecSTheodore Ts'o goto bad_inode; 4001f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 4002f19d5870STao Ma if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 4003f19d5870STao Ma if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4004c4b5a614STheodore Ts'o (S_ISLNK(inode->i_mode) && 4005f19d5870STao Ma !ext4_inode_is_fast_symlink(inode)))) 40067a262f7cSAneesh Kumar K.V /* Validate extent which is part of inode */ 40077a262f7cSAneesh Kumar K.V ret = ext4_ext_check_inode(inode); 4008fe2c8191SThiemo Nagel } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 4009fe2c8191SThiemo Nagel (S_ISLNK(inode->i_mode) && 4010fe2c8191SThiemo Nagel !ext4_inode_is_fast_symlink(inode))) { 4011fe2c8191SThiemo Nagel /* Validate block references which are part of inode */ 40121f7d1e77STheodore Ts'o ret = ext4_ind_check_inode(inode); 4013fe2c8191SThiemo Nagel } 4014f19d5870STao Ma } 4015567f3e9aSTheodore Ts'o if (ret) 40167a262f7cSAneesh Kumar K.V goto bad_inode; 40177a262f7cSAneesh Kumar K.V 4018ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 4019617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 4020617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 4021617ba13bSMingming Cao ext4_set_aops(inode); 4022ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 4023617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 4024617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 4025ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 4026e83c1397SDuane Griffin if (ext4_inode_is_fast_symlink(inode)) { 4027617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 4028e83c1397SDuane Griffin nd_terminate_link(ei->i_data, inode->i_size, 4029e83c1397SDuane Griffin sizeof(ei->i_data) - 1); 4030e83c1397SDuane Griffin } else { 4031617ba13bSMingming Cao inode->i_op = &ext4_symlink_inode_operations; 4032617ba13bSMingming Cao ext4_set_aops(inode); 4033ac27a0ecSDave Kleikamp } 4034563bdd61STheodore Ts'o } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 4035563bdd61STheodore Ts'o S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 4036617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 4037ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 4038ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4039ac27a0ecSDave Kleikamp 
old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 4040ac27a0ecSDave Kleikamp else 4041ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 4042ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 4043563bdd61STheodore Ts'o } else { 4044563bdd61STheodore Ts'o ret = -EIO; 404524676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 4046563bdd61STheodore Ts'o goto bad_inode; 4047ac27a0ecSDave Kleikamp } 4048ac27a0ecSDave Kleikamp brelse(iloc.bh); 4049617ba13bSMingming Cao ext4_set_inode_flags(inode); 40501d1fe1eeSDavid Howells unlock_new_inode(inode); 40511d1fe1eeSDavid Howells return inode; 4052ac27a0ecSDave Kleikamp 4053ac27a0ecSDave Kleikamp bad_inode: 4054567f3e9aSTheodore Ts'o brelse(iloc.bh); 40551d1fe1eeSDavid Howells iget_failed(inode); 40561d1fe1eeSDavid Howells return ERR_PTR(ret); 4057ac27a0ecSDave Kleikamp } 4058ac27a0ecSDave Kleikamp 40590fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 40600fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 40610fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 40620fc1b451SAneesh Kumar K.V { 40630fc1b451SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 40640fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks; 40650fc1b451SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 40660fc1b451SAneesh Kumar K.V 40670fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) { 40680fc1b451SAneesh Kumar K.V /* 40694907cb7bSAnatol Pomozov * i_blocks can be represented in a 32 bit variable 40700fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 40710fc1b451SAneesh Kumar K.V */ 40728180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 40730fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0; 407484a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 4075f287a1a5STheodore Ts'o return 0; 4076f287a1a5STheodore Ts'o } 4077f287a1a5STheodore Ts'o if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) 4078f287a1a5STheodore Ts'o return -EFBIG; 4079f287a1a5STheodore Ts'o 4080f287a1a5STheodore Ts'o if (i_blocks <= 0xffffffffffffULL) { 40810fc1b451SAneesh Kumar K.V /* 40820fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable 40830fc1b451SAneesh Kumar K.V * as multiple of 512 bytes 40840fc1b451SAneesh Kumar K.V */ 40858180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 40860fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 408784a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 40880fc1b451SAneesh Kumar K.V } else { 408984a8dce2SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 40908180a562SAneesh Kumar K.V /* i_block is stored in file system block size */ 40918180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9); 40928180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 40938180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 40940fc1b451SAneesh Kumar K.V } 4095f287a1a5STheodore Ts'o return 0; 40960fc1b451SAneesh Kumar K.V } 40970fc1b451SAneesh Kumar K.V 4098ac27a0ecSDave Kleikamp /* 4099ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the 4100ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the 4101ac27a0ecSDave Kleikamp * buffer_head in the inode location struct. 
4102ac27a0ecSDave Kleikamp * 4103ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh. 4104ac27a0ecSDave Kleikamp */ 4105617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle, 4106ac27a0ecSDave Kleikamp struct inode *inode, 4107830156c7SFrank Mayhar struct ext4_iloc *iloc) 4108ac27a0ecSDave Kleikamp { 4109617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 4110617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 4111ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh; 4112ac27a0ecSDave Kleikamp int err = 0, rc, block; 4113b71fc079SJan Kara int need_datasync = 0; 411408cefc7aSEric W. Biederman uid_t i_uid; 411508cefc7aSEric W. Biederman gid_t i_gid; 4116ac27a0ecSDave Kleikamp 4117ac27a0ecSDave Kleikamp /* For fields not tracked in the in-memory inode, 4118ac27a0ecSDave Kleikamp * initialise them to zero for new inodes. */ 411919f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 4120617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 4121ac27a0ecSDave Kleikamp 4122ff9ddf7eSJan Kara ext4_get_inode_flags(ei); 4123ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode); 412408cefc7aSEric W. Biederman i_uid = i_uid_read(inode); 412508cefc7aSEric W. Biederman i_gid = i_gid_read(inode); 4126ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 412708cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid)); 412808cefc7aSEric W. Biederman raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid)); 4129ac27a0ecSDave Kleikamp /* 4130ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels. Otherwise, old inodes get 4131ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact. 4132ac27a0ecSDave Kleikamp */ 4133ac27a0ecSDave Kleikamp if (!ei->i_dtime) { 4134ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 413508cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_uid)); 4136ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 413708cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_gid)); 4138ac27a0ecSDave Kleikamp } else { 4139ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4140ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4141ac27a0ecSDave Kleikamp } 4142ac27a0ecSDave Kleikamp } else { 414308cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 414408cefc7aSEric W.
Biederman raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 4145ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 4146ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 4147ac27a0ecSDave Kleikamp } 4148ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 4149ef7f3835SKalpak Shah 4150ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 4151ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 4152ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4153ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4154ef7f3835SKalpak Shah 41550fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 41560fc1b451SAneesh Kumar K.V goto out_brelse; 4157ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 4158353eb83cSTheodore Ts'o raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 41599b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 41609b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 4161a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 4162a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 41637973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 4164b71fc079SJan Kara if (ei->i_disksize != ext4_isize(raw_inode)) { 4165a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 4166b71fc079SJan Kara need_datasync = 1; 4167b71fc079SJan Kara } 4168ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 4169ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 4170617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 4171617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 4172617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 4173617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 4174ac27a0ecSDave Kleikamp /* If this is the first large file 4175ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
4176ac27a0ecSDave Kleikamp */ 4177617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, 4178617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 4179ac27a0ecSDave Kleikamp if (err) 4180ac27a0ecSDave Kleikamp goto out_brelse; 4181617ba13bSMingming Cao ext4_update_dynamic_rev(sb); 4182617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb, 4183617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 41840390131bSFrank Mayhar ext4_handle_sync(handle); 4185b50924c2SArtem Bityutskiy err = ext4_handle_dirty_super(handle, sb); 4186ac27a0ecSDave Kleikamp } 4187ac27a0ecSDave Kleikamp } 4188ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4189ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 4190ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) { 4191ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 4192ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev)); 4193ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0; 4194ac27a0ecSDave Kleikamp } else { 4195ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0; 4196ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 4197ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev)); 4198ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0; 4199ac27a0ecSDave Kleikamp } 4200f19d5870STao Ma } else if (!ext4_has_inline_data(inode)) { 4201de9a55b8STheodore Ts'o for (block = 0; block < EXT4_N_BLOCKS; block++) 4202ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block]; 4203f19d5870STao Ma } 4204ac27a0ecSDave Kleikamp 420525ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 420625ec56b5SJean Noel Cordenner if (ei->i_extra_isize) { 420725ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 420825ec56b5SJean Noel Cordenner raw_inode->i_version_hi = 420925ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32); 4210ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 421125ec56b5SJean Noel Cordenner } 421225ec56b5SJean Noel Cordenner 4213814525f4SDarrick J. Wong ext4_inode_csum_set(inode, raw_inode, ei); 4214814525f4SDarrick J. Wong 42150390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 421673b50c1cSCurt Wohlgemuth rc = ext4_handle_dirty_metadata(handle, NULL, bh); 4217ac27a0ecSDave Kleikamp if (!err) 4218ac27a0ecSDave Kleikamp err = rc; 421919f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_NEW); 4220ac27a0ecSDave Kleikamp 4221b71fc079SJan Kara ext4_update_inode_fsync_trans(handle, inode, need_datasync); 4222ac27a0ecSDave Kleikamp out_brelse: 4223ac27a0ecSDave Kleikamp brelse(bh); 4224617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4225ac27a0ecSDave Kleikamp return err; 4226ac27a0ecSDave Kleikamp } 4227ac27a0ecSDave Kleikamp 4228ac27a0ecSDave Kleikamp /* 4229617ba13bSMingming Cao * ext4_write_inode() 4230ac27a0ecSDave Kleikamp * 4231ac27a0ecSDave Kleikamp * We are called from a few places: 4232ac27a0ecSDave Kleikamp * 4233ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files. 4234ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running 42354907cb7bSAnatol Pomozov * transaction to commit. 4236ac27a0ecSDave Kleikamp * 4237ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such. 4238ac27a0ecSDave Kleikamp * We wait on commit, if told to. 4239ac27a0ecSDave Kleikamp * 4240ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true) 4241ac27a0ecSDave Kleikamp * Here we simply return.
We can't afford to block kswapd on the 4242ac27a0ecSDave Kleikamp * journal commit. 4243ac27a0ecSDave Kleikamp * 4244ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything, 4245ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in 4246617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4247ac27a0ecSDave Kleikamp * knfsd. 4248ac27a0ecSDave Kleikamp * 4249ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the 4250ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in 4251ac27a0ecSDave Kleikamp * which we are interested. 4252ac27a0ecSDave Kleikamp * 4253ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code: 4254ac27a0ecSDave Kleikamp * 4255ac27a0ecSDave Kleikamp * mark_inode_dirty(inode) 4256ac27a0ecSDave Kleikamp * stuff(); 4257ac27a0ecSDave Kleikamp * inode->i_size = expr; 4258ac27a0ecSDave Kleikamp * 4259ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while 4260ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode 4261ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list. 4262ac27a0ecSDave Kleikamp */ 4263a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 4264ac27a0ecSDave Kleikamp { 426591ac6f43SFrank Mayhar int err; 426691ac6f43SFrank Mayhar 4267ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC) 4268ac27a0ecSDave Kleikamp return 0; 4269ac27a0ecSDave Kleikamp 427091ac6f43SFrank Mayhar if (EXT4_SB(inode->i_sb)->s_journal) { 4271617ba13bSMingming Cao if (ext4_journal_current_handle()) { 4272b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4273ac27a0ecSDave Kleikamp dump_stack(); 4274ac27a0ecSDave Kleikamp return -EIO; 4275ac27a0ecSDave Kleikamp } 4276ac27a0ecSDave Kleikamp 4277a9185b41SChristoph Hellwig if (wbc->sync_mode != WB_SYNC_ALL) 4278ac27a0ecSDave Kleikamp return 0; 4279ac27a0ecSDave Kleikamp 428091ac6f43SFrank Mayhar err = ext4_force_commit(inode->i_sb); 428191ac6f43SFrank Mayhar } else { 428291ac6f43SFrank Mayhar struct ext4_iloc iloc; 428391ac6f43SFrank Mayhar 42848b472d73SCurt Wohlgemuth err = __ext4_get_inode_loc(inode, &iloc, 0); 428591ac6f43SFrank Mayhar if (err) 428691ac6f43SFrank Mayhar return err; 4287a9185b41SChristoph Hellwig if (wbc->sync_mode == WB_SYNC_ALL) 4288830156c7SFrank Mayhar sync_dirty_buffer(iloc.bh); 4289830156c7SFrank Mayhar if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 4290c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 4291c398eda0STheodore Ts'o "IO error syncing inode"); 4292830156c7SFrank Mayhar err = -EIO; 4293830156c7SFrank Mayhar } 4294fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 429591ac6f43SFrank Mayhar } 429691ac6f43SFrank Mayhar return err; 4297ac27a0ecSDave Kleikamp } 4298ac27a0ecSDave Kleikamp 4299ac27a0ecSDave Kleikamp /* 4300617ba13bSMingming Cao * ext4_setattr() 4301ac27a0ecSDave Kleikamp * 4302ac27a0ecSDave Kleikamp * Called from notify_change. 4303ac27a0ecSDave Kleikamp * 4304ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as 4305ac27a0ecSDave Kleikamp * possible. 
In particular, we want to make sure that when the VFS 4306ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify 4307ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of 4308ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any 4309ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on 4310ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will 4311ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will 4312ac27a0ecSDave Kleikamp * leave these blocks visible to the user.) 4313ac27a0ecSDave Kleikamp * 4314678aaf48SJan Kara * Another thing we have to assure is that if we are in ordered mode 4315678aaf48SJan Kara * and the inode is still attached to the committing transaction, we must 4316678aaf48SJan Kara * start writeout of all the dirty pages which are being truncated. 4317678aaf48SJan Kara * This way we are sure that all the data written in the previous 4318678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under 4319678aaf48SJan Kara * writeback). 4320678aaf48SJan Kara * 4321678aaf48SJan Kara * Called with inode->i_mutex down. 4322ac27a0ecSDave Kleikamp */ 4323617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr) 4324ac27a0ecSDave Kleikamp { 4325ac27a0ecSDave Kleikamp struct inode *inode = dentry->d_inode; 4326ac27a0ecSDave Kleikamp int error, rc = 0; 43273d287de3SDmitry Monakhov int orphan = 0; 4328ac27a0ecSDave Kleikamp const unsigned int ia_valid = attr->ia_valid; 4329ac27a0ecSDave Kleikamp 4330ac27a0ecSDave Kleikamp error = inode_change_ok(inode, attr); 4331ac27a0ecSDave Kleikamp if (error) 4332ac27a0ecSDave Kleikamp return error; 4333ac27a0ecSDave Kleikamp 433412755627SDmitry Monakhov if (is_quota_modification(inode, attr)) 4335871a2931SChristoph Hellwig dquot_initialize(inode); 433608cefc7aSEric W. Biederman if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || 433708cefc7aSEric W. Biederman (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { 4338ac27a0ecSDave Kleikamp handle_t *handle; 4339ac27a0ecSDave Kleikamp 4340ac27a0ecSDave Kleikamp /* (user+group)*(old+new) structure, inode write (sb, 4341ac27a0ecSDave Kleikamp * inode block, ?
- but truncate inode update has it) */ 43425aca07ebSDmitry Monakhov handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ 4343194074acSDmitry Monakhov EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); 4344ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4345ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4346ac27a0ecSDave Kleikamp goto err_out; 4347ac27a0ecSDave Kleikamp } 4348b43fa828SChristoph Hellwig error = dquot_transfer(inode, attr); 4349ac27a0ecSDave Kleikamp if (error) { 4350617ba13bSMingming Cao ext4_journal_stop(handle); 4351ac27a0ecSDave Kleikamp return error; 4352ac27a0ecSDave Kleikamp } 4353ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4354ac27a0ecSDave Kleikamp * one transaction */ 4355ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4356ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4357ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4358ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4359617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4360617ba13bSMingming Cao ext4_journal_stop(handle); 4361ac27a0ecSDave Kleikamp } 4362ac27a0ecSDave Kleikamp 4363e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4364562c72aaSChristoph Hellwig 436512e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4366e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4367e2b46574SEric Sandeen 43680c095c7fSTheodore Ts'o if (attr->ia_size > sbi->s_bitmap_maxbytes) 43690c095c7fSTheodore Ts'o return -EFBIG; 4370e2b46574SEric Sandeen } 4371e2b46574SEric Sandeen } 4372e2b46574SEric Sandeen 4373ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4374c8d46e41SJiaying Zhang attr->ia_valid & ATTR_SIZE && 4375072bd7eaSTheodore Ts'o (attr->ia_size < inode->i_size)) { 4376ac27a0ecSDave Kleikamp handle_t *handle; 4377ac27a0ecSDave Kleikamp 4378617ba13bSMingming Cao handle = ext4_journal_start(inode, 3); 4379ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4380ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4381ac27a0ecSDave Kleikamp goto err_out; 4382ac27a0ecSDave Kleikamp } 43833d287de3SDmitry Monakhov if (ext4_handle_valid(handle)) { 4384617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 43853d287de3SDmitry Monakhov orphan = 1; 43863d287de3SDmitry Monakhov } 4387617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 4388617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 4389ac27a0ecSDave Kleikamp if (!error) 4390ac27a0ecSDave Kleikamp error = rc; 4391617ba13bSMingming Cao ext4_journal_stop(handle); 4392678aaf48SJan Kara 4393678aaf48SJan Kara if (ext4_should_order_data(inode)) { 4394678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode, 4395678aaf48SJan Kara attr->ia_size); 4396678aaf48SJan Kara if (error) { 4397678aaf48SJan Kara /* Do as much error cleanup as possible */ 4398678aaf48SJan Kara handle = ext4_journal_start(inode, 3); 4399678aaf48SJan Kara if (IS_ERR(handle)) { 4400678aaf48SJan Kara ext4_orphan_del(NULL, inode); 4401678aaf48SJan Kara goto err_out; 4402678aaf48SJan Kara } 4403678aaf48SJan Kara ext4_orphan_del(handle, inode); 44043d287de3SDmitry Monakhov orphan = 0; 4405678aaf48SJan Kara ext4_journal_stop(handle); 4406678aaf48SJan Kara goto err_out; 4407678aaf48SJan Kara } 4408678aaf48SJan Kara } 4409ac27a0ecSDave Kleikamp } 4410ac27a0ecSDave Kleikamp 4411072bd7eaSTheodore Ts'o if (attr->ia_valid & ATTR_SIZE) { 44121c9114f9SDmitry Monakhov if (attr->ia_size != i_size_read(inode)) { 4413072bd7eaSTheodore Ts'o 
truncate_setsize(inode, attr->ia_size); 44141b65007eSDmitry Monakhov /* Inode size will be reduced, wait for dio in flight. 44151b65007eSDmitry Monakhov * Temporarily disable dioread_nolock to prevent 44161b65007eSDmitry Monakhov * livelock. */ 44171b65007eSDmitry Monakhov if (orphan) { 44181b65007eSDmitry Monakhov ext4_inode_block_unlocked_dio(inode); 44191c9114f9SDmitry Monakhov inode_dio_wait(inode); 44201b65007eSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode); 44211b65007eSDmitry Monakhov } 44221c9114f9SDmitry Monakhov } 4423072bd7eaSTheodore Ts'o ext4_truncate(inode); 4424072bd7eaSTheodore Ts'o } 4425ac27a0ecSDave Kleikamp 44261025774cSChristoph Hellwig if (!rc) { 44271025774cSChristoph Hellwig setattr_copy(inode, attr); 44281025774cSChristoph Hellwig mark_inode_dirty(inode); 44291025774cSChristoph Hellwig } 44301025774cSChristoph Hellwig 44311025774cSChristoph Hellwig /* 44321025774cSChristoph Hellwig * If the call to ext4_truncate failed to get a transaction handle at 44331025774cSChristoph Hellwig * all, we need to clean up the in-core orphan list manually. 44341025774cSChristoph Hellwig */ 44353d287de3SDmitry Monakhov if (orphan && inode->i_nlink) 4436617ba13bSMingming Cao ext4_orphan_del(NULL, inode); 4437ac27a0ecSDave Kleikamp 4438ac27a0ecSDave Kleikamp if (!rc && (ia_valid & ATTR_MODE)) 4439617ba13bSMingming Cao rc = ext4_acl_chmod(inode); 4440ac27a0ecSDave Kleikamp 4441ac27a0ecSDave Kleikamp err_out: 4442617ba13bSMingming Cao ext4_std_error(inode->i_sb, error); 4443ac27a0ecSDave Kleikamp if (!error) 4444ac27a0ecSDave Kleikamp error = rc; 4445ac27a0ecSDave Kleikamp return error; 4446ac27a0ecSDave Kleikamp } 4447ac27a0ecSDave Kleikamp 44483e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 44493e3398a0SMingming Cao struct kstat *stat) 44503e3398a0SMingming Cao { 44513e3398a0SMingming Cao struct inode *inode; 44523e3398a0SMingming Cao unsigned long delalloc_blocks; 44533e3398a0SMingming Cao 44543e3398a0SMingming Cao inode = dentry->d_inode; 44553e3398a0SMingming Cao generic_fillattr(inode, stat); 44563e3398a0SMingming Cao 44573e3398a0SMingming Cao /* 44583e3398a0SMingming Cao * We can't update i_blocks if the block allocation is delayed; 44593e3398a0SMingming Cao * otherwise, in the case of a system crash before the real block 44603e3398a0SMingming Cao * allocation is done, we would have i_blocks inconsistent with 44613e3398a0SMingming Cao * on-disk file blocks. 44623e3398a0SMingming Cao * We always keep i_blocks updated together with real 44633e3398a0SMingming Cao * allocation. But so as not to confuse userspace, stat 44643e3398a0SMingming Cao * will return the blocks that include the delayed allocation 44653e3398a0SMingming Cao * blocks for this file.
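 *
 * As a worked example (numbers illustrative, not taken from this file):
 * with a 4KB block size (s_blocksize_bits == 12), a file holding 3
 * delayed-allocation blocks adds (3 << 12) >> 9 == 24 extra 512-byte
 * sectors to stat->blocks, matching what those blocks will consume once
 * they are really allocated.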
44663e3398a0SMingming Cao */ 446796607551STao Ma delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), 446896607551STao Ma EXT4_I(inode)->i_reserved_data_blocks); 44693e3398a0SMingming Cao 44703e3398a0SMingming Cao stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 44713e3398a0SMingming Cao return 0; 44723e3398a0SMingming Cao } 4473ac27a0ecSDave Kleikamp 4474a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4475a02908f1SMingming Cao { 447612e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 44778bb2b247SAmir Goldstein return ext4_ind_trans_blocks(inode, nrblocks, chunk); 4478ac51d837STheodore Ts'o return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); 4479a02908f1SMingming Cao } 4480ac51d837STheodore Ts'o 4481a02908f1SMingming Cao /* 4482a02908f1SMingming Cao * Account for index blocks, block group bitmaps and block group 4483a02908f1SMingming Cao * descriptor blocks if we modify data blocks and index blocks; in the 4484a02908f1SMingming Cao * worst case, the index blocks spread over different block groups 4485a02908f1SMingming Cao * 4486a02908f1SMingming Cao * If data blocks are discontiguous, they may spread over different 44874907cb7bSAnatol Pomozov * block groups too. Even if they are contiguous, with flexbg they 4488a02908f1SMingming Cao * could still cross a block group boundary. 4489a02908f1SMingming Cao * 4490a02908f1SMingming Cao * Also account for superblock, inode, quota and xattr blocks 4491a02908f1SMingming Cao */ 44921f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4493a02908f1SMingming Cao { 44948df9675fSTheodore Ts'o ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); 44958df9675fSTheodore Ts'o int gdpblocks; 4496a02908f1SMingming Cao int idxblocks; 4497a02908f1SMingming Cao int ret = 0; 4498a02908f1SMingming Cao 4499a02908f1SMingming Cao /* 4500a02908f1SMingming Cao * How many index blocks do we need to touch to modify nrblocks?
4501a02908f1SMingming Cao * The "Chunk" flag indicates whether the nrblocks are 4502a02908f1SMingming Cao * physically contiguous on disk. 4503a02908f1SMingming Cao * 4504a02908f1SMingming Cao * Direct IO and fallocate call get_block to allocate 4505a02908f1SMingming Cao * a single extent at a time, so they can set the "Chunk" flag. 4506a02908f1SMingming Cao */ 4507a02908f1SMingming Cao idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk); 4508a02908f1SMingming Cao 4509a02908f1SMingming Cao ret = idxblocks; 4510a02908f1SMingming Cao 4511a02908f1SMingming Cao /* 4512a02908f1SMingming Cao * Now let's see how many group bitmaps and group descriptors need 4513a02908f1SMingming Cao * to be accounted for. 4514a02908f1SMingming Cao */ 4515a02908f1SMingming Cao groups = idxblocks; 4516a02908f1SMingming Cao if (chunk) 4517a02908f1SMingming Cao groups += 1; 4518ac27a0ecSDave Kleikamp else 4519a02908f1SMingming Cao groups += nrblocks; 4520ac27a0ecSDave Kleikamp 4521a02908f1SMingming Cao gdpblocks = groups; 45228df9675fSTheodore Ts'o if (groups > ngroups) 45238df9675fSTheodore Ts'o groups = ngroups; 4524a02908f1SMingming Cao if (groups > EXT4_SB(inode->i_sb)->s_gdb_count) 4525a02908f1SMingming Cao gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count; 4526a02908f1SMingming Cao 4527a02908f1SMingming Cao /* bitmaps and block group descriptor blocks */ 4528a02908f1SMingming Cao ret += groups + gdpblocks; 4529a02908f1SMingming Cao 4530a02908f1SMingming Cao /* Blocks for super block, inode, quota and xattr blocks */ 4531a02908f1SMingming Cao ret += EXT4_META_TRANS_BLOCKS(inode->i_sb); 4532ac27a0ecSDave Kleikamp 4533ac27a0ecSDave Kleikamp return ret; 4534ac27a0ecSDave Kleikamp } 4535ac27a0ecSDave Kleikamp 4536ac27a0ecSDave Kleikamp /* 453725985edcSLucas De Marchi * Calculate the total number of credits to reserve to fit 4538f3bd1f3fSMingming Cao * the modification of a single page into a single transaction, 4539f3bd1f3fSMingming Cao * which may include multiple chunks of block allocations. 4540a02908f1SMingming Cao * 4541525f4ed8SMingming Cao * This could be called via ext4_write_begin() 4542a02908f1SMingming Cao * 4543525f4ed8SMingming Cao * We need to consider the worst case, where 4544a02908f1SMingming Cao * each extent needs one new block. 4545a02908f1SMingming Cao */ 4546a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode) 4547a02908f1SMingming Cao { 4548a02908f1SMingming Cao int bpp = ext4_journal_blocks_per_page(inode); 4549a02908f1SMingming Cao int ret; 4550a02908f1SMingming Cao 4551a02908f1SMingming Cao ret = ext4_meta_trans_blocks(inode, bpp, 0); 4552a02908f1SMingming Cao 4553a02908f1SMingming Cao /* Account for data blocks for journalled mode */ 4554a02908f1SMingming Cao if (ext4_should_journal_data(inode)) 4555a02908f1SMingming Cao ret += bpp; 4556a02908f1SMingming Cao return ret; 4557a02908f1SMingming Cao } 4558f3bd1f3fSMingming Cao 4559f3bd1f3fSMingming Cao /* 4560f3bd1f3fSMingming Cao * Calculate the journal credits for a chunk of data modification. 4561f3bd1f3fSMingming Cao * 4562f3bd1f3fSMingming Cao * This is called from DIO, fallocate or whoever calls 456379e83036SEric Sandeen * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks. 4564f3bd1f3fSMingming Cao * 4565f3bd1f3fSMingming Cao * Journal buffers for data blocks are not included here, as DIO 4566f3bd1f3fSMingming Cao * and fallocate do not need to journal data buffers.
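 *
 * A minimal sketch of a typical caller, modelled on the fallocate/DIO
 * paths (variable names here are illustrative, not quoted from those
 * call sites):
 *
 *	credits = ext4_chunk_trans_blocks(inode, max_blocks);
 *	handle = ext4_journal_start(inode, credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);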
4567f3bd1f3fSMingming Cao */ 4568f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks) 4569f3bd1f3fSMingming Cao { 4570f3bd1f3fSMingming Cao return ext4_meta_trans_blocks(inode, nrblocks, 1); 4571f3bd1f3fSMingming Cao } 4572f3bd1f3fSMingming Cao 4573a02908f1SMingming Cao /* 4574617ba13bSMingming Cao * The caller must have previously called ext4_reserve_inode_write(). 4575ac27a0ecSDave Kleikamp * Given this, we know that the caller already has write access to iloc->bh. 4576ac27a0ecSDave Kleikamp */ 4577617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle, 4578617ba13bSMingming Cao struct inode *inode, struct ext4_iloc *iloc) 4579ac27a0ecSDave Kleikamp { 4580ac27a0ecSDave Kleikamp int err = 0; 4581ac27a0ecSDave Kleikamp 4582c64db50eSTheodore Ts'o if (IS_I_VERSION(inode)) 458325ec56b5SJean Noel Cordenner inode_inc_iversion(inode); 458425ec56b5SJean Noel Cordenner 4585ac27a0ecSDave Kleikamp /* the do_update_inode consumes one bh->b_count */ 4586ac27a0ecSDave Kleikamp get_bh(iloc->bh); 4587ac27a0ecSDave Kleikamp 4588dab291afSMingming Cao /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ 4589830156c7SFrank Mayhar err = ext4_do_update_inode(handle, inode, iloc); 4590ac27a0ecSDave Kleikamp put_bh(iloc->bh); 4591ac27a0ecSDave Kleikamp return err; 4592ac27a0ecSDave Kleikamp } 4593ac27a0ecSDave Kleikamp 4594ac27a0ecSDave Kleikamp /* 4595ac27a0ecSDave Kleikamp * On success, we end up with an outstanding reference count against 4596ac27a0ecSDave Kleikamp * iloc->bh. This _must_ be cleaned up later. 4597ac27a0ecSDave Kleikamp */ 4598ac27a0ecSDave Kleikamp 4599ac27a0ecSDave Kleikamp int 4600617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode, 4601617ba13bSMingming Cao struct ext4_iloc *iloc) 4602ac27a0ecSDave Kleikamp { 46030390131bSFrank Mayhar int err; 46040390131bSFrank Mayhar 4605617ba13bSMingming Cao err = ext4_get_inode_loc(inode, iloc); 4606ac27a0ecSDave Kleikamp if (!err) { 4607ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc->bh, "get_write_access"); 4608617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, iloc->bh); 4609ac27a0ecSDave Kleikamp if (err) { 4610ac27a0ecSDave Kleikamp brelse(iloc->bh); 4611ac27a0ecSDave Kleikamp iloc->bh = NULL; 4612ac27a0ecSDave Kleikamp } 4613ac27a0ecSDave Kleikamp } 4614617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4615ac27a0ecSDave Kleikamp return err; 4616ac27a0ecSDave Kleikamp } 4617ac27a0ecSDave Kleikamp 4618ac27a0ecSDave Kleikamp /* 46196dd4ee7cSKalpak Shah * Expand an inode by new_extra_isize bytes. 46206dd4ee7cSKalpak Shah * Returns 0 on success or negative error number on failure.
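 *
 * For illustration only (byte values are assumptions, not taken from this
 * file): on a 256-byte on-disk inode with i_extra_isize == 28, the fixed
 * fields end at offset 128 + 28 == 156 and the in-inode xattr area begins
 * there. Growing i_extra_isize to 32 must shift that xattr area up by 4
 * bytes, which is why the EA-present case is handed off to
 * ext4_expand_extra_isize_ea().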
46216dd4ee7cSKalpak Shah */ 46221d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode, 46231d03ec98SAneesh Kumar K.V unsigned int new_extra_isize, 46241d03ec98SAneesh Kumar K.V struct ext4_iloc iloc, 46251d03ec98SAneesh Kumar K.V handle_t *handle) 46266dd4ee7cSKalpak Shah { 46276dd4ee7cSKalpak Shah struct ext4_inode *raw_inode; 46286dd4ee7cSKalpak Shah struct ext4_xattr_ibody_header *header; 46296dd4ee7cSKalpak Shah 46306dd4ee7cSKalpak Shah if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) 46316dd4ee7cSKalpak Shah return 0; 46326dd4ee7cSKalpak Shah 46336dd4ee7cSKalpak Shah raw_inode = ext4_raw_inode(&iloc); 46346dd4ee7cSKalpak Shah 46356dd4ee7cSKalpak Shah header = IHDR(inode, raw_inode); 46366dd4ee7cSKalpak Shah 46376dd4ee7cSKalpak Shah /* No extended attributes present */ 463819f5fb7aSTheodore Ts'o if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 46396dd4ee7cSKalpak Shah header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 46406dd4ee7cSKalpak Shah memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 46416dd4ee7cSKalpak Shah new_extra_isize); 46426dd4ee7cSKalpak Shah EXT4_I(inode)->i_extra_isize = new_extra_isize; 46436dd4ee7cSKalpak Shah return 0; 46446dd4ee7cSKalpak Shah } 46456dd4ee7cSKalpak Shah 46466dd4ee7cSKalpak Shah /* try to expand with EAs present */ 46476dd4ee7cSKalpak Shah return ext4_expand_extra_isize_ea(inode, new_extra_isize, 46486dd4ee7cSKalpak Shah raw_inode, handle); 46496dd4ee7cSKalpak Shah } 46506dd4ee7cSKalpak Shah 46516dd4ee7cSKalpak Shah /* 4652ac27a0ecSDave Kleikamp * What we do here is to mark the in-core inode as clean with respect to inode 4653ac27a0ecSDave Kleikamp * dirtiness (it may still be data-dirty). 4654ac27a0ecSDave Kleikamp * This means that the in-core inode may be reaped by prune_icache 4655ac27a0ecSDave Kleikamp * without having to perform any I/O. This is a very good thing, 4656ac27a0ecSDave Kleikamp * because *any* task may call prune_icache - even ones which 4657ac27a0ecSDave Kleikamp * have a transaction open against a different journal. 4658ac27a0ecSDave Kleikamp * 4659ac27a0ecSDave Kleikamp * Is this cheating? Not really. Sure, we haven't written the 4660ac27a0ecSDave Kleikamp * inode out, but prune_icache isn't a user-visible syncing function. 4661ac27a0ecSDave Kleikamp * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) 4662ac27a0ecSDave Kleikamp * we start and wait on commits. 4663ac27a0ecSDave Kleikamp */ 4664617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 4665ac27a0ecSDave Kleikamp { 4666617ba13bSMingming Cao struct ext4_iloc iloc; 46676dd4ee7cSKalpak Shah struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 46686dd4ee7cSKalpak Shah static unsigned int mnt_count; 46696dd4ee7cSKalpak Shah int err, ret; 4670ac27a0ecSDave Kleikamp 4671ac27a0ecSDave Kleikamp might_sleep(); 46727ff9c073STheodore Ts'o trace_ext4_mark_inode_dirty(inode, _RET_IP_); 4673617ba13bSMingming Cao err = ext4_reserve_inode_write(handle, inode, &iloc); 46740390131bSFrank Mayhar if (ext4_handle_valid(handle) && 46750390131bSFrank Mayhar EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 467619f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { 46776dd4ee7cSKalpak Shah /* 46786dd4ee7cSKalpak Shah * We need extra buffer credits since we may write into EA block 46796dd4ee7cSKalpak Shah * with this same handle. If journal_extend fails, then it will 46806dd4ee7cSKalpak Shah * only result in a minor loss of functionality for that inode. 
46816dd4ee7cSKalpak Shah * If this is felt to be critical, then e2fsck should be run to 46826dd4ee7cSKalpak Shah * force a large enough s_min_extra_isize. 46836dd4ee7cSKalpak Shah */ 46846dd4ee7cSKalpak Shah if ((jbd2_journal_extend(handle, 46856dd4ee7cSKalpak Shah EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 46866dd4ee7cSKalpak Shah ret = ext4_expand_extra_isize(inode, 46876dd4ee7cSKalpak Shah sbi->s_want_extra_isize, 46886dd4ee7cSKalpak Shah iloc, handle); 46896dd4ee7cSKalpak Shah if (ret) { 469019f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, 469119f5fb7aSTheodore Ts'o EXT4_STATE_NO_EXPAND); 4692c1bddad9SAneesh Kumar K.V if (mnt_count != 4693c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count)) { 469412062dddSEric Sandeen ext4_warning(inode->i_sb, 46956dd4ee7cSKalpak Shah "Unable to expand inode %lu. Delete" 46966dd4ee7cSKalpak Shah " some EAs or run e2fsck.", 46976dd4ee7cSKalpak Shah inode->i_ino); 4698c1bddad9SAneesh Kumar K.V mnt_count = 4699c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count); 47006dd4ee7cSKalpak Shah } 47016dd4ee7cSKalpak Shah } 47026dd4ee7cSKalpak Shah } 47036dd4ee7cSKalpak Shah } 4704ac27a0ecSDave Kleikamp if (!err) 4705617ba13bSMingming Cao err = ext4_mark_iloc_dirty(handle, inode, &iloc); 4706ac27a0ecSDave Kleikamp return err; 4707ac27a0ecSDave Kleikamp } 4708ac27a0ecSDave Kleikamp 4709ac27a0ecSDave Kleikamp /* 4710617ba13bSMingming Cao * ext4_dirty_inode() is called from __mark_inode_dirty() 4711ac27a0ecSDave Kleikamp * 4712ac27a0ecSDave Kleikamp * We're really interested in the case where a file is being extended. 4713ac27a0ecSDave Kleikamp * i_size has been changed by generic_commit_write() and we thus need 4714ac27a0ecSDave Kleikamp * to include the updated inode in the current transaction. 4715ac27a0ecSDave Kleikamp * 47165dd4056dSChristoph Hellwig * Also, dquot_alloc_block() will always dirty the inode when blocks 4717ac27a0ecSDave Kleikamp * are allocated to the file. 4718ac27a0ecSDave Kleikamp * 4719ac27a0ecSDave Kleikamp * If the inode is marked synchronous, we don't honour that here - doing 4720ac27a0ecSDave Kleikamp * so would cause a commit on atime updates, which we don't bother doing. 4721ac27a0ecSDave Kleikamp * We handle synchronous inodes at the highest possible level. 4722ac27a0ecSDave Kleikamp */ 4723aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags) 4724ac27a0ecSDave Kleikamp { 4725ac27a0ecSDave Kleikamp handle_t *handle; 4726ac27a0ecSDave Kleikamp 4727617ba13bSMingming Cao handle = ext4_journal_start(inode, 2); 4728ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4729ac27a0ecSDave Kleikamp goto out; 4730f3dc272fSCurt Wohlgemuth 4731617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 4732f3dc272fSCurt Wohlgemuth 4733617ba13bSMingming Cao ext4_journal_stop(handle); 4734ac27a0ecSDave Kleikamp out: 4735ac27a0ecSDave Kleikamp return; 4736ac27a0ecSDave Kleikamp } 4737ac27a0ecSDave Kleikamp 4738ac27a0ecSDave Kleikamp #if 0 4739ac27a0ecSDave Kleikamp /* 4740ac27a0ecSDave Kleikamp * Bind an inode's backing buffer_head into this transaction, to prevent 4741ac27a0ecSDave Kleikamp * it from being flushed to disk early. Unlike 4742617ba13bSMingming Cao * ext4_reserve_inode_write, this leaves behind no bh reference and 4743ac27a0ecSDave Kleikamp * returns no iloc structure, so the caller needs to repeat the iloc 4744ac27a0ecSDave Kleikamp * lookup to mark the inode dirty later. 
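 *
 * (Hypothetical usage, since this helper is compiled out under "#if 0";
 * the sequence below is an assumption, not quoted from a real caller:
 *
 *	err = ext4_pin_inode(handle, inode);
 *	... modify the in-core inode ...
 *	err = ext4_mark_inode_dirty(handle, inode);
 *
 * where the second call repeats the iloc lookup as described above.)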
4745ac27a0ecSDave Kleikamp */ 4746617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode) 4747ac27a0ecSDave Kleikamp { 4748617ba13bSMingming Cao struct ext4_iloc iloc; 4749ac27a0ecSDave Kleikamp 4750ac27a0ecSDave Kleikamp int err = 0; 4751ac27a0ecSDave Kleikamp if (handle) { 4752617ba13bSMingming Cao err = ext4_get_inode_loc(inode, &iloc); 4753ac27a0ecSDave Kleikamp if (!err) { 4754ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc.bh, "get_write_access"); 4755dab291afSMingming Cao err = jbd2_journal_get_write_access(handle, iloc.bh); 4756ac27a0ecSDave Kleikamp if (!err) 47570390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, 475873b50c1cSCurt Wohlgemuth NULL, 4759ac27a0ecSDave Kleikamp iloc.bh); 4760ac27a0ecSDave Kleikamp brelse(iloc.bh); 4761ac27a0ecSDave Kleikamp } 4762ac27a0ecSDave Kleikamp } 4763617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4764ac27a0ecSDave Kleikamp return err; 4765ac27a0ecSDave Kleikamp } 4766ac27a0ecSDave Kleikamp #endif 4767ac27a0ecSDave Kleikamp 4768617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val) 4769ac27a0ecSDave Kleikamp { 4770ac27a0ecSDave Kleikamp journal_t *journal; 4771ac27a0ecSDave Kleikamp handle_t *handle; 4772ac27a0ecSDave Kleikamp int err; 4773ac27a0ecSDave Kleikamp 4774ac27a0ecSDave Kleikamp /* 4775ac27a0ecSDave Kleikamp * We have to be very careful here: changing a data block's 4776ac27a0ecSDave Kleikamp * journaling status dynamically is dangerous. If we write a 4777ac27a0ecSDave Kleikamp * data block to the journal, change the status and then delete 4778ac27a0ecSDave Kleikamp * that block, we risk forgetting to revoke the old log record 4779ac27a0ecSDave Kleikamp * from the journal and so a subsequent replay can corrupt data. 4780ac27a0ecSDave Kleikamp * So, first we make sure that the journal is empty and that 4781ac27a0ecSDave Kleikamp * nobody is changing anything. 4782ac27a0ecSDave Kleikamp */ 4783ac27a0ecSDave Kleikamp 4784617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 47850390131bSFrank Mayhar if (!journal) 47860390131bSFrank Mayhar return 0; 4787d699594dSDave Hansen if (is_journal_aborted(journal)) 4788ac27a0ecSDave Kleikamp return -EROFS; 47892aff57b0SYongqiang Yang /* We have to allocate physical blocks for delalloc blocks 47902aff57b0SYongqiang Yang * before flushing the journal; otherwise delalloc blocks cannot 47912aff57b0SYongqiang Yang * be allocated any more. Even worse, a truncate on delalloc blocks 47922aff57b0SYongqiang Yang * could trigger a BUG by flushing delalloc blocks in the journal. 47932aff57b0SYongqiang Yang * There are no delalloc blocks in non-journal data mode. 47942aff57b0SYongqiang Yang */ 47952aff57b0SYongqiang Yang if (val && test_opt(inode->i_sb, DELALLOC)) { 47962aff57b0SYongqiang Yang err = ext4_alloc_da_blocks(inode); 47972aff57b0SYongqiang Yang if (err < 0) 47982aff57b0SYongqiang Yang return err; 47992aff57b0SYongqiang Yang } 4800ac27a0ecSDave Kleikamp 480117335dccSDmitry Monakhov /* Wait for all existing dio workers */ 480217335dccSDmitry Monakhov ext4_inode_block_unlocked_dio(inode); 480317335dccSDmitry Monakhov inode_dio_wait(inode); 480417335dccSDmitry Monakhov 4805dab291afSMingming Cao jbd2_journal_lock_updates(journal); 4806ac27a0ecSDave Kleikamp 4807ac27a0ecSDave Kleikamp /* 4808ac27a0ecSDave Kleikamp * OK, there are no updates running now, and all cached data is 4809ac27a0ecSDave Kleikamp * synced to disk.
We are now in a completely consistent state 4810ac27a0ecSDave Kleikamp * which doesn't have anything in the journal, and we know that 4811ac27a0ecSDave Kleikamp * no filesystem updates are running, so it is safe to modify 4812ac27a0ecSDave Kleikamp * the inode's in-core data-journaling state flag now. 4813ac27a0ecSDave Kleikamp */ 4814ac27a0ecSDave Kleikamp 4815ac27a0ecSDave Kleikamp if (val) 481612e9b892SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 48175872ddaaSYongqiang Yang else { 48185872ddaaSYongqiang Yang jbd2_journal_flush(journal); 481912e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 48205872ddaaSYongqiang Yang } 4821617ba13bSMingming Cao ext4_set_aops(inode); 4822ac27a0ecSDave Kleikamp 4823dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 482417335dccSDmitry Monakhov ext4_inode_resume_unlocked_dio(inode); 4825ac27a0ecSDave Kleikamp 4826ac27a0ecSDave Kleikamp /* Finally we can mark the inode as dirty. */ 4827ac27a0ecSDave Kleikamp 4828617ba13bSMingming Cao handle = ext4_journal_start(inode, 1); 4829ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4830ac27a0ecSDave Kleikamp return PTR_ERR(handle); 4831ac27a0ecSDave Kleikamp 4832617ba13bSMingming Cao err = ext4_mark_inode_dirty(handle, inode); 48330390131bSFrank Mayhar ext4_handle_sync(handle); 4834617ba13bSMingming Cao ext4_journal_stop(handle); 4835617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4836ac27a0ecSDave Kleikamp 4837ac27a0ecSDave Kleikamp return err; 4838ac27a0ecSDave Kleikamp } 48392e9ee850SAneesh Kumar K.V 48402e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 48412e9ee850SAneesh Kumar K.V { 48422e9ee850SAneesh Kumar K.V return !buffer_mapped(bh); 48432e9ee850SAneesh Kumar K.V } 48442e9ee850SAneesh Kumar K.V 4845c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 48462e9ee850SAneesh Kumar K.V { 4847c2ec175cSNick Piggin struct page *page = vmf->page; 48482e9ee850SAneesh Kumar K.V loff_t size; 48492e9ee850SAneesh Kumar K.V unsigned long len; 48509ea7df53SJan Kara int ret; 48512e9ee850SAneesh Kumar K.V struct file *file = vma->vm_file; 48522e9ee850SAneesh Kumar K.V struct inode *inode = file->f_path.dentry->d_inode; 48532e9ee850SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 48549ea7df53SJan Kara handle_t *handle; 48559ea7df53SJan Kara get_block_t *get_block; 48569ea7df53SJan Kara int retries = 0; 48572e9ee850SAneesh Kumar K.V 48588e8ad8a5SJan Kara sb_start_pagefault(inode->i_sb); 4859041bbb6dSTheodore Ts'o file_update_time(vma->vm_file); 48609ea7df53SJan Kara /* Delalloc case is easy... */ 48619ea7df53SJan Kara if (test_opt(inode->i_sb, DELALLOC) && 48629ea7df53SJan Kara !ext4_should_journal_data(inode) && 48639ea7df53SJan Kara !ext4_nonda_switch(inode->i_sb)) { 48649ea7df53SJan Kara do { 48659ea7df53SJan Kara ret = __block_page_mkwrite(vma, vmf, 48669ea7df53SJan Kara ext4_da_get_block_prep); 48679ea7df53SJan Kara } while (ret == -ENOSPC && 48689ea7df53SJan Kara ext4_should_retry_alloc(inode->i_sb, &retries)); 48699ea7df53SJan Kara goto out_ret; 48702e9ee850SAneesh Kumar K.V } 48710e499890SDarrick J. Wong 48720e499890SDarrick J. Wong lock_page(page); 48739ea7df53SJan Kara size = i_size_read(inode); 48749ea7df53SJan Kara /* Page got truncated from under us? 
*/ 48759ea7df53SJan Kara if (page->mapping != mapping || page_offset(page) > size) { 48769ea7df53SJan Kara unlock_page(page); 48779ea7df53SJan Kara ret = VM_FAULT_NOPAGE; 48789ea7df53SJan Kara goto out; 48790e499890SDarrick J. Wong } 48802e9ee850SAneesh Kumar K.V 48812e9ee850SAneesh Kumar K.V if (page->index == size >> PAGE_CACHE_SHIFT) 48822e9ee850SAneesh Kumar K.V len = size & ~PAGE_CACHE_MASK; 48832e9ee850SAneesh Kumar K.V else 48842e9ee850SAneesh Kumar K.V len = PAGE_CACHE_SIZE; 4885a827eaffSAneesh Kumar K.V /* 48869ea7df53SJan Kara * Return if we have all the buffers mapped. This avoids the need to do 48879ea7df53SJan Kara * journal_start/journal_stop which can block and take a long time 4888a827eaffSAneesh Kumar K.V */ 48892e9ee850SAneesh Kumar K.V if (page_has_buffers(page)) { 4890f19d5870STao Ma if (!ext4_walk_page_buffers(NULL, page_buffers(page), 4891f19d5870STao Ma 0, len, NULL, 4892a827eaffSAneesh Kumar K.V ext4_bh_unmapped)) { 48939ea7df53SJan Kara /* Wait so that we don't change page under IO */ 48949ea7df53SJan Kara wait_on_page_writeback(page); 48959ea7df53SJan Kara ret = VM_FAULT_LOCKED; 48969ea7df53SJan Kara goto out; 48972e9ee850SAneesh Kumar K.V } 4898a827eaffSAneesh Kumar K.V } 4899a827eaffSAneesh Kumar K.V unlock_page(page); 49009ea7df53SJan Kara /* OK, we need to fill the hole... */ 49019ea7df53SJan Kara if (ext4_should_dioread_nolock(inode)) 49029ea7df53SJan Kara get_block = ext4_get_block_write; 49039ea7df53SJan Kara else 49049ea7df53SJan Kara get_block = ext4_get_block; 49059ea7df53SJan Kara retry_alloc: 49069ea7df53SJan Kara handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 49079ea7df53SJan Kara if (IS_ERR(handle)) { 4908c2ec175cSNick Piggin ret = VM_FAULT_SIGBUS; 49099ea7df53SJan Kara goto out; 49109ea7df53SJan Kara } 49119ea7df53SJan Kara ret = __block_page_mkwrite(vma, vmf, get_block); 49129ea7df53SJan Kara if (!ret && ext4_should_journal_data(inode)) { 4913f19d5870STao Ma if (ext4_walk_page_buffers(handle, page_buffers(page), 0, 49149ea7df53SJan Kara PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { 49159ea7df53SJan Kara unlock_page(page); 49169ea7df53SJan Kara ret = VM_FAULT_SIGBUS; 4917fcbb5515SYongqiang Yang ext4_journal_stop(handle); 49189ea7df53SJan Kara goto out; 49199ea7df53SJan Kara } 49209ea7df53SJan Kara ext4_set_inode_state(inode, EXT4_STATE_JDATA); 49219ea7df53SJan Kara } 49229ea7df53SJan Kara ext4_journal_stop(handle); 49239ea7df53SJan Kara if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 49249ea7df53SJan Kara goto retry_alloc; 49259ea7df53SJan Kara out_ret: 49269ea7df53SJan Kara ret = block_page_mkwrite_return(ret); 49279ea7df53SJan Kara out: 49288e8ad8a5SJan Kara sb_end_pagefault(inode->i_sb); 49292e9ee850SAneesh Kumar K.V return ret; 49302e9ee850SAneesh Kumar K.V } 4931
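
/*
 * Illustrative sketch, not part of this file: ext4_page_mkwrite() is
 * wired into the mm layer through a vm_operations_struct supplied by the
 * file mmap path (fs/ext4/file.c does the equivalent); the struct name
 * below is made up for the example, following this file's "#if 0"
 * convention for non-compiled code.
 */
#if 0
static const struct vm_operations_struct ext4_example_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ext4_page_mkwrite,
};
#endif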