/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * the page cache has already been dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
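
/*
 * Illustrative sketch, not part of the original sources: a truncate-style
 * loop that holds i_data_sem for write and has consistently dirtied all of
 * its buffers against the current transaction could refresh its handle as
 * below.  "needed" is a hypothetical credit count, e.g. obtained from
 * ext4_blocks_for_truncate():
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed)) {
 *		err = ext4_truncate_restart_trans(handle, inode, needed);
 *		if (err)
 *			goto out;	(commit-and-restart failed)
 *	}
 */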

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also, even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped, and thus the user could see stale data if they
		 * try to read them before the transaction is checkpointed.
		 * So be careful and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing the inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use the page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks needed to reserve
 * in order to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
 */
static void set_buffers_da_mapped(struct inode *inode,
				  struct ext4_map_blocks *map)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	int i, nr_pages;
	pgoff_t index, end;

	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (map->m_lblk + map->m_len - 1) >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index,
					  min(end - index + 1,
					      (pgoff_t)PAGEVEC_SIZE));
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page))
				break;

			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					set_buffer_da_mapped(bh);
					bh = bh->b_this_page;
				} while (bh != head);
			}
			index++;
		}
		pagevec_release(&pvec);
	}
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create==0 and the blocks are pre-allocated and uninitialized, the
 * result buffer head is unmapped.  If create==1, it will make sure that
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with the create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for the allocation; let
	 * the underlying get_block() function know, to avoid double
	 * accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We
		 * don't support fallocate for non-extent files, so we can
		 * update the reserved space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		/*
		 * If we have successfully mapped the delayed allocated
		 * blocks, set the BH_Da_Mapped bit on them.  It's important
		 * to do this under the protection of i_data_sem.
		 */
		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
			set_buffers_da_mapped(inode, map);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
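
/*
 * Illustrative sketch, not part of the original sources: a read-only
 * lookup needs no transaction handle and no EXT4_GET_BLOCKS_* flags.
 * "lblk", "len" and process() are hypothetical placeholders:
 *
 *	struct ext4_map_blocks map;
 *	int ret;
 *
 *	map.m_lblk = lblk;
 *	map.m_len = len;
 *	ret = ext4_map_blocks(NULL, inode, &map, 0);
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		process(map.m_pblk, ret);	(ret blocks mapped at m_pblk)
 *
 * A return value of 0 means the blocks are not allocated; a negative
 * value is an error.
 */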

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
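
/*
 * Illustrative sketch, not part of the original sources: a typical
 * read-only caller (for example, code walking directory blocks) might
 * look like the following, where "blk" is a hypothetical block index:
 *
 *	struct buffer_head *bh;
 *	int err = 0;
 *
 *	bh = ext4_bread(NULL, inode, blk, 0, &err);
 *	if (!bh)
 *		return err;	(I/O error, or 0 if the block is unmapped)
 *	(... use bh->b_data ...)
 *	brelse(bh);
 */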

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
					from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/*
		 * We need to mark the inode dirty even if new_i_size is
		 * less than inode->i_size but greater than i_disksize
		 * (hint: delalloc).
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/*
			 * If we have allocated more blocks and copied less,
			 * we will have blocks allocated outside
			 * inode->i_size;
			 * truncate them.
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks and copied less, we
		 * will have blocks allocated outside inode->i_size;
		 * truncate them.
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks and copied less, we
		 * will have blocks allocated outside inode->i_size;
		 * truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
ret : copied;
1095ac27a0ecSDave Kleikamp }
1096d2a17637SMingming Cao 
10979d0be502STheodore Ts'o /*
10987b415bf6SAditya Kali  * Reserve a single cluster located at lblock
10999d0be502STheodore Ts'o  */
110001f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1101d2a17637SMingming Cao {
1102030ba6bcSAneesh Kumar K.V int retries = 0;
1103d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
11040637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode);
11057b415bf6SAditya Kali unsigned int md_needed;
11065dd4056dSChristoph Hellwig int ret;
1107d2a17637SMingming Cao 
1108d2a17637SMingming Cao /*
1109d2a17637SMingming Cao  * Recalculate the amount of metadata blocks to reserve
1110d2a17637SMingming Cao  * in order to allocate nrblocks; the worst case is one
1111d2a17637SMingming Cao  * extent per block.
1112d2a17637SMingming Cao  */
1113030ba6bcSAneesh Kumar K.V repeat:
11140637c6f4STheodore Ts'o spin_lock(&ei->i_block_reservation_lock);
11157b415bf6SAditya Kali md_needed = EXT4_NUM_B2C(sbi,
11167b415bf6SAditya Kali ext4_calc_metadata_amount(inode, lblock));
1117f8ec9d68STheodore Ts'o trace_ext4_da_reserve_space(inode, md_needed);
11180637c6f4STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock);
1119d2a17637SMingming Cao 
112060e58e0fSMingming Cao /*
112172b8ab9dSEric Sandeen  * We will charge metadata quota at writeout time; this saves
112272b8ab9dSEric Sandeen  * us from metadata over-estimation, though we may go over by
112372b8ab9dSEric Sandeen  * a small amount in the end.  Here we just reserve for data.
112460e58e0fSMingming Cao  */
11257b415bf6SAditya Kali ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
11265dd4056dSChristoph Hellwig if (ret)
11275dd4056dSChristoph Hellwig return ret;
112872b8ab9dSEric Sandeen /*
112972b8ab9dSEric Sandeen  * We do still charge estimated metadata to the sb though;
113072b8ab9dSEric Sandeen  * we cannot afford to run out of free blocks.
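 *
 * A worked example of the accounting above (illustrative numbers,
 * not taken from this code): with 4k blocks and a bigalloc cluster
 * ratio of 16, the quota charge for one delayed data cluster is
 * EXT4_C2B(sbi, 1) == 16 blocks, while md_needed is the metadata
 * estimate already expressed in cluster units via EXT4_NUM_B2C().
 * The quota charge is undone on the claim-failure path below
 * before the allocation is retried.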
113172b8ab9dSEric Sandeen */ 1132e7d5f315STheodore Ts'o if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) { 11337b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1134030ba6bcSAneesh Kumar K.V if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1135030ba6bcSAneesh Kumar K.V yield(); 1136030ba6bcSAneesh Kumar K.V goto repeat; 1137030ba6bcSAneesh Kumar K.V } 1138d2a17637SMingming Cao return -ENOSPC; 1139d2a17637SMingming Cao } 11400637c6f4STheodore Ts'o spin_lock(&ei->i_block_reservation_lock); 11419d0be502STheodore Ts'o ei->i_reserved_data_blocks++; 11420637c6f4STheodore Ts'o ei->i_reserved_meta_blocks += md_needed; 11430637c6f4STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 114439bc680aSDmitry Monakhov 1145d2a17637SMingming Cao return 0; /* success */ 1146d2a17637SMingming Cao } 1147d2a17637SMingming Cao 114812219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1149d2a17637SMingming Cao { 1150d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 11510637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 1152d2a17637SMingming Cao 1153cd213226SMingming Cao if (!to_free) 1154cd213226SMingming Cao return; /* Nothing to release, exit */ 1155cd213226SMingming Cao 1156d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1157cd213226SMingming Cao 11585a58ec87SLi Zefan trace_ext4_da_release_space(inode, to_free); 11590637c6f4STheodore Ts'o if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1160cd213226SMingming Cao /* 11610637c6f4STheodore Ts'o * if there aren't enough reserved blocks, then the 11620637c6f4STheodore Ts'o * counter is messed up somewhere. Since this 11630637c6f4STheodore Ts'o * function is called from invalidate page, it's 11640637c6f4STheodore Ts'o * harmless to return without any action. 1165cd213226SMingming Cao */ 11660637c6f4STheodore Ts'o ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: " 11670637c6f4STheodore Ts'o "ino %lu, to_free %d with only %d reserved " 11681084f252STheodore Ts'o "data blocks", inode->i_ino, to_free, 11690637c6f4STheodore Ts'o ei->i_reserved_data_blocks); 11700637c6f4STheodore Ts'o WARN_ON(1); 11710637c6f4STheodore Ts'o to_free = ei->i_reserved_data_blocks; 11720637c6f4STheodore Ts'o } 11730637c6f4STheodore Ts'o ei->i_reserved_data_blocks -= to_free; 11740637c6f4STheodore Ts'o 11750637c6f4STheodore Ts'o if (ei->i_reserved_data_blocks == 0) { 11760637c6f4STheodore Ts'o /* 11770637c6f4STheodore Ts'o * We can release all of the reserved metadata blocks 11780637c6f4STheodore Ts'o * only when we have written all of the delayed 11790637c6f4STheodore Ts'o * allocation blocks. 11807b415bf6SAditya Kali * Note that in case of bigalloc, i_reserved_meta_blocks, 11817b415bf6SAditya Kali * i_reserved_data_blocks, etc. refer to number of clusters. 
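 *
 * (Illustrative unit check: with s_cluster_ratio == 16, to_free == 2
 * here stands for 32 file-system blocks, and s_dirtyclusters_counter
 * below is likewise maintained in cluster units.)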
11820637c6f4STheodore Ts'o */ 118357042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 118472b8ab9dSEric Sandeen ei->i_reserved_meta_blocks); 1185ee5f4d9cSTheodore Ts'o ei->i_reserved_meta_blocks = 0; 11869d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 1187cd213226SMingming Cao } 1188cd213226SMingming Cao 118972b8ab9dSEric Sandeen /* update fs dirty data blocks counter */ 119057042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); 1191d2a17637SMingming Cao 1192d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 119360e58e0fSMingming Cao 11947b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); 1195d2a17637SMingming Cao } 1196d2a17637SMingming Cao 1197d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1198d2a17637SMingming Cao unsigned long offset) 1199d2a17637SMingming Cao { 1200d2a17637SMingming Cao int to_release = 0; 1201d2a17637SMingming Cao struct buffer_head *head, *bh; 1202d2a17637SMingming Cao unsigned int curr_off = 0; 12037b415bf6SAditya Kali struct inode *inode = page->mapping->host; 12047b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 12057b415bf6SAditya Kali int num_clusters; 1206d2a17637SMingming Cao 1207d2a17637SMingming Cao head = page_buffers(page); 1208d2a17637SMingming Cao bh = head; 1209d2a17637SMingming Cao do { 1210d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1211d2a17637SMingming Cao 1212d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1213d2a17637SMingming Cao to_release++; 1214d2a17637SMingming Cao clear_buffer_delay(bh); 12155356f261SAditya Kali clear_buffer_da_mapped(bh); 1216d2a17637SMingming Cao } 1217d2a17637SMingming Cao curr_off = next_off; 1218d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 12197b415bf6SAditya Kali 12207b415bf6SAditya Kali /* If we have released all the blocks belonging to a cluster, then we 12217b415bf6SAditya Kali * need to release the reserved space for that cluster. */ 12227b415bf6SAditya Kali num_clusters = EXT4_NUM_B2C(sbi, to_release); 12237b415bf6SAditya Kali while (num_clusters > 0) { 12247b415bf6SAditya Kali ext4_fsblk_t lblk; 12257b415bf6SAditya Kali lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + 12267b415bf6SAditya Kali ((num_clusters - 1) << sbi->s_cluster_bits); 12277b415bf6SAditya Kali if (sbi->s_cluster_ratio == 1 || 12287b415bf6SAditya Kali !ext4_find_delalloc_cluster(inode, lblk, 1)) 12297b415bf6SAditya Kali ext4_da_release_space(inode, 1); 12307b415bf6SAditya Kali 12317b415bf6SAditya Kali num_clusters--; 12327b415bf6SAditya Kali } 1233d2a17637SMingming Cao } 1234ac27a0ecSDave Kleikamp 1235ac27a0ecSDave Kleikamp /* 123664769240SAlex Tomas * Delayed allocation stuff 123764769240SAlex Tomas */ 123864769240SAlex Tomas 123964769240SAlex Tomas /* 124064769240SAlex Tomas * mpage_da_submit_io - walks through extent of pages and try to write 1241a1d6cc56SAneesh Kumar K.V * them with writepage() call back 124264769240SAlex Tomas * 124364769240SAlex Tomas * @mpd->inode: inode 124464769240SAlex Tomas * @mpd->first_page: first page of the extent 124564769240SAlex Tomas * @mpd->next_page: page after the last page of the extent 124664769240SAlex Tomas * 124764769240SAlex Tomas * By the time mpage_da_submit_io() is called we expect all blocks 124864769240SAlex Tomas * to be allocated. this may be wrong if allocation failed. 
124964769240SAlex Tomas * 125064769240SAlex Tomas * As pages are already locked by write_cache_pages(), we can't use it 125164769240SAlex Tomas */ 12521de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd, 12531de3e3dfSTheodore Ts'o struct ext4_map_blocks *map) 125464769240SAlex Tomas { 1255791b7f08SAneesh Kumar K.V struct pagevec pvec; 1256791b7f08SAneesh Kumar K.V unsigned long index, end; 1257791b7f08SAneesh Kumar K.V int ret = 0, err, nr_pages, i; 1258791b7f08SAneesh Kumar K.V struct inode *inode = mpd->inode; 1259791b7f08SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 1260cb20d518STheodore Ts'o loff_t size = i_size_read(inode); 12613ecdb3a1STheodore Ts'o unsigned int len, block_start; 12623ecdb3a1STheodore Ts'o struct buffer_head *bh, *page_bufs = NULL; 1263cb20d518STheodore Ts'o int journal_data = ext4_should_journal_data(inode); 12641de3e3dfSTheodore Ts'o sector_t pblock = 0, cur_logical = 0; 1265bd2d0210STheodore Ts'o struct ext4_io_submit io_submit; 126664769240SAlex Tomas 126764769240SAlex Tomas BUG_ON(mpd->next_page <= mpd->first_page); 1268bd2d0210STheodore Ts'o memset(&io_submit, 0, sizeof(io_submit)); 1269791b7f08SAneesh Kumar K.V /* 1270791b7f08SAneesh Kumar K.V * We need to start from the first_page to the next_page - 1 1271791b7f08SAneesh Kumar K.V * to make sure we also write the mapped dirty buffer_heads. 12728dc207c0STheodore Ts'o * If we look at mpd->b_blocknr we would only be looking 1273791b7f08SAneesh Kumar K.V * at the currently mapped buffer_heads. 1274791b7f08SAneesh Kumar K.V */ 127564769240SAlex Tomas index = mpd->first_page; 127664769240SAlex Tomas end = mpd->next_page - 1; 127764769240SAlex Tomas 1278791b7f08SAneesh Kumar K.V pagevec_init(&pvec, 0); 127964769240SAlex Tomas while (index <= end) { 1280791b7f08SAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 128164769240SAlex Tomas if (nr_pages == 0) 128264769240SAlex Tomas break; 128364769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 128497498956STheodore Ts'o int commit_write = 0, skip_page = 0; 128564769240SAlex Tomas struct page *page = pvec.pages[i]; 128664769240SAlex Tomas 1287791b7f08SAneesh Kumar K.V index = page->index; 1288791b7f08SAneesh Kumar K.V if (index > end) 1289791b7f08SAneesh Kumar K.V break; 1290cb20d518STheodore Ts'o 1291cb20d518STheodore Ts'o if (index == size >> PAGE_CACHE_SHIFT) 1292cb20d518STheodore Ts'o len = size & ~PAGE_CACHE_MASK; 1293cb20d518STheodore Ts'o else 1294cb20d518STheodore Ts'o len = PAGE_CACHE_SIZE; 12951de3e3dfSTheodore Ts'o if (map) { 12961de3e3dfSTheodore Ts'o cur_logical = index << (PAGE_CACHE_SHIFT - 12971de3e3dfSTheodore Ts'o inode->i_blkbits); 12981de3e3dfSTheodore Ts'o pblock = map->m_pblk + (cur_logical - 12991de3e3dfSTheodore Ts'o map->m_lblk); 13001de3e3dfSTheodore Ts'o } 1301791b7f08SAneesh Kumar K.V index++; 1302791b7f08SAneesh Kumar K.V 1303791b7f08SAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1304791b7f08SAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1305791b7f08SAneesh Kumar K.V 130622208dedSAneesh Kumar K.V /* 1307cb20d518STheodore Ts'o * If the page does not have buffers (for 1308cb20d518STheodore Ts'o * whatever reason), try to create them using 1309a107e5a3STheodore Ts'o * __block_write_begin. If this fails, 131097498956STheodore Ts'o * skip the page and move on. 
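 *
 * (A condensed sketch of that fallback, mirroring the code below:
 *
 *	if (!page_has_buffers(page) &&
 *	    __block_write_begin(page, 0, len, noalloc_get_block_write))
 *		goto skip_page;
 *
 * i.e. buffer heads are attached without allocating any blocks,
 * and on failure the page is simply unlocked and skipped.)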
131122208dedSAneesh Kumar K.V */ 1312cb20d518STheodore Ts'o if (!page_has_buffers(page)) { 1313a107e5a3STheodore Ts'o if (__block_write_begin(page, 0, len, 1314cb20d518STheodore Ts'o noalloc_get_block_write)) { 131597498956STheodore Ts'o skip_page: 1316cb20d518STheodore Ts'o unlock_page(page); 1317cb20d518STheodore Ts'o continue; 1318cb20d518STheodore Ts'o } 1319cb20d518STheodore Ts'o commit_write = 1; 1320cb20d518STheodore Ts'o } 13213ecdb3a1STheodore Ts'o 13223ecdb3a1STheodore Ts'o bh = page_bufs = page_buffers(page); 13233ecdb3a1STheodore Ts'o block_start = 0; 13243ecdb3a1STheodore Ts'o do { 13251de3e3dfSTheodore Ts'o if (!bh) 132697498956STheodore Ts'o goto skip_page; 13271de3e3dfSTheodore Ts'o if (map && (cur_logical >= map->m_lblk) && 13281de3e3dfSTheodore Ts'o (cur_logical <= (map->m_lblk + 13291de3e3dfSTheodore Ts'o (map->m_len - 1)))) { 13301de3e3dfSTheodore Ts'o if (buffer_delay(bh)) { 13311de3e3dfSTheodore Ts'o clear_buffer_delay(bh); 13321de3e3dfSTheodore Ts'o bh->b_blocknr = pblock; 13331de3e3dfSTheodore Ts'o } 13345356f261SAditya Kali if (buffer_da_mapped(bh)) 13355356f261SAditya Kali clear_buffer_da_mapped(bh); 13361de3e3dfSTheodore Ts'o if (buffer_unwritten(bh) || 13371de3e3dfSTheodore Ts'o buffer_mapped(bh)) 13381de3e3dfSTheodore Ts'o BUG_ON(bh->b_blocknr != pblock); 13391de3e3dfSTheodore Ts'o if (map->m_flags & EXT4_MAP_UNINIT) 13401de3e3dfSTheodore Ts'o set_buffer_uninit(bh); 13411de3e3dfSTheodore Ts'o clear_buffer_unwritten(bh); 13421de3e3dfSTheodore Ts'o } 13431de3e3dfSTheodore Ts'o 134413a79a47SYongqiang Yang /* 134513a79a47SYongqiang Yang * skip page if block allocation undone and 134613a79a47SYongqiang Yang * block is dirty 134713a79a47SYongqiang Yang */ 134813a79a47SYongqiang Yang if (ext4_bh_delay_or_unwritten(NULL, bh)) 134997498956STheodore Ts'o skip_page = 1; 13503ecdb3a1STheodore Ts'o bh = bh->b_this_page; 13513ecdb3a1STheodore Ts'o block_start += bh->b_size; 13521de3e3dfSTheodore Ts'o cur_logical++; 13531de3e3dfSTheodore Ts'o pblock++; 13541de3e3dfSTheodore Ts'o } while (bh != page_bufs); 13551de3e3dfSTheodore Ts'o 135697498956STheodore Ts'o if (skip_page) 135797498956STheodore Ts'o goto skip_page; 1358cb20d518STheodore Ts'o 1359cb20d518STheodore Ts'o if (commit_write) 1360cb20d518STheodore Ts'o /* mark the buffer_heads as dirty & uptodate */ 1361cb20d518STheodore Ts'o block_commit_write(page, 0, len); 1362cb20d518STheodore Ts'o 136397498956STheodore Ts'o clear_page_dirty_for_io(page); 1364bd2d0210STheodore Ts'o /* 1365bd2d0210STheodore Ts'o * Delalloc doesn't support data journalling, 1366bd2d0210STheodore Ts'o * but eventually maybe we'll lift this 1367bd2d0210STheodore Ts'o * restriction. 
1368bd2d0210STheodore Ts'o */ 1369bd2d0210STheodore Ts'o if (unlikely(journal_data && PageChecked(page))) 1370cb20d518STheodore Ts'o err = __ext4_journalled_writepage(page, len); 13711449032bSTheodore Ts'o else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) 1372bd2d0210STheodore Ts'o err = ext4_bio_write_page(&io_submit, page, 1373bd2d0210STheodore Ts'o len, mpd->wbc); 13749dd75f1fSTheodore Ts'o else if (buffer_uninit(page_bufs)) { 13759dd75f1fSTheodore Ts'o ext4_set_bh_endio(page_bufs, inode); 13769dd75f1fSTheodore Ts'o err = block_write_full_page_endio(page, 13779dd75f1fSTheodore Ts'o noalloc_get_block_write, 13789dd75f1fSTheodore Ts'o mpd->wbc, ext4_end_io_buffer_write); 13799dd75f1fSTheodore Ts'o } else 13801449032bSTheodore Ts'o err = block_write_full_page(page, 13811449032bSTheodore Ts'o noalloc_get_block_write, mpd->wbc); 1382cb20d518STheodore Ts'o 1383cb20d518STheodore Ts'o if (!err) 1384a1d6cc56SAneesh Kumar K.V mpd->pages_written++; 138564769240SAlex Tomas /* 138664769240SAlex Tomas * In error case, we have to continue because 138764769240SAlex Tomas * remaining pages are still locked 138864769240SAlex Tomas */ 138964769240SAlex Tomas if (ret == 0) 139064769240SAlex Tomas ret = err; 139164769240SAlex Tomas } 139264769240SAlex Tomas pagevec_release(&pvec); 139364769240SAlex Tomas } 1394bd2d0210STheodore Ts'o ext4_io_submit(&io_submit); 139564769240SAlex Tomas return ret; 139664769240SAlex Tomas } 139764769240SAlex Tomas 1398c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 1399c4a0c46eSAneesh Kumar K.V { 1400c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1401c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1402c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1403c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1404c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 1405c4a0c46eSAneesh Kumar K.V 1406c7f5938aSCurt Wohlgemuth index = mpd->first_page; 1407c7f5938aSCurt Wohlgemuth end = mpd->next_page - 1; 1408c4a0c46eSAneesh Kumar K.V while (index <= end) { 1409c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1410c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1411c4a0c46eSAneesh Kumar K.V break; 1412c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1413c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 14149b1d0998SJan Kara if (page->index > end) 1415c4a0c46eSAneesh Kumar K.V break; 1416c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1417c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1418c4a0c46eSAneesh Kumar K.V block_invalidatepage(page, 0); 1419c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 1420c4a0c46eSAneesh Kumar K.V unlock_page(page); 1421c4a0c46eSAneesh Kumar K.V } 14229b1d0998SJan Kara index = pvec.pages[nr_pages - 1]->index + 1; 14239b1d0998SJan Kara pagevec_release(&pvec); 1424c4a0c46eSAneesh Kumar K.V } 1425c4a0c46eSAneesh Kumar K.V return; 1426c4a0c46eSAneesh Kumar K.V } 1427c4a0c46eSAneesh Kumar K.V 1428df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1429df22291fSAneesh Kumar K.V { 1430df22291fSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 143192b97816STheodore Ts'o struct super_block *sb = inode->i_sb; 143292b97816STheodore Ts'o 143392b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld", 14345dee5437STheodore Ts'o EXT4_C2B(EXT4_SB(inode->i_sb), 14355dee5437STheodore Ts'o ext4_count_free_clusters(inode->i_sb))); 143692b97816STheodore Ts'o ext4_msg(sb, 
KERN_CRIT, "Free/Dirty block details"); 143792b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "free_blocks=%lld", 143857042651STheodore Ts'o (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 143957042651STheodore Ts'o percpu_counter_sum(&sbi->s_freeclusters_counter))); 144092b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld", 14417b415bf6SAditya Kali (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 14427b415bf6SAditya Kali percpu_counter_sum(&sbi->s_dirtyclusters_counter))); 144392b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "Block reservation details"); 144492b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u", 1445df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks); 144692b97816STheodore Ts'o ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u", 1447df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_meta_blocks); 1448df22291fSAneesh Kumar K.V return; 1449df22291fSAneesh Kumar K.V } 1450df22291fSAneesh Kumar K.V 1451b920c755STheodore Ts'o /* 14525a87b7a5STheodore Ts'o * mpage_da_map_and_submit - go through given space, map them 14535a87b7a5STheodore Ts'o * if necessary, and then submit them for I/O 145464769240SAlex Tomas * 14558dc207c0STheodore Ts'o * @mpd - bh describing space 145664769240SAlex Tomas * 145764769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 145864769240SAlex Tomas * 145964769240SAlex Tomas */ 14605a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd) 146164769240SAlex Tomas { 14622ac3b6e0STheodore Ts'o int err, blks, get_blocks_flags; 14631de3e3dfSTheodore Ts'o struct ext4_map_blocks map, *mapp = NULL; 14642fa3cdfbSTheodore Ts'o sector_t next = mpd->b_blocknr; 14652fa3cdfbSTheodore Ts'o unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 14662fa3cdfbSTheodore Ts'o loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 14672fa3cdfbSTheodore Ts'o handle_t *handle = NULL; 146864769240SAlex Tomas 146964769240SAlex Tomas /* 14705a87b7a5STheodore Ts'o * If the blocks are mapped already, or we couldn't accumulate 14715a87b7a5STheodore Ts'o * any blocks, then proceed immediately to the submission stage. 147264769240SAlex Tomas */ 14735a87b7a5STheodore Ts'o if ((mpd->b_size == 0) || 14745a87b7a5STheodore Ts'o ((mpd->b_state & (1 << BH_Mapped)) && 147529fa89d0SAneesh Kumar K.V !(mpd->b_state & (1 << BH_Delay)) && 14765a87b7a5STheodore Ts'o !(mpd->b_state & (1 << BH_Unwritten)))) 14775a87b7a5STheodore Ts'o goto submit_io; 14782fa3cdfbSTheodore Ts'o 14792fa3cdfbSTheodore Ts'o handle = ext4_journal_current_handle(); 14802fa3cdfbSTheodore Ts'o BUG_ON(!handle); 14812fa3cdfbSTheodore Ts'o 148279ffab34SAneesh Kumar K.V /* 148379e83036SEric Sandeen * Call ext4_map_blocks() to allocate any delayed allocation 14842ac3b6e0STheodore Ts'o * blocks, or to convert an uninitialized extent to be 14852ac3b6e0STheodore Ts'o * initialized (in the case where we have written into 14862ac3b6e0STheodore Ts'o * one or more preallocated blocks). 14872ac3b6e0STheodore Ts'o * 14882ac3b6e0STheodore Ts'o * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 14892ac3b6e0STheodore Ts'o * indicate that we are on the delayed allocation path. This 14902ac3b6e0STheodore Ts'o * affects functions in many different parts of the allocation 14912ac3b6e0STheodore Ts'o * call path. 
This flag exists primarily because we don't 149279e83036SEric Sandeen * want to change *many* call functions, so ext4_map_blocks() 1493f2321097STheodore Ts'o * will set the EXT4_STATE_DELALLOC_RESERVED flag once the 14942ac3b6e0STheodore Ts'o * inode's allocation semaphore is taken. 14952ac3b6e0STheodore Ts'o * 14962ac3b6e0STheodore Ts'o * If the blocks in questions were delalloc blocks, set 14972ac3b6e0STheodore Ts'o * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 14982ac3b6e0STheodore Ts'o * variables are updated after the blocks have been allocated. 149979ffab34SAneesh Kumar K.V */ 15002ed88685STheodore Ts'o map.m_lblk = next; 15012ed88685STheodore Ts'o map.m_len = max_blocks; 15021296cc85SAneesh Kumar K.V get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 1503744692dcSJiaying Zhang if (ext4_should_dioread_nolock(mpd->inode)) 1504744692dcSJiaying Zhang get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 15052ac3b6e0STheodore Ts'o if (mpd->b_state & (1 << BH_Delay)) 15061296cc85SAneesh Kumar K.V get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 15071296cc85SAneesh Kumar K.V 15082ed88685STheodore Ts'o blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); 15092fa3cdfbSTheodore Ts'o if (blks < 0) { 1510e3570639SEric Sandeen struct super_block *sb = mpd->inode->i_sb; 1511e3570639SEric Sandeen 15122fa3cdfbSTheodore Ts'o err = blks; 1513ed5bde0bSTheodore Ts'o /* 15145a87b7a5STheodore Ts'o * If get block returns EAGAIN or ENOSPC and there 151597498956STheodore Ts'o * appears to be free blocks we will just let 151697498956STheodore Ts'o * mpage_da_submit_io() unlock all of the pages. 1517c4a0c46eSAneesh Kumar K.V */ 1518c4a0c46eSAneesh Kumar K.V if (err == -EAGAIN) 15195a87b7a5STheodore Ts'o goto submit_io; 1520df22291fSAneesh Kumar K.V 15215dee5437STheodore Ts'o if (err == -ENOSPC && ext4_count_free_clusters(sb)) { 1522df22291fSAneesh Kumar K.V mpd->retval = err; 15235a87b7a5STheodore Ts'o goto submit_io; 1524df22291fSAneesh Kumar K.V } 1525df22291fSAneesh Kumar K.V 1526c4a0c46eSAneesh Kumar K.V /* 1527ed5bde0bSTheodore Ts'o * get block failure will cause us to loop in 1528ed5bde0bSTheodore Ts'o * writepages, because a_ops->writepage won't be able 1529ed5bde0bSTheodore Ts'o * to make progress. The page will be redirtied by 1530ed5bde0bSTheodore Ts'o * writepage and writepages will again try to write 1531ed5bde0bSTheodore Ts'o * the same. 1532c4a0c46eSAneesh Kumar K.V */ 1533e3570639SEric Sandeen if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { 1534e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1535e3570639SEric Sandeen "delayed block allocation failed for inode %lu " 1536e3570639SEric Sandeen "at logical offset %llu with max blocks %zd " 1537e3570639SEric Sandeen "with error %d", mpd->inode->i_ino, 1538c4a0c46eSAneesh Kumar K.V (unsigned long long) next, 15398dc207c0STheodore Ts'o mpd->b_size >> mpd->inode->i_blkbits, err); 1540e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1541e3570639SEric Sandeen "This should not happen!! 
Data will be lost\n"); 1542e3570639SEric Sandeen if (err == -ENOSPC) 1543df22291fSAneesh Kumar K.V ext4_print_free_blocks(mpd->inode); 1544030ba6bcSAneesh Kumar K.V } 15452fa3cdfbSTheodore Ts'o /* invalidate all the pages */ 1546c7f5938aSCurt Wohlgemuth ext4_da_block_invalidatepages(mpd); 1547e0fd9b90SCurt Wohlgemuth 1548e0fd9b90SCurt Wohlgemuth /* Mark this page range as having been completed */ 1549e0fd9b90SCurt Wohlgemuth mpd->io_done = 1; 15505a87b7a5STheodore Ts'o return; 1551c4a0c46eSAneesh Kumar K.V } 15522fa3cdfbSTheodore Ts'o BUG_ON(blks == 0); 15532fa3cdfbSTheodore Ts'o 15541de3e3dfSTheodore Ts'o mapp = ↦ 15552ed88685STheodore Ts'o if (map.m_flags & EXT4_MAP_NEW) { 15562ed88685STheodore Ts'o struct block_device *bdev = mpd->inode->i_sb->s_bdev; 15572ed88685STheodore Ts'o int i; 155864769240SAlex Tomas 15592ed88685STheodore Ts'o for (i = 0; i < map.m_len; i++) 15602ed88685STheodore Ts'o unmap_underlying_metadata(bdev, map.m_pblk + i); 156164769240SAlex Tomas 15622fa3cdfbSTheodore Ts'o if (ext4_should_order_data(mpd->inode)) { 15632fa3cdfbSTheodore Ts'o err = ext4_jbd2_file_inode(handle, mpd->inode); 15648de49e67SKazuya Mio if (err) { 1565decbd919STheodore Ts'o /* Only if the journal is aborted */ 15668de49e67SKazuya Mio mpd->retval = err; 15678de49e67SKazuya Mio goto submit_io; 15688de49e67SKazuya Mio } 15692fa3cdfbSTheodore Ts'o } 15702fa3cdfbSTheodore Ts'o } 15712fa3cdfbSTheodore Ts'o 15722fa3cdfbSTheodore Ts'o /* 157303f5d8bcSJan Kara * Update on-disk size along with block allocation. 15742fa3cdfbSTheodore Ts'o */ 15752fa3cdfbSTheodore Ts'o disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 15762fa3cdfbSTheodore Ts'o if (disksize > i_size_read(mpd->inode)) 15772fa3cdfbSTheodore Ts'o disksize = i_size_read(mpd->inode); 15782fa3cdfbSTheodore Ts'o if (disksize > EXT4_I(mpd->inode)->i_disksize) { 15792fa3cdfbSTheodore Ts'o ext4_update_i_disksize(mpd->inode, disksize); 15805a87b7a5STheodore Ts'o err = ext4_mark_inode_dirty(handle, mpd->inode); 15815a87b7a5STheodore Ts'o if (err) 15825a87b7a5STheodore Ts'o ext4_error(mpd->inode->i_sb, 15835a87b7a5STheodore Ts'o "Failed to mark inode %lu dirty", 15845a87b7a5STheodore Ts'o mpd->inode->i_ino); 15852fa3cdfbSTheodore Ts'o } 15862fa3cdfbSTheodore Ts'o 15875a87b7a5STheodore Ts'o submit_io: 15881de3e3dfSTheodore Ts'o mpage_da_submit_io(mpd, mapp); 15895a87b7a5STheodore Ts'o mpd->io_done = 1; 159064769240SAlex Tomas } 159164769240SAlex Tomas 1592bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 1593bf068ee2SAneesh Kumar K.V (1 << BH_Delay) | (1 << BH_Unwritten)) 159464769240SAlex Tomas 159564769240SAlex Tomas /* 159664769240SAlex Tomas * mpage_add_bh_to_extent - try to add one more block to extent of blocks 159764769240SAlex Tomas * 159864769240SAlex Tomas * @mpd->lbh - extent of blocks 159964769240SAlex Tomas * @logical - logical number of the block in the file 160064769240SAlex Tomas * @bh - bh of the block (used to access block's state) 160164769240SAlex Tomas * 160264769240SAlex Tomas * the function is used to collect contig. 
blocks in the same state
160364769240SAlex Tomas  */
160464769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
16058dc207c0STheodore Ts'o sector_t logical, size_t b_size,
16068dc207c0STheodore Ts'o unsigned long b_state)
160764769240SAlex Tomas {
160864769240SAlex Tomas sector_t next;
16098dc207c0STheodore Ts'o int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
161064769240SAlex Tomas 
1611c445e3e0SEric Sandeen /*
1612c445e3e0SEric Sandeen  * XXX Don't go larger than mballoc is willing to allocate.
1613c445e3e0SEric Sandeen  * This is a stopgap solution. We eventually need to fold
1614c445e3e0SEric Sandeen  * mpage_da_submit_io() into this function and then call
161579e83036SEric Sandeen  * ext4_map_blocks() multiple times in a loop.
1616c445e3e0SEric Sandeen  */
1617c445e3e0SEric Sandeen if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
1618c445e3e0SEric Sandeen goto flush_it;
1619c445e3e0SEric Sandeen 
1620525f4ed8SMingming Cao /* check if the reserved journal credits might overflow */
162112e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
1622525f4ed8SMingming Cao if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1623525f4ed8SMingming Cao /*
1624525f4ed8SMingming Cao  * With non-extent format we are limited by the journal
1625525f4ed8SMingming Cao  * credit available.  Total credit needed to insert
1626525f4ed8SMingming Cao  * nrblocks contiguous blocks is dependent on the
1627525f4ed8SMingming Cao  * nrblocks.  So limit nrblocks.
1628525f4ed8SMingming Cao  */
1629525f4ed8SMingming Cao goto flush_it;
1630525f4ed8SMingming Cao } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
1631525f4ed8SMingming Cao EXT4_MAX_TRANS_DATA) {
1632525f4ed8SMingming Cao /*
1633525f4ed8SMingming Cao  * Adding the new buffer_head would make it cross the
1634525f4ed8SMingming Cao  * allowed limit for which we have journal credit
1635525f4ed8SMingming Cao  * reserved.  So limit the new bh->b_size
1636525f4ed8SMingming Cao  */
1637525f4ed8SMingming Cao b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
1638525f4ed8SMingming Cao mpd->inode->i_blkbits;
1639525f4ed8SMingming Cao /* we will do mpage_da_submit_io in the next loop */
1640525f4ed8SMingming Cao }
1641525f4ed8SMingming Cao }
164264769240SAlex Tomas /*
164364769240SAlex Tomas  * First block in the extent
164464769240SAlex Tomas  */
16458dc207c0STheodore Ts'o if (mpd->b_size == 0) {
16468dc207c0STheodore Ts'o mpd->b_blocknr = logical;
16478dc207c0STheodore Ts'o mpd->b_size = b_size;
16488dc207c0STheodore Ts'o mpd->b_state = b_state & BH_FLAGS;
164964769240SAlex Tomas return;
165064769240SAlex Tomas }
165164769240SAlex Tomas 
16528dc207c0STheodore Ts'o next = mpd->b_blocknr + nrblocks;
165364769240SAlex Tomas /*
165464769240SAlex Tomas  * Can we merge the block to our big extent?
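 *
 * (Sketch of the merge rule with made-up numbers: if the current
 * extent starts at mpd->b_blocknr == 200 and nrblocks == 8, only a
 * buffer at logical block 208 whose BH_FLAGS state equals
 * mpd->b_state can be appended; any other logical block or state
 * takes the flush_it path below.)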
165564769240SAlex Tomas  */
16568dc207c0STheodore Ts'o if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
16578dc207c0STheodore Ts'o mpd->b_size += b_size;
165864769240SAlex Tomas return;
165964769240SAlex Tomas }
166064769240SAlex Tomas 
1661525f4ed8SMingming Cao flush_it:
166264769240SAlex Tomas /*
166364769240SAlex Tomas  * We couldn't merge the block to our extent, so we
166464769240SAlex Tomas  * need to flush current extent and start new one
166564769240SAlex Tomas  */
16665a87b7a5STheodore Ts'o mpage_da_map_and_submit(mpd);
1667a1d6cc56SAneesh Kumar K.V return;
166864769240SAlex Tomas }
166964769240SAlex Tomas 
1670c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
167129fa89d0SAneesh Kumar K.V {
1672c364b22cSAneesh Kumar K.V return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
167329fa89d0SAneesh Kumar K.V }
167429fa89d0SAneesh Kumar K.V 
167564769240SAlex Tomas /*
16765356f261SAditya Kali  * This function grabs code from the very beginning of
16775356f261SAditya Kali  * ext4_map_blocks, but assumes that the caller is from delayed write
16785356f261SAditya Kali  * time. This function looks up the requested blocks and sets the
16795356f261SAditya Kali  * buffer delay bit under the protection of i_data_sem.
16805356f261SAditya Kali  */
16815356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
16825356f261SAditya Kali struct ext4_map_blocks *map,
16835356f261SAditya Kali struct buffer_head *bh)
16845356f261SAditya Kali {
16855356f261SAditya Kali int retval;
16865356f261SAditya Kali sector_t invalid_block = ~((sector_t) 0xffff);
16875356f261SAditya Kali 
16885356f261SAditya Kali if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
16895356f261SAditya Kali invalid_block = ~0;
16905356f261SAditya Kali 
16915356f261SAditya Kali map->m_flags = 0;
16925356f261SAditya Kali ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
16935356f261SAditya Kali "logical block %lu\n", inode->i_ino, map->m_len,
16945356f261SAditya Kali (unsigned long) map->m_lblk);
16955356f261SAditya Kali /*
16965356f261SAditya Kali  * Try to see if we can get the block without requesting a new
16975356f261SAditya Kali  * file system block.
16985356f261SAditya Kali  */
16995356f261SAditya Kali down_read((&EXT4_I(inode)->i_data_sem));
17005356f261SAditya Kali if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
17015356f261SAditya Kali retval = ext4_ext_map_blocks(NULL, inode, map, 0);
17025356f261SAditya Kali else
17035356f261SAditya Kali retval = ext4_ind_map_blocks(NULL, inode, map, 0);
17045356f261SAditya Kali 
17055356f261SAditya Kali if (retval == 0) {
17065356f261SAditya Kali /*
17075356f261SAditya Kali  * XXX: __block_prepare_write() unmaps passed block,
17085356f261SAditya Kali  * is it OK?
17095356f261SAditya Kali  */
17105356f261SAditya Kali /* If the block was allocated from a previously allocated cluster,
17115356f261SAditya Kali  * then we don't need to reserve it again. */
17125356f261SAditya Kali if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
17135356f261SAditya Kali retval = ext4_da_reserve_space(inode, iblock);
17145356f261SAditya Kali if (retval)
17155356f261SAditya Kali /* not enough space to reserve */
17165356f261SAditya Kali goto out_unlock;
17175356f261SAditya Kali }
17185356f261SAditya Kali 
17195356f261SAditya Kali /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
17205356f261SAditya Kali  * and it should not appear on the bh->b_state.
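 *
 * (Illustrative outcome rather than new behaviour: for a hole that
 * needed a fresh reservation, the buffer_head below is mapped to
 * invalid_block with BH_New and BH_Delay set, which is how a later
 * writeback pass recognises it as delayed-allocation state.)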
17215356f261SAditya Kali  */
17225356f261SAditya Kali map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
17235356f261SAditya Kali 
17245356f261SAditya Kali map_bh(bh, inode->i_sb, invalid_block);
17255356f261SAditya Kali set_buffer_new(bh);
17265356f261SAditya Kali set_buffer_delay(bh);
17275356f261SAditya Kali }
17285356f261SAditya Kali 
17295356f261SAditya Kali out_unlock:
17305356f261SAditya Kali up_read((&EXT4_I(inode)->i_data_sem));
17315356f261SAditya Kali 
17325356f261SAditya Kali return retval;
17335356f261SAditya Kali }
17345356f261SAditya Kali 
17355356f261SAditya Kali /*
1736b920c755STheodore Ts'o  * This is a special get_blocks_t callback which is used by
1737b920c755STheodore Ts'o  * ext4_da_write_begin().  It will either return mapped block or
1738b920c755STheodore Ts'o  * reserve space for a single block.
173929fa89d0SAneesh Kumar K.V  *
174029fa89d0SAneesh Kumar K.V  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
174129fa89d0SAneesh Kumar K.V  * We also have b_blocknr = -1 and b_bdev initialized properly
174229fa89d0SAneesh Kumar K.V  *
174329fa89d0SAneesh Kumar K.V  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
174429fa89d0SAneesh Kumar K.V  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
174529fa89d0SAneesh Kumar K.V  * initialized properly.
174664769240SAlex Tomas  */
174764769240SAlex Tomas static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
17482ed88685STheodore Ts'o struct buffer_head *bh, int create)
174964769240SAlex Tomas {
17502ed88685STheodore Ts'o struct ext4_map_blocks map;
175164769240SAlex Tomas int ret = 0;
175264769240SAlex Tomas 
175364769240SAlex Tomas BUG_ON(create == 0);
17542ed88685STheodore Ts'o BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
17552ed88685STheodore Ts'o 
17562ed88685STheodore Ts'o map.m_lblk = iblock;
17572ed88685STheodore Ts'o map.m_len = 1;
175864769240SAlex Tomas 
175964769240SAlex Tomas /*
176064769240SAlex Tomas  * first, we need to know whether the block is allocated already;
176164769240SAlex Tomas  * preallocated blocks are unmapped but should be treated
176264769240SAlex Tomas  * the same as allocated blocks.
176364769240SAlex Tomas  */
17645356f261SAditya Kali ret = ext4_da_map_blocks(inode, iblock, &map, bh);
17655356f261SAditya Kali if (ret <= 0)
17662ed88685STheodore Ts'o return ret;
176764769240SAlex Tomas 
17682ed88685STheodore Ts'o map_bh(bh, inode->i_sb, map.m_pblk);
17692ed88685STheodore Ts'o bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
17702ed88685STheodore Ts'o 
17712ed88685STheodore Ts'o if (buffer_unwritten(bh)) {
17722ed88685STheodore Ts'o /* A delayed write to unwritten bh should be marked
17732ed88685STheodore Ts'o  * new and mapped.  Mapped ensures that we don't do
17742ed88685STheodore Ts'o  * get_block multiple times when we write to the same
17752ed88685STheodore Ts'o  * offset and new ensures that we do proper zero out
17762ed88685STheodore Ts'o  * for partial write.
17772ed88685STheodore Ts'o  */
17782ed88685STheodore Ts'o set_buffer_new(bh);
1779c8205636STheodore Ts'o set_buffer_mapped(bh);
17802ed88685STheodore Ts'o }
17812ed88685STheodore Ts'o return 0;
178264769240SAlex Tomas }
178361628a3fSMingming Cao 
1784b920c755STheodore Ts'o /*
1785b920c755STheodore Ts'o  * This function is used as a standard get_block_t callback function
1786b920c755STheodore Ts'o  * when there is no desire to allocate any blocks.  It is used as a
1787ebdec241SChristoph Hellwig  * callback function for block_write_begin() and block_write_full_page().
1788206f7ab4SChristoph Hellwig  * These functions should only try to map a single block at a time.
1789b920c755STheodore Ts'o  *
1790b920c755STheodore Ts'o  * Since this function doesn't do block allocations even if the caller
1791b920c755STheodore Ts'o  * requests it by passing in create=1, it is critically important that
1792b920c755STheodore Ts'o  * any caller checks to make sure that any buffer heads returned
1793b920c755STheodore Ts'o  * by this function are either all already mapped or marked for
1794206f7ab4SChristoph Hellwig  * delayed allocation before calling block_write_full_page().  Otherwise,
1795206f7ab4SChristoph Hellwig  * b_blocknr could be left uninitialized, and the page write functions will
1796206f7ab4SChristoph Hellwig  * be taken by surprise.
1797b920c755STheodore Ts'o  */
1798b920c755STheodore Ts'o static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
1799f0e6c985SAneesh Kumar K.V struct buffer_head *bh_result, int create)
1800f0e6c985SAneesh Kumar K.V {
1801a2dc52b5STheodore Ts'o BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
18022ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result, 0);
180361628a3fSMingming Cao }
180461628a3fSMingming Cao 
180562e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh)
180662e086beSAneesh Kumar K.V {
180762e086beSAneesh Kumar K.V get_bh(bh);
180862e086beSAneesh Kumar K.V return 0;
180962e086beSAneesh Kumar K.V }
181062e086beSAneesh Kumar K.V 
181162e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh)
181262e086beSAneesh Kumar K.V {
181362e086beSAneesh Kumar K.V put_bh(bh);
181462e086beSAneesh Kumar K.V return 0;
181562e086beSAneesh Kumar K.V }
181662e086beSAneesh Kumar K.V 
181762e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page,
181862e086beSAneesh Kumar K.V unsigned int len)
181962e086beSAneesh Kumar K.V {
182062e086beSAneesh Kumar K.V struct address_space *mapping = page->mapping;
182162e086beSAneesh Kumar K.V struct inode *inode = mapping->host;
182262e086beSAneesh Kumar K.V struct buffer_head *page_bufs;
182362e086beSAneesh Kumar K.V handle_t *handle = NULL;
182462e086beSAneesh Kumar K.V int ret = 0;
182562e086beSAneesh Kumar K.V int err;
182662e086beSAneesh Kumar K.V 
1827cb20d518STheodore Ts'o ClearPageChecked(page);
182862e086beSAneesh Kumar K.V page_bufs = page_buffers(page);
182962e086beSAneesh Kumar K.V BUG_ON(!page_bufs);
183062e086beSAneesh Kumar K.V walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
183162e086beSAneesh Kumar K.V /* As soon as we unlock the page, it can go away, but we have
183262e086beSAneesh Kumar K.V  * references to buffers so we are safe */
183362e086beSAneesh Kumar K.V unlock_page(page);
183462e086beSAneesh Kumar K.V 
183562e086beSAneesh Kumar K.V handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
183662e086beSAneesh Kumar K.V if (IS_ERR(handle)) {
183762e086beSAneesh Kumar K.V ret = PTR_ERR(handle);
183862e086beSAneesh Kumar K.V goto out;
183962e086beSAneesh Kumar K.V }
184062e086beSAneesh Kumar K.V 
1841441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle));
1842441c8508SCurt Wohlgemuth 
184362e086beSAneesh Kumar K.V ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
184462e086beSAneesh Kumar K.V do_journal_get_write_access);
184562e086beSAneesh Kumar K.V 
184662e086beSAneesh Kumar K.V err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
184762e086beSAneesh Kumar K.V write_end_fn);
184862e086beSAneesh Kumar K.V if (ret == 0)
184962e086beSAneesh Kumar K.V ret = err;
18502d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
185162e086beSAneesh Kumar K.V err = ext4_journal_stop(handle);
185262e086beSAneesh Kumar K.V if (!ret)
185362e086beSAneesh Kumar K.V ret = err;
185462e086beSAneesh Kumar K.V 
185562e086beSAneesh Kumar K.V walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
185619f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA);
185762e086beSAneesh Kumar K.V out:
185862e086beSAneesh Kumar K.V return ret;
185962e086beSAneesh Kumar K.V }
186062e086beSAneesh Kumar K.V 
1861744692dcSJiaying Zhang static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
1862744692dcSJiaying Zhang static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1863744692dcSJiaying Zhang 
186461628a3fSMingming Cao /*
186543ce1d23SAneesh Kumar K.V  * Note that we don't need to start a transaction unless we're journaling data
186643ce1d23SAneesh Kumar K.V  * because we should have holes filled from ext4_page_mkwrite(). We even don't
186743ce1d23SAneesh Kumar K.V  * need to file the inode to the transaction's list in ordered mode because if
186843ce1d23SAneesh Kumar K.V  * we are writing back data added by write(), the inode is already there and if
186943ce1d23SAneesh Kumar K.V  * we are writing back data modified via mmap(), no one guarantees in which
187043ce1d23SAneesh Kumar K.V  * transaction the data will hit the disk. In case we are journaling data, we
187143ce1d23SAneesh Kumar K.V  * cannot start transaction directly because transaction start ranks above page
187243ce1d23SAneesh Kumar K.V  * lock so we have to do some magic.
187343ce1d23SAneesh Kumar K.V  *
1874b920c755STheodore Ts'o  * This function can get called via...
1875b920c755STheodore Ts'o  *   - ext4_da_writepages after taking page lock (have journal handle)
1876b920c755STheodore Ts'o  *   - journal_submit_inode_data_buffers (no journal handle)
1877b920c755STheodore Ts'o  *   - shrink_page_list via pdflush (no journal handle)
1878b920c755STheodore Ts'o  *   - grab_page_cache when doing write_begin (have journal handle)
187943ce1d23SAneesh Kumar K.V  *
188043ce1d23SAneesh Kumar K.V  * We don't do any block allocation in this function. If we have a page with
188143ce1d23SAneesh Kumar K.V  * multiple blocks we need to write those buffer_heads that are mapped. This
188243ce1d23SAneesh Kumar K.V  * is important for mmapped writes.  So if, with blocksize 1K, we do
188343ce1d23SAneesh Kumar K.V  *   truncate(f, 1024);
188443ce1d23SAneesh Kumar K.V  *   a = mmap(f, 0, 4096);
188543ce1d23SAneesh Kumar K.V  *   a[0] = 'a';
188643ce1d23SAneesh Kumar K.V  *   truncate(f, 4096);
188743ce1d23SAneesh Kumar K.V  * then we have in the page the first buffer_head mapped via the page_mkwrite
188890802ed9SPaul Bolle  * callback, but the other buffer_heads would be unmapped but dirty (dirtied
188943ce1d23SAneesh Kumar K.V  * via do_wp_page). So writepage should write the first block. If we modify
189043ce1d23SAneesh Kumar K.V  * the mmap area beyond 1024 we will again get a page_fault and the
189143ce1d23SAneesh Kumar K.V  * page_mkwrite callback will do the block allocation and mark the
189243ce1d23SAneesh Kumar K.V  * buffer_heads mapped.
189343ce1d23SAneesh Kumar K.V  *
189443ce1d23SAneesh Kumar K.V  * We redirty the page if we have any buffer_heads that are either delayed or
189543ce1d23SAneesh Kumar K.V  * unwritten in the page.
189643ce1d23SAneesh Kumar K.V  *
189743ce1d23SAneesh Kumar K.V  * We can get recursively called as shown below.
189843ce1d23SAneesh Kumar K.V  *
189943ce1d23SAneesh Kumar K.V  * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
190043ce1d23SAneesh Kumar K.V  * ext4_writepage()
190143ce1d23SAneesh Kumar K.V  *
190243ce1d23SAneesh Kumar K.V  * But since we don't do any block allocation we should not deadlock.
190343ce1d23SAneesh Kumar K.V  * The page also has the dirty flag cleared so we don't get recursive page_lock.
190461628a3fSMingming Cao  */
190543ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page,
190664769240SAlex Tomas struct writeback_control *wbc)
190764769240SAlex Tomas {
1908a42afc5fSTheodore Ts'o int ret = 0, commit_write = 0;
190961628a3fSMingming Cao loff_t size;
1910498e5f24STheodore Ts'o unsigned int len;
1911744692dcSJiaying Zhang struct buffer_head *page_bufs = NULL;
191261628a3fSMingming Cao struct inode *inode = page->mapping->host;
191364769240SAlex Tomas 
1914a9c667f8SLukas Czerner trace_ext4_writepage(page);
191561628a3fSMingming Cao size = i_size_read(inode);
191661628a3fSMingming Cao if (page->index == size >> PAGE_CACHE_SHIFT)
191761628a3fSMingming Cao len = size & ~PAGE_CACHE_MASK;
191861628a3fSMingming Cao else
191961628a3fSMingming Cao len = PAGE_CACHE_SIZE;
192061628a3fSMingming Cao 
1921a42afc5fSTheodore Ts'o /*
1922a42afc5fSTheodore Ts'o  * If the page does not have buffers (for whatever reason),
1923a107e5a3STheodore Ts'o  * try to create them using __block_write_begin.  If this
1924a42afc5fSTheodore Ts'o  * fails, redirty the page and move on.
1925a42afc5fSTheodore Ts'o  */
1926b1142e8fSTheodore Ts'o if (!page_has_buffers(page)) {
1927a107e5a3STheodore Ts'o if (__block_write_begin(page, 0, len,
1928a42afc5fSTheodore Ts'o noalloc_get_block_write)) {
1929a42afc5fSTheodore Ts'o redirty_page:
1930a42afc5fSTheodore Ts'o redirty_page_for_writepage(wbc, page);
1931a42afc5fSTheodore Ts'o unlock_page(page);
1932a42afc5fSTheodore Ts'o return 0;
1933a42afc5fSTheodore Ts'o }
1934a42afc5fSTheodore Ts'o commit_write = 1;
1935a42afc5fSTheodore Ts'o }
1936f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page);
1937f0e6c985SAneesh Kumar K.V if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
1938c364b22cSAneesh Kumar K.V ext4_bh_delay_or_unwritten)) {
193961628a3fSMingming Cao /*
1940b1142e8fSTheodore Ts'o  * We don't want to do block allocation, so redirty
1941b1142e8fSTheodore Ts'o  * the page and return.  We may reach here when we do
1942b1142e8fSTheodore Ts'o  * a journal commit via journal_submit_inode_data_buffers.
1943966dbde2SMel Gorman  * We can also reach here via shrink_page_list but it
1944966dbde2SMel Gorman  * should never be for direct reclaim so warn if that
1945966dbde2SMel Gorman  * happens
1946f0e6c985SAneesh Kumar K.V  */
1947966dbde2SMel Gorman WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1948966dbde2SMel Gorman PF_MEMALLOC);
1949a42afc5fSTheodore Ts'o goto redirty_page;
1950f0e6c985SAneesh Kumar K.V }
1951a42afc5fSTheodore Ts'o if (commit_write)
1952ed9b3e33SAneesh Kumar K.V /* now mark the buffer_heads as dirty and uptodate */
1953b767e78aSAneesh Kumar K.V block_commit_write(page, 0, len);
195464769240SAlex Tomas 
1955cb20d518STheodore Ts'o if (PageChecked(page) && ext4_should_journal_data(inode))
195643ce1d23SAneesh Kumar K.V /*
195743ce1d23SAneesh Kumar K.V  * It's mmapped pagecache.  Add buffers and journal it.  There
195843ce1d23SAneesh Kumar K.V  * doesn't seem much point in redirtying the page here.
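 *
 * (Sketch of what that path does, per __ext4_journalled_writepage()
 * above: a fresh handle is started, the page's buffers are walked
 * with do_journal_get_write_access() and write_end_fn(), and the
 * inode is flagged EXT4_STATE_JDATA, so the data goes through the
 * journal instead of being written in place.)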
195943ce1d23SAneesh Kumar K.V  */
19603f0ca309SWu Fengguang return __ext4_journalled_writepage(page, len);
196143ce1d23SAneesh Kumar K.V 
1962a42afc5fSTheodore Ts'o if (buffer_uninit(page_bufs)) {
1963744692dcSJiaying Zhang ext4_set_bh_endio(page_bufs, inode);
1964744692dcSJiaying Zhang ret = block_write_full_page_endio(page, noalloc_get_block_write,
1965744692dcSJiaying Zhang wbc, ext4_end_io_buffer_write);
1966744692dcSJiaying Zhang } else
1967b920c755STheodore Ts'o ret = block_write_full_page(page, noalloc_get_block_write,
1968f0e6c985SAneesh Kumar K.V wbc);
196964769240SAlex Tomas 
197064769240SAlex Tomas return ret;
197164769240SAlex Tomas }
197264769240SAlex Tomas 
197361628a3fSMingming Cao /*
1974525f4ed8SMingming Cao  * This is called via ext4_da_writepages() to
197525985edcSLucas De Marchi  * calculate the total number of credits to reserve to fit
1976525f4ed8SMingming Cao  * a single extent allocation into a single transaction;
1977525f4ed8SMingming Cao  * ext4_da_writepages() will loop calling this before
1978525f4ed8SMingming Cao  * the block allocation.
197961628a3fSMingming Cao  */
1980525f4ed8SMingming Cao 
1981525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode)
1982525f4ed8SMingming Cao {
1983525f4ed8SMingming Cao int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
1984525f4ed8SMingming Cao 
1985525f4ed8SMingming Cao /*
1986525f4ed8SMingming Cao  * With non-extent format the journal credit needed to
1987525f4ed8SMingming Cao  * insert nrblocks contiguous blocks is dependent on the
1988525f4ed8SMingming Cao  * number of contiguous blocks.  So we will limit the
1989525f4ed8SMingming Cao  * number of contiguous blocks to a sane value.
1990525f4ed8SMingming Cao  */
199112e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
1992525f4ed8SMingming Cao (max_blocks > EXT4_MAX_TRANS_DATA))
1993525f4ed8SMingming Cao max_blocks = EXT4_MAX_TRANS_DATA;
1994525f4ed8SMingming Cao 
1995525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, max_blocks);
1996525f4ed8SMingming Cao }
199761628a3fSMingming Cao 
19988e48dcfbSTheodore Ts'o /*
19998e48dcfbSTheodore Ts'o  * write_cache_pages_da - walk the list of dirty pages of the given
20008eb9e5ceSTheodore Ts'o  * address space and accumulate pages that need writing, and call
2001168fc022STheodore Ts'o  * mpage_da_map_and_submit to map a single contiguous memory region
2002168fc022STheodore Ts'o  * and then write them.
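 *
 * (Rough shape of the walk, as a sketch of the code below: pages are
 * looked up in PAGEVEC_SIZE batches under PAGECACHE_TAG_DIRTY or
 * PAGECACHE_TAG_TOWRITE, each page's dirty delayed/unwritten buffers
 * are folded into mpd with mpage_add_bh_to_extent(), and once a page
 * cannot be merged the accumulated extent is pushed out through
 * mpage_da_map_and_submit().)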
20038e48dcfbSTheodore Ts'o */ 20048e48dcfbSTheodore Ts'o static int write_cache_pages_da(struct address_space *mapping, 20058e48dcfbSTheodore Ts'o struct writeback_control *wbc, 200672f84e65SEric Sandeen struct mpage_da_data *mpd, 200772f84e65SEric Sandeen pgoff_t *done_index) 20088e48dcfbSTheodore Ts'o { 20098eb9e5ceSTheodore Ts'o struct buffer_head *bh, *head; 2010168fc022STheodore Ts'o struct inode *inode = mapping->host; 20118e48dcfbSTheodore Ts'o struct pagevec pvec; 20124f01b02cSTheodore Ts'o unsigned int nr_pages; 20134f01b02cSTheodore Ts'o sector_t logical; 20144f01b02cSTheodore Ts'o pgoff_t index, end; 20158e48dcfbSTheodore Ts'o long nr_to_write = wbc->nr_to_write; 20164f01b02cSTheodore Ts'o int i, tag, ret = 0; 20178e48dcfbSTheodore Ts'o 2018168fc022STheodore Ts'o memset(mpd, 0, sizeof(struct mpage_da_data)); 2019168fc022STheodore Ts'o mpd->wbc = wbc; 2020168fc022STheodore Ts'o mpd->inode = inode; 20218e48dcfbSTheodore Ts'o pagevec_init(&pvec, 0); 20228e48dcfbSTheodore Ts'o index = wbc->range_start >> PAGE_CACHE_SHIFT; 20238e48dcfbSTheodore Ts'o end = wbc->range_end >> PAGE_CACHE_SHIFT; 20248e48dcfbSTheodore Ts'o 20256e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 20265b41d924SEric Sandeen tag = PAGECACHE_TAG_TOWRITE; 20275b41d924SEric Sandeen else 20285b41d924SEric Sandeen tag = PAGECACHE_TAG_DIRTY; 20295b41d924SEric Sandeen 203072f84e65SEric Sandeen *done_index = index; 20314f01b02cSTheodore Ts'o while (index <= end) { 20325b41d924SEric Sandeen nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 20338e48dcfbSTheodore Ts'o min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 20348e48dcfbSTheodore Ts'o if (nr_pages == 0) 20354f01b02cSTheodore Ts'o return 0; 20368e48dcfbSTheodore Ts'o 20378e48dcfbSTheodore Ts'o for (i = 0; i < nr_pages; i++) { 20388e48dcfbSTheodore Ts'o struct page *page = pvec.pages[i]; 20398e48dcfbSTheodore Ts'o 20408e48dcfbSTheodore Ts'o /* 20418e48dcfbSTheodore Ts'o * At this point, the page may be truncated or 20428e48dcfbSTheodore Ts'o * invalidated (changing page->mapping to NULL), or 20438e48dcfbSTheodore Ts'o * even swizzled back from swapper_space to tmpfs file 20448e48dcfbSTheodore Ts'o * mapping. However, page->index will not change 20458e48dcfbSTheodore Ts'o * because we have a reference on the page. 
20468e48dcfbSTheodore Ts'o  */
20474f01b02cSTheodore Ts'o if (page->index > end)
20484f01b02cSTheodore Ts'o goto out;
20498e48dcfbSTheodore Ts'o 
205072f84e65SEric Sandeen *done_index = page->index + 1;
205172f84e65SEric Sandeen 
205278aaced3STheodore Ts'o /*
205378aaced3STheodore Ts'o  * If we can't merge this page, and we have
205478aaced3STheodore Ts'o  * accumulated a contiguous region, write it
205578aaced3STheodore Ts'o  */
205678aaced3STheodore Ts'o if ((mpd->next_page != page->index) &&
205778aaced3STheodore Ts'o (mpd->next_page != mpd->first_page)) {
205878aaced3STheodore Ts'o mpage_da_map_and_submit(mpd);
205978aaced3STheodore Ts'o goto ret_extent_tail;
206078aaced3STheodore Ts'o }
206178aaced3STheodore Ts'o 
20628e48dcfbSTheodore Ts'o lock_page(page);
20638e48dcfbSTheodore Ts'o 
20648e48dcfbSTheodore Ts'o /*
20654f01b02cSTheodore Ts'o  * If the page is no longer dirty, or its
20664f01b02cSTheodore Ts'o  * mapping no longer corresponds to the inode we
20674f01b02cSTheodore Ts'o  * are writing (which means it has been
20684f01b02cSTheodore Ts'o  * truncated or invalidated), or the page is
20694f01b02cSTheodore Ts'o  * already under writeback and we are not
20704f01b02cSTheodore Ts'o  * doing a data integrity writeback, skip the page
20718e48dcfbSTheodore Ts'o  */
20724f01b02cSTheodore Ts'o if (!PageDirty(page) ||
20734f01b02cSTheodore Ts'o (PageWriteback(page) &&
20744f01b02cSTheodore Ts'o (wbc->sync_mode == WB_SYNC_NONE)) ||
20754f01b02cSTheodore Ts'o unlikely(page->mapping != mapping)) {
20768e48dcfbSTheodore Ts'o unlock_page(page);
20778e48dcfbSTheodore Ts'o continue;
20788e48dcfbSTheodore Ts'o }
20798e48dcfbSTheodore Ts'o 
20808e48dcfbSTheodore Ts'o wait_on_page_writeback(page);
20818e48dcfbSTheodore Ts'o BUG_ON(PageWriteback(page));
20828e48dcfbSTheodore Ts'o 
2083168fc022STheodore Ts'o if (mpd->next_page != page->index)
20848eb9e5ceSTheodore Ts'o mpd->first_page = page->index;
20858eb9e5ceSTheodore Ts'o mpd->next_page = page->index + 1;
20868eb9e5ceSTheodore Ts'o logical = (sector_t) page->index <<
20878eb9e5ceSTheodore Ts'o (PAGE_CACHE_SHIFT - inode->i_blkbits);
20888eb9e5ceSTheodore Ts'o 
20898eb9e5ceSTheodore Ts'o if (!page_has_buffers(page)) {
20904f01b02cSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical,
20914f01b02cSTheodore Ts'o PAGE_CACHE_SIZE,
20928eb9e5ceSTheodore Ts'o (1 << BH_Dirty) | (1 << BH_Uptodate));
20934f01b02cSTheodore Ts'o if (mpd->io_done)
20944f01b02cSTheodore Ts'o goto ret_extent_tail;
20958e48dcfbSTheodore Ts'o } else {
20968eb9e5ceSTheodore Ts'o /*
20974f01b02cSTheodore Ts'o  * Page with regular buffer heads,
20984f01b02cSTheodore Ts'o  * just add all dirty ones
20998eb9e5ceSTheodore Ts'o  */
21008eb9e5ceSTheodore Ts'o head = page_buffers(page);
21018eb9e5ceSTheodore Ts'o bh = head;
21028eb9e5ceSTheodore Ts'o do {
21038eb9e5ceSTheodore Ts'o BUG_ON(buffer_locked(bh));
21048eb9e5ceSTheodore Ts'o /*
21058eb9e5ceSTheodore Ts'o  * We need to try to allocate
21068eb9e5ceSTheodore Ts'o  * unmapped blocks in the same page.
21078eb9e5ceSTheodore Ts'o * Otherwise we won't make progress 21088eb9e5ceSTheodore Ts'o * with the page in ext4_writepage 21098eb9e5ceSTheodore Ts'o */ 21108eb9e5ceSTheodore Ts'o if (ext4_bh_delay_or_unwritten(NULL, bh)) { 21118eb9e5ceSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical, 21128eb9e5ceSTheodore Ts'o bh->b_size, 21138eb9e5ceSTheodore Ts'o bh->b_state); 21144f01b02cSTheodore Ts'o if (mpd->io_done) 21154f01b02cSTheodore Ts'o goto ret_extent_tail; 21168eb9e5ceSTheodore Ts'o } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 21178eb9e5ceSTheodore Ts'o /* 21184f01b02cSTheodore Ts'o * mapped dirty buffer. We need 21194f01b02cSTheodore Ts'o * to update the b_state 21204f01b02cSTheodore Ts'o * because we look at b_state 21214f01b02cSTheodore Ts'o * in mpage_da_map_blocks. We 21224f01b02cSTheodore Ts'o * don't update b_size because 21234f01b02cSTheodore Ts'o * if we find an unmapped 21244f01b02cSTheodore Ts'o * buffer_head later we need to 21254f01b02cSTheodore Ts'o * use the b_state flag of that 21264f01b02cSTheodore Ts'o * buffer_head. 21278eb9e5ceSTheodore Ts'o */ 21288eb9e5ceSTheodore Ts'o if (mpd->b_size == 0) 21298eb9e5ceSTheodore Ts'o mpd->b_state = bh->b_state & BH_FLAGS; 21308e48dcfbSTheodore Ts'o } 21318eb9e5ceSTheodore Ts'o logical++; 21328eb9e5ceSTheodore Ts'o } while ((bh = bh->b_this_page) != head); 21338e48dcfbSTheodore Ts'o } 21348e48dcfbSTheodore Ts'o 21358e48dcfbSTheodore Ts'o if (nr_to_write > 0) { 21368e48dcfbSTheodore Ts'o nr_to_write--; 21378e48dcfbSTheodore Ts'o if (nr_to_write == 0 && 21384f01b02cSTheodore Ts'o wbc->sync_mode == WB_SYNC_NONE) 21398e48dcfbSTheodore Ts'o /* 21408e48dcfbSTheodore Ts'o * We stop writing back only if we are 21418e48dcfbSTheodore Ts'o * not doing integrity sync. In case of 21428e48dcfbSTheodore Ts'o * integrity sync we have to keep going 21438e48dcfbSTheodore Ts'o * because someone may be concurrently 21448e48dcfbSTheodore Ts'o * dirtying pages, and we might have 21458e48dcfbSTheodore Ts'o * synced a lot of newly appeared dirty 21468e48dcfbSTheodore Ts'o * pages, but have not synced all of the 21478e48dcfbSTheodore Ts'o * old dirty pages. 
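 *
 * (Example of the two modes: background writeback passes
 * WB_SYNC_NONE and may stop once nr_to_write is used up, while
 * integrity callers such as fsync()/sync() use WB_SYNC_ALL and
 * must keep going to the end of the range.)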
21488e48dcfbSTheodore Ts'o */
21494f01b02cSTheodore Ts'o goto out;
21508e48dcfbSTheodore Ts'o }
21518e48dcfbSTheodore Ts'o }
21528e48dcfbSTheodore Ts'o pagevec_release(&pvec);
21538e48dcfbSTheodore Ts'o cond_resched();
21548e48dcfbSTheodore Ts'o }
21554f01b02cSTheodore Ts'o return 0;
21564f01b02cSTheodore Ts'o ret_extent_tail:
21574f01b02cSTheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL;
21588eb9e5ceSTheodore Ts'o out:
21598eb9e5ceSTheodore Ts'o pagevec_release(&pvec);
21608eb9e5ceSTheodore Ts'o cond_resched();
21618e48dcfbSTheodore Ts'o return ret;
21628e48dcfbSTheodore Ts'o }
21638e48dcfbSTheodore Ts'o 
21648e48dcfbSTheodore Ts'o 
216564769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping,
216664769240SAlex Tomas struct writeback_control *wbc)
216764769240SAlex Tomas {
216822208dedSAneesh Kumar K.V pgoff_t index;
216922208dedSAneesh Kumar K.V int range_whole = 0;
217061628a3fSMingming Cao handle_t *handle = NULL;
2171df22291fSAneesh Kumar K.V struct mpage_da_data mpd;
21725e745b04SAneesh Kumar K.V struct inode *inode = mapping->host;
2173498e5f24STheodore Ts'o int pages_written = 0;
217455138e0bSTheodore Ts'o unsigned int max_pages;
21752acf2c26SAneesh Kumar K.V int range_cyclic, cycled = 1, io_done = 0;
217655138e0bSTheodore Ts'o int needed_blocks, ret = 0;
217755138e0bSTheodore Ts'o long desired_nr_to_write, nr_to_writebump = 0;
2178de89de6eSTheodore Ts'o loff_t range_start = wbc->range_start;
21795e745b04SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
218072f84e65SEric Sandeen pgoff_t done_index = 0;
21815b41d924SEric Sandeen pgoff_t end;
21821bce63d1SShaohua Li struct blk_plug plug;
218361628a3fSMingming Cao 
21849bffad1eSTheodore Ts'o trace_ext4_da_writepages(inode, wbc);
2185ba80b101STheodore Ts'o 
218661628a3fSMingming Cao /*
218761628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting
218861628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput()
218961628a3fSMingming Cao * because that could violate lock ordering on umount
219061628a3fSMingming Cao */
2191a1d6cc56SAneesh Kumar K.V if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
219261628a3fSMingming Cao return 0;
21932a21e37eSTheodore Ts'o 
21942a21e37eSTheodore Ts'o /*
21952a21e37eSTheodore Ts'o * If the filesystem has aborted, it is read-only, so return
21962a21e37eSTheodore Ts'o * right away instead of dumping stack traces later on that
21972a21e37eSTheodore Ts'o * will obscure the real source of the problem. We test
21984ab2f15bSTheodore Ts'o * EXT4_MF_FS_ABORTED instead of sb->s_flags' MS_RDONLY because
21992a21e37eSTheodore Ts'o * the latter could be true if the filesystem is mounted
22002a21e37eSTheodore Ts'o * read-only, and in that case, ext4_da_writepages should
22012a21e37eSTheodore Ts'o * *never* be called, so if that ever happens, we would want
22022a21e37eSTheodore Ts'o * the stack trace.
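 *
 * (Concretely: once something like ext4_abort() has set
 * EXT4_MF_FS_ABORTED, any later writeback attempt lands here
 * and fails fast with -EROFS below.)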
22032a21e37eSTheodore Ts'o */
22044ab2f15bSTheodore Ts'o if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
22052a21e37eSTheodore Ts'o return -EROFS;
22062a21e37eSTheodore Ts'o 
220722208dedSAneesh Kumar K.V if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
220822208dedSAneesh Kumar K.V range_whole = 1;
220961628a3fSMingming Cao 
22102acf2c26SAneesh Kumar K.V range_cyclic = wbc->range_cyclic;
22112acf2c26SAneesh Kumar K.V if (wbc->range_cyclic) {
221222208dedSAneesh Kumar K.V index = mapping->writeback_index;
22132acf2c26SAneesh Kumar K.V if (index)
22142acf2c26SAneesh Kumar K.V cycled = 0;
22152acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT;
22162acf2c26SAneesh Kumar K.V wbc->range_end = LLONG_MAX;
22172acf2c26SAneesh Kumar K.V wbc->range_cyclic = 0;
22185b41d924SEric Sandeen end = -1;
22195b41d924SEric Sandeen } else {
222022208dedSAneesh Kumar K.V index = wbc->range_start >> PAGE_CACHE_SHIFT;
22215b41d924SEric Sandeen end = wbc->range_end >> PAGE_CACHE_SHIFT;
22225b41d924SEric Sandeen }
2223a1d6cc56SAneesh Kumar K.V 
222455138e0bSTheodore Ts'o /*
222555138e0bSTheodore Ts'o * This works around two forms of stupidity. The first is in
222655138e0bSTheodore Ts'o * the writeback code, which caps the maximum number of pages
222755138e0bSTheodore Ts'o * written to be 1024 pages. This is wrong on multiple
222855138e0bSTheodore Ts'o * levels; different architectures have a different page size,
222955138e0bSTheodore Ts'o * which changes the maximum amount of data which gets
223055138e0bSTheodore Ts'o * written. Secondly, 4 megabytes is way too small. XFS
223155138e0bSTheodore Ts'o * forces this value to be 16 megabytes by multiplying
223255138e0bSTheodore Ts'o * nr_to_write parameter by four, and then relies on its
223355138e0bSTheodore Ts'o * allocator to allocate larger extents to make them
223455138e0bSTheodore Ts'o * contiguous. Unfortunately this brings us to the second
223555138e0bSTheodore Ts'o * stupidity, which is that ext4's mballoc code only allocates
223655138e0bSTheodore Ts'o * at most 2048 blocks. So we force contiguous writes up to
223755138e0bSTheodore Ts'o * the number of dirty blocks in the inode, or
223855138e0bSTheodore Ts'o * sbi->max_writeback_mb_bump, whichever is smaller.
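 *
 * A worked example of the cap, assuming 4 KiB pages and the
 * default s_max_writeback_mb_bump of 128:
 *
 *	max_pages = 128 << (20 - 12) = 32768 pages = 128 MiB
 *
 * so desired_nr_to_write is never bumped past 128 MiB worth of
 * pages, however large the dirty range is.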
223955138e0bSTheodore Ts'o */
224055138e0bSTheodore Ts'o max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2241b443e733SEric Sandeen if (!range_cyclic && range_whole) {
2242b443e733SEric Sandeen if (wbc->nr_to_write == LONG_MAX)
2243b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write;
224455138e0bSTheodore Ts'o else
2245b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write * 8;
2246b443e733SEric Sandeen } else
224755138e0bSTheodore Ts'o desired_nr_to_write = ext4_num_dirty_pages(inode, index,
224855138e0bSTheodore Ts'o max_pages);
224955138e0bSTheodore Ts'o if (desired_nr_to_write > max_pages)
225055138e0bSTheodore Ts'o desired_nr_to_write = max_pages;
225155138e0bSTheodore Ts'o 
225255138e0bSTheodore Ts'o if (wbc->nr_to_write < desired_nr_to_write) {
225355138e0bSTheodore Ts'o nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
225455138e0bSTheodore Ts'o wbc->nr_to_write = desired_nr_to_write;
225555138e0bSTheodore Ts'o }
225655138e0bSTheodore Ts'o 
22572acf2c26SAneesh Kumar K.V retry:
22586e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
22595b41d924SEric Sandeen tag_pages_for_writeback(mapping, index, end);
22605b41d924SEric Sandeen 
22611bce63d1SShaohua Li blk_start_plug(&plug);
226222208dedSAneesh Kumar K.V while (!ret && wbc->nr_to_write > 0) {
2263a1d6cc56SAneesh Kumar K.V 
2264a1d6cc56SAneesh Kumar K.V /*
2265a1d6cc56SAneesh Kumar K.V * We insert one extent at a time, so we need
2266a1d6cc56SAneesh Kumar K.V * the credits required for a single extent allocation.
2267a1d6cc56SAneesh Kumar K.V * Journalled mode is currently not supported
2268a1d6cc56SAneesh Kumar K.V * by delalloc.
2269a1d6cc56SAneesh Kumar K.V */
2270a1d6cc56SAneesh Kumar K.V BUG_ON(ext4_should_journal_data(inode));
2271525f4ed8SMingming Cao needed_blocks = ext4_da_writepages_trans_blocks(inode);
2272a1d6cc56SAneesh Kumar K.V 
227361628a3fSMingming Cao /* start a new transaction */
227461628a3fSMingming Cao handle = ext4_journal_start(inode, needed_blocks);
227561628a3fSMingming Cao if (IS_ERR(handle)) {
227661628a3fSMingming Cao ret = PTR_ERR(handle);
22771693918eSTheodore Ts'o ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2278fbe845ddSCurt Wohlgemuth "%ld pages, ino %lu; err %d", __func__,
2279a1d6cc56SAneesh Kumar K.V wbc->nr_to_write, inode->i_ino, ret);
22803c1fcb2cSNamjae Jeon blk_finish_plug(&plug);
228161628a3fSMingming Cao goto out_writepages;
228261628a3fSMingming Cao }
2283f63e6005STheodore Ts'o 
2284f63e6005STheodore Ts'o /*
22858eb9e5ceSTheodore Ts'o * Now call write_cache_pages_da() to find the next
2286f63e6005STheodore Ts'o * contiguous region of logical blocks that need
22878eb9e5ceSTheodore Ts'o * blocks to be allocated by ext4 and submit them.
2288f63e6005STheodore Ts'o */
228972f84e65SEric Sandeen ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2290f63e6005STheodore Ts'o /*
2291af901ca1SAndré Goddard Rosa * If we have a contiguous extent of pages and we
2292f63e6005STheodore Ts'o * haven't done the I/O yet, map the blocks and submit
2293f63e6005STheodore Ts'o * them for I/O.
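 *
 * Roughly, each loop iteration here is: start a handle with
 * enough credits for one extent allocation, let
 * write_cache_pages_da() collect one run of dirty pages, map
 * and submit that run, then stop the handle before moving on.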
2294f63e6005STheodore Ts'o */
2295f63e6005STheodore Ts'o if (!mpd.io_done && mpd.next_page != mpd.first_page) {
22965a87b7a5STheodore Ts'o mpage_da_map_and_submit(&mpd);
2297f63e6005STheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL;
2298f63e6005STheodore Ts'o }
2299b3a3ca8cSTheodore Ts'o trace_ext4_da_write_pages(inode, &mpd);
2300f63e6005STheodore Ts'o wbc->nr_to_write -= mpd.pages_written;
2301df22291fSAneesh Kumar K.V 
230261628a3fSMingming Cao ext4_journal_stop(handle);
2303df22291fSAneesh Kumar K.V 
23048f64b32eSEric Sandeen if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
230522208dedSAneesh Kumar K.V /* commit the transaction, which would
230622208dedSAneesh Kumar K.V * free blocks released in the transaction,
230722208dedSAneesh Kumar K.V * and try again
230822208dedSAneesh Kumar K.V */
2309df22291fSAneesh Kumar K.V jbd2_journal_force_commit_nested(sbi->s_journal);
231022208dedSAneesh Kumar K.V ret = 0;
231122208dedSAneesh Kumar K.V } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2312a1d6cc56SAneesh Kumar K.V /*
23138de49e67SKazuya Mio * Got one extent; now try with the rest of the pages.
23148de49e67SKazuya Mio * If mpd.retval is set to -EIO, the journal is aborted,
23158de49e67SKazuya Mio * so we don't need to write any more.
2316a1d6cc56SAneesh Kumar K.V */
231722208dedSAneesh Kumar K.V pages_written += mpd.pages_written;
23188de49e67SKazuya Mio ret = mpd.retval;
23192acf2c26SAneesh Kumar K.V io_done = 1;
232022208dedSAneesh Kumar K.V } else if (wbc->nr_to_write)
232161628a3fSMingming Cao /*
232261628a3fSMingming Cao * There is no more writeout needed,
232361628a3fSMingming Cao * or we requested a nonblocking writeout
232461628a3fSMingming Cao * and we found the device congested
232561628a3fSMingming Cao */
232661628a3fSMingming Cao break;
232761628a3fSMingming Cao }
23281bce63d1SShaohua Li blk_finish_plug(&plug);
23292acf2c26SAneesh Kumar K.V if (!io_done && !cycled) {
23302acf2c26SAneesh Kumar K.V cycled = 1;
23312acf2c26SAneesh Kumar K.V index = 0;
23322acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT;
23332acf2c26SAneesh Kumar K.V wbc->range_end = mapping->writeback_index - 1;
23342acf2c26SAneesh Kumar K.V goto retry;
23352acf2c26SAneesh Kumar K.V }
233661628a3fSMingming Cao 
233722208dedSAneesh Kumar K.V /* Update index */
23382acf2c26SAneesh Kumar K.V wbc->range_cyclic = range_cyclic;
233922208dedSAneesh Kumar K.V if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
234022208dedSAneesh Kumar K.V /*
234122208dedSAneesh Kumar K.V * set the writeback_index so that range_cyclic
234222208dedSAneesh Kumar K.V * mode will write it back later
234322208dedSAneesh Kumar K.V */
234472f84e65SEric Sandeen mapping->writeback_index = done_index;
2345a1d6cc56SAneesh Kumar K.V 
234661628a3fSMingming Cao out_writepages:
234722208dedSAneesh Kumar K.V wbc->nr_to_write -= nr_to_writebump;
2348de89de6eSTheodore Ts'o wbc->range_start = range_start;
23499bffad1eSTheodore Ts'o trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
235061628a3fSMingming Cao return ret;
235164769240SAlex Tomas }
235264769240SAlex Tomas 
235379f0be8dSAneesh Kumar K.V #define FALL_BACK_TO_NONDELALLOC 1
235479f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb)
235579f0be8dSAneesh Kumar K.V {
235679f0be8dSAneesh Kumar K.V s64 free_blocks, dirty_blocks;
235779f0be8dSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb);
235879f0be8dSAneesh Kumar K.V 
235979f0be8dSAneesh Kumar K.V /*
236079f0be8dSAneesh Kumar K.V * switch to non-delalloc mode if we are running low
236179f0be8dSAneesh Kumar K.V * on free blocks. The free block accounting via percpu
2362179f7ebfSEric Dumazet * counters can get slightly wrong with percpu_counter_batch getting
236379f0be8dSAneesh Kumar K.V * accumulated on each CPU without updating global counters.
236479f0be8dSAneesh Kumar K.V * Delalloc needs an accurate free block accounting, so switch
236579f0be8dSAneesh Kumar K.V * to non-delalloc when we are near the error range.
236679f0be8dSAneesh Kumar K.V */
236757042651STheodore Ts'o free_blocks = EXT4_C2B(sbi,
236857042651STheodore Ts'o percpu_counter_read_positive(&sbi->s_freeclusters_counter));
236957042651STheodore Ts'o dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
237079f0be8dSAneesh Kumar K.V if (2 * free_blocks < 3 * dirty_blocks ||
2371df55c99dSTheodore Ts'o free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
237279f0be8dSAneesh Kumar K.V /*
2373c8afb446SEric Sandeen * free block count is less than 150% of dirty blocks
2374c8afb446SEric Sandeen * or free blocks is less than the watermark
237579f0be8dSAneesh Kumar K.V */
237679f0be8dSAneesh Kumar K.V return 1;
237779f0be8dSAneesh Kumar K.V }
2378c8afb446SEric Sandeen /*
2379c8afb446SEric Sandeen * Even if we don't switch but are nearing capacity,
2380c8afb446SEric Sandeen * start pushing delalloc when 1/2 of free blocks are dirty.
2381c8afb446SEric Sandeen */
2382c8afb446SEric Sandeen if (free_blocks < 2 * dirty_blocks)
23830e175a18SCurt Wohlgemuth writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
2384c8afb446SEric Sandeen 
238579f0be8dSAneesh Kumar K.V return 0;
238679f0be8dSAneesh Kumar K.V }
238779f0be8dSAneesh Kumar K.V 
238864769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
238964769240SAlex Tomas loff_t pos, unsigned len, unsigned flags,
239064769240SAlex Tomas struct page **pagep, void **fsdata)
239164769240SAlex Tomas {
239272b8ab9dSEric Sandeen int ret, retries = 0;
239364769240SAlex Tomas struct page *page;
239464769240SAlex Tomas pgoff_t index;
239564769240SAlex Tomas struct inode *inode = mapping->host;
239664769240SAlex Tomas handle_t *handle;
239764769240SAlex Tomas 
239864769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT;
239979f0be8dSAneesh Kumar K.V 
240079f0be8dSAneesh Kumar K.V if (ext4_nonda_switch(inode->i_sb)) {
240179f0be8dSAneesh Kumar K.V *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
240279f0be8dSAneesh Kumar K.V return ext4_write_begin(file, mapping, pos,
240379f0be8dSAneesh Kumar K.V len, flags, pagep, fsdata);
240479f0be8dSAneesh Kumar K.V }
240579f0be8dSAneesh Kumar K.V *fsdata = (void *)0;
24069bffad1eSTheodore Ts'o trace_ext4_da_write_begin(inode, pos, len, flags);
2407d2a17637SMingming Cao retry:
240864769240SAlex Tomas /*
240964769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update
241064769240SAlex Tomas * if there is delayed block allocation. But we still need
241164769240SAlex Tomas * to journal the i_disksize update if we write to the end
241264769240SAlex Tomas * of a file which has an already mapped buffer.
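 *
 * Example: i_size is 10 and the block containing it is already
 * mapped; a delalloc write of 20 more bytes allocates nothing,
 * but i_disksize still grows to 30, and logging that update is
 * exactly what the 1-credit handle started below is for.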
241364769240SAlex Tomas */
241464769240SAlex Tomas handle = ext4_journal_start(inode, 1);
241564769240SAlex Tomas if (IS_ERR(handle)) {
241664769240SAlex Tomas ret = PTR_ERR(handle);
241764769240SAlex Tomas goto out;
241864769240SAlex Tomas }
2419ebd3610bSJan Kara /* We cannot recurse into the filesystem as the transaction is already
2420ebd3610bSJan Kara * started */
2421ebd3610bSJan Kara flags |= AOP_FLAG_NOFS;
242264769240SAlex Tomas 
242354566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags);
2424d5a0d4f7SEric Sandeen if (!page) {
2425d5a0d4f7SEric Sandeen ext4_journal_stop(handle);
2426d5a0d4f7SEric Sandeen ret = -ENOMEM;
2427d5a0d4f7SEric Sandeen goto out;
2428d5a0d4f7SEric Sandeen }
242964769240SAlex Tomas *pagep = page;
243064769240SAlex Tomas 
24316e1db88dSChristoph Hellwig ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
243264769240SAlex Tomas if (ret < 0) {
243364769240SAlex Tomas unlock_page(page);
243464769240SAlex Tomas ext4_journal_stop(handle);
243564769240SAlex Tomas page_cache_release(page);
2436ae4d5372SAneesh Kumar K.V /*
2437ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks
2438ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need
2439ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex.
2440ae4d5372SAneesh Kumar K.V */
2441ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size)
2442b9a4207dSJan Kara ext4_truncate_failed_write(inode);
244364769240SAlex Tomas }
244364769240SAlex Tomas 
2445d2a17637SMingming Cao if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2446d2a17637SMingming Cao goto retry;
244764769240SAlex Tomas out:
244864769240SAlex Tomas return ret;
244964769240SAlex Tomas }
245064769240SAlex Tomas 
2451632eaeabSMingming Cao /*
2452632eaeabSMingming Cao * Check if we should update i_disksize
2453632eaeabSMingming Cao * when writing to the end of file without requiring block allocation
2454632eaeabSMingming Cao */
2455632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page,
2456632eaeabSMingming Cao unsigned long offset)
2457632eaeabSMingming Cao {
2458632eaeabSMingming Cao struct buffer_head *bh;
2459632eaeabSMingming Cao struct inode *inode = page->mapping->host;
2460632eaeabSMingming Cao unsigned int idx;
2461632eaeabSMingming Cao int i;
2462632eaeabSMingming Cao 
2463632eaeabSMingming Cao bh = page_buffers(page);
2464632eaeabSMingming Cao idx = offset >> inode->i_blkbits;
2465632eaeabSMingming Cao 
2466632eaeabSMingming Cao for (i = 0; i < idx; i++)
2467632eaeabSMingming Cao bh = bh->b_this_page;
2468632eaeabSMingming Cao 
246929fa89d0SAneesh Kumar K.V if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2470632eaeabSMingming Cao return 0;
2471632eaeabSMingming Cao return 1;
2472632eaeabSMingming Cao }
2473632eaeabSMingming Cao 
247464769240SAlex Tomas static int ext4_da_write_end(struct file *file,
247564769240SAlex Tomas struct address_space *mapping,
247664769240SAlex Tomas loff_t pos, unsigned len, unsigned copied,
247764769240SAlex Tomas struct page *page, void *fsdata)
247864769240SAlex Tomas {
247964769240SAlex Tomas struct inode *inode = mapping->host;
248064769240SAlex Tomas int ret = 0, ret2;
248164769240SAlex Tomas handle_t *handle = ext4_journal_current_handle();
248264769240SAlex Tomas loff_t new_i_size;
2483632eaeabSMingming Cao unsigned long start, end;
248479f0be8dSAneesh Kumar K.V int write_mode = (int)(unsigned long)fsdata;
248579f0be8dSAneesh Kumar K.V 
248679f0be8dSAneesh Kumar K.V if (write_mode == FALL_BACK_TO_NONDELALLOC) {
24873d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) {
24883d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE:
248979f0be8dSAneesh Kumar K.V return ext4_ordered_write_end(file, mapping, pos,
249079f0be8dSAneesh Kumar K.V len, copied, page, fsdata);
24913d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE:
249279f0be8dSAneesh Kumar K.V return ext4_writeback_write_end(file, mapping, pos,
249379f0be8dSAneesh Kumar K.V len, copied, page, fsdata);
24943d2b1582SLukas Czerner default:
249579f0be8dSAneesh Kumar K.V BUG();
249679f0be8dSAneesh Kumar K.V }
249779f0be8dSAneesh Kumar K.V }
2498632eaeabSMingming Cao 
24999bffad1eSTheodore Ts'o trace_ext4_da_write_end(inode, pos, len, copied);
2500632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1);
2501632eaeabSMingming Cao end = start + copied - 1;
250264769240SAlex Tomas 
250364769240SAlex Tomas /*
250464769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size
250564769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty
250664769240SAlex Tomas * into that.
250764769240SAlex Tomas */
250864769240SAlex Tomas 
250964769240SAlex Tomas new_i_size = pos + copied;
2510ea51d132SAndrea Arcangeli if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2511632eaeabSMingming Cao if (ext4_da_should_update_i_disksize(page, end)) {
2512632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem);
2513632eaeabSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) {
251464769240SAlex Tomas /*
2515632eaeabSMingming Cao * Updating i_disksize when extending file
2516632eaeabSMingming Cao * without needing block allocation
251764769240SAlex Tomas */
251864769240SAlex Tomas if (ext4_should_order_data(inode))
2519632eaeabSMingming Cao ret = ext4_jbd2_file_inode(handle,
2520632eaeabSMingming Cao inode);
252164769240SAlex Tomas 
252264769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size;
252364769240SAlex Tomas }
2524632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem);
2525cf17fea6SAneesh Kumar K.V /* We need to mark inode dirty even if
2526cf17fea6SAneesh Kumar K.V * new_i_size is less than inode->i_size
2527cf17fea6SAneesh Kumar K.V * but greater than i_disksize. (hint: delalloc)
2528cf17fea6SAneesh Kumar K.V */
2529cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode);
2530632eaeabSMingming Cao }
2531632eaeabSMingming Cao }
253264769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied,
253364769240SAlex Tomas page, fsdata);
253464769240SAlex Tomas copied = ret2;
253564769240SAlex Tomas if (ret2 < 0)
253664769240SAlex Tomas ret = ret2;
253764769240SAlex Tomas ret2 = ext4_journal_stop(handle);
253864769240SAlex Tomas if (!ret)
253964769240SAlex Tomas ret = ret2;
254064769240SAlex Tomas 
254164769240SAlex Tomas return ret ?
ret : copied; 254264769240SAlex Tomas } 254364769240SAlex Tomas 254464769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 254564769240SAlex Tomas { 254664769240SAlex Tomas /* 254764769240SAlex Tomas * Drop reserved blocks 254864769240SAlex Tomas */ 254964769240SAlex Tomas BUG_ON(!PageLocked(page)); 255064769240SAlex Tomas if (!page_has_buffers(page)) 255164769240SAlex Tomas goto out; 255264769240SAlex Tomas 2553d2a17637SMingming Cao ext4_da_page_release_reservation(page, offset); 255464769240SAlex Tomas 255564769240SAlex Tomas out: 255664769240SAlex Tomas ext4_invalidatepage(page, offset); 255764769240SAlex Tomas 255864769240SAlex Tomas return; 255964769240SAlex Tomas } 256064769240SAlex Tomas 2561ccd2506bSTheodore Ts'o /* 2562ccd2506bSTheodore Ts'o * Force all delayed allocation blocks to be allocated for a given inode. 2563ccd2506bSTheodore Ts'o */ 2564ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode) 2565ccd2506bSTheodore Ts'o { 2566fb40ba0dSTheodore Ts'o trace_ext4_alloc_da_blocks(inode); 2567fb40ba0dSTheodore Ts'o 2568ccd2506bSTheodore Ts'o if (!EXT4_I(inode)->i_reserved_data_blocks && 2569ccd2506bSTheodore Ts'o !EXT4_I(inode)->i_reserved_meta_blocks) 2570ccd2506bSTheodore Ts'o return 0; 2571ccd2506bSTheodore Ts'o 2572ccd2506bSTheodore Ts'o /* 2573ccd2506bSTheodore Ts'o * We do something simple for now. The filemap_flush() will 2574ccd2506bSTheodore Ts'o * also start triggering a write of the data blocks, which is 2575ccd2506bSTheodore Ts'o * not strictly speaking necessary (and for users of 2576ccd2506bSTheodore Ts'o * laptop_mode, not even desirable). However, to do otherwise 2577ccd2506bSTheodore Ts'o * would require replicating code paths in: 2578ccd2506bSTheodore Ts'o * 2579ccd2506bSTheodore Ts'o * ext4_da_writepages() -> 2580ccd2506bSTheodore Ts'o * write_cache_pages() ---> (via passed in callback function) 2581ccd2506bSTheodore Ts'o * __mpage_da_writepage() --> 2582ccd2506bSTheodore Ts'o * mpage_add_bh_to_extent() 2583ccd2506bSTheodore Ts'o * mpage_da_map_blocks() 2584ccd2506bSTheodore Ts'o * 2585ccd2506bSTheodore Ts'o * The problem is that write_cache_pages(), located in 2586ccd2506bSTheodore Ts'o * mm/page-writeback.c, marks pages clean in preparation for 2587ccd2506bSTheodore Ts'o * doing I/O, which is not desirable if we're not planning on 2588ccd2506bSTheodore Ts'o * doing I/O at all. 2589ccd2506bSTheodore Ts'o * 2590ccd2506bSTheodore Ts'o * We could call write_cache_pages(), and then redirty all of 2591380cf090SWu Fengguang * the pages by calling redirty_page_for_writepage() but that 2592ccd2506bSTheodore Ts'o * would be ugly in the extreme. So instead we would need to 2593ccd2506bSTheodore Ts'o * replicate parts of the code in the above functions, 259425985edcSLucas De Marchi * simplifying them because we wouldn't actually intend to 2595ccd2506bSTheodore Ts'o * write out the pages, but rather only collect contiguous 2596ccd2506bSTheodore Ts'o * logical block extents, call the multi-block allocator, and 2597ccd2506bSTheodore Ts'o * then update the buffer heads with the block allocations. 2598ccd2506bSTheodore Ts'o * 2599ccd2506bSTheodore Ts'o * For now, though, we'll cheat by calling filemap_flush(), 2600ccd2506bSTheodore Ts'o * which will map the blocks, and start the I/O, but not 2601ccd2506bSTheodore Ts'o * actually wait for the I/O to complete. 
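 *
 * A rough usage sketch (hypothetical caller):
 *
 *	err = ext4_alloc_da_blocks(inode);
 *	if (err)
 *		return err;
 *
 * Note that a zero return only means writeout was started;
 * callers that need the data on disk must still wait for it.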
2602ccd2506bSTheodore Ts'o */
2603ccd2506bSTheodore Ts'o return filemap_flush(inode->i_mapping);
2604ccd2506bSTheodore Ts'o }
260564769240SAlex Tomas 
260664769240SAlex Tomas /*
2607ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by
2608ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data.
2609ac27a0ecSDave Kleikamp *
2610ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the
2611617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling
2612ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the
2613ac27a0ecSDave Kleikamp * data being swapped to that swapfile suddenly gets overwritten by
2614ac27a0ecSDave Kleikamp * the original zeros written out previously to the journal and
2615ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache.
2616ac27a0ecSDave Kleikamp *
2617ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file,
2618ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache.
2619ac27a0ecSDave Kleikamp */
2620617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2621ac27a0ecSDave Kleikamp {
2622ac27a0ecSDave Kleikamp struct inode *inode = mapping->host;
2623ac27a0ecSDave Kleikamp journal_t *journal;
2624ac27a0ecSDave Kleikamp int err;
2625ac27a0ecSDave Kleikamp 
262664769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
262764769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) {
262864769240SAlex Tomas /*
262964769240SAlex Tomas * With delalloc we want to sync the file
263064769240SAlex Tomas * so that we can make sure we allocate
263164769240SAlex Tomas * blocks for the file
263264769240SAlex Tomas */
263364769240SAlex Tomas filemap_write_and_wait(mapping);
263464769240SAlex Tomas }
263564769240SAlex Tomas 
263619f5fb7aSTheodore Ts'o if (EXT4_JOURNAL(inode) &&
263719f5fb7aSTheodore Ts'o ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2638ac27a0ecSDave Kleikamp /*
2639ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of
2640ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare:
2641ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file
2642ac27a0ecSDave Kleikamp * do we expect this to happen.
2643ac27a0ecSDave Kleikamp *
2644ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not
2645ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be
2646ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at
2647ac27a0ecSDave Kleikamp * will.)
2648ac27a0ecSDave Kleikamp *
2649617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than
2650ac27a0ecSDave Kleikamp * regular files. If somebody wants to bmap a directory
2651ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer
2652ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve
2653ac27a0ecSDave Kleikamp * everything they get.
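 *
 * Concretely: running swapon(8) on a freshly written
 * data=journal file ends up in this branch with
 * EXT4_STATE_JDATA set, and the jbd2_journal_flush() below
 * pushes the cached data out of the journal before any block
 * numbers are handed back.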
2654ac27a0ecSDave Kleikamp */ 2655ac27a0ecSDave Kleikamp 265619f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 2657617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2658dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2659dab291afSMingming Cao err = jbd2_journal_flush(journal); 2660dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2661ac27a0ecSDave Kleikamp 2662ac27a0ecSDave Kleikamp if (err) 2663ac27a0ecSDave Kleikamp return 0; 2664ac27a0ecSDave Kleikamp } 2665ac27a0ecSDave Kleikamp 2666617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block); 2667ac27a0ecSDave Kleikamp } 2668ac27a0ecSDave Kleikamp 2669617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page) 2670ac27a0ecSDave Kleikamp { 26710562e0baSJiaying Zhang trace_ext4_readpage(page); 2672617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block); 2673ac27a0ecSDave Kleikamp } 2674ac27a0ecSDave Kleikamp 2675ac27a0ecSDave Kleikamp static int 2676617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping, 2677ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages) 2678ac27a0ecSDave Kleikamp { 2679617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2680ac27a0ecSDave Kleikamp } 2681ac27a0ecSDave Kleikamp 2682744692dcSJiaying Zhang static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset) 2683744692dcSJiaying Zhang { 2684744692dcSJiaying Zhang struct buffer_head *head, *bh; 2685744692dcSJiaying Zhang unsigned int curr_off = 0; 2686744692dcSJiaying Zhang 2687744692dcSJiaying Zhang if (!page_has_buffers(page)) 2688744692dcSJiaying Zhang return; 2689744692dcSJiaying Zhang head = bh = page_buffers(page); 2690744692dcSJiaying Zhang do { 2691744692dcSJiaying Zhang if (offset <= curr_off && test_clear_buffer_uninit(bh) 2692744692dcSJiaying Zhang && bh->b_private) { 2693744692dcSJiaying Zhang ext4_free_io_end(bh->b_private); 2694744692dcSJiaying Zhang bh->b_private = NULL; 2695744692dcSJiaying Zhang bh->b_end_io = NULL; 2696744692dcSJiaying Zhang } 2697744692dcSJiaying Zhang curr_off = curr_off + bh->b_size; 2698744692dcSJiaying Zhang bh = bh->b_this_page; 2699744692dcSJiaying Zhang } while (bh != head); 2700744692dcSJiaying Zhang } 2701744692dcSJiaying Zhang 2702617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset) 2703ac27a0ecSDave Kleikamp { 2704617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2705ac27a0ecSDave Kleikamp 27060562e0baSJiaying Zhang trace_ext4_invalidatepage(page, offset); 27070562e0baSJiaying Zhang 2708ac27a0ecSDave Kleikamp /* 2709744692dcSJiaying Zhang * free any io_end structure allocated for buffers to be discarded 2710744692dcSJiaying Zhang */ 2711744692dcSJiaying Zhang if (ext4_should_dioread_nolock(page->mapping->host)) 2712744692dcSJiaying Zhang ext4_invalidatepage_free_endio(page, offset); 2713744692dcSJiaying Zhang /* 2714ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying 2715ac27a0ecSDave Kleikamp */ 2716ac27a0ecSDave Kleikamp if (offset == 0) 2717ac27a0ecSDave Kleikamp ClearPageChecked(page); 2718ac27a0ecSDave Kleikamp 27190390131bSFrank Mayhar if (journal) 2720dab291afSMingming Cao jbd2_journal_invalidatepage(journal, page, offset); 27210390131bSFrank Mayhar else 27220390131bSFrank Mayhar block_invalidatepage(page, offset); 2723ac27a0ecSDave Kleikamp } 2724ac27a0ecSDave Kleikamp 2725617ba13bSMingming Cao static 
int ext4_releasepage(struct page *page, gfp_t wait)
2726ac27a0ecSDave Kleikamp {
2727617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2728ac27a0ecSDave Kleikamp 
27290562e0baSJiaying Zhang trace_ext4_releasepage(page);
27300562e0baSJiaying Zhang 
2731ac27a0ecSDave Kleikamp WARN_ON(PageChecked(page));
2732ac27a0ecSDave Kleikamp if (!page_has_buffers(page))
2733ac27a0ecSDave Kleikamp return 0;
27340390131bSFrank Mayhar if (journal)
2735dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait);
27360390131bSFrank Mayhar else
27370390131bSFrank Mayhar return try_to_free_buffers(page);
2738ac27a0ecSDave Kleikamp }
2739ac27a0ecSDave Kleikamp 
2740ac27a0ecSDave Kleikamp /*
27412ed88685STheodore Ts'o * ext4_get_block used when preparing for a DIO write or buffer write.
27422ed88685STheodore Ts'o * We allocate an uninitialized extent if blocks haven't been allocated.
27432ed88685STheodore Ts'o * The extent will be converted to initialized after the IO is complete.
27442ed88685STheodore Ts'o */
2745c7064ef1SJiaying Zhang static int ext4_get_block_write(struct inode *inode, sector_t iblock,
27464c0425ffSMingming Cao struct buffer_head *bh_result, int create)
27474c0425ffSMingming Cao {
2748c7064ef1SJiaying Zhang ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
27498d5d02e6SMingming Cao inode->i_ino, create);
27502ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result,
27512ed88685STheodore Ts'o EXT4_GET_BLOCKS_IO_CREATE_EXT);
27524c0425ffSMingming Cao }
27534c0425ffSMingming Cao 
27544c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2755552ef802SChristoph Hellwig ssize_t size, void *private, int ret,
2756552ef802SChristoph Hellwig bool is_async)
27574c0425ffSMingming Cao {
275872c5052dSChristoph Hellwig struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
27594c0425ffSMingming Cao ext4_io_end_t *io_end = iocb->private;
27604c0425ffSMingming Cao struct workqueue_struct *wq;
2761744692dcSJiaying Zhang unsigned long flags;
2762744692dcSJiaying Zhang struct ext4_inode_info *ei;
27634c0425ffSMingming Cao 
27644b70df18SMingming /* if not async direct IO or dio with 0 bytes write, just return */
27654b70df18SMingming if (!io_end || !size)
2766552ef802SChristoph Hellwig goto out;
27674b70df18SMingming 
27688d5d02e6SMingming Cao ext_debug("ext4_end_io_dio(): io_end 0x%p "
2769ace36ad4SJoe Perches "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
27708d5d02e6SMingming Cao iocb->private, io_end->inode->i_ino, iocb, offset,
27718d5d02e6SMingming Cao size);
27728d5d02e6SMingming Cao 
2773b5a7e970STheodore Ts'o iocb->private = NULL;
2774b5a7e970STheodore Ts'o 
27758d5d02e6SMingming Cao /* if not aio dio with unwritten extents, just free io and return */
2776bd2d0210STheodore Ts'o if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
27778d5d02e6SMingming Cao ext4_free_io_end(io_end);
27785b3ff237Sjiayingz@google.com (Jiaying Zhang) out:
27795b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async)
27805b3ff237Sjiayingz@google.com (Jiaying Zhang) aio_complete(iocb, ret, 0);
278172c5052dSChristoph Hellwig inode_dio_done(inode);
27825b3ff237Sjiayingz@google.com (Jiaying Zhang) return;
27838d5d02e6SMingming Cao }
27848d5d02e6SMingming Cao 
27854c0425ffSMingming Cao io_end->offset = offset;
27864c0425ffSMingming Cao io_end->size = size;
27875b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) {
27885b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->iocb = iocb;
27895b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->result = ret;
27905b3ff237Sjiayingz@google.com (Jiaying Zhang) }
27914c0425ffSMingming Cao wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
27924c0425ffSMingming Cao 
27938d5d02e6SMingming Cao /* Add the io_end to the per-inode completed aio dio list */
2794744692dcSJiaying Zhang ei = EXT4_I(io_end->inode);
2795744692dcSJiaying Zhang spin_lock_irqsave(&ei->i_completed_io_lock, flags);
2796744692dcSJiaying Zhang list_add_tail(&io_end->list, &ei->i_completed_io_list);
2797744692dcSJiaying Zhang spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2798c999af2bSEric Sandeen 
2799c999af2bSEric Sandeen /* queue the work to convert unwritten extents to written */
28004c81f045STejun Heo queue_work(wq, &io_end->work);
28014c0425ffSMingming Cao }
2802c7064ef1SJiaying Zhang 
2803744692dcSJiaying Zhang static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2804744692dcSJiaying Zhang {
2805744692dcSJiaying Zhang ext4_io_end_t *io_end = bh->b_private;
2806744692dcSJiaying Zhang struct workqueue_struct *wq;
2807744692dcSJiaying Zhang struct inode *inode;
2808744692dcSJiaying Zhang unsigned long flags;
2809744692dcSJiaying Zhang 
2810744692dcSJiaying Zhang if (!test_clear_buffer_uninit(bh) || !io_end)
2811744692dcSJiaying Zhang goto out;
2812744692dcSJiaying Zhang 
2813744692dcSJiaying Zhang if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
281492b97816STheodore Ts'o ext4_msg(io_end->inode->i_sb, KERN_INFO,
281592b97816STheodore Ts'o "sb umounted, discard end_io request for inode %lu",
2816744692dcSJiaying Zhang io_end->inode->i_ino);
2817744692dcSJiaying Zhang ext4_free_io_end(io_end);
2818744692dcSJiaying Zhang goto out;
2819744692dcSJiaying Zhang }
2820744692dcSJiaying Zhang 
282132c80b32STao Ma /*
282232c80b32STao Ma * It may be over-defensive to check EXT4_IO_END_UNWRITTEN here,
282332c80b32STao Ma * but being more careful is always safe for future changes.
282432c80b32STao Ma */
2825744692dcSJiaying Zhang inode = io_end->inode;
28260edeb71dSTao Ma ext4_set_io_unwritten_flag(inode, io_end);
2827744692dcSJiaying Zhang 
2828744692dcSJiaying Zhang /* Add the io_end to the per-inode completed io list */
2829744692dcSJiaying Zhang spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
2830744692dcSJiaying Zhang list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
2831744692dcSJiaying Zhang spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
2832744692dcSJiaying Zhang 
2833744692dcSJiaying Zhang wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
2834744692dcSJiaying Zhang /* queue the work to convert unwritten extents to written */
2835744692dcSJiaying Zhang queue_work(wq, &io_end->work);
2836744692dcSJiaying Zhang out:
2837744692dcSJiaying Zhang bh->b_private = NULL;
2838744692dcSJiaying Zhang bh->b_end_io = NULL;
2839744692dcSJiaying Zhang clear_buffer_uninit(bh);
2840744692dcSJiaying Zhang end_buffer_async_write(bh, uptodate);
2841744692dcSJiaying Zhang }
2842744692dcSJiaying Zhang 
2843744692dcSJiaying Zhang static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2844744692dcSJiaying Zhang {
2845744692dcSJiaying Zhang ext4_io_end_t *io_end;
2846744692dcSJiaying Zhang struct page *page = bh->b_page;
2847744692dcSJiaying Zhang loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2848744692dcSJiaying Zhang size_t size = bh->b_size;
2849744692dcSJiaying Zhang 
2850744692dcSJiaying Zhang retry:
2851744692dcSJiaying Zhang io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2852744692dcSJiaying Zhang if (!io_end) {
28536db26ffcSAndrew Morton pr_warn_ratelimited("%s: allocation fail\n", __func__);
2854744692dcSJiaying Zhang schedule();
2855744692dcSJiaying Zhang goto retry;
2856744692dcSJiaying Zhang }
2857744692dcSJiaying Zhang io_end->offset = offset;
2858744692dcSJiaying Zhang io_end->size = size;
2859744692dcSJiaying Zhang /*
2860744692dcSJiaying Zhang * We need to hold a reference to the page to make sure it
2861744692dcSJiaying Zhang * doesn't get evicted before ext4_end_io_work() has a chance
2862744692dcSJiaying Zhang * to convert the extent from unwritten to written.
2863744692dcSJiaying Zhang */
2864744692dcSJiaying Zhang io_end->page = page;
2865744692dcSJiaying Zhang get_page(io_end->page);
2866744692dcSJiaying Zhang 
2867744692dcSJiaying Zhang bh->b_private = io_end;
2868744692dcSJiaying Zhang bh->b_end_io = ext4_end_io_buffer_write;
2869744692dcSJiaying Zhang return 0;
2870744692dcSJiaying Zhang }
2871744692dcSJiaying Zhang 
28724c0425ffSMingming Cao /*
28734c0425ffSMingming Cao * For ext4 extent files, ext4 will do direct-io writes to holes,
28744c0425ffSMingming Cao * preallocated extents, and writes that extend the file; there is
28754c0425ffSMingming Cao * no need to fall back to buffered IO.
28764c0425ffSMingming Cao *
2877b595076aSUwe Kleine-König * For holes, we fallocate those blocks, mark them as uninitialized.
28784c0425ffSMingming Cao * If those blocks were preallocated, we make sure they are split, but
2879b595076aSUwe Kleine-König * still keep the range to write as uninitialized.
28804c0425ffSMingming Cao *
28818d5d02e6SMingming Cao * The unwritten extents will be converted to written when DIO is completed.
28828d5d02e6SMingming Cao * For async direct IO, since the IO may still be pending when we return, we
288325985edcSLucas De Marchi * set up an end_io call back function, which will do the conversion
28848d5d02e6SMingming Cao * when the async direct IO is completed.
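 *
 * The async write path described above therefore looks roughly
 * like this:
 *
 *	ext4_ext_direct_IO()
 *	  -> ext4_get_block_write()	maps an uninitialized extent
 *	  -> bio completes
 *	  -> ext4_end_io_dio()		queues io_end->work
 *	  -> workqueue			converts the extent to written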
28854c0425ffSMingming Cao *
28864c0425ffSMingming Cao * If the O_DIRECT write will extend the file then add this inode to the
28874c0425ffSMingming Cao * orphan list, so recovery will truncate it back to the original size
28884c0425ffSMingming Cao * if the machine crashes during the write.
28894c0425ffSMingming Cao *
28904c0425ffSMingming Cao */
28914c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
28924c0425ffSMingming Cao const struct iovec *iov, loff_t offset,
28934c0425ffSMingming Cao unsigned long nr_segs)
28944c0425ffSMingming Cao {
28954c0425ffSMingming Cao struct file *file = iocb->ki_filp;
28964c0425ffSMingming Cao struct inode *inode = file->f_mapping->host;
28974c0425ffSMingming Cao ssize_t ret;
28984c0425ffSMingming Cao size_t count = iov_length(iov, nr_segs);
28994c0425ffSMingming Cao 
29004c0425ffSMingming Cao loff_t final_size = offset + count;
29014c0425ffSMingming Cao if (rw == WRITE && final_size <= inode->i_size) {
29024c0425ffSMingming Cao /*
29038d5d02e6SMingming Cao * We could direct write to holes and fallocate.
29048d5d02e6SMingming Cao *
29058d5d02e6SMingming Cao * Allocated blocks to fill the hole are marked as uninitialized
290625985edcSLucas De Marchi * to prevent a parallel buffered read from exposing the stale
29074c0425ffSMingming Cao * data before the DIO completes the data IO.
29088d5d02e6SMingming Cao *
29098d5d02e6SMingming Cao * As to previously fallocated extents, ext4 get_block
29104c0425ffSMingming Cao * will just simply mark the buffer mapped but still
29114c0425ffSMingming Cao * keep the extents uninitialized.
29124c0425ffSMingming Cao *
29138d5d02e6SMingming Cao * For the non-AIO case, we will convert those unwritten extents
29148d5d02e6SMingming Cao * to written after returning from blockdev_direct_IO.
29154c0425ffSMingming Cao *
29168d5d02e6SMingming Cao * For async DIO, the conversion needs to be deferred until
29178d5d02e6SMingming Cao * the IO is completed. The ext4 end_io callback function
29188d5d02e6SMingming Cao * will be called to take care of the conversion work.
29198d5d02e6SMingming Cao * Here, for the async case, we allocate an io_end structure to
29208d5d02e6SMingming Cao * hook to the iocb.
29214c0425ffSMingming Cao */
29228d5d02e6SMingming Cao iocb->private = NULL;
29238d5d02e6SMingming Cao EXT4_I(inode)->cur_aio_dio = NULL;
29248d5d02e6SMingming Cao if (!is_sync_kiocb(iocb)) {
2925266991b1SJeff Moyer ext4_io_end_t *io_end =
2926266991b1SJeff Moyer ext4_init_io_end(inode, GFP_NOFS);
2927266991b1SJeff Moyer if (!io_end)
29284c0425ffSMingming Cao return -ENOMEM;
2929266991b1SJeff Moyer io_end->flag |= EXT4_IO_END_DIRECT;
2930266991b1SJeff Moyer iocb->private = io_end;
29318d5d02e6SMingming Cao /*
29328d5d02e6SMingming Cao * we save the io structure for the current async
293379e83036SEric Sandeen * direct IO, so that later ext4_map_blocks()
29348d5d02e6SMingming Cao * could flag the io structure whether there
29358d5d02e6SMingming Cao * are unwritten extents that need to be converted
29368d5d02e6SMingming Cao * when the IO is completed.
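 *
 * (Roughly, cur_aio_dio is a per-inode hand-off: while this
 * DIO is being submitted, ext4_map_blocks() can pick the
 * pointer up and flag the io_end when it has to allocate an
 * uninitialized extent.)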
29378d5d02e6SMingming Cao */
29388d5d02e6SMingming Cao EXT4_I(inode)->cur_aio_dio = iocb->private;
29398d5d02e6SMingming Cao }
29408d5d02e6SMingming Cao 
2941aacfc19cSChristoph Hellwig ret = __blockdev_direct_IO(rw, iocb, inode,
29424c0425ffSMingming Cao inode->i_sb->s_bdev, iov,
29434c0425ffSMingming Cao offset, nr_segs,
2944c7064ef1SJiaying Zhang ext4_get_block_write,
2945aacfc19cSChristoph Hellwig ext4_end_io_dio,
2946aacfc19cSChristoph Hellwig NULL,
294793ef8541SJeff Moyer DIO_LOCKING);
29488d5d02e6SMingming Cao if (iocb->private)
29498d5d02e6SMingming Cao EXT4_I(inode)->cur_aio_dio = NULL;
29508d5d02e6SMingming Cao /*
29518d5d02e6SMingming Cao * The io_end structure takes a reference to the inode;
29528d5d02e6SMingming Cao * that structure needs to be destroyed and the
29538d5d02e6SMingming Cao * reference to the inode needs to be dropped when the IO is
29548d5d02e6SMingming Cao * complete, even for a 0 byte write or a failure.
29558d5d02e6SMingming Cao *
29568d5d02e6SMingming Cao * In the successful AIO DIO case, the io_end structure will be
29578d5d02e6SMingming Cao * destroyed and the reference to the inode will be dropped
29588d5d02e6SMingming Cao * after the end_io call back function is called.
29598d5d02e6SMingming Cao *
29608d5d02e6SMingming Cao * In the 0-byte-write or error case, since the VFS direct IO
29618d5d02e6SMingming Cao * code won't invoke the end_io callback function,
29628d5d02e6SMingming Cao * we need to free the io_end structure here.
29638d5d02e6SMingming Cao */
29648d5d02e6SMingming Cao if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
29658d5d02e6SMingming Cao ext4_free_io_end(iocb->private);
29668d5d02e6SMingming Cao iocb->private = NULL;
296719f5fb7aSTheodore Ts'o } else if (ret > 0 && ext4_test_inode_state(inode,
29685f524950SMingming EXT4_STATE_DIO_UNWRITTEN)) {
2969109f5565SMingming int err;
29708d5d02e6SMingming Cao /*
29718d5d02e6SMingming Cao * for the non-AIO case, since the IO is already
297225985edcSLucas De Marchi * completed, we could do the conversion right here
29738d5d02e6SMingming Cao */
2974109f5565SMingming err = ext4_convert_unwritten_extents(inode,
29758d5d02e6SMingming Cao offset, ret);
2976109f5565SMingming if (err < 0)
2977109f5565SMingming ret = err;
297819f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
2979109f5565SMingming }
29804c0425ffSMingming Cao return ret;
29814c0425ffSMingming Cao }
29828d5d02e6SMingming Cao 
29838d5d02e6SMingming Cao /* for the write-past-end-of-file case, we fall back to the old way */
29844c0425ffSMingming Cao return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
29854c0425ffSMingming Cao }
29864c0425ffSMingming Cao 
29874c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
29884c0425ffSMingming Cao const struct iovec *iov, loff_t offset,
29894c0425ffSMingming Cao unsigned long nr_segs)
29904c0425ffSMingming Cao {
29914c0425ffSMingming Cao struct file *file = iocb->ki_filp;
29924c0425ffSMingming Cao struct inode *inode = file->f_mapping->host;
29930562e0baSJiaying Zhang ssize_t ret;
29944c0425ffSMingming Cao 
299584ebd795STheodore Ts'o /*
299684ebd795STheodore Ts'o * If we are doing data journalling we don't support O_DIRECT
299784ebd795STheodore Ts'o */
299884ebd795STheodore Ts'o if (ext4_should_journal_data(inode))
299984ebd795STheodore Ts'o return 0;
300084ebd795STheodore Ts'o 
30010562e0baSJiaying Zhang trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
300212e9b892SDmitry Monakhov if (ext4_test_inode_flag(inode,
EXT4_INODE_EXTENTS)) 30030562e0baSJiaying Zhang ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 30040562e0baSJiaying Zhang else 30050562e0baSJiaying Zhang ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 30060562e0baSJiaying Zhang trace_ext4_direct_IO_exit(inode, offset, 30070562e0baSJiaying Zhang iov_length(iov, nr_segs), rw, ret); 30080562e0baSJiaying Zhang return ret; 30094c0425ffSMingming Cao } 30104c0425ffSMingming Cao 3011ac27a0ecSDave Kleikamp /* 3012617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling 3013ac27a0ecSDave Kleikamp * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 3014ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is 3015ac27a0ecSDave Kleikamp * not necessarily locked. 3016ac27a0ecSDave Kleikamp * 3017ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the 3018ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive". We cannot just set the buffers dirty 3019ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 3020ac27a0ecSDave Kleikamp * 3021ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 3022ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 3023ac27a0ecSDave Kleikamp */ 3024617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 3025ac27a0ecSDave Kleikamp { 3026ac27a0ecSDave Kleikamp SetPageChecked(page); 3027ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 3028ac27a0ecSDave Kleikamp } 3029ac27a0ecSDave Kleikamp 3030617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = { 3031617ba13bSMingming Cao .readpage = ext4_readpage, 3032617ba13bSMingming Cao .readpages = ext4_readpages, 303343ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3034bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3035bfc1af65SNick Piggin .write_end = ext4_ordered_write_end, 3036617ba13bSMingming Cao .bmap = ext4_bmap, 3037617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3038617ba13bSMingming Cao .releasepage = ext4_releasepage, 3039617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3040ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 30418ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3042aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3043ac27a0ecSDave Kleikamp }; 3044ac27a0ecSDave Kleikamp 3045617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = { 3046617ba13bSMingming Cao .readpage = ext4_readpage, 3047617ba13bSMingming Cao .readpages = ext4_readpages, 304843ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3049bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3050bfc1af65SNick Piggin .write_end = ext4_writeback_write_end, 3051617ba13bSMingming Cao .bmap = ext4_bmap, 3052617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3053617ba13bSMingming Cao .releasepage = ext4_releasepage, 3054617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 3055ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 30568ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3057aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3058ac27a0ecSDave Kleikamp }; 3059ac27a0ecSDave Kleikamp 3060617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = 
{ 3061617ba13bSMingming Cao .readpage = ext4_readpage, 3062617ba13bSMingming Cao .readpages = ext4_readpages, 306343ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 3064bfc1af65SNick Piggin .write_begin = ext4_write_begin, 3065bfc1af65SNick Piggin .write_end = ext4_journalled_write_end, 3066617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty, 3067617ba13bSMingming Cao .bmap = ext4_bmap, 3068617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 3069617ba13bSMingming Cao .releasepage = ext4_releasepage, 307084ebd795STheodore Ts'o .direct_IO = ext4_direct_IO, 30718ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3072aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 3073ac27a0ecSDave Kleikamp }; 3074ac27a0ecSDave Kleikamp 307564769240SAlex Tomas static const struct address_space_operations ext4_da_aops = { 307664769240SAlex Tomas .readpage = ext4_readpage, 307764769240SAlex Tomas .readpages = ext4_readpages, 307843ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 307964769240SAlex Tomas .writepages = ext4_da_writepages, 308064769240SAlex Tomas .write_begin = ext4_da_write_begin, 308164769240SAlex Tomas .write_end = ext4_da_write_end, 308264769240SAlex Tomas .bmap = ext4_bmap, 308364769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage, 308464769240SAlex Tomas .releasepage = ext4_releasepage, 308564769240SAlex Tomas .direct_IO = ext4_direct_IO, 308664769240SAlex Tomas .migratepage = buffer_migrate_page, 30878ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 3088aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 308964769240SAlex Tomas }; 309064769240SAlex Tomas 3091617ba13bSMingming Cao void ext4_set_aops(struct inode *inode) 3092ac27a0ecSDave Kleikamp { 30933d2b1582SLukas Czerner switch (ext4_inode_journal_mode(inode)) { 30943d2b1582SLukas Czerner case EXT4_INODE_ORDERED_DATA_MODE: 30953d2b1582SLukas Czerner if (test_opt(inode->i_sb, DELALLOC)) 3096cd1aac32SAneesh Kumar K.V inode->i_mapping->a_ops = &ext4_da_aops; 3097ac27a0ecSDave Kleikamp else 30983d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_ordered_aops; 30993d2b1582SLukas Czerner break; 31003d2b1582SLukas Czerner case EXT4_INODE_WRITEBACK_DATA_MODE: 31013d2b1582SLukas Czerner if (test_opt(inode->i_sb, DELALLOC)) 31023d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_da_aops; 31033d2b1582SLukas Czerner else 31043d2b1582SLukas Czerner inode->i_mapping->a_ops = &ext4_writeback_aops; 31053d2b1582SLukas Czerner break; 31063d2b1582SLukas Czerner case EXT4_INODE_JOURNAL_DATA_MODE: 3107617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops; 31083d2b1582SLukas Czerner break; 31093d2b1582SLukas Czerner default: 31103d2b1582SLukas Czerner BUG(); 31113d2b1582SLukas Czerner } 3112ac27a0ecSDave Kleikamp } 3113ac27a0ecSDave Kleikamp 31144e96b2dbSAllison Henderson 31154e96b2dbSAllison Henderson /* 31164e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers() 31174e96b2dbSAllison Henderson * Wrapper function for ext4_discard_partial_page_buffers_no_lock. 31184e96b2dbSAllison Henderson * This function finds and locks the page containing the offset 31194e96b2dbSAllison Henderson * "from" and passes it to ext4_discard_partial_page_buffers_no_lock. 31204e96b2dbSAllison Henderson * Calling functions that already have the page locked should call 31214e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock directly. 
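 *
 * A hypothetical call zeroing 512 bytes at byte offset 1024 of
 * the file would look like:
 *
 *	err = ext4_discard_partial_page_buffers(handle,
 *			inode->i_mapping, 1024, 512, 0);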
31224e96b2dbSAllison Henderson */
31234e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle,
31244e96b2dbSAllison Henderson struct address_space *mapping, loff_t from,
31254e96b2dbSAllison Henderson loff_t length, int flags)
31264e96b2dbSAllison Henderson {
31274e96b2dbSAllison Henderson struct inode *inode = mapping->host;
31284e96b2dbSAllison Henderson struct page *page;
31294e96b2dbSAllison Henderson int err = 0;
31304e96b2dbSAllison Henderson 
31314e96b2dbSAllison Henderson page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
31324e96b2dbSAllison Henderson mapping_gfp_mask(mapping) & ~__GFP_FS);
31334e96b2dbSAllison Henderson if (!page)
31345129d05fSYongqiang Yang return -ENOMEM;
31354e96b2dbSAllison Henderson 
31364e96b2dbSAllison Henderson err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
31374e96b2dbSAllison Henderson from, length, flags);
31384e96b2dbSAllison Henderson 
31394e96b2dbSAllison Henderson unlock_page(page);
31404e96b2dbSAllison Henderson page_cache_release(page);
31414e96b2dbSAllison Henderson return err;
31424e96b2dbSAllison Henderson }
31434e96b2dbSAllison Henderson 
31444e96b2dbSAllison Henderson /*
31454e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock()
31464e96b2dbSAllison Henderson * Zeros a page range of length 'length' starting from offset 'from'.
31474e96b2dbSAllison Henderson * Buffer heads that correspond to the block aligned regions of the
31484e96b2dbSAllison Henderson * zeroed range will be unmapped. Non-block-aligned regions
31494e96b2dbSAllison Henderson * will have the corresponding buffer head mapped if needed so that
31504e96b2dbSAllison Henderson * that region of the page can be updated with the partial zero out.
31514e96b2dbSAllison Henderson *
31524e96b2dbSAllison Henderson * This function assumes that the page has already been locked.
31534e96b2dbSAllison Henderson * The range to be discarded must be contained within the given page.
31544e96b2dbSAllison Henderson * If the specified range exceeds the end of the page it will be shortened
31554e96b2dbSAllison Henderson * to the end of the page that corresponds to 'from'. This function is
31564e96b2dbSAllison Henderson * appropriate for updating a page and its buffer heads to be unmapped and
31574e96b2dbSAllison Henderson * zeroed for blocks that have been either released, or are going to be
31584e96b2dbSAllison Henderson * released.
31594e96b2dbSAllison Henderson *
31604e96b2dbSAllison Henderson * handle: The journal handle
31614e96b2dbSAllison Henderson * inode: The file's inode
31624e96b2dbSAllison Henderson * page: A locked page that contains the offset "from"
31634e96b2dbSAllison Henderson * from: The starting byte offset (from the beginning of the file)
31644e96b2dbSAllison Henderson * to begin discarding
31654e96b2dbSAllison Henderson * len: The length of bytes to discard
31664e96b2dbSAllison Henderson * flags: Optional flags that may be used:
31674e96b2dbSAllison Henderson *
31684e96b2dbSAllison Henderson * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
31694e96b2dbSAllison Henderson * Only zero the regions of the page whose buffer heads
31704e96b2dbSAllison Henderson * have already been unmapped. This flag is appropriate
31714e96b2dbSAllison Henderson * for updating the contents of a page whose blocks may
31724e96b2dbSAllison Henderson * have already been released, and we only want to zero
31734e96b2dbSAllison Henderson * out the regions that correspond to those released blocks.
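 *
 * Worked example, assuming a 1 KiB block size on a 4 KiB page:
 * from = 1024 and length = 1536 cover all of block 1 and the
 * first half of block 2. Block 1 is unmapped and zeroed in one
 * step; block 2 stays mapped, is read in first if not uptodate,
 * and only its first 512 bytes are zeroed.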
31744e96b2dbSAllison Henderson  *
31754e96b2dbSAllison Henderson  * Returns zero on success or negative on failure.
31764e96b2dbSAllison Henderson  */
31775f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
31784e96b2dbSAllison Henderson 		struct inode *inode, struct page *page, loff_t from,
31794e96b2dbSAllison Henderson 		loff_t length, int flags)
31804e96b2dbSAllison Henderson {
31814e96b2dbSAllison Henderson 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
31824e96b2dbSAllison Henderson 	unsigned int offset = from & (PAGE_CACHE_SIZE-1);
31834e96b2dbSAllison Henderson 	unsigned int blocksize, max, pos;
31844e96b2dbSAllison Henderson 	ext4_lblk_t iblock;
31854e96b2dbSAllison Henderson 	struct buffer_head *bh;
31864e96b2dbSAllison Henderson 	int err = 0;
31874e96b2dbSAllison Henderson 
31884e96b2dbSAllison Henderson 	blocksize = inode->i_sb->s_blocksize;
31894e96b2dbSAllison Henderson 	max = PAGE_CACHE_SIZE - offset;
31904e96b2dbSAllison Henderson 
31914e96b2dbSAllison Henderson 	if (index != page->index)
31924e96b2dbSAllison Henderson 		return -EINVAL;
31934e96b2dbSAllison Henderson 
31944e96b2dbSAllison Henderson 	/*
31954e96b2dbSAllison Henderson 	 * correct the length if it does not fall between
31964e96b2dbSAllison Henderson 	 * 'from' and the end of the page
31974e96b2dbSAllison Henderson 	 */
31984e96b2dbSAllison Henderson 	if (length > max || length < 0)
31994e96b2dbSAllison Henderson 		length = max;
32004e96b2dbSAllison Henderson 
32014e96b2dbSAllison Henderson 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
32024e96b2dbSAllison Henderson 
3203093e6e36SYongqiang Yang 	if (!page_has_buffers(page))
32044e96b2dbSAllison Henderson 		create_empty_buffers(page, blocksize, 0);
32054e96b2dbSAllison Henderson 
32064e96b2dbSAllison Henderson 	/* Find the buffer that contains "offset" */
32074e96b2dbSAllison Henderson 	bh = page_buffers(page);
32084e96b2dbSAllison Henderson 	pos = blocksize;
32094e96b2dbSAllison Henderson 	while (offset >= pos) {
32104e96b2dbSAllison Henderson 		bh = bh->b_this_page;
32114e96b2dbSAllison Henderson 		iblock++;
32124e96b2dbSAllison Henderson 		pos += blocksize;
32134e96b2dbSAllison Henderson 	}
32144e96b2dbSAllison Henderson 
32154e96b2dbSAllison Henderson 	pos = offset;
32164e96b2dbSAllison Henderson 	while (pos < offset + length) {
3217e260daf2SYongqiang Yang 		unsigned int end_of_block, range_to_discard;
3218e260daf2SYongqiang Yang 
32194e96b2dbSAllison Henderson 		err = 0;
32204e96b2dbSAllison Henderson 
32214e96b2dbSAllison Henderson 		/* The length of space left to zero and unmap */
32224e96b2dbSAllison Henderson 		range_to_discard = offset + length - pos;
32234e96b2dbSAllison Henderson 
32244e96b2dbSAllison Henderson 		/* The length of space until the end of the block */
32254e96b2dbSAllison Henderson 		end_of_block = blocksize - (pos & (blocksize-1));
32264e96b2dbSAllison Henderson 
32274e96b2dbSAllison Henderson 		/*
32284e96b2dbSAllison Henderson 		 * Do not unmap or zero past end of block
32294e96b2dbSAllison Henderson 		 * for this buffer head
32304e96b2dbSAllison Henderson 		 */
32314e96b2dbSAllison Henderson 		if (range_to_discard > end_of_block)
32324e96b2dbSAllison Henderson 			range_to_discard = end_of_block;
32334e96b2dbSAllison Henderson 
32344e96b2dbSAllison Henderson 
32354e96b2dbSAllison Henderson 		/*
32364e96b2dbSAllison Henderson 		 * Skip this buffer head if we are only zeroing unmapped
32374e96b2dbSAllison Henderson 		 * regions of the page
32384e96b2dbSAllison Henderson 		 */
32394e96b2dbSAllison Henderson 		if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
32404e96b2dbSAllison Henderson 		    buffer_mapped(bh))
32414e96b2dbSAllison Henderson 			goto next;
32424e96b2dbSAllison Henderson 
32434e96b2dbSAllison Henderson 		/* If the range is block aligned, unmap */
32444e96b2dbSAllison Henderson 		if (range_to_discard == blocksize) {
32454e96b2dbSAllison Henderson 			clear_buffer_dirty(bh);
32464e96b2dbSAllison Henderson 			bh->b_bdev = NULL;
32474e96b2dbSAllison Henderson 			clear_buffer_mapped(bh);
32484e96b2dbSAllison Henderson 			clear_buffer_req(bh);
32494e96b2dbSAllison Henderson 			clear_buffer_new(bh);
32504e96b2dbSAllison Henderson 			clear_buffer_delay(bh);
32514e96b2dbSAllison Henderson 			clear_buffer_unwritten(bh);
32524e96b2dbSAllison Henderson 			clear_buffer_uptodate(bh);
32534e96b2dbSAllison Henderson 			zero_user(page, pos, range_to_discard);
32544e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "Buffer discarded");
32554e96b2dbSAllison Henderson 			goto next;
32564e96b2dbSAllison Henderson 		}
32574e96b2dbSAllison Henderson 
32584e96b2dbSAllison Henderson 		/*
32594e96b2dbSAllison Henderson 		 * If this block is not completely contained in the range
32604e96b2dbSAllison Henderson 		 * to be discarded, then it is not going to be released. Because
32614e96b2dbSAllison Henderson 		 * we need to keep this block, we need to make sure this part
32624e96b2dbSAllison Henderson 		 * of the page is uptodate before we modify it by writing
32634e96b2dbSAllison Henderson 		 * partial zeros on it.
32644e96b2dbSAllison Henderson 		 */
32654e96b2dbSAllison Henderson 		if (!buffer_mapped(bh)) {
32664e96b2dbSAllison Henderson 			/*
32674e96b2dbSAllison Henderson 			 * Buffer head must be mapped before we can read
32684e96b2dbSAllison Henderson 			 * from the block
32694e96b2dbSAllison Henderson 			 */
32704e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "unmapped");
32714e96b2dbSAllison Henderson 			ext4_get_block(inode, iblock, bh, 0);
32724e96b2dbSAllison Henderson 			/* unmapped? It's a hole - nothing to do */
32734e96b2dbSAllison Henderson 			if (!buffer_mapped(bh)) {
32744e96b2dbSAllison Henderson 				BUFFER_TRACE(bh, "still unmapped");
32754e96b2dbSAllison Henderson 				goto next;
32764e96b2dbSAllison Henderson 			}
32774e96b2dbSAllison Henderson 		}
32784e96b2dbSAllison Henderson 
32794e96b2dbSAllison Henderson 		/* Ok, it's mapped. Make sure it's up-to-date */
32804e96b2dbSAllison Henderson 		if (PageUptodate(page))
32814e96b2dbSAllison Henderson 			set_buffer_uptodate(bh);
32824e96b2dbSAllison Henderson 
32834e96b2dbSAllison Henderson 		if (!buffer_uptodate(bh)) {
32844e96b2dbSAllison Henderson 			err = -EIO;
32854e96b2dbSAllison Henderson 			ll_rw_block(READ, 1, &bh);
32864e96b2dbSAllison Henderson 			wait_on_buffer(bh);
32874e96b2dbSAllison Henderson 			/* Uhhuh. Read error. Complain and punt.*/
32884e96b2dbSAllison Henderson 			if (!buffer_uptodate(bh))
32894e96b2dbSAllison Henderson 				goto next;
32904e96b2dbSAllison Henderson 		}
32914e96b2dbSAllison Henderson 
32924e96b2dbSAllison Henderson 		if (ext4_should_journal_data(inode)) {
32934e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "get write access");
32944e96b2dbSAllison Henderson 			err = ext4_journal_get_write_access(handle, bh);
32954e96b2dbSAllison Henderson 			if (err)
32964e96b2dbSAllison Henderson 				goto next;
32974e96b2dbSAllison Henderson 		}
32984e96b2dbSAllison Henderson 
32994e96b2dbSAllison Henderson 		zero_user(page, pos, range_to_discard);
33004e96b2dbSAllison Henderson 
33014e96b2dbSAllison Henderson 		err = 0;
33024e96b2dbSAllison Henderson 		if (ext4_should_journal_data(inode)) {
33034e96b2dbSAllison Henderson 			err = ext4_handle_dirty_metadata(handle, inode, bh);
3304decbd919STheodore Ts'o 		} else
33054e96b2dbSAllison Henderson 			mark_buffer_dirty(bh);
33064e96b2dbSAllison Henderson 
33074e96b2dbSAllison Henderson 		BUFFER_TRACE(bh, "Partial buffer zeroed");
33084e96b2dbSAllison Henderson next:
33094e96b2dbSAllison Henderson 		bh = bh->b_this_page;
33104e96b2dbSAllison Henderson 		iblock++;
33114e96b2dbSAllison Henderson 		pos += range_to_discard;
33124e96b2dbSAllison Henderson 	}
33134e96b2dbSAllison Henderson 
33144e96b2dbSAllison Henderson 	return err;
33154e96b2dbSAllison Henderson }
33164e96b2dbSAllison Henderson 
331791ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode)
331891ef4cafSDuane Griffin {
331991ef4cafSDuane Griffin 	if (S_ISREG(inode->i_mode))
332091ef4cafSDuane Griffin 		return 1;
332191ef4cafSDuane Griffin 	if (S_ISDIR(inode->i_mode))
332291ef4cafSDuane Griffin 		return 1;
332391ef4cafSDuane Griffin 	if (S_ISLNK(inode->i_mode))
332491ef4cafSDuane Griffin 		return !ext4_inode_is_fast_symlink(inode);
332591ef4cafSDuane Griffin 	return 0;
332691ef4cafSDuane Griffin }
332791ef4cafSDuane Griffin 
3328ac27a0ecSDave Kleikamp /*
3329a4bb6b64SAllison Henderson  * ext4_punch_hole: punches a hole in a file by releasing the blocks
3330a4bb6b64SAllison Henderson  * associated with the given offset and length
3331a4bb6b64SAllison Henderson  *
3332a4bb6b64SAllison Henderson  * @inode:  File inode
3333a4bb6b64SAllison Henderson  * @offset: The offset where the hole will begin
3334a4bb6b64SAllison Henderson  * @len:    The length of the hole
3335a4bb6b64SAllison Henderson  *
3336a4bb6b64SAllison Henderson  * Returns: 0 on success or negative on failure
3337a4bb6b64SAllison Henderson  */
3338a4bb6b64SAllison Henderson 
3339a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3340a4bb6b64SAllison Henderson {
3341a4bb6b64SAllison Henderson 	struct inode *inode = file->f_path.dentry->d_inode;
3342a4bb6b64SAllison Henderson 	if (!S_ISREG(inode->i_mode))
334373355192SAllison Henderson 		return -EOPNOTSUPP;
3344a4bb6b64SAllison Henderson 
3345a4bb6b64SAllison Henderson 	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3346a4bb6b64SAllison Henderson 		/* TODO: Add support for non extent hole punching */
334773355192SAllison Henderson 		return -EOPNOTSUPP;
3348a4bb6b64SAllison Henderson 	}
3349a4bb6b64SAllison Henderson 
3350bab08ab9STheodore Ts'o 	if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3351bab08ab9STheodore Ts'o 		/* TODO: Add support for bigalloc file systems */
335273355192SAllison Henderson 		return -EOPNOTSUPP;
3353bab08ab9STheodore Ts'o 	}
3354bab08ab9STheodore Ts'o 
3355a4bb6b64SAllison Henderson 	return ext4_ext_punch_hole(file, offset, length);
3356a4bb6b64SAllison Henderson }
3357a4bb6b64SAllison Henderson 
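/*
 * Usage note (editor's sketch, not part of the original file): the
 * function above is reached from the VFS fallocate path; the
 * equivalent request from userspace is
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * where FALLOC_FL_PUNCH_HOLE must be paired with FALLOC_FL_KEEP_SIZE,
 * so punching a hole never changes i_size.
 */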
3358a4bb6b64SAllison Henderson /* 3359617ba13bSMingming Cao * ext4_truncate() 3360ac27a0ecSDave Kleikamp * 3361617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3362617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3363ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 3364ac27a0ecSDave Kleikamp * 336542b2aa86SJustin P. Mattock * As we work through the truncate and commit bits of it to the journal there 3366ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3367ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3368ac27a0ecSDave Kleikamp * 3369ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3370ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3371ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3372ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3373ac27a0ecSDave Kleikamp * left-to-right works OK too). 3374ac27a0ecSDave Kleikamp * 3375ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3376ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3377ac27a0ecSDave Kleikamp * 3378ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3379617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3380ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3381617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3382617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3383ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3384617ba13bSMingming Cao * ext4_truncate() run will find them and release them. 3385ac27a0ecSDave Kleikamp */ 3386617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3387ac27a0ecSDave Kleikamp { 33880562e0baSJiaying Zhang trace_ext4_truncate_enter(inode); 33890562e0baSJiaying Zhang 339091ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3391ac27a0ecSDave Kleikamp return; 3392ac27a0ecSDave Kleikamp 339312e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3394c8d46e41SJiaying Zhang 33955534fb5bSTheodore Ts'o if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 339619f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 33977d8f9f7dSTheodore Ts'o 3398ff9893dcSAmir Goldstein if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3399cf108bcaSJan Kara ext4_ext_truncate(inode); 3400ff9893dcSAmir Goldstein else 3401ff9893dcSAmir Goldstein ext4_ind_truncate(inode); 3402a86c6181SAlex Tomas 34030562e0baSJiaying Zhang trace_ext4_truncate_exit(inode); 3404ac27a0ecSDave Kleikamp } 3405ac27a0ecSDave Kleikamp 3406ac27a0ecSDave Kleikamp /* 3407617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3408ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3409ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3410ac27a0ecSDave Kleikamp * inode. 
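 *
 * Worked example (editor's illustration, with assumed geometry): for a
 * 4K block size and 256-byte on-disk inodes there are 16 inodes per
 * block, so with 8192 inodes per group, inode 35 gives
 * block_group = (35 - 1) / 8192 = 0 and inode_offset = (35 - 1) % 8192
 * = 34; the inode therefore lives in block
 * ext4_inode_table(sb, gdp) + 34 / 16 = itable + 2, at byte offset
 * (34 % 16) * 256 = 512 within that block.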
3411ac27a0ecSDave Kleikamp */ 3412617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3413617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3414ac27a0ecSDave Kleikamp { 3415240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3416ac27a0ecSDave Kleikamp struct buffer_head *bh; 3417240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3418240799cdSTheodore Ts'o ext4_fsblk_t block; 3419240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3420ac27a0ecSDave Kleikamp 34213a06d778SAneesh Kumar K.V iloc->bh = NULL; 3422240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3423ac27a0ecSDave Kleikamp return -EIO; 3424ac27a0ecSDave Kleikamp 3425240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3426240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3427240799cdSTheodore Ts'o if (!gdp) 3428240799cdSTheodore Ts'o return -EIO; 3429240799cdSTheodore Ts'o 3430240799cdSTheodore Ts'o /* 3431240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 3432240799cdSTheodore Ts'o */ 343300d09882STao Ma inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 3434240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 3435240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 3436240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3437240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3438240799cdSTheodore Ts'o 3439240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 3440ac27a0ecSDave Kleikamp if (!bh) { 3441c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3442c398eda0STheodore Ts'o "unable to read itable block"); 3443ac27a0ecSDave Kleikamp return -EIO; 3444ac27a0ecSDave Kleikamp } 3445ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3446ac27a0ecSDave Kleikamp lock_buffer(bh); 34479c83a923SHidehiro Kawai 34489c83a923SHidehiro Kawai /* 34499c83a923SHidehiro Kawai * If the buffer has the write error flag, we have failed 34509c83a923SHidehiro Kawai * to write out another inode in the same block. In this 34519c83a923SHidehiro Kawai * case, we don't have to read the block because we may 34529c83a923SHidehiro Kawai * read the old inode data successfully. 34539c83a923SHidehiro Kawai */ 34549c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 34559c83a923SHidehiro Kawai set_buffer_uptodate(bh); 34569c83a923SHidehiro Kawai 3457ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3458ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3459ac27a0ecSDave Kleikamp unlock_buffer(bh); 3460ac27a0ecSDave Kleikamp goto has_buffer; 3461ac27a0ecSDave Kleikamp } 3462ac27a0ecSDave Kleikamp 3463ac27a0ecSDave Kleikamp /* 3464ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3465ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3466ac27a0ecSDave Kleikamp * block. 3467ac27a0ecSDave Kleikamp */ 3468ac27a0ecSDave Kleikamp if (in_mem) { 3469ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3470240799cdSTheodore Ts'o int i, start; 3471ac27a0ecSDave Kleikamp 3472240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 3473ac27a0ecSDave Kleikamp 3474ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? 
*/ 3475240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3476ac27a0ecSDave Kleikamp if (!bitmap_bh) 3477ac27a0ecSDave Kleikamp goto make_io; 3478ac27a0ecSDave Kleikamp 3479ac27a0ecSDave Kleikamp /* 3480ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3481ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3482ac27a0ecSDave Kleikamp * of one, so skip it. 3483ac27a0ecSDave Kleikamp */ 3484ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3485ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3486ac27a0ecSDave Kleikamp goto make_io; 3487ac27a0ecSDave Kleikamp } 3488240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 3489ac27a0ecSDave Kleikamp if (i == inode_offset) 3490ac27a0ecSDave Kleikamp continue; 3491617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3492ac27a0ecSDave Kleikamp break; 3493ac27a0ecSDave Kleikamp } 3494ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3495240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 3496ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3497ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3498ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3499ac27a0ecSDave Kleikamp unlock_buffer(bh); 3500ac27a0ecSDave Kleikamp goto has_buffer; 3501ac27a0ecSDave Kleikamp } 3502ac27a0ecSDave Kleikamp } 3503ac27a0ecSDave Kleikamp 3504ac27a0ecSDave Kleikamp make_io: 3505ac27a0ecSDave Kleikamp /* 3506240799cdSTheodore Ts'o * If we need to do any I/O, try to pre-readahead extra 3507240799cdSTheodore Ts'o * blocks from the inode table. 3508240799cdSTheodore Ts'o */ 3509240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 3510240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 3511240799cdSTheodore Ts'o unsigned num; 3512240799cdSTheodore Ts'o 3513240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 3514b713a5ecSTheodore Ts'o /* s_inode_readahead_blks is always a power of 2 */ 3515240799cdSTheodore Ts'o b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 3516240799cdSTheodore Ts'o if (table > b) 3517240799cdSTheodore Ts'o b = table; 3518240799cdSTheodore Ts'o end = b + EXT4_SB(sb)->s_inode_readahead_blks; 3519240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 3520240799cdSTheodore Ts'o if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3521240799cdSTheodore Ts'o EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 3522560671a0SAneesh Kumar K.V num -= ext4_itable_unused_count(sb, gdp); 3523240799cdSTheodore Ts'o table += num / inodes_per_block; 3524240799cdSTheodore Ts'o if (end > table) 3525240799cdSTheodore Ts'o end = table; 3526240799cdSTheodore Ts'o while (b <= end) 3527240799cdSTheodore Ts'o sb_breadahead(sb, b++); 3528240799cdSTheodore Ts'o } 3529240799cdSTheodore Ts'o 3530240799cdSTheodore Ts'o /* 3531ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 3532ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 3533ac27a0ecSDave Kleikamp * Read the block from disk. 
3534ac27a0ecSDave Kleikamp */ 35350562e0baSJiaying Zhang trace_ext4_load_inode(inode); 3536ac27a0ecSDave Kleikamp get_bh(bh); 3537ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 353865299a3bSChristoph Hellwig submit_bh(READ | REQ_META | REQ_PRIO, bh); 3539ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3540ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3541c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3542c398eda0STheodore Ts'o "unable to read itable block"); 3543ac27a0ecSDave Kleikamp brelse(bh); 3544ac27a0ecSDave Kleikamp return -EIO; 3545ac27a0ecSDave Kleikamp } 3546ac27a0ecSDave Kleikamp } 3547ac27a0ecSDave Kleikamp has_buffer: 3548ac27a0ecSDave Kleikamp iloc->bh = bh; 3549ac27a0ecSDave Kleikamp return 0; 3550ac27a0ecSDave Kleikamp } 3551ac27a0ecSDave Kleikamp 3552617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 3553ac27a0ecSDave Kleikamp { 3554ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. */ 3555617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 355619f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 3557ac27a0ecSDave Kleikamp } 3558ac27a0ecSDave Kleikamp 3559617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 3560ac27a0ecSDave Kleikamp { 3561617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 3562ac27a0ecSDave Kleikamp 3563ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 3564617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 3565ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 3566617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 3567ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 3568617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 3569ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 3570617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 3571ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 3572617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 3573ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 3574ac27a0ecSDave Kleikamp } 3575ac27a0ecSDave Kleikamp 3576ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 3577ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 3578ff9ddf7eSJan Kara { 357984a8dce2SDmitry Monakhov unsigned int vfs_fl; 358084a8dce2SDmitry Monakhov unsigned long old_fl, new_fl; 3581ff9ddf7eSJan Kara 358284a8dce2SDmitry Monakhov do { 358384a8dce2SDmitry Monakhov vfs_fl = ei->vfs_inode.i_flags; 358484a8dce2SDmitry Monakhov old_fl = ei->i_flags; 358584a8dce2SDmitry Monakhov new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 358684a8dce2SDmitry Monakhov EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| 358784a8dce2SDmitry Monakhov EXT4_DIRSYNC_FL); 358884a8dce2SDmitry Monakhov if (vfs_fl & S_SYNC) 358984a8dce2SDmitry Monakhov new_fl |= EXT4_SYNC_FL; 359084a8dce2SDmitry Monakhov if (vfs_fl & S_APPEND) 359184a8dce2SDmitry Monakhov new_fl |= EXT4_APPEND_FL; 359284a8dce2SDmitry Monakhov if (vfs_fl & S_IMMUTABLE) 359384a8dce2SDmitry Monakhov new_fl |= EXT4_IMMUTABLE_FL; 359484a8dce2SDmitry Monakhov if (vfs_fl & S_NOATIME) 359584a8dce2SDmitry Monakhov new_fl |= EXT4_NOATIME_FL; 359684a8dce2SDmitry Monakhov if (vfs_fl & S_DIRSYNC) 359784a8dce2SDmitry Monakhov new_fl |= EXT4_DIRSYNC_FL; 359884a8dce2SDmitry Monakhov } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); 3599ff9ddf7eSJan Kara } 3600de9a55b8STheodore Ts'o 36010fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 
36020fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 36030fc1b451SAneesh Kumar K.V { 36040fc1b451SAneesh Kumar K.V blkcnt_t i_blocks ; 36058180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 36068180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 36070fc1b451SAneesh Kumar K.V 36080fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 36090fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 36100fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 36110fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 36120fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 361307a03824STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 36148180a562SAneesh Kumar K.V /* i_blocks represent file system block size */ 36158180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 36168180a562SAneesh Kumar K.V } else { 36170fc1b451SAneesh Kumar K.V return i_blocks; 36188180a562SAneesh Kumar K.V } 36190fc1b451SAneesh Kumar K.V } else { 36200fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 36210fc1b451SAneesh Kumar K.V } 36220fc1b451SAneesh Kumar K.V } 3623ff9ddf7eSJan Kara 36241d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 3625ac27a0ecSDave Kleikamp { 3626617ba13bSMingming Cao struct ext4_iloc iloc; 3627617ba13bSMingming Cao struct ext4_inode *raw_inode; 36281d1fe1eeSDavid Howells struct ext4_inode_info *ei; 36291d1fe1eeSDavid Howells struct inode *inode; 3630b436b9beSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 36311d1fe1eeSDavid Howells long ret; 3632ac27a0ecSDave Kleikamp int block; 3633*08cefc7aSEric W. Biederman uid_t i_uid; 3634*08cefc7aSEric W. Biederman gid_t i_gid; 3635ac27a0ecSDave Kleikamp 36361d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 36371d1fe1eeSDavid Howells if (!inode) 36381d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 36391d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 36401d1fe1eeSDavid Howells return inode; 36411d1fe1eeSDavid Howells 36421d1fe1eeSDavid Howells ei = EXT4_I(inode); 36437dc57615SPeter Huewe iloc.bh = NULL; 3644ac27a0ecSDave Kleikamp 36451d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 36461d1fe1eeSDavid Howells if (ret < 0) 3647ac27a0ecSDave Kleikamp goto bad_inode; 3648617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 3649ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 3650*08cefc7aSEric W. Biederman i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 3651*08cefc7aSEric W. Biederman i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 3652ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 3653*08cefc7aSEric W. Biederman i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 3654*08cefc7aSEric W. Biederman i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 3655ac27a0ecSDave Kleikamp } 3656*08cefc7aSEric W. Biederman i_uid_write(inode, i_uid); 3657*08cefc7aSEric W. Biederman i_gid_write(inode, i_gid); 3658bfe86848SMiklos Szeredi set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 3659ac27a0ecSDave Kleikamp 3660353eb83cSTheodore Ts'o ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 3661ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 3662ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 3663ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 
3664ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes 3665ac27a0ecSDave Kleikamp * the test is that same one that e2fsck uses 3666ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 3667ac27a0ecSDave Kleikamp */ 3668ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 3669ac27a0ecSDave Kleikamp if (inode->i_mode == 0 || 3670617ba13bSMingming Cao !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 3671ac27a0ecSDave Kleikamp /* this inode is deleted */ 36721d1fe1eeSDavid Howells ret = -ESTALE; 3673ac27a0ecSDave Kleikamp goto bad_inode; 3674ac27a0ecSDave Kleikamp } 3675ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 3676ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 3677ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 3678ac27a0ecSDave Kleikamp * the process of deleting those. */ 3679ac27a0ecSDave Kleikamp } 3680ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 36810fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 36827973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 3683a9e81742STheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 3684a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 3685a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 3686a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 3687ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3688a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 3689a9e7f447SDmitry Monakhov ei->i_reserved_quota = 0; 3690a9e7f447SDmitry Monakhov #endif 3691ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 3692ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 3693a4912123STheodore Ts'o ei->i_last_alloc_group = ~0; 3694ac27a0ecSDave Kleikamp /* 3695ac27a0ecSDave Kleikamp * NOTE! The in-memory inode i_data array is in little-endian order 3696ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 3697ac27a0ecSDave Kleikamp */ 3698617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 3699ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 3700ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 3701ac27a0ecSDave Kleikamp 3702b436b9beSJan Kara /* 3703b436b9beSJan Kara * Set transaction id's of transactions that have to be committed 3704b436b9beSJan Kara * to finish f[data]sync. We set them to currently running transaction 3705b436b9beSJan Kara * as we cannot be sure that the inode or some of its metadata isn't 3706b436b9beSJan Kara * part of the transaction - the inode could have been reclaimed and 3707b436b9beSJan Kara * now it is reread from disk. 
3708b436b9beSJan Kara */ 3709b436b9beSJan Kara if (journal) { 3710b436b9beSJan Kara transaction_t *transaction; 3711b436b9beSJan Kara tid_t tid; 3712b436b9beSJan Kara 3713a931da6aSTheodore Ts'o read_lock(&journal->j_state_lock); 3714b436b9beSJan Kara if (journal->j_running_transaction) 3715b436b9beSJan Kara transaction = journal->j_running_transaction; 3716b436b9beSJan Kara else 3717b436b9beSJan Kara transaction = journal->j_committing_transaction; 3718b436b9beSJan Kara if (transaction) 3719b436b9beSJan Kara tid = transaction->t_tid; 3720b436b9beSJan Kara else 3721b436b9beSJan Kara tid = journal->j_commit_sequence; 3722a931da6aSTheodore Ts'o read_unlock(&journal->j_state_lock); 3723b436b9beSJan Kara ei->i_sync_tid = tid; 3724b436b9beSJan Kara ei->i_datasync_tid = tid; 3725b436b9beSJan Kara } 3726b436b9beSJan Kara 37270040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3728ac27a0ecSDave Kleikamp ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 3729617ba13bSMingming Cao if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 3730e5d2861fSKirill Korotaev EXT4_INODE_SIZE(inode->i_sb)) { 37311d1fe1eeSDavid Howells ret = -EIO; 3732ac27a0ecSDave Kleikamp goto bad_inode; 3733e5d2861fSKirill Korotaev } 3734ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 3735ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it. */ 3736617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 3737617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 3738ac27a0ecSDave Kleikamp } else { 3739ac27a0ecSDave Kleikamp __le32 *magic = (void *)raw_inode + 3740617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE + 3741ac27a0ecSDave Kleikamp ei->i_extra_isize; 3742617ba13bSMingming Cao if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 374319f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_XATTR); 3744ac27a0ecSDave Kleikamp } 3745ac27a0ecSDave Kleikamp } else 3746ac27a0ecSDave Kleikamp ei->i_extra_isize = 0; 3747ac27a0ecSDave Kleikamp 3748ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 3749ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 3750ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 3751ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 3752ef7f3835SKalpak Shah 375325ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 375425ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 375525ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 375625ec56b5SJean Noel Cordenner inode->i_version |= 375725ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 375825ec56b5SJean Noel Cordenner } 375925ec56b5SJean Noel Cordenner 3760c4b5a614STheodore Ts'o ret = 0; 3761485c26ecSTheodore Ts'o if (ei->i_file_acl && 37621032988cSTheodore Ts'o !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 376324676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 376424676da4STheodore Ts'o ei->i_file_acl); 3765485c26ecSTheodore Ts'o ret = -EIO; 3766485c26ecSTheodore Ts'o goto bad_inode; 376707a03824STheodore Ts'o } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 3768c4b5a614STheodore Ts'o if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 3769c4b5a614STheodore Ts'o (S_ISLNK(inode->i_mode) && 3770c4b5a614STheodore Ts'o !ext4_inode_is_fast_symlink(inode))) 37717a262f7cSAneesh Kumar K.V /* Validate extent which is part of inode */ 
37727a262f7cSAneesh Kumar K.V 			ret = ext4_ext_check_inode(inode);
3773fe2c8191SThiemo Nagel 	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3774fe2c8191SThiemo Nagel 		   (S_ISLNK(inode->i_mode) &&
3775fe2c8191SThiemo Nagel 		    !ext4_inode_is_fast_symlink(inode))) {
3776fe2c8191SThiemo Nagel 		/* Validate block references which are part of inode */
37771f7d1e77STheodore Ts'o 		ret = ext4_ind_check_inode(inode);
3778fe2c8191SThiemo Nagel 	}
3779567f3e9aSTheodore Ts'o 	if (ret)
37807a262f7cSAneesh Kumar K.V 		goto bad_inode;
37817a262f7cSAneesh Kumar K.V 
3782ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode)) {
3783617ba13bSMingming Cao 		inode->i_op = &ext4_file_inode_operations;
3784617ba13bSMingming Cao 		inode->i_fop = &ext4_file_operations;
3785617ba13bSMingming Cao 		ext4_set_aops(inode);
3786ac27a0ecSDave Kleikamp 	} else if (S_ISDIR(inode->i_mode)) {
3787617ba13bSMingming Cao 		inode->i_op = &ext4_dir_inode_operations;
3788617ba13bSMingming Cao 		inode->i_fop = &ext4_dir_operations;
3789ac27a0ecSDave Kleikamp 	} else if (S_ISLNK(inode->i_mode)) {
3790e83c1397SDuane Griffin 		if (ext4_inode_is_fast_symlink(inode)) {
3791617ba13bSMingming Cao 			inode->i_op = &ext4_fast_symlink_inode_operations;
3792e83c1397SDuane Griffin 			nd_terminate_link(ei->i_data, inode->i_size,
3793e83c1397SDuane Griffin 				sizeof(ei->i_data) - 1);
3794e83c1397SDuane Griffin 		} else {
3795617ba13bSMingming Cao 			inode->i_op = &ext4_symlink_inode_operations;
3796617ba13bSMingming Cao 			ext4_set_aops(inode);
3797ac27a0ecSDave Kleikamp 		}
3798563bdd61STheodore Ts'o 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
3799563bdd61STheodore Ts'o 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
3800617ba13bSMingming Cao 		inode->i_op = &ext4_special_inode_operations;
3801ac27a0ecSDave Kleikamp 		if (raw_inode->i_block[0])
3802ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
3803ac27a0ecSDave Kleikamp 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3804ac27a0ecSDave Kleikamp 		else
3805ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
3806ac27a0ecSDave Kleikamp 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3807563bdd61STheodore Ts'o 	} else {
3808563bdd61STheodore Ts'o 		ret = -EIO;
380924676da4STheodore Ts'o 		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
3810563bdd61STheodore Ts'o 		goto bad_inode;
3811ac27a0ecSDave Kleikamp 	}
3812ac27a0ecSDave Kleikamp 	brelse(iloc.bh);
3813617ba13bSMingming Cao 	ext4_set_inode_flags(inode);
38141d1fe1eeSDavid Howells 	unlock_new_inode(inode);
38151d1fe1eeSDavid Howells 	return inode;
3816ac27a0ecSDave Kleikamp 
3817ac27a0ecSDave Kleikamp bad_inode:
3818567f3e9aSTheodore Ts'o 	brelse(iloc.bh);
38191d1fe1eeSDavid Howells 	iget_failed(inode);
38201d1fe1eeSDavid Howells 	return ERR_PTR(ret);
3821ac27a0ecSDave Kleikamp }
3822ac27a0ecSDave Kleikamp 
38230fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle,
38240fc1b451SAneesh Kumar K.V 				struct ext4_inode *raw_inode,
38250fc1b451SAneesh Kumar K.V 				struct ext4_inode_info *ei)
38260fc1b451SAneesh Kumar K.V {
38270fc1b451SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
38280fc1b451SAneesh Kumar K.V 	u64 i_blocks = inode->i_blocks;
38290fc1b451SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
38300fc1b451SAneesh Kumar K.V 
38310fc1b451SAneesh Kumar K.V 	if (i_blocks <= ~0U) {
38320fc1b451SAneesh Kumar K.V 		/*
38330fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 32 bit variable
38340fc1b451SAneesh Kumar K.V 		 * as multiple of 512 bytes
38350fc1b451SAneesh Kumar K.V 		 */
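		/*
		 * Editor's illustration (not in the original source): in
		 * this branch the count fits entirely in i_blocks_lo,
		 * e.g. a 1GiB file has i_blocks == 2097152 (512-byte
		 * units), well below ~0U.  The 48-bit and HUGE_FILE
		 * branches below instead split the value across
		 * i_blocks_lo and i_blocks_high.
		 */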
38368180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
38370fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = 0;
383884a8dce2SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3839f287a1a5STheodore Ts'o 		return 0;
3840f287a1a5STheodore Ts'o 	}
3841f287a1a5STheodore Ts'o 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
3842f287a1a5STheodore Ts'o 		return -EFBIG;
3843f287a1a5STheodore Ts'o 
3844f287a1a5STheodore Ts'o 	if (i_blocks <= 0xffffffffffffULL) {
38450fc1b451SAneesh Kumar K.V 		/*
38460fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 48 bit variable
38470fc1b451SAneesh Kumar K.V 		 * as multiple of 512 bytes
38480fc1b451SAneesh Kumar K.V 		 */
38498180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
38500fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
385184a8dce2SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
38520fc1b451SAneesh Kumar K.V 	} else {
385384a8dce2SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
38548180a562SAneesh Kumar K.V 		/* i_blocks is stored in file system block size */
38558180a562SAneesh Kumar K.V 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
38568180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
38578180a562SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
38580fc1b451SAneesh Kumar K.V 	}
3859f287a1a5STheodore Ts'o 	return 0;
38600fc1b451SAneesh Kumar K.V }
38610fc1b451SAneesh Kumar K.V 
3862ac27a0ecSDave Kleikamp /*
3863ac27a0ecSDave Kleikamp  * Post the struct inode info into an on-disk inode location in the
3864ac27a0ecSDave Kleikamp  * buffer-cache.  This gobbles the caller's reference to the
3865ac27a0ecSDave Kleikamp  * buffer_head in the inode location struct.
3866ac27a0ecSDave Kleikamp  *
3867ac27a0ecSDave Kleikamp  * The caller must have write access to iloc->bh.
3868ac27a0ecSDave Kleikamp  */
3869617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle,
3870ac27a0ecSDave Kleikamp 				struct inode *inode,
3871830156c7SFrank Mayhar 				struct ext4_iloc *iloc)
3872ac27a0ecSDave Kleikamp {
3873617ba13bSMingming Cao 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
3874617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
3875ac27a0ecSDave Kleikamp 	struct buffer_head *bh = iloc->bh;
3876ac27a0ecSDave Kleikamp 	int err = 0, rc, block;
3877*08cefc7aSEric W. Biederman 	uid_t i_uid;
3878*08cefc7aSEric W. Biederman 	gid_t i_gid;
3879ac27a0ecSDave Kleikamp 
3880ac27a0ecSDave Kleikamp 	/* For fields not tracked in the in-memory inode,
3881ac27a0ecSDave Kleikamp 	 * initialise them to zero for new inodes. */
388219f5fb7aSTheodore Ts'o 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
3883617ba13bSMingming Cao 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
3884ac27a0ecSDave Kleikamp 
3885ff9ddf7eSJan Kara 	ext4_get_inode_flags(ei);
3886ac27a0ecSDave Kleikamp 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3887*08cefc7aSEric W. Biederman 	i_uid = i_uid_read(inode);
3888*08cefc7aSEric W. Biederman 	i_gid = i_gid_read(inode);
3889ac27a0ecSDave Kleikamp 	if (!(test_opt(inode->i_sb, NO_UID32))) {
3890*08cefc7aSEric W. Biederman 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
3891*08cefc7aSEric W. Biederman 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
3892ac27a0ecSDave Kleikamp 		/*
3893ac27a0ecSDave Kleikamp 		 * Fix up interoperability with old kernels.
Otherwise, old inodes get 3894ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact 3895ac27a0ecSDave Kleikamp */ 3896ac27a0ecSDave Kleikamp if (!ei->i_dtime) { 3897ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 3898*08cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_uid)); 3899ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 3900*08cefc7aSEric W. Biederman cpu_to_le16(high_16_bits(i_gid)); 3901ac27a0ecSDave Kleikamp } else { 3902ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 3903ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 3904ac27a0ecSDave Kleikamp } 3905ac27a0ecSDave Kleikamp } else { 3906*08cefc7aSEric W. Biederman raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid)); 3907*08cefc7aSEric W. Biederman raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid)); 3908ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 3909ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 3910ac27a0ecSDave Kleikamp } 3911ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 3912ef7f3835SKalpak Shah 3913ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 3914ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 3915ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 3916ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 3917ef7f3835SKalpak Shah 39180fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 39190fc1b451SAneesh Kumar K.V goto out_brelse; 3920ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 3921353eb83cSTheodore Ts'o raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 39229b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 39239b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 3924a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 3925a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 39267973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 3927a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 3928ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 3929ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 3930617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 3931617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 3932617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 3933617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 3934ac27a0ecSDave Kleikamp /* If this is the first large file 3935ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
3936ac27a0ecSDave Kleikamp 			 */
3937617ba13bSMingming Cao 			err = ext4_journal_get_write_access(handle,
3938617ba13bSMingming Cao 					EXT4_SB(sb)->s_sbh);
3939ac27a0ecSDave Kleikamp 			if (err)
3940ac27a0ecSDave Kleikamp 				goto out_brelse;
3941617ba13bSMingming Cao 			ext4_update_dynamic_rev(sb);
3942617ba13bSMingming Cao 			EXT4_SET_RO_COMPAT_FEATURE(sb,
3943617ba13bSMingming Cao 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
39440390131bSFrank Mayhar 			ext4_handle_sync(handle);
39451b8b9750SArtem Bityutskiy 			err = ext4_handle_dirty_super(handle, sb);
3946ac27a0ecSDave Kleikamp 		}
3947ac27a0ecSDave Kleikamp 	}
3948ac27a0ecSDave Kleikamp 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
3949ac27a0ecSDave Kleikamp 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
3950ac27a0ecSDave Kleikamp 		if (old_valid_dev(inode->i_rdev)) {
3951ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] =
3952ac27a0ecSDave Kleikamp 				cpu_to_le32(old_encode_dev(inode->i_rdev));
3953ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] = 0;
3954ac27a0ecSDave Kleikamp 		} else {
3955ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] = 0;
3956ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] =
3957ac27a0ecSDave Kleikamp 				cpu_to_le32(new_encode_dev(inode->i_rdev));
3958ac27a0ecSDave Kleikamp 			raw_inode->i_block[2] = 0;
3959ac27a0ecSDave Kleikamp 		}
3960de9a55b8STheodore Ts'o 	} else
3961de9a55b8STheodore Ts'o 		for (block = 0; block < EXT4_N_BLOCKS; block++)
3962ac27a0ecSDave Kleikamp 			raw_inode->i_block[block] = ei->i_data[block];
3963ac27a0ecSDave Kleikamp 
396425ec56b5SJean Noel Cordenner 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
396525ec56b5SJean Noel Cordenner 	if (ei->i_extra_isize) {
396625ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
396725ec56b5SJean Noel Cordenner 			raw_inode->i_version_hi =
396825ec56b5SJean Noel Cordenner 				cpu_to_le32(inode->i_version >> 32);
3969ac27a0ecSDave Kleikamp 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
397025ec56b5SJean Noel Cordenner 	}
397125ec56b5SJean Noel Cordenner 
39720390131bSFrank Mayhar 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
397373b50c1cSCurt Wohlgemuth 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
3974ac27a0ecSDave Kleikamp 	if (!err)
3975ac27a0ecSDave Kleikamp 		err = rc;
397619f5fb7aSTheodore Ts'o 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
3977ac27a0ecSDave Kleikamp 
3978b436b9beSJan Kara 	ext4_update_inode_fsync_trans(handle, inode, 0);
3979ac27a0ecSDave Kleikamp out_brelse:
3980ac27a0ecSDave Kleikamp 	brelse(bh);
3981617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
3982ac27a0ecSDave Kleikamp 	return err;
3983ac27a0ecSDave Kleikamp }
3984ac27a0ecSDave Kleikamp 
3985ac27a0ecSDave Kleikamp /*
3986617ba13bSMingming Cao  * ext4_write_inode()
3987ac27a0ecSDave Kleikamp  *
3988ac27a0ecSDave Kleikamp  * We are called from a few places:
3989ac27a0ecSDave Kleikamp  *
3990ac27a0ecSDave Kleikamp  * - Within generic_file_write() for O_SYNC files.
3991ac27a0ecSDave Kleikamp  *   Here, there will be no transaction running. We wait for any running
3992ac27a0ecSDave Kleikamp  *   transaction to commit.
3993ac27a0ecSDave Kleikamp  *
3994ac27a0ecSDave Kleikamp  * - Within sys_sync(), kupdate and such.
3995ac27a0ecSDave Kleikamp  *   We wait on commit, if told to.
3996ac27a0ecSDave Kleikamp  *
3997ac27a0ecSDave Kleikamp  * - Within prune_icache() (PF_MEMALLOC == true)
3998ac27a0ecSDave Kleikamp  *   Here we simply return.  We can't afford to block kswapd on the
3999ac27a0ecSDave Kleikamp  *   journal commit.
4000ac27a0ecSDave Kleikamp * 4001ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything, 4002ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in 4003617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4004ac27a0ecSDave Kleikamp * knfsd. 4005ac27a0ecSDave Kleikamp * 4006ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the 4007ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in 4008ac27a0ecSDave Kleikamp * which we are interested. 4009ac27a0ecSDave Kleikamp * 4010ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code: 4011ac27a0ecSDave Kleikamp * 4012ac27a0ecSDave Kleikamp * mark_inode_dirty(inode) 4013ac27a0ecSDave Kleikamp * stuff(); 4014ac27a0ecSDave Kleikamp * inode->i_size = expr; 4015ac27a0ecSDave Kleikamp * 4016ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while 4017ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode 4018ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list. 4019ac27a0ecSDave Kleikamp */ 4020a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 4021ac27a0ecSDave Kleikamp { 402291ac6f43SFrank Mayhar int err; 402391ac6f43SFrank Mayhar 4024ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC) 4025ac27a0ecSDave Kleikamp return 0; 4026ac27a0ecSDave Kleikamp 402791ac6f43SFrank Mayhar if (EXT4_SB(inode->i_sb)->s_journal) { 4028617ba13bSMingming Cao if (ext4_journal_current_handle()) { 4029b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4030ac27a0ecSDave Kleikamp dump_stack(); 4031ac27a0ecSDave Kleikamp return -EIO; 4032ac27a0ecSDave Kleikamp } 4033ac27a0ecSDave Kleikamp 4034a9185b41SChristoph Hellwig if (wbc->sync_mode != WB_SYNC_ALL) 4035ac27a0ecSDave Kleikamp return 0; 4036ac27a0ecSDave Kleikamp 403791ac6f43SFrank Mayhar err = ext4_force_commit(inode->i_sb); 403891ac6f43SFrank Mayhar } else { 403991ac6f43SFrank Mayhar struct ext4_iloc iloc; 404091ac6f43SFrank Mayhar 40418b472d73SCurt Wohlgemuth err = __ext4_get_inode_loc(inode, &iloc, 0); 404291ac6f43SFrank Mayhar if (err) 404391ac6f43SFrank Mayhar return err; 4044a9185b41SChristoph Hellwig if (wbc->sync_mode == WB_SYNC_ALL) 4045830156c7SFrank Mayhar sync_dirty_buffer(iloc.bh); 4046830156c7SFrank Mayhar if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 4047c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 4048c398eda0STheodore Ts'o "IO error syncing inode"); 4049830156c7SFrank Mayhar err = -EIO; 4050830156c7SFrank Mayhar } 4051fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 405291ac6f43SFrank Mayhar } 405391ac6f43SFrank Mayhar return err; 4054ac27a0ecSDave Kleikamp } 4055ac27a0ecSDave Kleikamp 4056ac27a0ecSDave Kleikamp /* 4057617ba13bSMingming Cao * ext4_setattr() 4058ac27a0ecSDave Kleikamp * 4059ac27a0ecSDave Kleikamp * Called from notify_change. 4060ac27a0ecSDave Kleikamp * 4061ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as 4062ac27a0ecSDave Kleikamp * possible. 
In particular, we want to make sure that when the VFS
4063ac27a0ecSDave Kleikamp  * shrinks i_size, we put the inode on the orphan list and modify
4064ac27a0ecSDave Kleikamp  * i_disksize immediately, so that during the subsequent flushing of
4065ac27a0ecSDave Kleikamp  * dirty pages and freeing of disk blocks, we can guarantee that any
4066ac27a0ecSDave Kleikamp  * commit will leave the blocks being flushed in an unused state on
4067ac27a0ecSDave Kleikamp  * disk.  (On recovery, the inode will get truncated and the blocks will
4068ac27a0ecSDave Kleikamp  * be freed, so we have a strong guarantee that no future commit will
4069ac27a0ecSDave Kleikamp  * leave these blocks visible to the user.)
4070ac27a0ecSDave Kleikamp  *
4071678aaf48SJan Kara  * Another thing we have to assure is that if we are in ordered mode
4072678aaf48SJan Kara  * and inode is still attached to the committing transaction, we must
4073678aaf48SJan Kara  * start writeout of all the dirty pages which are being truncated.
4074678aaf48SJan Kara  * This way we are sure that all the data written in the previous
4075678aaf48SJan Kara  * transaction are already on disk (truncate waits for pages under
4076678aaf48SJan Kara  * writeback).
4077678aaf48SJan Kara  *
4078678aaf48SJan Kara  * Called with inode->i_mutex down.
4079ac27a0ecSDave Kleikamp  */
4080617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4081ac27a0ecSDave Kleikamp {
4082ac27a0ecSDave Kleikamp 	struct inode *inode = dentry->d_inode;
4083ac27a0ecSDave Kleikamp 	int error, rc = 0;
40843d287de3SDmitry Monakhov 	int orphan = 0;
4085ac27a0ecSDave Kleikamp 	const unsigned int ia_valid = attr->ia_valid;
4086ac27a0ecSDave Kleikamp 
4087ac27a0ecSDave Kleikamp 	error = inode_change_ok(inode, attr);
4088ac27a0ecSDave Kleikamp 	if (error)
4089ac27a0ecSDave Kleikamp 		return error;
4090ac27a0ecSDave Kleikamp 
409112755627SDmitry Monakhov 	if (is_quota_modification(inode, attr))
4092871a2931SChristoph Hellwig 		dquot_initialize(inode);
4093*08cefc7aSEric W. Biederman 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
4094*08cefc7aSEric W. Biederman 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4095ac27a0ecSDave Kleikamp 		handle_t *handle;
4096ac27a0ecSDave Kleikamp 
4097ac27a0ecSDave Kleikamp 		/* (user+group)*(old+new) structure, inode write (sb,
4098ac27a0ecSDave Kleikamp 		 * inode block, ?
- but truncate inode update has it) */ 40995aca07ebSDmitry Monakhov handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ 4100194074acSDmitry Monakhov EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); 4101ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4102ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4103ac27a0ecSDave Kleikamp goto err_out; 4104ac27a0ecSDave Kleikamp } 4105b43fa828SChristoph Hellwig error = dquot_transfer(inode, attr); 4106ac27a0ecSDave Kleikamp if (error) { 4107617ba13bSMingming Cao ext4_journal_stop(handle); 4108ac27a0ecSDave Kleikamp return error; 4109ac27a0ecSDave Kleikamp } 4110ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4111ac27a0ecSDave Kleikamp * one transaction */ 4112ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4113ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4114ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4115ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4116617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4117617ba13bSMingming Cao ext4_journal_stop(handle); 4118ac27a0ecSDave Kleikamp } 4119ac27a0ecSDave Kleikamp 4120e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4121562c72aaSChristoph Hellwig inode_dio_wait(inode); 4122562c72aaSChristoph Hellwig 412312e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4124e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4125e2b46574SEric Sandeen 41260c095c7fSTheodore Ts'o if (attr->ia_size > sbi->s_bitmap_maxbytes) 41270c095c7fSTheodore Ts'o return -EFBIG; 4128e2b46574SEric Sandeen } 4129e2b46574SEric Sandeen } 4130e2b46574SEric Sandeen 4131ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4132c8d46e41SJiaying Zhang attr->ia_valid & ATTR_SIZE && 4133072bd7eaSTheodore Ts'o (attr->ia_size < inode->i_size)) { 4134ac27a0ecSDave Kleikamp handle_t *handle; 4135ac27a0ecSDave Kleikamp 4136617ba13bSMingming Cao handle = ext4_journal_start(inode, 3); 4137ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4138ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4139ac27a0ecSDave Kleikamp goto err_out; 4140ac27a0ecSDave Kleikamp } 41413d287de3SDmitry Monakhov if (ext4_handle_valid(handle)) { 4142617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 41433d287de3SDmitry Monakhov orphan = 1; 41443d287de3SDmitry Monakhov } 4145617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 4146617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 4147ac27a0ecSDave Kleikamp if (!error) 4148ac27a0ecSDave Kleikamp error = rc; 4149617ba13bSMingming Cao ext4_journal_stop(handle); 4150678aaf48SJan Kara 4151678aaf48SJan Kara if (ext4_should_order_data(inode)) { 4152678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode, 4153678aaf48SJan Kara attr->ia_size); 4154678aaf48SJan Kara if (error) { 4155678aaf48SJan Kara /* Do as much error cleanup as possible */ 4156678aaf48SJan Kara handle = ext4_journal_start(inode, 3); 4157678aaf48SJan Kara if (IS_ERR(handle)) { 4158678aaf48SJan Kara ext4_orphan_del(NULL, inode); 4159678aaf48SJan Kara goto err_out; 4160678aaf48SJan Kara } 4161678aaf48SJan Kara ext4_orphan_del(handle, inode); 41623d287de3SDmitry Monakhov orphan = 0; 4163678aaf48SJan Kara ext4_journal_stop(handle); 4164678aaf48SJan Kara goto err_out; 4165678aaf48SJan Kara } 4166678aaf48SJan Kara } 4167ac27a0ecSDave Kleikamp } 4168ac27a0ecSDave Kleikamp 4169072bd7eaSTheodore Ts'o if (attr->ia_valid & ATTR_SIZE) { 4170afcff5d8SLukas Czerner if (attr->ia_size != 
i_size_read(inode))
4171072bd7eaSTheodore Ts'o 			truncate_setsize(inode, attr->ia_size);
4172072bd7eaSTheodore Ts'o 		ext4_truncate(inode);
4173072bd7eaSTheodore Ts'o 	}
4174ac27a0ecSDave Kleikamp 
41751025774cSChristoph Hellwig 	if (!rc) {
41761025774cSChristoph Hellwig 		setattr_copy(inode, attr);
41771025774cSChristoph Hellwig 		mark_inode_dirty(inode);
41781025774cSChristoph Hellwig 	}
41791025774cSChristoph Hellwig 
41801025774cSChristoph Hellwig 	/*
41811025774cSChristoph Hellwig 	 * If the call to ext4_truncate failed to get a transaction handle at
41821025774cSChristoph Hellwig 	 * all, we need to clean up the in-core orphan list manually.
41831025774cSChristoph Hellwig 	 */
41843d287de3SDmitry Monakhov 	if (orphan && inode->i_nlink)
4185617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
4186ac27a0ecSDave Kleikamp 
4187ac27a0ecSDave Kleikamp 	if (!rc && (ia_valid & ATTR_MODE))
4188617ba13bSMingming Cao 		rc = ext4_acl_chmod(inode);
4189ac27a0ecSDave Kleikamp 
4190ac27a0ecSDave Kleikamp err_out:
4191617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, error);
4192ac27a0ecSDave Kleikamp 	if (!error)
4193ac27a0ecSDave Kleikamp 		error = rc;
4194ac27a0ecSDave Kleikamp 	return error;
4195ac27a0ecSDave Kleikamp }
4196ac27a0ecSDave Kleikamp 
41973e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
41983e3398a0SMingming Cao 		 struct kstat *stat)
41993e3398a0SMingming Cao {
42003e3398a0SMingming Cao 	struct inode *inode;
42013e3398a0SMingming Cao 	unsigned long delalloc_blocks;
42023e3398a0SMingming Cao 
42033e3398a0SMingming Cao 	inode = dentry->d_inode;
42043e3398a0SMingming Cao 	generic_fillattr(inode, stat);
42053e3398a0SMingming Cao 
42063e3398a0SMingming Cao 	/*
42073e3398a0SMingming Cao 	 * We can't update i_blocks if the block allocation is delayed;
42083e3398a0SMingming Cao 	 * otherwise, in the case of a system crash before the real block
42093e3398a0SMingming Cao 	 * allocation is done, we will have i_blocks inconsistent with
42103e3398a0SMingming Cao 	 * on-disk file blocks.
42113e3398a0SMingming Cao 	 * We always keep i_blocks updated together with real
42123e3398a0SMingming Cao 	 * allocation.  But to avoid confusing userspace, stat
42133e3398a0SMingming Cao 	 * will return the blocks that include the delayed allocation
42143e3398a0SMingming Cao 	 * blocks for this file.
42153e3398a0SMingming Cao 	 */
42163e3398a0SMingming Cao 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
42173e3398a0SMingming Cao 
42183e3398a0SMingming Cao 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
42193e3398a0SMingming Cao 	return 0;
42203e3398a0SMingming Cao }
4221ac27a0ecSDave Kleikamp 
4222a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4223a02908f1SMingming Cao {
422412e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
42258bb2b247SAmir Goldstein 		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4226ac51d837STheodore Ts'o 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4227a02908f1SMingming Cao }
4228ac51d837STheodore Ts'o 
4229a02908f1SMingming Cao /*
4230a02908f1SMingming Cao  * Account for index blocks, block group bitmaps and block group
4231a02908f1SMingming Cao  * descriptor blocks if we modify data blocks and index blocks.  In the
4232a02908f1SMingming Cao  * worst case, the index blocks spread over different block groups.
4233a02908f1SMingming Cao  *
4234a02908f1SMingming Cao  * If data blocks are discontiguous, they may spread over
4235af901ca1SAndré Goddard Rosa  * different block groups too.  If they are contiguous, with flexbg,
4222a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4223a02908f1SMingming Cao {
422412e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
42258bb2b247SAmir Goldstein 		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4226ac51d837STheodore Ts'o 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4227a02908f1SMingming Cao }
4228ac51d837STheodore Ts'o 
4229a02908f1SMingming Cao /*
4230a02908f1SMingming Cao  * Account for index blocks, block group bitmaps and block group
4231a02908f1SMingming Cao  * descriptor blocks when we modify data blocks and index blocks;
4232a02908f1SMingming Cao  * in the worst case the index blocks are spread over different block groups.
4233a02908f1SMingming Cao  *
4234a02908f1SMingming Cao  * If the data blocks are discontiguous, they may be spread over
4235af901ca1SAndré Goddard Rosa  * different block groups too. Even if they are contiguous, with
4236a02908f1SMingming Cao  * flexbg they could still cross a block group boundary.
4237a02908f1SMingming Cao  *
4238a02908f1SMingming Cao  * Also account for the superblock, inode, quota and xattr blocks.
4239a02908f1SMingming Cao  */
42401f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4241a02908f1SMingming Cao {
42428df9675fSTheodore Ts'o 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
42438df9675fSTheodore Ts'o 	int gdpblocks;
4244a02908f1SMingming Cao 	int idxblocks;
4245a02908f1SMingming Cao 	int ret = 0;
4246a02908f1SMingming Cao 
4247a02908f1SMingming Cao 	/*
4248a02908f1SMingming Cao 	 * How many index blocks do we need to touch to modify nrblocks?
4249a02908f1SMingming Cao 	 * The "Chunk" flag indicates whether nrblocks is
4250a02908f1SMingming Cao 	 * physically contiguous on disk.
4251a02908f1SMingming Cao 	 *
4252a02908f1SMingming Cao 	 * Direct IO and fallocate call get_block to allocate
4253a02908f1SMingming Cao 	 * a single extent at a time, so they can set the "Chunk" flag.
4254a02908f1SMingming Cao 	 */
4255a02908f1SMingming Cao 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4256a02908f1SMingming Cao 
4257a02908f1SMingming Cao 	ret = idxblocks;
4258a02908f1SMingming Cao 
4259a02908f1SMingming Cao 	/*
4260a02908f1SMingming Cao 	 * Now let's see how many group bitmaps and group descriptors need
4261a02908f1SMingming Cao 	 * to be accounted for.
4262a02908f1SMingming Cao 	 */
4263a02908f1SMingming Cao 	groups = idxblocks;
4264a02908f1SMingming Cao 	if (chunk)
4265a02908f1SMingming Cao 		groups += 1;
4266ac27a0ecSDave Kleikamp 	else
4267a02908f1SMingming Cao 		groups += nrblocks;
4268ac27a0ecSDave Kleikamp 
4269a02908f1SMingming Cao 	gdpblocks = groups;
42708df9675fSTheodore Ts'o 	if (groups > ngroups)
42718df9675fSTheodore Ts'o 		groups = ngroups;
4272a02908f1SMingming Cao 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4273a02908f1SMingming Cao 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4274a02908f1SMingming Cao 
4275a02908f1SMingming Cao 	/* bitmaps and block group descriptor blocks */
4276a02908f1SMingming Cao 	ret += groups + gdpblocks;
4277a02908f1SMingming Cao 
4278a02908f1SMingming Cao 	/* Blocks for super block, inode, quota and xattr blocks */
4279a02908f1SMingming Cao 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4280ac27a0ecSDave Kleikamp 
4281ac27a0ecSDave Kleikamp 	return ret;
4282ac27a0ecSDave Kleikamp }
4283ac27a0ecSDave Kleikamp 
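/*
 * Illustrative sketch (added for this edit; not part of the original
 * file): how a caller might size a handle with ext4_meta_trans_blocks().
 * The function name and the choice of 16 contiguous blocks are
 * assumptions picked purely for illustration.
 */
#if 0	/* example only */
static int example_modify_chunk(struct inode *inode)
{
	/* one physically contiguous chunk of 16 data blocks => chunk == 1 */
	int credits = ext4_meta_trans_blocks(inode, 16, 1);
	handle_t *handle = ext4_journal_start(inode, credits);

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... modify the 16 blocks and their metadata under this handle ... */
	return ext4_journal_stop(handle);
}
#endif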
4284ac27a0ecSDave Kleikamp /*
428525985edcSLucas De Marchi  * Calculate the total number of credits to reserve to fit
4286f3bd1f3fSMingming Cao  * the modification of a single page into a single transaction,
4287f3bd1f3fSMingming Cao  * which may include multiple chunks of block allocations.
4288a02908f1SMingming Cao  *
4289525f4ed8SMingming Cao  * This could be called via ext4_write_begin().
4290a02908f1SMingming Cao  *
4291525f4ed8SMingming Cao  * We need to consider the worst case, when we allocate
4292a02908f1SMingming Cao  * one new block per extent.
4293a02908f1SMingming Cao  */
4294a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode)
4295a02908f1SMingming Cao {
4296a02908f1SMingming Cao 	int bpp = ext4_journal_blocks_per_page(inode);
4297a02908f1SMingming Cao 	int ret;
4298a02908f1SMingming Cao 
4299a02908f1SMingming Cao 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
4300a02908f1SMingming Cao 
4301a02908f1SMingming Cao 	/* Account for data blocks for journalled mode */
4302a02908f1SMingming Cao 	if (ext4_should_journal_data(inode))
4303a02908f1SMingming Cao 		ret += bpp;
4304a02908f1SMingming Cao 	return ret;
4305a02908f1SMingming Cao }
4306f3bd1f3fSMingming Cao 
4307f3bd1f3fSMingming Cao /*
4308f3bd1f3fSMingming Cao  * Calculate the journal credits for a chunk of data modification.
4309f3bd1f3fSMingming Cao  *
4310f3bd1f3fSMingming Cao  * This is called from DIO, fallocate or any other function calling
431179e83036SEric Sandeen  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4312f3bd1f3fSMingming Cao  *
4313f3bd1f3fSMingming Cao  * Journal buffers for data blocks are not included here, as DIO
4314f3bd1f3fSMingming Cao  * and fallocate do not need to journal data buffers.
4315f3bd1f3fSMingming Cao  */
4316f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4317f3bd1f3fSMingming Cao {
4318f3bd1f3fSMingming Cao 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4319f3bd1f3fSMingming Cao }
4320f3bd1f3fSMingming Cao 
4321a02908f1SMingming Cao /*
4322617ba13bSMingming Cao  * The caller must have previously called ext4_reserve_inode_write().
4323ac27a0ecSDave Kleikamp  * Given this, we know that the caller already has write access to iloc->bh.
4324ac27a0ecSDave Kleikamp  */
4325617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
4326617ba13bSMingming Cao 			 struct inode *inode, struct ext4_iloc *iloc)
4327ac27a0ecSDave Kleikamp {
4328ac27a0ecSDave Kleikamp 	int err = 0;
4329ac27a0ecSDave Kleikamp 
4330c64db50eSTheodore Ts'o 	if (IS_I_VERSION(inode))
433125ec56b5SJean Noel Cordenner 		inode_inc_iversion(inode);
433225ec56b5SJean Noel Cordenner 
4333ac27a0ecSDave Kleikamp 	/* the do_update_inode consumes one bh->b_count */
4334ac27a0ecSDave Kleikamp 	get_bh(iloc->bh);
4335ac27a0ecSDave Kleikamp 
4336dab291afSMingming Cao 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4337830156c7SFrank Mayhar 	err = ext4_do_update_inode(handle, inode, iloc);
4338ac27a0ecSDave Kleikamp 	put_bh(iloc->bh);
4339ac27a0ecSDave Kleikamp 	return err;
4340ac27a0ecSDave Kleikamp }
4341ac27a0ecSDave Kleikamp 
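/*
 * Illustrative sketch (added for this edit; not part of the original
 * file): the usual reserve-then-dirty pattern built on the helper above
 * and on ext4_reserve_inode_write() below. Both are declared in ext4.h,
 * so the forward reference here is legal C; the function name is a
 * hypothetical stand-in for any in-core inode update.
 */
#if 0	/* example only */
static int example_update_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;
	/* ... modify the in-core inode here ... */
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif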
4342ac27a0ecSDave Kleikamp /*
4343ac27a0ecSDave Kleikamp  * On success, we end up with an outstanding reference count against
4344ac27a0ecSDave Kleikamp  * iloc->bh. This _must_ be cleaned up later.
4345ac27a0ecSDave Kleikamp  */
4346ac27a0ecSDave Kleikamp 
4347ac27a0ecSDave Kleikamp int
4348617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4349617ba13bSMingming Cao 			 struct ext4_iloc *iloc)
4350ac27a0ecSDave Kleikamp {
43510390131bSFrank Mayhar 	int err;
43520390131bSFrank Mayhar 
4353617ba13bSMingming Cao 	err = ext4_get_inode_loc(inode, iloc);
4354ac27a0ecSDave Kleikamp 	if (!err) {
4355ac27a0ecSDave Kleikamp 		BUFFER_TRACE(iloc->bh, "get_write_access");
4356617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, iloc->bh);
4357ac27a0ecSDave Kleikamp 		if (err) {
4358ac27a0ecSDave Kleikamp 			brelse(iloc->bh);
4359ac27a0ecSDave Kleikamp 			iloc->bh = NULL;
4360ac27a0ecSDave Kleikamp 		}
4361ac27a0ecSDave Kleikamp 	}
4362617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4363ac27a0ecSDave Kleikamp 	return err;
4364ac27a0ecSDave Kleikamp }
4365ac27a0ecSDave Kleikamp 
4366ac27a0ecSDave Kleikamp /*
43676dd4ee7cSKalpak Shah  * Expand an inode by new_extra_isize bytes.
43686dd4ee7cSKalpak Shah  * Returns 0 on success or negative error number on failure.
43696dd4ee7cSKalpak Shah  */
43701d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode,
43711d03ec98SAneesh Kumar K.V 				   unsigned int new_extra_isize,
43721d03ec98SAneesh Kumar K.V 				   struct ext4_iloc iloc,
43731d03ec98SAneesh Kumar K.V 				   handle_t *handle)
43746dd4ee7cSKalpak Shah {
43756dd4ee7cSKalpak Shah 	struct ext4_inode *raw_inode;
43766dd4ee7cSKalpak Shah 	struct ext4_xattr_ibody_header *header;
43776dd4ee7cSKalpak Shah 
43786dd4ee7cSKalpak Shah 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
43796dd4ee7cSKalpak Shah 		return 0;
43806dd4ee7cSKalpak Shah 
43816dd4ee7cSKalpak Shah 	raw_inode = ext4_raw_inode(&iloc);
43826dd4ee7cSKalpak Shah 
43836dd4ee7cSKalpak Shah 	header = IHDR(inode, raw_inode);
43846dd4ee7cSKalpak Shah 
43856dd4ee7cSKalpak Shah 	/* No extended attributes present */
438619f5fb7aSTheodore Ts'o 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
43876dd4ee7cSKalpak Shah 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
43886dd4ee7cSKalpak Shah 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
43896dd4ee7cSKalpak Shah 		       new_extra_isize);
43906dd4ee7cSKalpak Shah 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
43916dd4ee7cSKalpak Shah 		return 0;
43926dd4ee7cSKalpak Shah 	}
43936dd4ee7cSKalpak Shah 
43946dd4ee7cSKalpak Shah 	/* try to expand with EAs present */
43956dd4ee7cSKalpak Shah 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
43966dd4ee7cSKalpak Shah 					  raw_inode, handle);
43976dd4ee7cSKalpak Shah }
43986dd4ee7cSKalpak Shah 
43996dd4ee7cSKalpak Shah /*
4400ac27a0ecSDave Kleikamp  * What we do here is to mark the in-core inode as clean with respect to inode
4401ac27a0ecSDave Kleikamp  * dirtiness (it may still be data-dirty).
4402ac27a0ecSDave Kleikamp  * This means that the in-core inode may be reaped by prune_icache
4403ac27a0ecSDave Kleikamp  * without having to perform any I/O. This is a very good thing,
4404ac27a0ecSDave Kleikamp  * because *any* task may call prune_icache - even ones which
4405ac27a0ecSDave Kleikamp  * have a transaction open against a different journal.
4406ac27a0ecSDave Kleikamp  *
4407ac27a0ecSDave Kleikamp  * Is this cheating? Not really. Sure, we haven't written the
4408ac27a0ecSDave Kleikamp  * inode out, but prune_icache isn't a user-visible syncing function.
4409ac27a0ecSDave Kleikamp  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4410ac27a0ecSDave Kleikamp  * we start and wait on commits.
4411ac27a0ecSDave Kleikamp * 4412ac27a0ecSDave Kleikamp * Is this efficient/effective? Well, we're being nice to the system 4413ac27a0ecSDave Kleikamp * by cleaning up our inodes proactively so they can be reaped 4414ac27a0ecSDave Kleikamp * without I/O. But we are potentially leaving up to five seconds' 4415ac27a0ecSDave Kleikamp * worth of inodes floating about which prune_icache wants us to 4416ac27a0ecSDave Kleikamp * write out. One way to fix that would be to get prune_icache() 4417ac27a0ecSDave Kleikamp * to do a write_super() to free up some memory. It has the desired 4418ac27a0ecSDave Kleikamp * effect. 4419ac27a0ecSDave Kleikamp */ 4420617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) 4421ac27a0ecSDave Kleikamp { 4422617ba13bSMingming Cao struct ext4_iloc iloc; 44236dd4ee7cSKalpak Shah struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 44246dd4ee7cSKalpak Shah static unsigned int mnt_count; 44256dd4ee7cSKalpak Shah int err, ret; 4426ac27a0ecSDave Kleikamp 4427ac27a0ecSDave Kleikamp might_sleep(); 44287ff9c073STheodore Ts'o trace_ext4_mark_inode_dirty(inode, _RET_IP_); 4429617ba13bSMingming Cao err = ext4_reserve_inode_write(handle, inode, &iloc); 44300390131bSFrank Mayhar if (ext4_handle_valid(handle) && 44310390131bSFrank Mayhar EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 443219f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) { 44336dd4ee7cSKalpak Shah /* 44346dd4ee7cSKalpak Shah * We need extra buffer credits since we may write into EA block 44356dd4ee7cSKalpak Shah * with this same handle. If journal_extend fails, then it will 44366dd4ee7cSKalpak Shah * only result in a minor loss of functionality for that inode. 44376dd4ee7cSKalpak Shah * If this is felt to be critical, then e2fsck should be run to 44386dd4ee7cSKalpak Shah * force a large enough s_min_extra_isize. 44396dd4ee7cSKalpak Shah */ 44406dd4ee7cSKalpak Shah if ((jbd2_journal_extend(handle, 44416dd4ee7cSKalpak Shah EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) { 44426dd4ee7cSKalpak Shah ret = ext4_expand_extra_isize(inode, 44436dd4ee7cSKalpak Shah sbi->s_want_extra_isize, 44446dd4ee7cSKalpak Shah iloc, handle); 44456dd4ee7cSKalpak Shah if (ret) { 444619f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, 444719f5fb7aSTheodore Ts'o EXT4_STATE_NO_EXPAND); 4448c1bddad9SAneesh Kumar K.V if (mnt_count != 4449c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count)) { 445012062dddSEric Sandeen ext4_warning(inode->i_sb, 44516dd4ee7cSKalpak Shah "Unable to expand inode %lu. Delete" 44526dd4ee7cSKalpak Shah " some EAs or run e2fsck.", 44536dd4ee7cSKalpak Shah inode->i_ino); 4454c1bddad9SAneesh Kumar K.V mnt_count = 4455c1bddad9SAneesh Kumar K.V le16_to_cpu(sbi->s_es->s_mnt_count); 44566dd4ee7cSKalpak Shah } 44576dd4ee7cSKalpak Shah } 44586dd4ee7cSKalpak Shah } 44596dd4ee7cSKalpak Shah } 4460ac27a0ecSDave Kleikamp if (!err) 4461617ba13bSMingming Cao err = ext4_mark_iloc_dirty(handle, inode, &iloc); 4462ac27a0ecSDave Kleikamp return err; 4463ac27a0ecSDave Kleikamp } 4464ac27a0ecSDave Kleikamp 4465ac27a0ecSDave Kleikamp /* 4466617ba13bSMingming Cao * ext4_dirty_inode() is called from __mark_inode_dirty() 4467ac27a0ecSDave Kleikamp * 4468ac27a0ecSDave Kleikamp * We're really interested in the case where a file is being extended. 4469ac27a0ecSDave Kleikamp * i_size has been changed by generic_commit_write() and we thus need 4470ac27a0ecSDave Kleikamp * to include the updated inode in the current transaction. 
4471ac27a0ecSDave Kleikamp * 44725dd4056dSChristoph Hellwig * Also, dquot_alloc_block() will always dirty the inode when blocks 4473ac27a0ecSDave Kleikamp * are allocated to the file. 4474ac27a0ecSDave Kleikamp * 4475ac27a0ecSDave Kleikamp * If the inode is marked synchronous, we don't honour that here - doing 4476ac27a0ecSDave Kleikamp * so would cause a commit on atime updates, which we don't bother doing. 4477ac27a0ecSDave Kleikamp * We handle synchronous inodes at the highest possible level. 4478ac27a0ecSDave Kleikamp */ 4479aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags) 4480ac27a0ecSDave Kleikamp { 4481ac27a0ecSDave Kleikamp handle_t *handle; 4482ac27a0ecSDave Kleikamp 4483617ba13bSMingming Cao handle = ext4_journal_start(inode, 2); 4484ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4485ac27a0ecSDave Kleikamp goto out; 4486f3dc272fSCurt Wohlgemuth 4487617ba13bSMingming Cao ext4_mark_inode_dirty(handle, inode); 4488f3dc272fSCurt Wohlgemuth 4489617ba13bSMingming Cao ext4_journal_stop(handle); 4490ac27a0ecSDave Kleikamp out: 4491ac27a0ecSDave Kleikamp return; 4492ac27a0ecSDave Kleikamp } 4493ac27a0ecSDave Kleikamp 4494ac27a0ecSDave Kleikamp #if 0 4495ac27a0ecSDave Kleikamp /* 4496ac27a0ecSDave Kleikamp * Bind an inode's backing buffer_head into this transaction, to prevent 4497ac27a0ecSDave Kleikamp * it from being flushed to disk early. Unlike 4498617ba13bSMingming Cao * ext4_reserve_inode_write, this leaves behind no bh reference and 4499ac27a0ecSDave Kleikamp * returns no iloc structure, so the caller needs to repeat the iloc 4500ac27a0ecSDave Kleikamp * lookup to mark the inode dirty later. 4501ac27a0ecSDave Kleikamp */ 4502617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode) 4503ac27a0ecSDave Kleikamp { 4504617ba13bSMingming Cao struct ext4_iloc iloc; 4505ac27a0ecSDave Kleikamp 4506ac27a0ecSDave Kleikamp int err = 0; 4507ac27a0ecSDave Kleikamp if (handle) { 4508617ba13bSMingming Cao err = ext4_get_inode_loc(inode, &iloc); 4509ac27a0ecSDave Kleikamp if (!err) { 4510ac27a0ecSDave Kleikamp BUFFER_TRACE(iloc.bh, "get_write_access"); 4511dab291afSMingming Cao err = jbd2_journal_get_write_access(handle, iloc.bh); 4512ac27a0ecSDave Kleikamp if (!err) 45130390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, 451473b50c1cSCurt Wohlgemuth NULL, 4515ac27a0ecSDave Kleikamp iloc.bh); 4516ac27a0ecSDave Kleikamp brelse(iloc.bh); 4517ac27a0ecSDave Kleikamp } 4518ac27a0ecSDave Kleikamp } 4519617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4520ac27a0ecSDave Kleikamp return err; 4521ac27a0ecSDave Kleikamp } 4522ac27a0ecSDave Kleikamp #endif 4523ac27a0ecSDave Kleikamp 4524617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val) 4525ac27a0ecSDave Kleikamp { 4526ac27a0ecSDave Kleikamp journal_t *journal; 4527ac27a0ecSDave Kleikamp handle_t *handle; 4528ac27a0ecSDave Kleikamp int err; 4529ac27a0ecSDave Kleikamp 4530ac27a0ecSDave Kleikamp /* 4531ac27a0ecSDave Kleikamp * We have to be very careful here: changing a data block's 4532ac27a0ecSDave Kleikamp * journaling status dynamically is dangerous. If we write a 4533ac27a0ecSDave Kleikamp * data block to the journal, change the status and then delete 4534ac27a0ecSDave Kleikamp * that block, we risk forgetting to revoke the old log record 4535ac27a0ecSDave Kleikamp * from the journal and so a subsequent replay can corrupt data. 
4536ac27a0ecSDave Kleikamp  * So, first we make sure that the journal is empty and that
4537ac27a0ecSDave Kleikamp  * nobody is changing anything.
4538ac27a0ecSDave Kleikamp  */
4539ac27a0ecSDave Kleikamp 
4540617ba13bSMingming Cao 	journal = EXT4_JOURNAL(inode);
45410390131bSFrank Mayhar 	if (!journal)
45420390131bSFrank Mayhar 		return 0;
4543d699594dSDave Hansen 	if (is_journal_aborted(journal))
4544ac27a0ecSDave Kleikamp 		return -EROFS;
45452aff57b0SYongqiang Yang 	/* We have to allocate physical blocks for delalloc blocks
45462aff57b0SYongqiang Yang 	 * before flushing the journal; otherwise delalloc blocks cannot
45472aff57b0SYongqiang Yang 	 * be allocated any more. Worse, a truncate on delalloc blocks
45482aff57b0SYongqiang Yang 	 * could trigger a BUG by flushing delalloc blocks in the journal.
45492aff57b0SYongqiang Yang 	 * There are no delalloc blocks in non-journal data mode.
45502aff57b0SYongqiang Yang 	 */
45512aff57b0SYongqiang Yang 	if (val && test_opt(inode->i_sb, DELALLOC)) {
45522aff57b0SYongqiang Yang 		err = ext4_alloc_da_blocks(inode);
45532aff57b0SYongqiang Yang 		if (err < 0)
45542aff57b0SYongqiang Yang 			return err;
45552aff57b0SYongqiang Yang 	}
4556ac27a0ecSDave Kleikamp 
4557dab291afSMingming Cao 	jbd2_journal_lock_updates(journal);
4558ac27a0ecSDave Kleikamp 
4559ac27a0ecSDave Kleikamp 	/*
4560ac27a0ecSDave Kleikamp 	 * OK, there are no updates running now, and all cached data is
4561ac27a0ecSDave Kleikamp 	 * synced to disk. We are now in a completely consistent state
4562ac27a0ecSDave Kleikamp 	 * which doesn't have anything in the journal, and we know that
4563ac27a0ecSDave Kleikamp 	 * no filesystem updates are running, so it is safe to modify
4564ac27a0ecSDave Kleikamp 	 * the inode's in-core data-journaling state flag now.
4565ac27a0ecSDave Kleikamp 	 */
4566ac27a0ecSDave Kleikamp 
4567ac27a0ecSDave Kleikamp 	if (val)
456812e9b892SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
45695872ddaaSYongqiang Yang 	else {
45705872ddaaSYongqiang Yang 		jbd2_journal_flush(journal);
457112e9b892SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
45725872ddaaSYongqiang Yang 	}
4573617ba13bSMingming Cao 	ext4_set_aops(inode);
4574ac27a0ecSDave Kleikamp 
4575dab291afSMingming Cao 	jbd2_journal_unlock_updates(journal);
4576ac27a0ecSDave Kleikamp 
4577ac27a0ecSDave Kleikamp 	/* Finally, we can mark the inode as dirty.
*/ 4578ac27a0ecSDave Kleikamp 4579617ba13bSMingming Cao handle = ext4_journal_start(inode, 1); 4580ac27a0ecSDave Kleikamp if (IS_ERR(handle)) 4581ac27a0ecSDave Kleikamp return PTR_ERR(handle); 4582ac27a0ecSDave Kleikamp 4583617ba13bSMingming Cao err = ext4_mark_inode_dirty(handle, inode); 45840390131bSFrank Mayhar ext4_handle_sync(handle); 4585617ba13bSMingming Cao ext4_journal_stop(handle); 4586617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4587ac27a0ecSDave Kleikamp 4588ac27a0ecSDave Kleikamp return err; 4589ac27a0ecSDave Kleikamp } 45902e9ee850SAneesh Kumar K.V 45912e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh) 45922e9ee850SAneesh Kumar K.V { 45932e9ee850SAneesh Kumar K.V return !buffer_mapped(bh); 45942e9ee850SAneesh Kumar K.V } 45952e9ee850SAneesh Kumar K.V 4596c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) 45972e9ee850SAneesh Kumar K.V { 4598c2ec175cSNick Piggin struct page *page = vmf->page; 45992e9ee850SAneesh Kumar K.V loff_t size; 46002e9ee850SAneesh Kumar K.V unsigned long len; 46019ea7df53SJan Kara int ret; 46022e9ee850SAneesh Kumar K.V struct file *file = vma->vm_file; 46032e9ee850SAneesh Kumar K.V struct inode *inode = file->f_path.dentry->d_inode; 46042e9ee850SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 46059ea7df53SJan Kara handle_t *handle; 46069ea7df53SJan Kara get_block_t *get_block; 46079ea7df53SJan Kara int retries = 0; 46082e9ee850SAneesh Kumar K.V 46092e9ee850SAneesh Kumar K.V /* 46109ea7df53SJan Kara * This check is racy but catches the common case. We rely on 46119ea7df53SJan Kara * __block_page_mkwrite() to do a reliable check. 46122e9ee850SAneesh Kumar K.V */ 46139ea7df53SJan Kara vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 46149ea7df53SJan Kara /* Delalloc case is easy... */ 46159ea7df53SJan Kara if (test_opt(inode->i_sb, DELALLOC) && 46169ea7df53SJan Kara !ext4_should_journal_data(inode) && 46179ea7df53SJan Kara !ext4_nonda_switch(inode->i_sb)) { 46189ea7df53SJan Kara do { 46199ea7df53SJan Kara ret = __block_page_mkwrite(vma, vmf, 46209ea7df53SJan Kara ext4_da_get_block_prep); 46219ea7df53SJan Kara } while (ret == -ENOSPC && 46229ea7df53SJan Kara ext4_should_retry_alloc(inode->i_sb, &retries)); 46239ea7df53SJan Kara goto out_ret; 46242e9ee850SAneesh Kumar K.V } 46250e499890SDarrick J. Wong 46260e499890SDarrick J. Wong lock_page(page); 46279ea7df53SJan Kara size = i_size_read(inode); 46289ea7df53SJan Kara /* Page got truncated from under us? */ 46299ea7df53SJan Kara if (page->mapping != mapping || page_offset(page) > size) { 46309ea7df53SJan Kara unlock_page(page); 46319ea7df53SJan Kara ret = VM_FAULT_NOPAGE; 46329ea7df53SJan Kara goto out; 46330e499890SDarrick J. Wong } 46342e9ee850SAneesh Kumar K.V 46352e9ee850SAneesh Kumar K.V if (page->index == size >> PAGE_CACHE_SHIFT) 46362e9ee850SAneesh Kumar K.V len = size & ~PAGE_CACHE_MASK; 46372e9ee850SAneesh Kumar K.V else 46382e9ee850SAneesh Kumar K.V len = PAGE_CACHE_SIZE; 4639a827eaffSAneesh Kumar K.V /* 46409ea7df53SJan Kara * Return if we have all the buffers mapped. 
This avoids the need to do
46419ea7df53SJan Kara 	 * journal_start/journal_stop which can block and take a long time.
4642a827eaffSAneesh Kumar K.V 	 */
46432e9ee850SAneesh Kumar K.V 	if (page_has_buffers(page)) {
46442e9ee850SAneesh Kumar K.V 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
4645a827eaffSAneesh Kumar K.V 					ext4_bh_unmapped)) {
46469ea7df53SJan Kara 			/* Wait so that we don't change the page under IO */
46479ea7df53SJan Kara 			wait_on_page_writeback(page);
46489ea7df53SJan Kara 			ret = VM_FAULT_LOCKED;
46499ea7df53SJan Kara 			goto out;
46502e9ee850SAneesh Kumar K.V 		}
4651a827eaffSAneesh Kumar K.V 	}
4652a827eaffSAneesh Kumar K.V 	unlock_page(page);
46539ea7df53SJan Kara 	/* OK, we need to fill the hole... */
46549ea7df53SJan Kara 	if (ext4_should_dioread_nolock(inode))
46559ea7df53SJan Kara 		get_block = ext4_get_block_write;
46569ea7df53SJan Kara 	else
46579ea7df53SJan Kara 		get_block = ext4_get_block;
46589ea7df53SJan Kara retry_alloc:
46599ea7df53SJan Kara 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
46609ea7df53SJan Kara 	if (IS_ERR(handle)) {
4661c2ec175cSNick Piggin 		ret = VM_FAULT_SIGBUS;
46629ea7df53SJan Kara 		goto out;
46639ea7df53SJan Kara 	}
46649ea7df53SJan Kara 	ret = __block_page_mkwrite(vma, vmf, get_block);
46659ea7df53SJan Kara 	if (!ret && ext4_should_journal_data(inode)) {
46669ea7df53SJan Kara 		if (walk_page_buffers(handle, page_buffers(page), 0,
46679ea7df53SJan Kara 			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
46689ea7df53SJan Kara 			unlock_page(page);
46699ea7df53SJan Kara 			ret = VM_FAULT_SIGBUS;
4670fcbb5515SYongqiang Yang 			ext4_journal_stop(handle);
46719ea7df53SJan Kara 			goto out;
46729ea7df53SJan Kara 		}
46739ea7df53SJan Kara 		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
46749ea7df53SJan Kara 	}
46759ea7df53SJan Kara 	ext4_journal_stop(handle);
46769ea7df53SJan Kara 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
46779ea7df53SJan Kara 		goto retry_alloc;
46789ea7df53SJan Kara out_ret:
46799ea7df53SJan Kara 	ret = block_page_mkwrite_return(ret);
46809ea7df53SJan Kara out:
46812e9ee850SAneesh Kumar K.V 	return ret;
46822e9ee850SAneesh Kumar K.V }
4683
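/*
 * Illustrative sketch (added for this edit; not part of the original
 * file): how ext4_page_mkwrite() is typically wired into a VMA. In
 * this kernel the real hookup lives in fs/ext4/file.c; the structure
 * name below is a hypothetical stand-in, not a copy of that code.
 */
#if 0	/* example only */
static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= ext4_page_mkwrite,
};
#endif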