/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes that we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
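/*
 * For illustration (not part of the original comment): i_blocks is
 * counted in 512-byte sectors, so on a 4K-block filesystem a single
 * external xattr block accounts for s_blocksize >> 9 == 8 sectors.
 * A fast symlink keeps its target inside the inode body itself, so
 * once the xattr block is subtracted it owns no data blocks at all.
 */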
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped, and thus the user could see stale data if they
		 * try to read them before the transaction is checkpointed.
		 * So be careful and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks needed to reserve
 * to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}
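/*
 * A rough sketch of what the two helpers above estimate: with delayed
 * allocation, each data block we promise to allocate later may, in the
 * worst case, also need fresh metadata (extent-tree or indirect
 * blocks) at allocation time.  The per-block estimate is deliberately
 * pessimistic and shrinks as neighbouring logical blocks turn out to
 * share the same index/leaf block.
 */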
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks\n",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
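/*
 * Example use (as in ext4_map_blocks() below): after a successful
 * lookup or allocation,
 *
 *	ret = check_block_validity(inode, map);
 *
 * verifies that the returned physical range does not overlap
 * filesystem metadata; the macro captures the caller's __func__ and
 * __LINE__ so the error report points at the right call site.
 */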
/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem and allocates
 * blocks, stores the allocated blocks in the result buffer head and
 * marks it mapped.
 *
 * If the file is extent-based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.
 * If create == 1, it makes sure the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been
 * allocated); in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation; let the
	 * underlying get_block() function know to avoid double
	 * accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non-extent files, so we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
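/*
 * A minimal caller sketch (compare ext4_getblk() below):
 *
 *	struct ext4_map_blocks map;
 *
 *	map.m_lblk = block;
 *	map.m_len = 1;
 *	err = ext4_map_blocks(handle, inode, &map,
 *			      create ? EXT4_GET_BLOCKS_CREATE : 0);
 *	if (err > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		... use map.m_pblk .. map.m_pblk + map.m_len - 1 ...
 */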
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);
	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
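/*
 * Reading an existing metadata block, for illustration:
 *
 *	bh = ext4_bread(NULL, inode, block, 0, &err);
 *	if (!bh)
 *		... block not present or the read failed ...
 *
 * ext4_bread() returns the buffer already uptodate, issuing and
 * waiting for the read itself when necessary.
 */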
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
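/*
 * walk_page_buffers() is the generic per-buffer iterator used in this
 * file: only buffers overlapping [from, to) are passed to the callback,
 * e.g. do_journal_get_write_access() from ext4_write_begin() or
 * write_end_fn() from the data=journal write_end path, and *partial is
 * raised if any buffer that was skipped is not uptodate.
 */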
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason.
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
					from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes.
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
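/*
 * Note the retry pattern above: on -ENOSPC the handle is stopped and
 * the whole begin sequence is retried while ext4_should_retry_alloc()
 * thinks that committing the running transaction may free up enough
 * reserved blocks to succeed; the number of retries is bounded.
 */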
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize (hint: delalloc).
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
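/*
 * i_size vs. i_disksize, briefly: i_size is the in-core file size seen
 * by userspace, while i_disksize is the size the on-disk inode is
 * known to record.  With delayed allocation i_size can run ahead of
 * i_disksize until the corresponding blocks are actually written out,
 * which is why the two are checked and updated separately above.
 */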
/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* If we have allocated more blocks and copied
			 * less, we will have blocks allocated outside
			 * inode->i_size.  So truncate them.
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	}
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size.  So truncate them.
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size.  So truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
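/*
 * The three write_end variants above differ mainly in how data reaches
 * the journal: the ordered variant files the inode's data on the
 * transaction's ordered list before updating sizes, the writeback
 * variant only updates sizes, and the journalled variant logs the data
 * buffers themselves through write_end_fn().
 */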
/*
 * Reserve a single cluster located at lblock.
 */
int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	int ret;

	/*
	 * Recalculate the amount of metadata blocks to reserve in
	 * order to allocate a block at @lblock; the worst case is one
	 * extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);
	spin_unlock(&ei->i_block_reservation_lock);

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;
	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
107272b8ab9dSEric Sandeen */ 1073e7d5f315STheodore Ts'o if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) { 10747b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); 1075030ba6bcSAneesh Kumar K.V if (ext4_should_retry_alloc(inode->i_sb, &retries)) { 1076030ba6bcSAneesh Kumar K.V yield(); 1077030ba6bcSAneesh Kumar K.V goto repeat; 1078030ba6bcSAneesh Kumar K.V } 1079d2a17637SMingming Cao return -ENOSPC; 1080d2a17637SMingming Cao } 10810637c6f4STheodore Ts'o spin_lock(&ei->i_block_reservation_lock); 10829d0be502STheodore Ts'o ei->i_reserved_data_blocks++; 10830637c6f4STheodore Ts'o ei->i_reserved_meta_blocks += md_needed; 10840637c6f4STheodore Ts'o spin_unlock(&ei->i_block_reservation_lock); 108539bc680aSDmitry Monakhov 1086d2a17637SMingming Cao return 0; /* success */ 1087d2a17637SMingming Cao } 1088d2a17637SMingming Cao 108912219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free) 1090d2a17637SMingming Cao { 1091d2a17637SMingming Cao struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 10920637c6f4STheodore Ts'o struct ext4_inode_info *ei = EXT4_I(inode); 1093d2a17637SMingming Cao 1094cd213226SMingming Cao if (!to_free) 1095cd213226SMingming Cao return; /* Nothing to release, exit */ 1096cd213226SMingming Cao 1097d2a17637SMingming Cao spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 1098cd213226SMingming Cao 10995a58ec87SLi Zefan trace_ext4_da_release_space(inode, to_free); 11000637c6f4STheodore Ts'o if (unlikely(to_free > ei->i_reserved_data_blocks)) { 1101cd213226SMingming Cao /* 11020637c6f4STheodore Ts'o * if there aren't enough reserved blocks, then the 11030637c6f4STheodore Ts'o * counter is messed up somewhere. Since this 11040637c6f4STheodore Ts'o * function is called from invalidate page, it's 11050637c6f4STheodore Ts'o * harmless to return without any action. 1106cd213226SMingming Cao */ 11070637c6f4STheodore Ts'o ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: " 11080637c6f4STheodore Ts'o "ino %lu, to_free %d with only %d reserved " 11090637c6f4STheodore Ts'o "data blocks\n", inode->i_ino, to_free, 11100637c6f4STheodore Ts'o ei->i_reserved_data_blocks); 11110637c6f4STheodore Ts'o WARN_ON(1); 11120637c6f4STheodore Ts'o to_free = ei->i_reserved_data_blocks; 11130637c6f4STheodore Ts'o } 11140637c6f4STheodore Ts'o ei->i_reserved_data_blocks -= to_free; 11150637c6f4STheodore Ts'o 11160637c6f4STheodore Ts'o if (ei->i_reserved_data_blocks == 0) { 11170637c6f4STheodore Ts'o /* 11180637c6f4STheodore Ts'o * We can release all of the reserved metadata blocks 11190637c6f4STheodore Ts'o * only when we have written all of the delayed 11200637c6f4STheodore Ts'o * allocation blocks. 11217b415bf6SAditya Kali * Note that in case of bigalloc, i_reserved_meta_blocks, 11227b415bf6SAditya Kali * i_reserved_data_blocks, etc. refer to number of clusters. 
11230637c6f4STheodore Ts'o */ 112457042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 112572b8ab9dSEric Sandeen ei->i_reserved_meta_blocks); 1126ee5f4d9cSTheodore Ts'o ei->i_reserved_meta_blocks = 0; 11279d0be502STheodore Ts'o ei->i_da_metadata_calc_len = 0; 1128cd213226SMingming Cao } 1129cd213226SMingming Cao 113072b8ab9dSEric Sandeen /* update fs dirty data blocks counter */ 113157042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free); 1132d2a17637SMingming Cao 1133d2a17637SMingming Cao spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 113460e58e0fSMingming Cao 11357b415bf6SAditya Kali dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free)); 1136d2a17637SMingming Cao } 1137d2a17637SMingming Cao 1138d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page, 1139d2a17637SMingming Cao unsigned long offset) 1140d2a17637SMingming Cao { 1141d2a17637SMingming Cao int to_release = 0; 1142d2a17637SMingming Cao struct buffer_head *head, *bh; 1143d2a17637SMingming Cao unsigned int curr_off = 0; 11447b415bf6SAditya Kali struct inode *inode = page->mapping->host; 11457b415bf6SAditya Kali struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 11467b415bf6SAditya Kali int num_clusters; 1147d2a17637SMingming Cao 1148d2a17637SMingming Cao head = page_buffers(page); 1149d2a17637SMingming Cao bh = head; 1150d2a17637SMingming Cao do { 1151d2a17637SMingming Cao unsigned int next_off = curr_off + bh->b_size; 1152d2a17637SMingming Cao 1153d2a17637SMingming Cao if ((offset <= curr_off) && (buffer_delay(bh))) { 1154d2a17637SMingming Cao to_release++; 1155d2a17637SMingming Cao clear_buffer_delay(bh); 1156d2a17637SMingming Cao } 1157d2a17637SMingming Cao curr_off = next_off; 1158d2a17637SMingming Cao } while ((bh = bh->b_this_page) != head); 11597b415bf6SAditya Kali 11607b415bf6SAditya Kali /* If we have released all the blocks belonging to a cluster, then we 11617b415bf6SAditya Kali * need to release the reserved space for that cluster. */ 11627b415bf6SAditya Kali num_clusters = EXT4_NUM_B2C(sbi, to_release); 11637b415bf6SAditya Kali while (num_clusters > 0) { 11647b415bf6SAditya Kali ext4_fsblk_t lblk; 11657b415bf6SAditya Kali lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + 11667b415bf6SAditya Kali ((num_clusters - 1) << sbi->s_cluster_bits); 11677b415bf6SAditya Kali if (sbi->s_cluster_ratio == 1 || 11687b415bf6SAditya Kali !ext4_find_delalloc_cluster(inode, lblk, 1)) 11697b415bf6SAditya Kali ext4_da_release_space(inode, 1); 11707b415bf6SAditya Kali 11717b415bf6SAditya Kali num_clusters--; 11727b415bf6SAditya Kali } 1173d2a17637SMingming Cao } 1174ac27a0ecSDave Kleikamp 1175ac27a0ecSDave Kleikamp /* 117664769240SAlex Tomas * Delayed allocation stuff 117764769240SAlex Tomas */ 117864769240SAlex Tomas 117964769240SAlex Tomas /* 118064769240SAlex Tomas * mpage_da_submit_io - walks through the extent of pages and tries to write 1181a1d6cc56SAneesh Kumar K.V * them with the writepage() callback 118264769240SAlex Tomas * 118364769240SAlex Tomas * @mpd->inode: inode 118464769240SAlex Tomas * @mpd->first_page: first page of the extent 118564769240SAlex Tomas * @mpd->next_page: page after the last page of the extent 118664769240SAlex Tomas * 118764769240SAlex Tomas * By the time mpage_da_submit_io() is called we expect all blocks 118864769240SAlex Tomas * to be allocated. This may be wrong if allocation failed.
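 * Pages that still contain delayed or unwritten buffers at this point
 * are skipped and left dirty for a later pass (see skip_page below).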
118964769240SAlex Tomas * 119064769240SAlex Tomas * As pages are already locked by write_cache_pages(), we can't use it 119164769240SAlex Tomas */ 11921de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd, 11931de3e3dfSTheodore Ts'o struct ext4_map_blocks *map) 119464769240SAlex Tomas { 1195791b7f08SAneesh Kumar K.V struct pagevec pvec; 1196791b7f08SAneesh Kumar K.V unsigned long index, end; 1197791b7f08SAneesh Kumar K.V int ret = 0, err, nr_pages, i; 1198791b7f08SAneesh Kumar K.V struct inode *inode = mpd->inode; 1199791b7f08SAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 1200cb20d518STheodore Ts'o loff_t size = i_size_read(inode); 12013ecdb3a1STheodore Ts'o unsigned int len, block_start; 12023ecdb3a1STheodore Ts'o struct buffer_head *bh, *page_bufs = NULL; 1203cb20d518STheodore Ts'o int journal_data = ext4_should_journal_data(inode); 12041de3e3dfSTheodore Ts'o sector_t pblock = 0, cur_logical = 0; 1205bd2d0210STheodore Ts'o struct ext4_io_submit io_submit; 120664769240SAlex Tomas 120764769240SAlex Tomas BUG_ON(mpd->next_page <= mpd->first_page); 1208bd2d0210STheodore Ts'o memset(&io_submit, 0, sizeof(io_submit)); 1209791b7f08SAneesh Kumar K.V /* 1210791b7f08SAneesh Kumar K.V * We need to start from the first_page to the next_page - 1 1211791b7f08SAneesh Kumar K.V * to make sure we also write the mapped dirty buffer_heads. 12128dc207c0STheodore Ts'o * If we look at mpd->b_blocknr we would only be looking 1213791b7f08SAneesh Kumar K.V * at the currently mapped buffer_heads. 1214791b7f08SAneesh Kumar K.V */ 121564769240SAlex Tomas index = mpd->first_page; 121664769240SAlex Tomas end = mpd->next_page - 1; 121764769240SAlex Tomas 1218791b7f08SAneesh Kumar K.V pagevec_init(&pvec, 0); 121964769240SAlex Tomas while (index <= end) { 1220791b7f08SAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 122164769240SAlex Tomas if (nr_pages == 0) 122264769240SAlex Tomas break; 122364769240SAlex Tomas for (i = 0; i < nr_pages; i++) { 122497498956STheodore Ts'o int commit_write = 0, skip_page = 0; 122564769240SAlex Tomas struct page *page = pvec.pages[i]; 122664769240SAlex Tomas 1227791b7f08SAneesh Kumar K.V index = page->index; 1228791b7f08SAneesh Kumar K.V if (index > end) 1229791b7f08SAneesh Kumar K.V break; 1230cb20d518STheodore Ts'o 1231cb20d518STheodore Ts'o if (index == size >> PAGE_CACHE_SHIFT) 1232cb20d518STheodore Ts'o len = size & ~PAGE_CACHE_MASK; 1233cb20d518STheodore Ts'o else 1234cb20d518STheodore Ts'o len = PAGE_CACHE_SIZE; 12351de3e3dfSTheodore Ts'o if (map) { 12361de3e3dfSTheodore Ts'o cur_logical = index << (PAGE_CACHE_SHIFT - 12371de3e3dfSTheodore Ts'o inode->i_blkbits); 12381de3e3dfSTheodore Ts'o pblock = map->m_pblk + (cur_logical - 12391de3e3dfSTheodore Ts'o map->m_lblk); 12401de3e3dfSTheodore Ts'o } 1241791b7f08SAneesh Kumar K.V index++; 1242791b7f08SAneesh Kumar K.V 1243791b7f08SAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1244791b7f08SAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1245791b7f08SAneesh Kumar K.V 124622208dedSAneesh Kumar K.V /* 1247cb20d518STheodore Ts'o * If the page does not have buffers (for 1248cb20d518STheodore Ts'o * whatever reason), try to create them using 1249a107e5a3STheodore Ts'o * __block_write_begin. If this fails, 125097498956STheodore Ts'o * skip the page and move on. 
125122208dedSAneesh Kumar K.V */ 1252cb20d518STheodore Ts'o if (!page_has_buffers(page)) { 1253a107e5a3STheodore Ts'o if (__block_write_begin(page, 0, len, 1254cb20d518STheodore Ts'o noalloc_get_block_write)) { 125597498956STheodore Ts'o skip_page: 1256cb20d518STheodore Ts'o unlock_page(page); 1257cb20d518STheodore Ts'o continue; 1258cb20d518STheodore Ts'o } 1259cb20d518STheodore Ts'o commit_write = 1; 1260cb20d518STheodore Ts'o } 12613ecdb3a1STheodore Ts'o 12623ecdb3a1STheodore Ts'o bh = page_bufs = page_buffers(page); 12633ecdb3a1STheodore Ts'o block_start = 0; 12643ecdb3a1STheodore Ts'o do { 12651de3e3dfSTheodore Ts'o if (!bh) 126697498956STheodore Ts'o goto skip_page; 12671de3e3dfSTheodore Ts'o if (map && (cur_logical >= map->m_lblk) && 12681de3e3dfSTheodore Ts'o (cur_logical <= (map->m_lblk + 12691de3e3dfSTheodore Ts'o (map->m_len - 1)))) { 12701de3e3dfSTheodore Ts'o if (buffer_delay(bh)) { 12711de3e3dfSTheodore Ts'o clear_buffer_delay(bh); 12721de3e3dfSTheodore Ts'o bh->b_blocknr = pblock; 12731de3e3dfSTheodore Ts'o } 12741de3e3dfSTheodore Ts'o if (buffer_unwritten(bh) || 12751de3e3dfSTheodore Ts'o buffer_mapped(bh)) 12761de3e3dfSTheodore Ts'o BUG_ON(bh->b_blocknr != pblock); 12771de3e3dfSTheodore Ts'o if (map->m_flags & EXT4_MAP_UNINIT) 12781de3e3dfSTheodore Ts'o set_buffer_uninit(bh); 12791de3e3dfSTheodore Ts'o clear_buffer_unwritten(bh); 12801de3e3dfSTheodore Ts'o } 12811de3e3dfSTheodore Ts'o 128297498956STheodore Ts'o /* skip page if block allocation undone */ 12831de3e3dfSTheodore Ts'o if (buffer_delay(bh) || buffer_unwritten(bh)) 128497498956STheodore Ts'o skip_page = 1; 12853ecdb3a1STheodore Ts'o bh = bh->b_this_page; 12863ecdb3a1STheodore Ts'o block_start += bh->b_size; 12871de3e3dfSTheodore Ts'o cur_logical++; 12881de3e3dfSTheodore Ts'o pblock++; 12891de3e3dfSTheodore Ts'o } while (bh != page_bufs); 12901de3e3dfSTheodore Ts'o 129197498956STheodore Ts'o if (skip_page) 129297498956STheodore Ts'o goto skip_page; 1293cb20d518STheodore Ts'o 1294cb20d518STheodore Ts'o if (commit_write) 1295cb20d518STheodore Ts'o /* mark the buffer_heads as dirty & uptodate */ 1296cb20d518STheodore Ts'o block_commit_write(page, 0, len); 1297cb20d518STheodore Ts'o 129897498956STheodore Ts'o clear_page_dirty_for_io(page); 1299bd2d0210STheodore Ts'o /* 1300bd2d0210STheodore Ts'o * Delalloc doesn't support data journalling, 1301bd2d0210STheodore Ts'o * but eventually maybe we'll lift this 1302bd2d0210STheodore Ts'o * restriction. 
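 * (Until then, data=journal pages that arrive here with PageChecked set
 * are handed to __ext4_journalled_writepage() below.)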
1303bd2d0210STheodore Ts'o */ 1304bd2d0210STheodore Ts'o if (unlikely(journal_data && PageChecked(page))) 1305cb20d518STheodore Ts'o err = __ext4_journalled_writepage(page, len); 13061449032bSTheodore Ts'o else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) 1307bd2d0210STheodore Ts'o err = ext4_bio_write_page(&io_submit, page, 1308bd2d0210STheodore Ts'o len, mpd->wbc); 13099dd75f1fSTheodore Ts'o else if (buffer_uninit(page_bufs)) { 13109dd75f1fSTheodore Ts'o ext4_set_bh_endio(page_bufs, inode); 13119dd75f1fSTheodore Ts'o err = block_write_full_page_endio(page, 13129dd75f1fSTheodore Ts'o noalloc_get_block_write, 13139dd75f1fSTheodore Ts'o mpd->wbc, ext4_end_io_buffer_write); 13149dd75f1fSTheodore Ts'o } else 13151449032bSTheodore Ts'o err = block_write_full_page(page, 13161449032bSTheodore Ts'o noalloc_get_block_write, mpd->wbc); 1317cb20d518STheodore Ts'o 1318cb20d518STheodore Ts'o if (!err) 1319a1d6cc56SAneesh Kumar K.V mpd->pages_written++; 132064769240SAlex Tomas /* 132164769240SAlex Tomas * In error case, we have to continue because 132264769240SAlex Tomas * remaining pages are still locked 132364769240SAlex Tomas */ 132464769240SAlex Tomas if (ret == 0) 132564769240SAlex Tomas ret = err; 132664769240SAlex Tomas } 132764769240SAlex Tomas pagevec_release(&pvec); 132864769240SAlex Tomas } 1329bd2d0210STheodore Ts'o ext4_io_submit(&io_submit); 133064769240SAlex Tomas return ret; 133164769240SAlex Tomas } 133264769240SAlex Tomas 1333c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd) 1334c4a0c46eSAneesh Kumar K.V { 1335c4a0c46eSAneesh Kumar K.V int nr_pages, i; 1336c4a0c46eSAneesh Kumar K.V pgoff_t index, end; 1337c4a0c46eSAneesh Kumar K.V struct pagevec pvec; 1338c4a0c46eSAneesh Kumar K.V struct inode *inode = mpd->inode; 1339c4a0c46eSAneesh Kumar K.V struct address_space *mapping = inode->i_mapping; 1340c4a0c46eSAneesh Kumar K.V 1341c7f5938aSCurt Wohlgemuth index = mpd->first_page; 1342c7f5938aSCurt Wohlgemuth end = mpd->next_page - 1; 1343c4a0c46eSAneesh Kumar K.V while (index <= end) { 1344c4a0c46eSAneesh Kumar K.V nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 1345c4a0c46eSAneesh Kumar K.V if (nr_pages == 0) 1346c4a0c46eSAneesh Kumar K.V break; 1347c4a0c46eSAneesh Kumar K.V for (i = 0; i < nr_pages; i++) { 1348c4a0c46eSAneesh Kumar K.V struct page *page = pvec.pages[i]; 13499b1d0998SJan Kara if (page->index > end) 1350c4a0c46eSAneesh Kumar K.V break; 1351c4a0c46eSAneesh Kumar K.V BUG_ON(!PageLocked(page)); 1352c4a0c46eSAneesh Kumar K.V BUG_ON(PageWriteback(page)); 1353c4a0c46eSAneesh Kumar K.V block_invalidatepage(page, 0); 1354c4a0c46eSAneesh Kumar K.V ClearPageUptodate(page); 1355c4a0c46eSAneesh Kumar K.V unlock_page(page); 1356c4a0c46eSAneesh Kumar K.V } 13579b1d0998SJan Kara index = pvec.pages[nr_pages - 1]->index + 1; 13589b1d0998SJan Kara pagevec_release(&pvec); 1359c4a0c46eSAneesh Kumar K.V } 1360c4a0c46eSAneesh Kumar K.V return; 1361c4a0c46eSAneesh Kumar K.V } 1362c4a0c46eSAneesh Kumar K.V 1363df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode) 1364df22291fSAneesh Kumar K.V { 1365df22291fSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 13661693918eSTheodore Ts'o printk(KERN_CRIT "Total free blocks count %lld\n", 13675dee5437STheodore Ts'o EXT4_C2B(EXT4_SB(inode->i_sb), 13685dee5437STheodore Ts'o ext4_count_free_clusters(inode->i_sb))); 13691693918eSTheodore Ts'o printk(KERN_CRIT "Free/Dirty block details\n"); 13701693918eSTheodore Ts'o printk(KERN_CRIT "free_blocks=%lld\n", 
137157042651STheodore Ts'o (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 137257042651STheodore Ts'o percpu_counter_sum(&sbi->s_freeclusters_counter))); 13731693918eSTheodore Ts'o printk(KERN_CRIT "dirty_blocks=%lld\n", 13747b415bf6SAditya Kali (long long) EXT4_C2B(EXT4_SB(inode->i_sb), 13757b415bf6SAditya Kali percpu_counter_sum(&sbi->s_dirtyclusters_counter))); 13761693918eSTheodore Ts'o printk(KERN_CRIT "Block reservation details\n"); 13771693918eSTheodore Ts'o printk(KERN_CRIT "i_reserved_data_blocks=%u\n", 1378df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_data_blocks); 13791693918eSTheodore Ts'o printk(KERN_CRIT "i_reserved_meta_blocks=%u\n", 1380df22291fSAneesh Kumar K.V EXT4_I(inode)->i_reserved_meta_blocks); 1381df22291fSAneesh Kumar K.V return; 1382df22291fSAneesh Kumar K.V } 1383df22291fSAneesh Kumar K.V 1384b920c755STheodore Ts'o /* 13855a87b7a5STheodore Ts'o * mpage_da_map_and_submit - go through given space, map them 13865a87b7a5STheodore Ts'o * if necessary, and then submit them for I/O 138764769240SAlex Tomas * 13888dc207c0STheodore Ts'o * @mpd - bh describing space 138964769240SAlex Tomas * 139064769240SAlex Tomas * The function skips space we know is already mapped to disk blocks. 139164769240SAlex Tomas * 139264769240SAlex Tomas */ 13935a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd) 139464769240SAlex Tomas { 13952ac3b6e0STheodore Ts'o int err, blks, get_blocks_flags; 13961de3e3dfSTheodore Ts'o struct ext4_map_blocks map, *mapp = NULL; 13972fa3cdfbSTheodore Ts'o sector_t next = mpd->b_blocknr; 13982fa3cdfbSTheodore Ts'o unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits; 13992fa3cdfbSTheodore Ts'o loff_t disksize = EXT4_I(mpd->inode)->i_disksize; 14002fa3cdfbSTheodore Ts'o handle_t *handle = NULL; 140164769240SAlex Tomas 140264769240SAlex Tomas /* 14035a87b7a5STheodore Ts'o * If the blocks are mapped already, or we couldn't accumulate 14045a87b7a5STheodore Ts'o * any blocks, then proceed immediately to the submission stage. 140564769240SAlex Tomas */ 14065a87b7a5STheodore Ts'o if ((mpd->b_size == 0) || 14075a87b7a5STheodore Ts'o ((mpd->b_state & (1 << BH_Mapped)) && 140829fa89d0SAneesh Kumar K.V !(mpd->b_state & (1 << BH_Delay)) && 14095a87b7a5STheodore Ts'o !(mpd->b_state & (1 << BH_Unwritten)))) 14105a87b7a5STheodore Ts'o goto submit_io; 14112fa3cdfbSTheodore Ts'o 14122fa3cdfbSTheodore Ts'o handle = ext4_journal_current_handle(); 14132fa3cdfbSTheodore Ts'o BUG_ON(!handle); 14142fa3cdfbSTheodore Ts'o 141579ffab34SAneesh Kumar K.V /* 141679e83036SEric Sandeen * Call ext4_map_blocks() to allocate any delayed allocation 14172ac3b6e0STheodore Ts'o * blocks, or to convert an uninitialized extent to be 14182ac3b6e0STheodore Ts'o * initialized (in the case where we have written into 14192ac3b6e0STheodore Ts'o * one or more preallocated blocks). 14202ac3b6e0STheodore Ts'o * 14212ac3b6e0STheodore Ts'o * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to 14222ac3b6e0STheodore Ts'o * indicate that we are on the delayed allocation path. This 14232ac3b6e0STheodore Ts'o * affects functions in many different parts of the allocation 14242ac3b6e0STheodore Ts'o * call path. This flag exists primarily because we don't 142579e83036SEric Sandeen * want to change *many* call functions, so ext4_map_blocks() 1426f2321097STheodore Ts'o * will set the EXT4_STATE_DELALLOC_RESERVED flag once the 14272ac3b6e0STheodore Ts'o * inode's allocation semaphore is taken. 
14282ac3b6e0STheodore Ts'o * 14292ac3b6e0STheodore Ts'o * If the blocks in question were delalloc blocks, set 14302ac3b6e0STheodore Ts'o * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting 14312ac3b6e0STheodore Ts'o * variables are updated after the blocks have been allocated. 143279ffab34SAneesh Kumar K.V */ 14332ed88685STheodore Ts'o map.m_lblk = next; 14342ed88685STheodore Ts'o map.m_len = max_blocks; 14351296cc85SAneesh Kumar K.V get_blocks_flags = EXT4_GET_BLOCKS_CREATE; 1436744692dcSJiaying Zhang if (ext4_should_dioread_nolock(mpd->inode)) 1437744692dcSJiaying Zhang get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; 14382ac3b6e0STheodore Ts'o if (mpd->b_state & (1 << BH_Delay)) 14391296cc85SAneesh Kumar K.V get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE; 14401296cc85SAneesh Kumar K.V 14412ed88685STheodore Ts'o blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags); 14422fa3cdfbSTheodore Ts'o if (blks < 0) { 1443e3570639SEric Sandeen struct super_block *sb = mpd->inode->i_sb; 1444e3570639SEric Sandeen 14452fa3cdfbSTheodore Ts'o err = blks; 1446ed5bde0bSTheodore Ts'o /* 14475a87b7a5STheodore Ts'o * If get block returns EAGAIN or ENOSPC and there 144897498956STheodore Ts'o * appear to be free blocks we will just let 144997498956STheodore Ts'o * mpage_da_submit_io() unlock all of the pages. 1450c4a0c46eSAneesh Kumar K.V */ 1451c4a0c46eSAneesh Kumar K.V if (err == -EAGAIN) 14525a87b7a5STheodore Ts'o goto submit_io; 1453df22291fSAneesh Kumar K.V 14545dee5437STheodore Ts'o if (err == -ENOSPC && ext4_count_free_clusters(sb)) { 1455df22291fSAneesh Kumar K.V mpd->retval = err; 14565a87b7a5STheodore Ts'o goto submit_io; 1457df22291fSAneesh Kumar K.V } 1458df22291fSAneesh Kumar K.V 1459c4a0c46eSAneesh Kumar K.V /* 1460ed5bde0bSTheodore Ts'o * get block failure will cause us to loop in 1461ed5bde0bSTheodore Ts'o * writepages, because a_ops->writepage won't be able 1462ed5bde0bSTheodore Ts'o * to make progress. The page will be redirtied by 1463ed5bde0bSTheodore Ts'o * writepage and writepages will again try to write 1464ed5bde0bSTheodore Ts'o * the same page. 1465c4a0c46eSAneesh Kumar K.V */ 1466e3570639SEric Sandeen if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) { 1467e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1468e3570639SEric Sandeen "delayed block allocation failed for inode %lu " 1469e3570639SEric Sandeen "at logical offset %llu with max blocks %zd " 1470e3570639SEric Sandeen "with error %d", mpd->inode->i_ino, 1471c4a0c46eSAneesh Kumar K.V (unsigned long long) next, 14728dc207c0STheodore Ts'o mpd->b_size >> mpd->inode->i_blkbits, err); 1473e3570639SEric Sandeen ext4_msg(sb, KERN_CRIT, 1474e3570639SEric Sandeen "This should not happen!! 
Data will be lost\n"); 1475e3570639SEric Sandeen if (err == -ENOSPC) 1476df22291fSAneesh Kumar K.V ext4_print_free_blocks(mpd->inode); 1477030ba6bcSAneesh Kumar K.V } 14782fa3cdfbSTheodore Ts'o /* invalidate all the pages */ 1479c7f5938aSCurt Wohlgemuth ext4_da_block_invalidatepages(mpd); 1480e0fd9b90SCurt Wohlgemuth 1481e0fd9b90SCurt Wohlgemuth /* Mark this page range as having been completed */ 1482e0fd9b90SCurt Wohlgemuth mpd->io_done = 1; 14835a87b7a5STheodore Ts'o return; 1484c4a0c46eSAneesh Kumar K.V } 14852fa3cdfbSTheodore Ts'o BUG_ON(blks == 0); 14862fa3cdfbSTheodore Ts'o 14871de3e3dfSTheodore Ts'o mapp = ↦ 14882ed88685STheodore Ts'o if (map.m_flags & EXT4_MAP_NEW) { 14892ed88685STheodore Ts'o struct block_device *bdev = mpd->inode->i_sb->s_bdev; 14902ed88685STheodore Ts'o int i; 149164769240SAlex Tomas 14922ed88685STheodore Ts'o for (i = 0; i < map.m_len; i++) 14932ed88685STheodore Ts'o unmap_underlying_metadata(bdev, map.m_pblk + i); 149464769240SAlex Tomas 14952fa3cdfbSTheodore Ts'o if (ext4_should_order_data(mpd->inode)) { 14962fa3cdfbSTheodore Ts'o err = ext4_jbd2_file_inode(handle, mpd->inode); 14972fa3cdfbSTheodore Ts'o if (err) 1498decbd919STheodore Ts'o /* Only if the journal is aborted */ 14995a87b7a5STheodore Ts'o return; 15002fa3cdfbSTheodore Ts'o } 1501decbd919STheodore Ts'o } 15022fa3cdfbSTheodore Ts'o 15032fa3cdfbSTheodore Ts'o /* 150403f5d8bcSJan Kara * Update on-disk size along with block allocation. 15052fa3cdfbSTheodore Ts'o */ 15062fa3cdfbSTheodore Ts'o disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; 15072fa3cdfbSTheodore Ts'o if (disksize > i_size_read(mpd->inode)) 15082fa3cdfbSTheodore Ts'o disksize = i_size_read(mpd->inode); 15092fa3cdfbSTheodore Ts'o if (disksize > EXT4_I(mpd->inode)->i_disksize) { 15102fa3cdfbSTheodore Ts'o ext4_update_i_disksize(mpd->inode, disksize); 15115a87b7a5STheodore Ts'o err = ext4_mark_inode_dirty(handle, mpd->inode); 15125a87b7a5STheodore Ts'o if (err) 15135a87b7a5STheodore Ts'o ext4_error(mpd->inode->i_sb, 15145a87b7a5STheodore Ts'o "Failed to mark inode %lu dirty", 15155a87b7a5STheodore Ts'o mpd->inode->i_ino); 15162fa3cdfbSTheodore Ts'o } 15172fa3cdfbSTheodore Ts'o 15185a87b7a5STheodore Ts'o submit_io: 15191de3e3dfSTheodore Ts'o mpage_da_submit_io(mpd, mapp); 15205a87b7a5STheodore Ts'o mpd->io_done = 1; 152164769240SAlex Tomas } 152264769240SAlex Tomas 1523bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \ 1524bf068ee2SAneesh Kumar K.V (1 << BH_Delay) | (1 << BH_Unwritten)) 152564769240SAlex Tomas 152664769240SAlex Tomas /* 152764769240SAlex Tomas * mpage_add_bh_to_extent - try to add one more block to extent of blocks 152864769240SAlex Tomas * 152964769240SAlex Tomas * @mpd->lbh - extent of blocks 153064769240SAlex Tomas * @logical - logical number of the block in the file 153164769240SAlex Tomas * @bh - bh of the block (used to access block's state) 153264769240SAlex Tomas * 153364769240SAlex Tomas * the function is used to collect contig. 
blocks in the same state 153464769240SAlex Tomas */ 153564769240SAlex Tomas static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, 15368dc207c0STheodore Ts'o sector_t logical, size_t b_size, 15378dc207c0STheodore Ts'o unsigned long b_state) 153864769240SAlex Tomas { 153964769240SAlex Tomas sector_t next; 15408dc207c0STheodore Ts'o int nrblocks = mpd->b_size >> mpd->inode->i_blkbits; 154164769240SAlex Tomas 1542c445e3e0SEric Sandeen /* 1543c445e3e0SEric Sandeen * XXX Don't go larger than mballoc is willing to allocate. 1544c445e3e0SEric Sandeen * This is a stopgap solution. We eventually need to fold 1545c445e3e0SEric Sandeen * mpage_da_submit_io() into this function and then call 154679e83036SEric Sandeen * ext4_map_blocks() multiple times in a loop 1547c445e3e0SEric Sandeen */ 1548c445e3e0SEric Sandeen if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize) 1549c445e3e0SEric Sandeen goto flush_it; 1550c445e3e0SEric Sandeen 1551525f4ed8SMingming Cao /* check if the reserved journal credits might overflow */ 155212e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) { 1553525f4ed8SMingming Cao if (nrblocks >= EXT4_MAX_TRANS_DATA) { 1554525f4ed8SMingming Cao /* 1555525f4ed8SMingming Cao * With non-extent format we are limited by the journal 1556525f4ed8SMingming Cao * credit available. Total credit needed to insert 1557525f4ed8SMingming Cao * nrblocks contiguous blocks is dependent on the 1558525f4ed8SMingming Cao * nrblocks. So limit nrblocks. 1559525f4ed8SMingming Cao */ 1560525f4ed8SMingming Cao goto flush_it; 1561525f4ed8SMingming Cao } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) > 1562525f4ed8SMingming Cao EXT4_MAX_TRANS_DATA) { 1563525f4ed8SMingming Cao /* 1564525f4ed8SMingming Cao * Adding the new buffer_head would make it cross the 1565525f4ed8SMingming Cao * allowed limit for which we have journal credit 1566525f4ed8SMingming Cao * reserved. So limit the new bh->b_size 1567525f4ed8SMingming Cao */ 1568525f4ed8SMingming Cao b_size = (EXT4_MAX_TRANS_DATA - nrblocks) << 1569525f4ed8SMingming Cao mpd->inode->i_blkbits; 1570525f4ed8SMingming Cao /* we will do mpage_da_submit_io in the next loop */ 1571525f4ed8SMingming Cao } 1572525f4ed8SMingming Cao } 157364769240SAlex Tomas /* 157464769240SAlex Tomas * First block in the extent 157564769240SAlex Tomas */ 15768dc207c0STheodore Ts'o if (mpd->b_size == 0) { 15778dc207c0STheodore Ts'o mpd->b_blocknr = logical; 15788dc207c0STheodore Ts'o mpd->b_size = b_size; 15798dc207c0STheodore Ts'o mpd->b_state = b_state & BH_FLAGS; 158064769240SAlex Tomas return; 158164769240SAlex Tomas } 158264769240SAlex Tomas 15838dc207c0STheodore Ts'o next = mpd->b_blocknr + nrblocks; 158464769240SAlex Tomas /* 158564769240SAlex Tomas * Can we merge the block to our big extent?
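 * (Only if it is logically contiguous with the blocks collected so far
 * and its BH_FLAGS state matches mpd->b_state.)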
158664769240SAlex Tomas */ 15878dc207c0STheodore Ts'o if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) { 15888dc207c0STheodore Ts'o mpd->b_size += b_size; 158964769240SAlex Tomas return; 159064769240SAlex Tomas } 159164769240SAlex Tomas 1592525f4ed8SMingming Cao flush_it: 159364769240SAlex Tomas /* 159464769240SAlex Tomas * We couldn't merge the block to our extent, so we 159564769240SAlex Tomas * need to flush current extent and start new one 159664769240SAlex Tomas */ 15975a87b7a5STheodore Ts'o mpage_da_map_and_submit(mpd); 1598a1d6cc56SAneesh Kumar K.V return; 159964769240SAlex Tomas } 160064769240SAlex Tomas 1601c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh) 160229fa89d0SAneesh Kumar K.V { 1603c364b22cSAneesh Kumar K.V return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh); 160429fa89d0SAneesh Kumar K.V } 160529fa89d0SAneesh Kumar K.V 160664769240SAlex Tomas /* 1607b920c755STheodore Ts'o * This is a special get_blocks_t callback which is used by 1608b920c755STheodore Ts'o * ext4_da_write_begin(). It will either return a mapped block or 1609b920c755STheodore Ts'o * reserve space for a single block. 161029fa89d0SAneesh Kumar K.V * 161129fa89d0SAneesh Kumar K.V * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set. 161229fa89d0SAneesh Kumar K.V * We also have b_blocknr = -1 and b_bdev initialized properly 161329fa89d0SAneesh Kumar K.V * 161429fa89d0SAneesh Kumar K.V * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set. 161529fa89d0SAneesh Kumar K.V * We also have b_blocknr = the physical block mapping the unwritten extent and b_bdev 161629fa89d0SAneesh Kumar K.V * initialized properly. 161764769240SAlex Tomas */ 161864769240SAlex Tomas static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, 16192ed88685STheodore Ts'o struct buffer_head *bh, int create) 162064769240SAlex Tomas { 16212ed88685STheodore Ts'o struct ext4_map_blocks map; 162264769240SAlex Tomas int ret = 0; 162333b9817eSAneesh Kumar K.V sector_t invalid_block = ~((sector_t) 0xffff); 162433b9817eSAneesh Kumar K.V 162533b9817eSAneesh Kumar K.V if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) 162633b9817eSAneesh Kumar K.V invalid_block = ~0; 162764769240SAlex Tomas 162864769240SAlex Tomas BUG_ON(create == 0); 16292ed88685STheodore Ts'o BUG_ON(bh->b_size != inode->i_sb->s_blocksize); 16302ed88685STheodore Ts'o 16312ed88685STheodore Ts'o map.m_lblk = iblock; 16322ed88685STheodore Ts'o map.m_len = 1; 163364769240SAlex Tomas 163464769240SAlex Tomas /* 163564769240SAlex Tomas * first, we need to know whether the block is allocated already; 163664769240SAlex Tomas * preallocated blocks are unmapped but should be treated 163764769240SAlex Tomas * the same as allocated blocks. 163864769240SAlex Tomas */ 16392ed88685STheodore Ts'o ret = ext4_map_blocks(NULL, inode, &map, 0); 16402ed88685STheodore Ts'o if (ret < 0) 16412ed88685STheodore Ts'o return ret; 16422ed88685STheodore Ts'o if (ret == 0) { 16432ed88685STheodore Ts'o if (buffer_delay(bh)) 16442ed88685STheodore Ts'o return 0; /* Not sure this could or should happen */ 164564769240SAlex Tomas /* 1646ebdec241SChristoph Hellwig * XXX: __block_write_begin() unmaps passed block, is it OK? 164764769240SAlex Tomas */ 16487b415bf6SAditya Kali /* If the block was allocated from a previously allocated cluster, 16497b415bf6SAditya Kali * then we don't need to reserve it again. 
*/ 16507b415bf6SAditya Kali if (!(map.m_flags & EXT4_MAP_FROM_CLUSTER)) { 16519d0be502STheodore Ts'o ret = ext4_da_reserve_space(inode, iblock); 1652d2a17637SMingming Cao if (ret) 1653d2a17637SMingming Cao /* not enough space to reserve */ 1654d2a17637SMingming Cao return ret; 16557b415bf6SAditya Kali } 1656d2a17637SMingming Cao 16572ed88685STheodore Ts'o map_bh(bh, inode->i_sb, invalid_block); 16582ed88685STheodore Ts'o set_buffer_new(bh); 16592ed88685STheodore Ts'o set_buffer_delay(bh); 16602ed88685STheodore Ts'o return 0; 166164769240SAlex Tomas } 166264769240SAlex Tomas 16632ed88685STheodore Ts'o map_bh(bh, inode->i_sb, map.m_pblk); 16642ed88685STheodore Ts'o bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 16652ed88685STheodore Ts'o 16662ed88685STheodore Ts'o if (buffer_unwritten(bh)) { 16672ed88685STheodore Ts'o /* A delayed write to unwritten bh should be marked 16682ed88685STheodore Ts'o * new and mapped. Mapped ensures that we don't do 16692ed88685STheodore Ts'o * get_block multiple times when we write to the same 16702ed88685STheodore Ts'o * offset and new ensures that we do proper zero out 16712ed88685STheodore Ts'o * for partial write. 16722ed88685STheodore Ts'o */ 16732ed88685STheodore Ts'o set_buffer_new(bh); 1674c8205636STheodore Ts'o set_buffer_mapped(bh); 16752ed88685STheodore Ts'o } 16762ed88685STheodore Ts'o return 0; 167764769240SAlex Tomas } 167861628a3fSMingming Cao 1679b920c755STheodore Ts'o /* 1680b920c755STheodore Ts'o * This function is used as a standard get_block_t callback function 1681b920c755STheodore Ts'o * when there is no desire to allocate any blocks. It is used as a 1682ebdec241SChristoph Hellwig * callback function for block_write_begin() and block_write_full_page(). 1683206f7ab4SChristoph Hellwig * These functions should only try to map a single block at a time. 1684b920c755STheodore Ts'o * 1685b920c755STheodore Ts'o * Since this function doesn't do block allocations even if the caller 1686b920c755STheodore Ts'o * requests it by passing in create=1, it is critically important that 1687b920c755STheodore Ts'o * any caller checks to make sure that any buffer heads returned 1688b920c755STheodore Ts'o * by this function are either all already mapped or marked for 1689206f7ab4SChristoph Hellwig * delayed allocation before calling block_write_full_page(). Otherwise, 1690206f7ab4SChristoph Hellwig * b_blocknr could be left uninitialized, and the page write functions will 1691206f7ab4SChristoph Hellwig * be taken by surprise. 
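 * (In practice this simply forwards to _ext4_get_block() with create
 * forced to 0; see the one-line body below.)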
1692b920c755STheodore Ts'o */ 1693b920c755STheodore Ts'o static int noalloc_get_block_write(struct inode *inode, sector_t iblock, 1694f0e6c985SAneesh Kumar K.V struct buffer_head *bh_result, int create) 1695f0e6c985SAneesh Kumar K.V { 1696a2dc52b5STheodore Ts'o BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize); 16972ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result, 0); 169861628a3fSMingming Cao } 169961628a3fSMingming Cao 170062e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh) 170162e086beSAneesh Kumar K.V { 170262e086beSAneesh Kumar K.V get_bh(bh); 170362e086beSAneesh Kumar K.V return 0; 170462e086beSAneesh Kumar K.V } 170562e086beSAneesh Kumar K.V 170662e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh) 170762e086beSAneesh Kumar K.V { 170862e086beSAneesh Kumar K.V put_bh(bh); 170962e086beSAneesh Kumar K.V return 0; 171062e086beSAneesh Kumar K.V } 171162e086beSAneesh Kumar K.V 171262e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page, 171362e086beSAneesh Kumar K.V unsigned int len) 171462e086beSAneesh Kumar K.V { 171562e086beSAneesh Kumar K.V struct address_space *mapping = page->mapping; 171662e086beSAneesh Kumar K.V struct inode *inode = mapping->host; 171762e086beSAneesh Kumar K.V struct buffer_head *page_bufs; 171862e086beSAneesh Kumar K.V handle_t *handle = NULL; 171962e086beSAneesh Kumar K.V int ret = 0; 172062e086beSAneesh Kumar K.V int err; 172162e086beSAneesh Kumar K.V 1722cb20d518STheodore Ts'o ClearPageChecked(page); 172362e086beSAneesh Kumar K.V page_bufs = page_buffers(page); 172462e086beSAneesh Kumar K.V BUG_ON(!page_bufs); 172562e086beSAneesh Kumar K.V walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one); 172662e086beSAneesh Kumar K.V /* As soon as we unlock the page, it can go away, but we have 172762e086beSAneesh Kumar K.V * references to buffers so we are safe */ 172862e086beSAneesh Kumar K.V unlock_page(page); 172962e086beSAneesh Kumar K.V 173062e086beSAneesh Kumar K.V handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); 173162e086beSAneesh Kumar K.V if (IS_ERR(handle)) { 173262e086beSAneesh Kumar K.V ret = PTR_ERR(handle); 173362e086beSAneesh Kumar K.V goto out; 173462e086beSAneesh Kumar K.V } 173562e086beSAneesh Kumar K.V 1736441c8508SCurt Wohlgemuth BUG_ON(!ext4_handle_valid(handle)); 1737441c8508SCurt Wohlgemuth 173862e086beSAneesh Kumar K.V ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, 173962e086beSAneesh Kumar K.V do_journal_get_write_access); 174062e086beSAneesh Kumar K.V 174162e086beSAneesh Kumar K.V err = walk_page_buffers(handle, page_bufs, 0, len, NULL, 174262e086beSAneesh Kumar K.V write_end_fn); 174362e086beSAneesh Kumar K.V if (ret == 0) 174462e086beSAneesh Kumar K.V ret = err; 17452d859db3SJan Kara EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; 174662e086beSAneesh Kumar K.V err = ext4_journal_stop(handle); 174762e086beSAneesh Kumar K.V if (!ret) 174862e086beSAneesh Kumar K.V ret = err; 174962e086beSAneesh Kumar K.V 175062e086beSAneesh Kumar K.V walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one); 175119f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_JDATA); 175262e086beSAneesh Kumar K.V out: 175362e086beSAneesh Kumar K.V return ret; 175462e086beSAneesh Kumar K.V } 175562e086beSAneesh Kumar K.V 1756744692dcSJiaying Zhang static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode); 1757744692dcSJiaying Zhang static void 
ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); 1758744692dcSJiaying Zhang 175961628a3fSMingming Cao /* 176043ce1d23SAneesh Kumar K.V * Note that we don't need to start a transaction unless we're journaling data 176143ce1d23SAneesh Kumar K.V * because we should have holes filled from ext4_page_mkwrite(). We even don't 176243ce1d23SAneesh Kumar K.V * need to file the inode to the transaction's list in ordered mode because if 176343ce1d23SAneesh Kumar K.V * we are writing back data added by write(), the inode is already there and if 176443ce1d23SAneesh Kumar K.V * we are writing back data modified via mmap(), no one guarantees in which 176543ce1d23SAneesh Kumar K.V * transaction the data will hit the disk. In case we are journaling data, we 176643ce1d23SAneesh Kumar K.V * cannot start transaction directly because transaction start ranks above page 176743ce1d23SAneesh Kumar K.V * lock so we have to do some magic. 176843ce1d23SAneesh Kumar K.V * 1769b920c755STheodore Ts'o * This function can get called via... 1770b920c755STheodore Ts'o * - ext4_da_writepages after taking page lock (have journal handle) 1771b920c755STheodore Ts'o * - journal_submit_inode_data_buffers (no journal handle) 1772b920c755STheodore Ts'o * - shrink_page_list via pdflush (no journal handle) 1773b920c755STheodore Ts'o * - grab_page_cache when doing write_begin (have journal handle) 177443ce1d23SAneesh Kumar K.V * 177543ce1d23SAneesh Kumar K.V * We don't do any block allocation in this function. If we have a page with 177643ce1d23SAneesh Kumar K.V * multiple blocks we need to write those buffer_heads that are mapped. This 177743ce1d23SAneesh Kumar K.V * is important for mmap-based writes. So if, with blocksize 1K, we do 177843ce1d23SAneesh Kumar K.V * truncate(f, 1024); 177943ce1d23SAneesh Kumar K.V * a = mmap(f, 0, 4096); 178043ce1d23SAneesh Kumar K.V * a[0] = 'a'; 178143ce1d23SAneesh Kumar K.V * truncate(f, 4096); 178243ce1d23SAneesh Kumar K.V * we have in the page the first buffer_head mapped via the page_mkwrite callback 178343ce1d23SAneesh Kumar K.V * but the other buffer_heads would be unmapped but dirty (dirtying done via 178443ce1d23SAneesh Kumar K.V * do_wp_page). So writepage should write the first block. If we modify 178543ce1d23SAneesh Kumar K.V * the mmap area beyond 1024 we will again get a page_fault and the 178643ce1d23SAneesh Kumar K.V * page_mkwrite callback will do the block allocation and mark the 178743ce1d23SAneesh Kumar K.V * buffer_heads mapped. 178843ce1d23SAneesh Kumar K.V * 178943ce1d23SAneesh Kumar K.V * We redirty the page if we have any buffer_heads that are either delayed or 179043ce1d23SAneesh Kumar K.V * unwritten in the page. 179143ce1d23SAneesh Kumar K.V * 179243ce1d23SAneesh Kumar K.V * We can get recursively called as shown below. 179343ce1d23SAneesh Kumar K.V * 179443ce1d23SAneesh Kumar K.V * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() -> 179543ce1d23SAneesh Kumar K.V * ext4_writepage() 179643ce1d23SAneesh Kumar K.V * 179743ce1d23SAneesh Kumar K.V * But since we don't do any block allocation we should not deadlock. 179843ce1d23SAneesh Kumar K.V * The page also has the dirty flag cleared so we don't get a recursive page_lock. 
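 * (In the example above, only the first 1K block of the page was mapped
 * by page_mkwrite; the remaining blocks stay unmapped-but-dirty, which is
 * why only the mapped buffer_heads may be written out here.)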
179961628a3fSMingming Cao */ 180043ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page, 180164769240SAlex Tomas struct writeback_control *wbc) 180264769240SAlex Tomas { 1803a42afc5fSTheodore Ts'o int ret = 0, commit_write = 0; 180461628a3fSMingming Cao loff_t size; 1805498e5f24STheodore Ts'o unsigned int len; 1806744692dcSJiaying Zhang struct buffer_head *page_bufs = NULL; 180761628a3fSMingming Cao struct inode *inode = page->mapping->host; 180864769240SAlex Tomas 1809a9c667f8SLukas Czerner trace_ext4_writepage(page); 181061628a3fSMingming Cao size = i_size_read(inode); 181161628a3fSMingming Cao if (page->index == size >> PAGE_CACHE_SHIFT) 181261628a3fSMingming Cao len = size & ~PAGE_CACHE_MASK; 181361628a3fSMingming Cao else 181461628a3fSMingming Cao len = PAGE_CACHE_SIZE; 181561628a3fSMingming Cao 1816a42afc5fSTheodore Ts'o /* 1817a42afc5fSTheodore Ts'o * If the page does not have buffers (for whatever reason), 1818a107e5a3STheodore Ts'o * try to create them using __block_write_begin. If this 1819a42afc5fSTheodore Ts'o * fails, redirty the page and move on. 1820a42afc5fSTheodore Ts'o */ 1821b1142e8fSTheodore Ts'o if (!page_has_buffers(page)) { 1822a107e5a3STheodore Ts'o if (__block_write_begin(page, 0, len, 1823a42afc5fSTheodore Ts'o noalloc_get_block_write)) { 1824a42afc5fSTheodore Ts'o redirty_page: 1825a42afc5fSTheodore Ts'o redirty_page_for_writepage(wbc, page); 1826a42afc5fSTheodore Ts'o unlock_page(page); 1827a42afc5fSTheodore Ts'o return 0; 1828a42afc5fSTheodore Ts'o } 1829a42afc5fSTheodore Ts'o commit_write = 1; 1830a42afc5fSTheodore Ts'o } 1831f0e6c985SAneesh Kumar K.V page_bufs = page_buffers(page); 1832f0e6c985SAneesh Kumar K.V if (walk_page_buffers(NULL, page_bufs, 0, len, NULL, 1833c364b22cSAneesh Kumar K.V ext4_bh_delay_or_unwritten)) { 183461628a3fSMingming Cao /* 1835b1142e8fSTheodore Ts'o * We don't want to do block allocation, so redirty 1836b1142e8fSTheodore Ts'o * the page and return. We may reach here when we do 1837b1142e8fSTheodore Ts'o * a journal commit via journal_submit_inode_data_buffers. 1838b1142e8fSTheodore Ts'o * We can also reach here via shrink_page_list 1839f0e6c985SAneesh Kumar K.V */ 1840a42afc5fSTheodore Ts'o goto redirty_page; 1841f0e6c985SAneesh Kumar K.V } 1842a42afc5fSTheodore Ts'o if (commit_write) 1843ed9b3e33SAneesh Kumar K.V /* now mark the buffer_heads as dirty and uptodate */ 1844b767e78aSAneesh Kumar K.V block_commit_write(page, 0, len); 184564769240SAlex Tomas 1846cb20d518STheodore Ts'o if (PageChecked(page) && ext4_should_journal_data(inode)) 184743ce1d23SAneesh Kumar K.V /* 184843ce1d23SAneesh Kumar K.V * It's mmapped pagecache. Add buffers and journal it. There 184943ce1d23SAneesh Kumar K.V * doesn't seem much point in redirtying the page here. 
185043ce1d23SAneesh Kumar K.V */ 18513f0ca309SWu Fengguang return __ext4_journalled_writepage(page, len); 185243ce1d23SAneesh Kumar K.V 1853a42afc5fSTheodore Ts'o if (buffer_uninit(page_bufs)) { 1854744692dcSJiaying Zhang ext4_set_bh_endio(page_bufs, inode); 1855744692dcSJiaying Zhang ret = block_write_full_page_endio(page, noalloc_get_block_write, 1856744692dcSJiaying Zhang wbc, ext4_end_io_buffer_write); 1857744692dcSJiaying Zhang } else 1858b920c755STheodore Ts'o ret = block_write_full_page(page, noalloc_get_block_write, 1859f0e6c985SAneesh Kumar K.V wbc); 186064769240SAlex Tomas 186164769240SAlex Tomas return ret; 186264769240SAlex Tomas } 186364769240SAlex Tomas 186461628a3fSMingming Cao /* 1865525f4ed8SMingming Cao * This is called via ext4_da_writepages() to 186625985edcSLucas De Marchi * calculate the total number of credits to reserve to fit 1867525f4ed8SMingming Cao * a single extent allocation into a single transaction; 1868525f4ed8SMingming Cao * ext4_da_writepages() will loop calling this before 1869525f4ed8SMingming Cao * the block allocation. 187061628a3fSMingming Cao */ 1871525f4ed8SMingming Cao 1872525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode) 1873525f4ed8SMingming Cao { 1874525f4ed8SMingming Cao int max_blocks = EXT4_I(inode)->i_reserved_data_blocks; 1875525f4ed8SMingming Cao 1876525f4ed8SMingming Cao /* 1877525f4ed8SMingming Cao * With non-extent format the journal credit needed to 1878525f4ed8SMingming Cao * insert nrblocks contiguous blocks is dependent on 1879525f4ed8SMingming Cao * the number of contiguous blocks. So we will limit 1880525f4ed8SMingming Cao * the number of contiguous blocks to a sane value 1881525f4ed8SMingming Cao */ 188212e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) && 1883525f4ed8SMingming Cao (max_blocks > EXT4_MAX_TRANS_DATA)) 1884525f4ed8SMingming Cao max_blocks = EXT4_MAX_TRANS_DATA; 1885525f4ed8SMingming Cao 1886525f4ed8SMingming Cao return ext4_chunk_trans_blocks(inode, max_blocks); 1887525f4ed8SMingming Cao } 188861628a3fSMingming Cao 18898e48dcfbSTheodore Ts'o /* 18908e48dcfbSTheodore Ts'o * write_cache_pages_da - walk the list of dirty pages of the given 18918eb9e5ceSTheodore Ts'o * address space and accumulate pages that need writing, and call 1892168fc022STheodore Ts'o * mpage_da_map_and_submit to map a single contiguous memory region 1893168fc022STheodore Ts'o * and then write them. 
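 * Returns 0 once the whole range has been scanned, or
 * MPAGE_DA_EXTENT_TAIL when an extent was flushed mid-scan (in which
 * case mpd->io_done is set and the caller should loop).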
18948e48dcfbSTheodore Ts'o */ 18958e48dcfbSTheodore Ts'o static int write_cache_pages_da(struct address_space *mapping, 18968e48dcfbSTheodore Ts'o struct writeback_control *wbc, 189772f84e65SEric Sandeen struct mpage_da_data *mpd, 189872f84e65SEric Sandeen pgoff_t *done_index) 18998e48dcfbSTheodore Ts'o { 19008eb9e5ceSTheodore Ts'o struct buffer_head *bh, *head; 1901168fc022STheodore Ts'o struct inode *inode = mapping->host; 19028e48dcfbSTheodore Ts'o struct pagevec pvec; 19034f01b02cSTheodore Ts'o unsigned int nr_pages; 19044f01b02cSTheodore Ts'o sector_t logical; 19054f01b02cSTheodore Ts'o pgoff_t index, end; 19068e48dcfbSTheodore Ts'o long nr_to_write = wbc->nr_to_write; 19074f01b02cSTheodore Ts'o int i, tag, ret = 0; 19088e48dcfbSTheodore Ts'o 1909168fc022STheodore Ts'o memset(mpd, 0, sizeof(struct mpage_da_data)); 1910168fc022STheodore Ts'o mpd->wbc = wbc; 1911168fc022STheodore Ts'o mpd->inode = inode; 19128e48dcfbSTheodore Ts'o pagevec_init(&pvec, 0); 19138e48dcfbSTheodore Ts'o index = wbc->range_start >> PAGE_CACHE_SHIFT; 19148e48dcfbSTheodore Ts'o end = wbc->range_end >> PAGE_CACHE_SHIFT; 19158e48dcfbSTheodore Ts'o 19166e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 19175b41d924SEric Sandeen tag = PAGECACHE_TAG_TOWRITE; 19185b41d924SEric Sandeen else 19195b41d924SEric Sandeen tag = PAGECACHE_TAG_DIRTY; 19205b41d924SEric Sandeen 192172f84e65SEric Sandeen *done_index = index; 19224f01b02cSTheodore Ts'o while (index <= end) { 19235b41d924SEric Sandeen nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 19248e48dcfbSTheodore Ts'o min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 19258e48dcfbSTheodore Ts'o if (nr_pages == 0) 19264f01b02cSTheodore Ts'o return 0; 19278e48dcfbSTheodore Ts'o 19288e48dcfbSTheodore Ts'o for (i = 0; i < nr_pages; i++) { 19298e48dcfbSTheodore Ts'o struct page *page = pvec.pages[i]; 19308e48dcfbSTheodore Ts'o 19318e48dcfbSTheodore Ts'o /* 19328e48dcfbSTheodore Ts'o * At this point, the page may be truncated or 19338e48dcfbSTheodore Ts'o * invalidated (changing page->mapping to NULL), or 19348e48dcfbSTheodore Ts'o * even swizzled back from swapper_space to tmpfs file 19358e48dcfbSTheodore Ts'o * mapping. However, page->index will not change 19368e48dcfbSTheodore Ts'o * because we have a reference on the page. 
19378e48dcfbSTheodore Ts'o */ 19384f01b02cSTheodore Ts'o if (page->index > end) 19394f01b02cSTheodore Ts'o goto out; 19408e48dcfbSTheodore Ts'o 194172f84e65SEric Sandeen *done_index = page->index + 1; 194272f84e65SEric Sandeen 194378aaced3STheodore Ts'o /* 194478aaced3STheodore Ts'o * If we can't merge this page, and we have 194578aaced3STheodore Ts'o * accumulated a contiguous region, write it 194678aaced3STheodore Ts'o */ 194778aaced3STheodore Ts'o if ((mpd->next_page != page->index) && 194878aaced3STheodore Ts'o (mpd->next_page != mpd->first_page)) { 194978aaced3STheodore Ts'o mpage_da_map_and_submit(mpd); 195078aaced3STheodore Ts'o goto ret_extent_tail; 195178aaced3STheodore Ts'o } 195278aaced3STheodore Ts'o 19538e48dcfbSTheodore Ts'o lock_page(page); 19548e48dcfbSTheodore Ts'o 19558e48dcfbSTheodore Ts'o /* 19564f01b02cSTheodore Ts'o * If the page is no longer dirty, or its 19574f01b02cSTheodore Ts'o * mapping no longer corresponds to inode we 19584f01b02cSTheodore Ts'o * are writing (which means it has been 19594f01b02cSTheodore Ts'o * truncated or invalidated), or the page is 19604f01b02cSTheodore Ts'o * already under writeback and we are not 19614f01b02cSTheodore Ts'o * doing a data integrity writeback, skip the page 19628e48dcfbSTheodore Ts'o */ 19634f01b02cSTheodore Ts'o if (!PageDirty(page) || 19644f01b02cSTheodore Ts'o (PageWriteback(page) && 19654f01b02cSTheodore Ts'o (wbc->sync_mode == WB_SYNC_NONE)) || 19664f01b02cSTheodore Ts'o unlikely(page->mapping != mapping)) { 19678e48dcfbSTheodore Ts'o unlock_page(page); 19688e48dcfbSTheodore Ts'o continue; 19698e48dcfbSTheodore Ts'o } 19708e48dcfbSTheodore Ts'o 19718e48dcfbSTheodore Ts'o wait_on_page_writeback(page); 19728e48dcfbSTheodore Ts'o BUG_ON(PageWriteback(page)); 19738e48dcfbSTheodore Ts'o 1974168fc022STheodore Ts'o if (mpd->next_page != page->index) 19758eb9e5ceSTheodore Ts'o mpd->first_page = page->index; 19768eb9e5ceSTheodore Ts'o mpd->next_page = page->index + 1; 19778eb9e5ceSTheodore Ts'o logical = (sector_t) page->index << 19788eb9e5ceSTheodore Ts'o (PAGE_CACHE_SHIFT - inode->i_blkbits); 19798eb9e5ceSTheodore Ts'o 19808eb9e5ceSTheodore Ts'o if (!page_has_buffers(page)) { 19814f01b02cSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical, 19824f01b02cSTheodore Ts'o PAGE_CACHE_SIZE, 19838eb9e5ceSTheodore Ts'o (1 << BH_Dirty) | (1 << BH_Uptodate)); 19844f01b02cSTheodore Ts'o if (mpd->io_done) 19854f01b02cSTheodore Ts'o goto ret_extent_tail; 19868e48dcfbSTheodore Ts'o } else { 19878eb9e5ceSTheodore Ts'o /* 19884f01b02cSTheodore Ts'o * Page with regular buffer heads, 19894f01b02cSTheodore Ts'o * just add all dirty ones 19908eb9e5ceSTheodore Ts'o */ 19918eb9e5ceSTheodore Ts'o head = page_buffers(page); 19928eb9e5ceSTheodore Ts'o bh = head; 19938eb9e5ceSTheodore Ts'o do { 19948eb9e5ceSTheodore Ts'o BUG_ON(buffer_locked(bh)); 19958eb9e5ceSTheodore Ts'o /* 19968eb9e5ceSTheodore Ts'o * We need to try to allocate 19978eb9e5ceSTheodore Ts'o * unmapped blocks in the same page. 
19988eb9e5ceSTheodore Ts'o * Otherwise we won't make progress 19998eb9e5ceSTheodore Ts'o * with the page in ext4_writepage 20008eb9e5ceSTheodore Ts'o */ 20018eb9e5ceSTheodore Ts'o if (ext4_bh_delay_or_unwritten(NULL, bh)) { 20028eb9e5ceSTheodore Ts'o mpage_add_bh_to_extent(mpd, logical, 20038eb9e5ceSTheodore Ts'o bh->b_size, 20048eb9e5ceSTheodore Ts'o bh->b_state); 20054f01b02cSTheodore Ts'o if (mpd->io_done) 20064f01b02cSTheodore Ts'o goto ret_extent_tail; 20078eb9e5ceSTheodore Ts'o } else if (buffer_dirty(bh) && (buffer_mapped(bh))) { 20088eb9e5ceSTheodore Ts'o /* 20094f01b02cSTheodore Ts'o * mapped dirty buffer. We need 20104f01b02cSTheodore Ts'o * to update the b_state 20114f01b02cSTheodore Ts'o * because we look at b_state 20124f01b02cSTheodore Ts'o * in mpage_da_map_blocks. We 20134f01b02cSTheodore Ts'o * don't update b_size because 20144f01b02cSTheodore Ts'o * if we find an unmapped 20154f01b02cSTheodore Ts'o * buffer_head later we need to 20164f01b02cSTheodore Ts'o * use the b_state flag of that 20174f01b02cSTheodore Ts'o * buffer_head. 20188eb9e5ceSTheodore Ts'o */ 20198eb9e5ceSTheodore Ts'o if (mpd->b_size == 0) 20208eb9e5ceSTheodore Ts'o mpd->b_state = bh->b_state & BH_FLAGS; 20218e48dcfbSTheodore Ts'o } 20228eb9e5ceSTheodore Ts'o logical++; 20238eb9e5ceSTheodore Ts'o } while ((bh = bh->b_this_page) != head); 20248e48dcfbSTheodore Ts'o } 20258e48dcfbSTheodore Ts'o 20268e48dcfbSTheodore Ts'o if (nr_to_write > 0) { 20278e48dcfbSTheodore Ts'o nr_to_write--; 20288e48dcfbSTheodore Ts'o if (nr_to_write == 0 && 20294f01b02cSTheodore Ts'o wbc->sync_mode == WB_SYNC_NONE) 20308e48dcfbSTheodore Ts'o /* 20318e48dcfbSTheodore Ts'o * We stop writing back only if we are 20328e48dcfbSTheodore Ts'o * not doing integrity sync. In case of 20338e48dcfbSTheodore Ts'o * integrity sync we have to keep going 20348e48dcfbSTheodore Ts'o * because someone may be concurrently 20358e48dcfbSTheodore Ts'o * dirtying pages, and we might have 20368e48dcfbSTheodore Ts'o * synced a lot of newly appeared dirty 20378e48dcfbSTheodore Ts'o * pages, but have not synced all of the 20388e48dcfbSTheodore Ts'o * old dirty pages. 
20398e48dcfbSTheodore Ts'o */ 20404f01b02cSTheodore Ts'o goto out; 20418e48dcfbSTheodore Ts'o } 20428e48dcfbSTheodore Ts'o } 20438e48dcfbSTheodore Ts'o pagevec_release(&pvec); 20448e48dcfbSTheodore Ts'o cond_resched(); 20458e48dcfbSTheodore Ts'o } 20464f01b02cSTheodore Ts'o return 0; 20474f01b02cSTheodore Ts'o ret_extent_tail: 20484f01b02cSTheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL; 20498eb9e5ceSTheodore Ts'o out: 20508eb9e5ceSTheodore Ts'o pagevec_release(&pvec); 20518eb9e5ceSTheodore Ts'o cond_resched(); 20528e48dcfbSTheodore Ts'o return ret; 20538e48dcfbSTheodore Ts'o } 20548e48dcfbSTheodore Ts'o 20558e48dcfbSTheodore Ts'o 205664769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping, 205764769240SAlex Tomas struct writeback_control *wbc) 205864769240SAlex Tomas { 205922208dedSAneesh Kumar K.V pgoff_t index; 206022208dedSAneesh Kumar K.V int range_whole = 0; 206161628a3fSMingming Cao handle_t *handle = NULL; 2062df22291fSAneesh Kumar K.V struct mpage_da_data mpd; 20635e745b04SAneesh Kumar K.V struct inode *inode = mapping->host; 2064498e5f24STheodore Ts'o int pages_written = 0; 206555138e0bSTheodore Ts'o unsigned int max_pages; 20662acf2c26SAneesh Kumar K.V int range_cyclic, cycled = 1, io_done = 0; 206755138e0bSTheodore Ts'o int needed_blocks, ret = 0; 206855138e0bSTheodore Ts'o long desired_nr_to_write, nr_to_writebump = 0; 2069de89de6eSTheodore Ts'o loff_t range_start = wbc->range_start; 20705e745b04SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); 207172f84e65SEric Sandeen pgoff_t done_index = 0; 20725b41d924SEric Sandeen pgoff_t end; 207361628a3fSMingming Cao 20749bffad1eSTheodore Ts'o trace_ext4_da_writepages(inode, wbc); 2075ba80b101STheodore Ts'o 207661628a3fSMingming Cao /* 207761628a3fSMingming Cao * No pages to write? This is mainly a kludge to avoid starting 207861628a3fSMingming Cao * a transaction for special inodes like journal inode on last iput() 207961628a3fSMingming Cao * because that could violate lock ordering on umount 208061628a3fSMingming Cao */ 2081a1d6cc56SAneesh Kumar K.V if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 208261628a3fSMingming Cao return 0; 20832a21e37eSTheodore Ts'o 20842a21e37eSTheodore Ts'o /* 20852a21e37eSTheodore Ts'o * If the filesystem has aborted, it is read-only, so return 20862a21e37eSTheodore Ts'o * right away instead of dumping stack traces later on that 20872a21e37eSTheodore Ts'o * will obscure the real source of the problem. We test 20884ab2f15bSTheodore Ts'o * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because 20892a21e37eSTheodore Ts'o * the latter could be true if the filesystem is mounted 20902a21e37eSTheodore Ts'o * read-only, and in that case, ext4_da_writepages should 20912a21e37eSTheodore Ts'o * *never* be called, so if that ever happens, we would want 20922a21e37eSTheodore Ts'o * the stack trace. 
20932a21e37eSTheodore Ts'o */ 20944ab2f15bSTheodore Ts'o if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) 20952a21e37eSTheodore Ts'o return -EROFS; 20962a21e37eSTheodore Ts'o 209722208dedSAneesh Kumar K.V if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) 209822208dedSAneesh Kumar K.V range_whole = 1; 209961628a3fSMingming Cao 21002acf2c26SAneesh Kumar K.V range_cyclic = wbc->range_cyclic; 21012acf2c26SAneesh Kumar K.V if (wbc->range_cyclic) { 210222208dedSAneesh Kumar K.V index = mapping->writeback_index; 21032acf2c26SAneesh Kumar K.V if (index) 21042acf2c26SAneesh Kumar K.V cycled = 0; 21052acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 21062acf2c26SAneesh Kumar K.V wbc->range_end = LLONG_MAX; 21072acf2c26SAneesh Kumar K.V wbc->range_cyclic = 0; 21085b41d924SEric Sandeen end = -1; 21095b41d924SEric Sandeen } else { 211022208dedSAneesh Kumar K.V index = wbc->range_start >> PAGE_CACHE_SHIFT; 21115b41d924SEric Sandeen end = wbc->range_end >> PAGE_CACHE_SHIFT; 21125b41d924SEric Sandeen } 2113a1d6cc56SAneesh Kumar K.V 211455138e0bSTheodore Ts'o /* 211455138e0bSTheodore Ts'o * This works around two forms of stupidity. The first is in 211655138e0bSTheodore Ts'o * the writeback code, which caps the maximum number of pages 211755138e0bSTheodore Ts'o * written to be 1024 pages. This is wrong on multiple 211855138e0bSTheodore Ts'o * levels; different architectures have a different page size, 211955138e0bSTheodore Ts'o * which changes the maximum amount of data which gets 212055138e0bSTheodore Ts'o * written. Secondly, 4 megabytes is way too small. XFS 212155138e0bSTheodore Ts'o * forces this value to be 16 megabytes by multiplying 212255138e0bSTheodore Ts'o * the nr_to_write parameter by four, and then relies on its 212355138e0bSTheodore Ts'o * allocator to allocate larger extents to make them 212455138e0bSTheodore Ts'o * contiguous. Unfortunately this brings us to the second 212555138e0bSTheodore Ts'o * stupidity, which is that ext4's mballoc code only allocates 212655138e0bSTheodore Ts'o * at most 2048 blocks. So we force contiguous writes up to 212755138e0bSTheodore Ts'o * the number of dirty blocks in the inode, or 212855138e0bSTheodore Ts'o * sbi->max_writeback_mb_bump, whichever is smaller. 
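 * (For example, a 128MB bump with 4k pages caps max_pages below at
 * 128 << (20 - 12) = 32768 pages.)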
212955138e0bSTheodore Ts'o */ 213055138e0bSTheodore Ts'o max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT); 2131b443e733SEric Sandeen if (!range_cyclic && range_whole) { 2132b443e733SEric Sandeen if (wbc->nr_to_write == LONG_MAX) 2133b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write; 213455138e0bSTheodore Ts'o else 2135b443e733SEric Sandeen desired_nr_to_write = wbc->nr_to_write * 8; 2136b443e733SEric Sandeen } else 213755138e0bSTheodore Ts'o desired_nr_to_write = ext4_num_dirty_pages(inode, index, 213855138e0bSTheodore Ts'o max_pages); 213955138e0bSTheodore Ts'o if (desired_nr_to_write > max_pages) 214055138e0bSTheodore Ts'o desired_nr_to_write = max_pages; 214155138e0bSTheodore Ts'o 214255138e0bSTheodore Ts'o if (wbc->nr_to_write < desired_nr_to_write) { 214355138e0bSTheodore Ts'o nr_to_writebump = desired_nr_to_write - wbc->nr_to_write; 214455138e0bSTheodore Ts'o wbc->nr_to_write = desired_nr_to_write; 214555138e0bSTheodore Ts'o } 214655138e0bSTheodore Ts'o 21472acf2c26SAneesh Kumar K.V retry: 21486e6938b6SWu Fengguang if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) 21495b41d924SEric Sandeen tag_pages_for_writeback(mapping, index, end); 21505b41d924SEric Sandeen 215122208dedSAneesh Kumar K.V while (!ret && wbc->nr_to_write > 0) { 2152a1d6cc56SAneesh Kumar K.V 2153a1d6cc56SAneesh Kumar K.V /* 2154a1d6cc56SAneesh Kumar K.V * we insert one extent at a time, so we need 2155a1d6cc56SAneesh Kumar K.V * the credits needed for a single extent allocation. 2156a1d6cc56SAneesh Kumar K.V * Journalled mode is currently not supported 2157a1d6cc56SAneesh Kumar K.V * by delalloc 2158a1d6cc56SAneesh Kumar K.V */ 2159a1d6cc56SAneesh Kumar K.V BUG_ON(ext4_should_journal_data(inode)); 2160525f4ed8SMingming Cao needed_blocks = ext4_da_writepages_trans_blocks(inode); 2161a1d6cc56SAneesh Kumar K.V 216261628a3fSMingming Cao /* start a new transaction */ 216361628a3fSMingming Cao handle = ext4_journal_start(inode, needed_blocks); 216461628a3fSMingming Cao if (IS_ERR(handle)) { 216561628a3fSMingming Cao ret = PTR_ERR(handle); 21661693918eSTheodore Ts'o ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: " 2167fbe845ddSCurt Wohlgemuth "%ld pages, ino %lu; err %d", __func__, 2168a1d6cc56SAneesh Kumar K.V wbc->nr_to_write, inode->i_ino, ret); 216961628a3fSMingming Cao goto out_writepages; 217061628a3fSMingming Cao } 2171f63e6005STheodore Ts'o 2172f63e6005STheodore Ts'o /* 21738eb9e5ceSTheodore Ts'o * Now call write_cache_pages_da() to find the next 2174f63e6005STheodore Ts'o * contiguous region of logical blocks that need 21758eb9e5ceSTheodore Ts'o * blocks to be allocated by ext4 and submit them. 2176f63e6005STheodore Ts'o */ 217772f84e65SEric Sandeen ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index); 2178f63e6005STheodore Ts'o /* 2179af901ca1SAndré Goddard Rosa * If we have a contiguous extent of pages and we 2180f63e6005STheodore Ts'o * haven't done the I/O yet, map the blocks and submit 2181f63e6005STheodore Ts'o * them for I/O. 
2182f63e6005STheodore Ts'o */ 2183f63e6005STheodore Ts'o if (!mpd.io_done && mpd.next_page != mpd.first_page) { 21845a87b7a5STheodore Ts'o mpage_da_map_and_submit(&mpd); 2185f63e6005STheodore Ts'o ret = MPAGE_DA_EXTENT_TAIL; 2186f63e6005STheodore Ts'o } 2187b3a3ca8cSTheodore Ts'o trace_ext4_da_write_pages(inode, &mpd); 2188f63e6005STheodore Ts'o wbc->nr_to_write -= mpd.pages_written; 2189df22291fSAneesh Kumar K.V 219061628a3fSMingming Cao ext4_journal_stop(handle); 2191df22291fSAneesh Kumar K.V 21928f64b32eSEric Sandeen if ((mpd.retval == -ENOSPC) && sbi->s_journal) { 219322208dedSAneesh Kumar K.V /* commit the transaction which would 219422208dedSAneesh Kumar K.V * free blocks released in the transaction 219522208dedSAneesh Kumar K.V * and try again 219622208dedSAneesh Kumar K.V */ 2197df22291fSAneesh Kumar K.V jbd2_journal_force_commit_nested(sbi->s_journal); 219822208dedSAneesh Kumar K.V ret = 0; 219922208dedSAneesh Kumar K.V } else if (ret == MPAGE_DA_EXTENT_TAIL) { 2200a1d6cc56SAneesh Kumar K.V /* 2201a1d6cc56SAneesh Kumar K.V * got one extent now try with 2202a1d6cc56SAneesh Kumar K.V * rest of the pages 2203a1d6cc56SAneesh Kumar K.V */ 220422208dedSAneesh Kumar K.V pages_written += mpd.pages_written; 2205a1d6cc56SAneesh Kumar K.V ret = 0; 22062acf2c26SAneesh Kumar K.V io_done = 1; 220722208dedSAneesh Kumar K.V } else if (wbc->nr_to_write) 220861628a3fSMingming Cao /* 220961628a3fSMingming Cao * There is no more writeout needed 221061628a3fSMingming Cao * or we requested for a noblocking writeout 221161628a3fSMingming Cao * and we found the device congested 221261628a3fSMingming Cao */ 221361628a3fSMingming Cao break; 221461628a3fSMingming Cao } 22152acf2c26SAneesh Kumar K.V if (!io_done && !cycled) { 22162acf2c26SAneesh Kumar K.V cycled = 1; 22172acf2c26SAneesh Kumar K.V index = 0; 22182acf2c26SAneesh Kumar K.V wbc->range_start = index << PAGE_CACHE_SHIFT; 22192acf2c26SAneesh Kumar K.V wbc->range_end = mapping->writeback_index - 1; 22202acf2c26SAneesh Kumar K.V goto retry; 22212acf2c26SAneesh Kumar K.V } 222261628a3fSMingming Cao 222322208dedSAneesh Kumar K.V /* Update index */ 22242acf2c26SAneesh Kumar K.V wbc->range_cyclic = range_cyclic; 222522208dedSAneesh Kumar K.V if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 222622208dedSAneesh Kumar K.V /* 222722208dedSAneesh Kumar K.V * set the writeback_index so that range_cyclic 222822208dedSAneesh Kumar K.V * mode will write it back later 222922208dedSAneesh Kumar K.V */ 223072f84e65SEric Sandeen mapping->writeback_index = done_index; 2231a1d6cc56SAneesh Kumar K.V 223261628a3fSMingming Cao out_writepages: 223322208dedSAneesh Kumar K.V wbc->nr_to_write -= nr_to_writebump; 2234de89de6eSTheodore Ts'o wbc->range_start = range_start; 22359bffad1eSTheodore Ts'o trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); 223661628a3fSMingming Cao return ret; 223764769240SAlex Tomas } 223864769240SAlex Tomas 223979f0be8dSAneesh Kumar K.V #define FALL_BACK_TO_NONDELALLOC 1 224079f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb) 224179f0be8dSAneesh Kumar K.V { 224279f0be8dSAneesh Kumar K.V s64 free_blocks, dirty_blocks; 224379f0be8dSAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 224479f0be8dSAneesh Kumar K.V 224579f0be8dSAneesh Kumar K.V /* 224679f0be8dSAneesh Kumar K.V * switch to non delalloc mode if we are running low 224779f0be8dSAneesh Kumar K.V * on free block. 
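 * (Illustrative numbers only: with free_blocks = 1000 and
 * dirty_blocks = 700, the check below sees 2 * 1000 < 3 * 700,
 * i.e. 2000 < 2100, and we fall back to nodelalloc.)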
The free block accounting via percpu 2248179f7ebfSEric Dumazet * counters can get slightly wrong with percpu_counter_batch getting 224979f0be8dSAneesh Kumar K.V * accumulated on each CPU without updating global counters 225079f0be8dSAneesh Kumar K.V * Delalloc needs an accurate free block accounting. So switch 225179f0be8dSAneesh Kumar K.V * to non-delalloc mode when we are near the error range. 225279f0be8dSAneesh Kumar K.V */ 225357042651STheodore Ts'o free_blocks = EXT4_C2B(sbi, 225457042651STheodore Ts'o percpu_counter_read_positive(&sbi->s_freeclusters_counter)); 225557042651STheodore Ts'o dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter); 225679f0be8dSAneesh Kumar K.V if (2 * free_blocks < 3 * dirty_blocks || 2257df55c99dSTheodore Ts'o free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) { 225879f0be8dSAneesh Kumar K.V /* 2259c8afb446SEric Sandeen * free block count is less than 150% of dirty blocks, 2260c8afb446SEric Sandeen * or free blocks are less than the watermark 226179f0be8dSAneesh Kumar K.V */ 226279f0be8dSAneesh Kumar K.V return 1; 226379f0be8dSAneesh Kumar K.V } 2264c8afb446SEric Sandeen /* 2265c8afb446SEric Sandeen * Even if we don't switch but are nearing capacity, 2266c8afb446SEric Sandeen * start pushing delalloc when 1/2 of free blocks are dirty. 2267c8afb446SEric Sandeen */ 2268c8afb446SEric Sandeen if (free_blocks < 2 * dirty_blocks) 2269c8afb446SEric Sandeen writeback_inodes_sb_if_idle(sb); 2270c8afb446SEric Sandeen 227179f0be8dSAneesh Kumar K.V return 0; 227279f0be8dSAneesh Kumar K.V } 227379f0be8dSAneesh Kumar K.V 227464769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 227564769240SAlex Tomas loff_t pos, unsigned len, unsigned flags, 227664769240SAlex Tomas struct page **pagep, void **fsdata) 227764769240SAlex Tomas { 227872b8ab9dSEric Sandeen int ret, retries = 0; 227964769240SAlex Tomas struct page *page; 228064769240SAlex Tomas pgoff_t index; 228164769240SAlex Tomas struct inode *inode = mapping->host; 228264769240SAlex Tomas handle_t *handle; 228302fac129SAllison Henderson loff_t page_len; 228464769240SAlex Tomas 228564769240SAlex Tomas index = pos >> PAGE_CACHE_SHIFT; 228679f0be8dSAneesh Kumar K.V 228779f0be8dSAneesh Kumar K.V if (ext4_nonda_switch(inode->i_sb)) { 228879f0be8dSAneesh Kumar K.V *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; 228979f0be8dSAneesh Kumar K.V return ext4_write_begin(file, mapping, pos, 229079f0be8dSAneesh Kumar K.V len, flags, pagep, fsdata); 229179f0be8dSAneesh Kumar K.V } 229279f0be8dSAneesh Kumar K.V *fsdata = (void *)0; 22939bffad1eSTheodore Ts'o trace_ext4_da_write_begin(inode, pos, len, flags); 2294d2a17637SMingming Cao retry: 229564769240SAlex Tomas /* 229664769240SAlex Tomas * With delayed allocation, we don't log the i_disksize update 229764769240SAlex Tomas * if there is delayed block allocation. But we still need 229864769240SAlex Tomas * to journal the i_disksize update if the write goes to the end 229964769240SAlex Tomas * of a file which has an already mapped buffer.
230064769240SAlex Tomas */ 230164769240SAlex Tomas handle = ext4_journal_start(inode, 1); 230264769240SAlex Tomas if (IS_ERR(handle)) { 230364769240SAlex Tomas ret = PTR_ERR(handle); 230464769240SAlex Tomas goto out; 230564769240SAlex Tomas } 2306ebd3610bSJan Kara /* We cannot recurse into the filesystem as the transaction is already 2307ebd3610bSJan Kara * started */ 2308ebd3610bSJan Kara flags |= AOP_FLAG_NOFS; 230964769240SAlex Tomas 231054566b2cSNick Piggin page = grab_cache_page_write_begin(mapping, index, flags); 2311d5a0d4f7SEric Sandeen if (!page) { 2312d5a0d4f7SEric Sandeen ext4_journal_stop(handle); 2313d5a0d4f7SEric Sandeen ret = -ENOMEM; 2314d5a0d4f7SEric Sandeen goto out; 2315d5a0d4f7SEric Sandeen } 231664769240SAlex Tomas *pagep = page; 231764769240SAlex Tomas 23186e1db88dSChristoph Hellwig ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep); 231964769240SAlex Tomas if (ret < 0) { 232064769240SAlex Tomas unlock_page(page); 232164769240SAlex Tomas ext4_journal_stop(handle); 232264769240SAlex Tomas page_cache_release(page); 2323ae4d5372SAneesh Kumar K.V /* 2324ae4d5372SAneesh Kumar K.V * block_write_begin may have instantiated a few blocks 2325ae4d5372SAneesh Kumar K.V * outside i_size. Trim these off again. Don't need 2326ae4d5372SAneesh Kumar K.V * i_size_read because we hold i_mutex. 2327ae4d5372SAneesh Kumar K.V */ 2328ae4d5372SAneesh Kumar K.V if (pos + len > inode->i_size) 2329b9a4207dSJan Kara ext4_truncate_failed_write(inode); 233002fac129SAllison Henderson } else { 233102fac129SAllison Henderson page_len = pos & (PAGE_CACHE_SIZE - 1); 233202fac129SAllison Henderson if (page_len > 0) { 233302fac129SAllison Henderson ret = ext4_discard_partial_page_buffers_no_lock(handle, 233402fac129SAllison Henderson inode, page, pos - page_len, page_len, 233502fac129SAllison Henderson EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED); 233602fac129SAllison Henderson } 233764769240SAlex Tomas } 233864769240SAlex Tomas 2339d2a17637SMingming Cao if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 2340d2a17637SMingming Cao goto retry; 234164769240SAlex Tomas out: 234264769240SAlex Tomas return ret; 234364769240SAlex Tomas } 234464769240SAlex Tomas 2345632eaeabSMingming Cao /* 2346632eaeabSMingming Cao * Check if we should update i_disksize 2347632eaeabSMingming Cao * when write to the end of file but not require block allocation 2348632eaeabSMingming Cao */ 2349632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page, 2350632eaeabSMingming Cao unsigned long offset) 2351632eaeabSMingming Cao { 2352632eaeabSMingming Cao struct buffer_head *bh; 2353632eaeabSMingming Cao struct inode *inode = page->mapping->host; 2354632eaeabSMingming Cao unsigned int idx; 2355632eaeabSMingming Cao int i; 2356632eaeabSMingming Cao 2357632eaeabSMingming Cao bh = page_buffers(page); 2358632eaeabSMingming Cao idx = offset >> inode->i_blkbits; 2359632eaeabSMingming Cao 2360632eaeabSMingming Cao for (i = 0; i < idx; i++) 2361632eaeabSMingming Cao bh = bh->b_this_page; 2362632eaeabSMingming Cao 236329fa89d0SAneesh Kumar K.V if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh)) 2364632eaeabSMingming Cao return 0; 2365632eaeabSMingming Cao return 1; 2366632eaeabSMingming Cao } 2367632eaeabSMingming Cao 236864769240SAlex Tomas static int ext4_da_write_end(struct file *file, 236964769240SAlex Tomas struct address_space *mapping, 237064769240SAlex Tomas loff_t pos, unsigned len, unsigned copied, 237164769240SAlex Tomas struct page *page, void *fsdata) 
237264769240SAlex Tomas { 237364769240SAlex Tomas struct inode *inode = mapping->host; 237464769240SAlex Tomas int ret = 0, ret2; 237564769240SAlex Tomas handle_t *handle = ext4_journal_current_handle(); 237664769240SAlex Tomas loff_t new_i_size; 2377632eaeabSMingming Cao unsigned long start, end; 237879f0be8dSAneesh Kumar K.V int write_mode = (int)(unsigned long)fsdata; 237902fac129SAllison Henderson loff_t page_len; 238079f0be8dSAneesh Kumar K.V 238179f0be8dSAneesh Kumar K.V if (write_mode == FALL_BACK_TO_NONDELALLOC) { 238279f0be8dSAneesh Kumar K.V if (ext4_should_order_data(inode)) { 238379f0be8dSAneesh Kumar K.V return ext4_ordered_write_end(file, mapping, pos, 238479f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 238579f0be8dSAneesh Kumar K.V } else if (ext4_should_writeback_data(inode)) { 238679f0be8dSAneesh Kumar K.V return ext4_writeback_write_end(file, mapping, pos, 238779f0be8dSAneesh Kumar K.V len, copied, page, fsdata); 238879f0be8dSAneesh Kumar K.V } else { 238979f0be8dSAneesh Kumar K.V BUG(); 239079f0be8dSAneesh Kumar K.V } 239179f0be8dSAneesh Kumar K.V } 2392632eaeabSMingming Cao 23939bffad1eSTheodore Ts'o trace_ext4_da_write_end(inode, pos, len, copied); 2394632eaeabSMingming Cao start = pos & (PAGE_CACHE_SIZE - 1); 2395632eaeabSMingming Cao end = start + copied - 1; 239664769240SAlex Tomas 239764769240SAlex Tomas /* 239864769240SAlex Tomas * generic_write_end() will run mark_inode_dirty() if i_size 239964769240SAlex Tomas * changes. So let's piggyback the i_disksize mark_inode_dirty 240064769240SAlex Tomas * into that. 240164769240SAlex Tomas */ 240264769240SAlex Tomas 240364769240SAlex Tomas new_i_size = pos + copied; 2404632eaeabSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) { 2405632eaeabSMingming Cao if (ext4_da_should_update_i_disksize(page, end)) { 2406632eaeabSMingming Cao down_write(&EXT4_I(inode)->i_data_sem); 2407632eaeabSMingming Cao if (new_i_size > EXT4_I(inode)->i_disksize) { 240864769240SAlex Tomas /* 2409632eaeabSMingming Cao * Updating i_disksize when extending file 2410632eaeabSMingming Cao * without needing block allocation 241164769240SAlex Tomas */ 241264769240SAlex Tomas if (ext4_should_order_data(inode)) 2413632eaeabSMingming Cao ret = ext4_jbd2_file_inode(handle, 2414632eaeabSMingming Cao inode); 241564769240SAlex Tomas 241664769240SAlex Tomas EXT4_I(inode)->i_disksize = new_i_size; 241764769240SAlex Tomas } 2418632eaeabSMingming Cao up_write(&EXT4_I(inode)->i_data_sem); 2419cf17fea6SAneesh Kumar K.V /* We need to mark the inode dirty even if 2420cf17fea6SAneesh Kumar K.V * new_i_size is less than inode->i_size 2421cf17fea6SAneesh Kumar K.V * but greater than i_disksize (hint: delalloc) 2422cf17fea6SAneesh Kumar K.V */ 2423cf17fea6SAneesh Kumar K.V ext4_mark_inode_dirty(handle, inode); 2424632eaeabSMingming Cao } 2425632eaeabSMingming Cao } 242664769240SAlex Tomas ret2 = generic_write_end(file, mapping, pos, len, copied, 242764769240SAlex Tomas page, fsdata); 242802fac129SAllison Henderson 242902fac129SAllison Henderson page_len = PAGE_CACHE_SIZE - 243002fac129SAllison Henderson ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1)); 243102fac129SAllison Henderson 243202fac129SAllison Henderson if (page_len > 0) { 243302fac129SAllison Henderson ret = ext4_discard_partial_page_buffers_no_lock(handle, 243402fac129SAllison Henderson inode, page, pos + copied - 1, page_len, 243502fac129SAllison Henderson EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED); 243602fac129SAllison Henderson } 243702fac129SAllison Henderson 243864769240SAlex Tomas copied = ret2;
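/*
 * (Illustrative arithmetic for the partial-page discard above, with
 * made-up numbers: if PAGE_CACHE_SIZE is 4096 and the copied data
 * ends at byte offset 100 within the page, then page_len =
 * 4096 - ((pos + copied - 1) & 4095) = 4096 - 99 = 3997, so the
 * discard covers the tail of the page from the last copied byte
 * onward, zeroing only the regions whose buffers are unmapped.)
 */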
243964769240SAlex Tomas if (ret2 < 0) 244064769240SAlex Tomas ret = ret2; 244164769240SAlex Tomas ret2 = ext4_journal_stop(handle); 244264769240SAlex Tomas if (!ret) 244364769240SAlex Tomas ret = ret2; 244464769240SAlex Tomas 244564769240SAlex Tomas return ret ? ret : copied; 244664769240SAlex Tomas } 244764769240SAlex Tomas 244864769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset) 244964769240SAlex Tomas { 245064769240SAlex Tomas /* 245164769240SAlex Tomas * Drop reserved blocks 245264769240SAlex Tomas */ 245364769240SAlex Tomas BUG_ON(!PageLocked(page)); 245464769240SAlex Tomas if (!page_has_buffers(page)) 245564769240SAlex Tomas goto out; 245664769240SAlex Tomas 2457d2a17637SMingming Cao ext4_da_page_release_reservation(page, offset); 245864769240SAlex Tomas 245964769240SAlex Tomas out: 246064769240SAlex Tomas ext4_invalidatepage(page, offset); 246164769240SAlex Tomas 246264769240SAlex Tomas return; 246364769240SAlex Tomas } 246464769240SAlex Tomas 2465ccd2506bSTheodore Ts'o /* 2466ccd2506bSTheodore Ts'o * Force all delayed allocation blocks to be allocated for a given inode. 2467ccd2506bSTheodore Ts'o */ 2468ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode) 2469ccd2506bSTheodore Ts'o { 2470fb40ba0dSTheodore Ts'o trace_ext4_alloc_da_blocks(inode); 2471fb40ba0dSTheodore Ts'o 2472ccd2506bSTheodore Ts'o if (!EXT4_I(inode)->i_reserved_data_blocks && 2473ccd2506bSTheodore Ts'o !EXT4_I(inode)->i_reserved_meta_blocks) 2474ccd2506bSTheodore Ts'o return 0; 2475ccd2506bSTheodore Ts'o 2476ccd2506bSTheodore Ts'o /* 2477ccd2506bSTheodore Ts'o * We do something simple for now. The filemap_flush() will 2478ccd2506bSTheodore Ts'o * also start triggering a write of the data blocks, which is 2479ccd2506bSTheodore Ts'o * not strictly speaking necessary (and for users of 2480ccd2506bSTheodore Ts'o * laptop_mode, not even desirable). However, to do otherwise 2481ccd2506bSTheodore Ts'o * would require replicating code paths in: 2482ccd2506bSTheodore Ts'o * 2483ccd2506bSTheodore Ts'o * ext4_da_writepages() -> 2484ccd2506bSTheodore Ts'o * write_cache_pages() ---> (via passed in callback function) 2485ccd2506bSTheodore Ts'o * __mpage_da_writepage() --> 2486ccd2506bSTheodore Ts'o * mpage_add_bh_to_extent() 2487ccd2506bSTheodore Ts'o * mpage_da_map_blocks() 2488ccd2506bSTheodore Ts'o * 2489ccd2506bSTheodore Ts'o * The problem is that write_cache_pages(), located in 2490ccd2506bSTheodore Ts'o * mm/page-writeback.c, marks pages clean in preparation for 2491ccd2506bSTheodore Ts'o * doing I/O, which is not desirable if we're not planning on 2492ccd2506bSTheodore Ts'o * doing I/O at all. 2493ccd2506bSTheodore Ts'o * 2494ccd2506bSTheodore Ts'o * We could call write_cache_pages(), and then redirty all of 2495380cf090SWu Fengguang * the pages by calling redirty_page_for_writepage() but that 2496ccd2506bSTheodore Ts'o * would be ugly in the extreme. So instead we would need to 2497ccd2506bSTheodore Ts'o * replicate parts of the code in the above functions, 249825985edcSLucas De Marchi * simplifying them because we wouldn't actually intend to 2499ccd2506bSTheodore Ts'o * write out the pages, but rather only collect contiguous 2500ccd2506bSTheodore Ts'o * logical block extents, call the multi-block allocator, and 2501ccd2506bSTheodore Ts'o * then update the buffer heads with the block allocations. 
2502ccd2506bSTheodore Ts'o * 2503ccd2506bSTheodore Ts'o * For now, though, we'll cheat by calling filemap_flush(), 2504ccd2506bSTheodore Ts'o * which will map the blocks, and start the I/O, but not 2505ccd2506bSTheodore Ts'o * actually wait for the I/O to complete. 2506ccd2506bSTheodore Ts'o */ 2507ccd2506bSTheodore Ts'o return filemap_flush(inode->i_mapping); 2508ccd2506bSTheodore Ts'o } 250964769240SAlex Tomas 251064769240SAlex Tomas /* 2511ac27a0ecSDave Kleikamp * bmap() is special. It gets used by applications such as lilo and by 2512ac27a0ecSDave Kleikamp * the swapper to find the on-disk block of a specific piece of data. 2513ac27a0ecSDave Kleikamp * 2514ac27a0ecSDave Kleikamp * Naturally, this is dangerous if the block concerned is still in the 2515617ba13bSMingming Cao * journal. If somebody makes a swapfile on an ext4 data-journaling 2516ac27a0ecSDave Kleikamp * filesystem and enables swap, then they may get a nasty shock when the 2517ac27a0ecSDave Kleikamp * data getting swapped to that swapfile suddenly gets overwritten by 2518ac27a0ecSDave Kleikamp * the original zero's written out previously to the journal and 2519ac27a0ecSDave Kleikamp * awaiting writeback in the kernel's buffer cache. 2520ac27a0ecSDave Kleikamp * 2521ac27a0ecSDave Kleikamp * So, if we see any bmap calls here on a modified, data-journaled file, 2522ac27a0ecSDave Kleikamp * take extra steps to flush any blocks which might be in the cache. 2523ac27a0ecSDave Kleikamp */ 2524617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block) 2525ac27a0ecSDave Kleikamp { 2526ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 2527ac27a0ecSDave Kleikamp journal_t *journal; 2528ac27a0ecSDave Kleikamp int err; 2529ac27a0ecSDave Kleikamp 253064769240SAlex Tomas if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && 253164769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) { 253264769240SAlex Tomas /* 253364769240SAlex Tomas * With delalloc we want to sync the file 253464769240SAlex Tomas * so that we can make sure we allocate 253564769240SAlex Tomas * blocks for file 253664769240SAlex Tomas */ 253764769240SAlex Tomas filemap_write_and_wait(mapping); 253864769240SAlex Tomas } 253964769240SAlex Tomas 254019f5fb7aSTheodore Ts'o if (EXT4_JOURNAL(inode) && 254119f5fb7aSTheodore Ts'o ext4_test_inode_state(inode, EXT4_STATE_JDATA)) { 2542ac27a0ecSDave Kleikamp /* 2543ac27a0ecSDave Kleikamp * This is a REALLY heavyweight approach, but the use of 2544ac27a0ecSDave Kleikamp * bmap on dirty files is expected to be extremely rare: 2545ac27a0ecSDave Kleikamp * only if we run lilo or swapon on a freshly made file 2546ac27a0ecSDave Kleikamp * do we expect this to happen. 2547ac27a0ecSDave Kleikamp * 2548ac27a0ecSDave Kleikamp * (bmap requires CAP_SYS_RAWIO so this does not 2549ac27a0ecSDave Kleikamp * represent an unprivileged user DOS attack --- we'd be 2550ac27a0ecSDave Kleikamp * in trouble if mortal users could trigger this path at 2551ac27a0ecSDave Kleikamp * will.) 2552ac27a0ecSDave Kleikamp * 2553617ba13bSMingming Cao * NB. EXT4_STATE_JDATA is not set on files other than 2554ac27a0ecSDave Kleikamp * regular files. If somebody wants to bmap a directory 2555ac27a0ecSDave Kleikamp * or symlink and gets confused because the buffer 2556ac27a0ecSDave Kleikamp * hasn't yet been flushed to disk, they deserve 2557ac27a0ecSDave Kleikamp * everything they get. 
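 * (For reference, a minimal user-space sketch of the sequence that
 * can reach this path; FIBMAP is the ioctl lilo uses, and as noted
 * above it requires CAP_SYS_RAWIO:
 *
 *	int blk = 0;			// logical block to resolve
 *	ioctl(fd, FIBMAP, &blk);	// blk now holds the physical block
 *
 * where fd is an open descriptor on a file in this filesystem.)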
2558ac27a0ecSDave Kleikamp */ 2559ac27a0ecSDave Kleikamp 256019f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_JDATA); 2561617ba13bSMingming Cao journal = EXT4_JOURNAL(inode); 2562dab291afSMingming Cao jbd2_journal_lock_updates(journal); 2563dab291afSMingming Cao err = jbd2_journal_flush(journal); 2564dab291afSMingming Cao jbd2_journal_unlock_updates(journal); 2565ac27a0ecSDave Kleikamp 2566ac27a0ecSDave Kleikamp if (err) 2567ac27a0ecSDave Kleikamp return 0; 2568ac27a0ecSDave Kleikamp } 2569ac27a0ecSDave Kleikamp 2570617ba13bSMingming Cao return generic_block_bmap(mapping, block, ext4_get_block); 2571ac27a0ecSDave Kleikamp } 2572ac27a0ecSDave Kleikamp 2573617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page) 2574ac27a0ecSDave Kleikamp { 25750562e0baSJiaying Zhang trace_ext4_readpage(page); 2576617ba13bSMingming Cao return mpage_readpage(page, ext4_get_block); 2577ac27a0ecSDave Kleikamp } 2578ac27a0ecSDave Kleikamp 2579ac27a0ecSDave Kleikamp static int 2580617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping, 2581ac27a0ecSDave Kleikamp struct list_head *pages, unsigned nr_pages) 2582ac27a0ecSDave Kleikamp { 2583617ba13bSMingming Cao return mpage_readpages(mapping, pages, nr_pages, ext4_get_block); 2584ac27a0ecSDave Kleikamp } 2585ac27a0ecSDave Kleikamp 2586744692dcSJiaying Zhang static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset) 2587744692dcSJiaying Zhang { 2588744692dcSJiaying Zhang struct buffer_head *head, *bh; 2589744692dcSJiaying Zhang unsigned int curr_off = 0; 2590744692dcSJiaying Zhang 2591744692dcSJiaying Zhang if (!page_has_buffers(page)) 2592744692dcSJiaying Zhang return; 2593744692dcSJiaying Zhang head = bh = page_buffers(page); 2594744692dcSJiaying Zhang do { 2595744692dcSJiaying Zhang if (offset <= curr_off && test_clear_buffer_uninit(bh) 2596744692dcSJiaying Zhang && bh->b_private) { 2597744692dcSJiaying Zhang ext4_free_io_end(bh->b_private); 2598744692dcSJiaying Zhang bh->b_private = NULL; 2599744692dcSJiaying Zhang bh->b_end_io = NULL; 2600744692dcSJiaying Zhang } 2601744692dcSJiaying Zhang curr_off = curr_off + bh->b_size; 2602744692dcSJiaying Zhang bh = bh->b_this_page; 2603744692dcSJiaying Zhang } while (bh != head); 2604744692dcSJiaying Zhang } 2605744692dcSJiaying Zhang 2606617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset) 2607ac27a0ecSDave Kleikamp { 2608617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2609ac27a0ecSDave Kleikamp 26100562e0baSJiaying Zhang trace_ext4_invalidatepage(page, offset); 26110562e0baSJiaying Zhang 2612ac27a0ecSDave Kleikamp /* 2613744692dcSJiaying Zhang * free any io_end structure allocated for buffers to be discarded 2614744692dcSJiaying Zhang */ 2615744692dcSJiaying Zhang if (ext4_should_dioread_nolock(page->mapping->host)) 2616744692dcSJiaying Zhang ext4_invalidatepage_free_endio(page, offset); 2617744692dcSJiaying Zhang /* 2618ac27a0ecSDave Kleikamp * If it's a full truncate we just forget about the pending dirtying 2619ac27a0ecSDave Kleikamp */ 2620ac27a0ecSDave Kleikamp if (offset == 0) 2621ac27a0ecSDave Kleikamp ClearPageChecked(page); 2622ac27a0ecSDave Kleikamp 26230390131bSFrank Mayhar if (journal) 2624dab291afSMingming Cao jbd2_journal_invalidatepage(journal, page, offset); 26250390131bSFrank Mayhar else 26260390131bSFrank Mayhar block_invalidatepage(page, offset); 2627ac27a0ecSDave Kleikamp } 2628ac27a0ecSDave Kleikamp 2629617ba13bSMingming Cao static 
int ext4_releasepage(struct page *page, gfp_t wait) 2630ac27a0ecSDave Kleikamp { 2631617ba13bSMingming Cao journal_t *journal = EXT4_JOURNAL(page->mapping->host); 2632ac27a0ecSDave Kleikamp 26330562e0baSJiaying Zhang trace_ext4_releasepage(page); 26340562e0baSJiaying Zhang 2635ac27a0ecSDave Kleikamp WARN_ON(PageChecked(page)); 2636ac27a0ecSDave Kleikamp if (!page_has_buffers(page)) 2637ac27a0ecSDave Kleikamp return 0; 26380390131bSFrank Mayhar if (journal) 2639dab291afSMingming Cao return jbd2_journal_try_to_free_buffers(journal, page, wait); 26400390131bSFrank Mayhar else 26410390131bSFrank Mayhar return try_to_free_buffers(page); 2642ac27a0ecSDave Kleikamp } 2643ac27a0ecSDave Kleikamp 2644ac27a0ecSDave Kleikamp /* 26452ed88685STheodore Ts'o * ext4_get_block used when preparing for a DIO write or buffer write. 26462ed88685STheodore Ts'o * We allocate an uinitialized extent if blocks haven't been allocated. 26472ed88685STheodore Ts'o * The extent will be converted to initialized after the IO is complete. 26482ed88685STheodore Ts'o */ 2649c7064ef1SJiaying Zhang static int ext4_get_block_write(struct inode *inode, sector_t iblock, 26504c0425ffSMingming Cao struct buffer_head *bh_result, int create) 26514c0425ffSMingming Cao { 2652c7064ef1SJiaying Zhang ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n", 26538d5d02e6SMingming Cao inode->i_ino, create); 26542ed88685STheodore Ts'o return _ext4_get_block(inode, iblock, bh_result, 26552ed88685STheodore Ts'o EXT4_GET_BLOCKS_IO_CREATE_EXT); 26564c0425ffSMingming Cao } 26574c0425ffSMingming Cao 26584c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, 2659552ef802SChristoph Hellwig ssize_t size, void *private, int ret, 2660552ef802SChristoph Hellwig bool is_async) 26614c0425ffSMingming Cao { 266272c5052dSChristoph Hellwig struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; 26634c0425ffSMingming Cao ext4_io_end_t *io_end = iocb->private; 26644c0425ffSMingming Cao struct workqueue_struct *wq; 2665744692dcSJiaying Zhang unsigned long flags; 2666744692dcSJiaying Zhang struct ext4_inode_info *ei; 26674c0425ffSMingming Cao 26684b70df18SMingming /* if not async direct IO or dio with 0 bytes write, just return */ 26694b70df18SMingming if (!io_end || !size) 2670552ef802SChristoph Hellwig goto out; 26714b70df18SMingming 26728d5d02e6SMingming Cao ext_debug("ext4_end_io_dio(): io_end 0x%p" 26738d5d02e6SMingming Cao "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", 26748d5d02e6SMingming Cao iocb->private, io_end->inode->i_ino, iocb, offset, 26758d5d02e6SMingming Cao size); 26768d5d02e6SMingming Cao 26778d5d02e6SMingming Cao /* if not aio dio with unwritten extents, just free io and return */ 2678bd2d0210STheodore Ts'o if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 26798d5d02e6SMingming Cao ext4_free_io_end(io_end); 26808d5d02e6SMingming Cao iocb->private = NULL; 26815b3ff237Sjiayingz@google.com (Jiaying Zhang) out: 26825b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) 26835b3ff237Sjiayingz@google.com (Jiaying Zhang) aio_complete(iocb, ret, 0); 268472c5052dSChristoph Hellwig inode_dio_done(inode); 26855b3ff237Sjiayingz@google.com (Jiaying Zhang) return; 26868d5d02e6SMingming Cao } 26878d5d02e6SMingming Cao 26884c0425ffSMingming Cao io_end->offset = offset; 26894c0425ffSMingming Cao io_end->size = size; 26905b3ff237Sjiayingz@google.com (Jiaying Zhang) if (is_async) { 26915b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->iocb = iocb; 26925b3ff237Sjiayingz@google.com (Jiaying Zhang) io_end->result = 
ret; 26935b3ff237Sjiayingz@google.com (Jiaying Zhang) } 26944c0425ffSMingming Cao wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; 26954c0425ffSMingming Cao 26968d5d02e6SMingming Cao /* Add the io_end to per-inode completed aio dio list*/ 2697744692dcSJiaying Zhang ei = EXT4_I(io_end->inode); 2698744692dcSJiaying Zhang spin_lock_irqsave(&ei->i_completed_io_lock, flags); 2699744692dcSJiaying Zhang list_add_tail(&io_end->list, &ei->i_completed_io_list); 2700744692dcSJiaying Zhang spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); 2701c999af2bSEric Sandeen 2702c999af2bSEric Sandeen /* queue the work to convert unwritten extents to written */ 2703c999af2bSEric Sandeen queue_work(wq, &io_end->work); 27044c0425ffSMingming Cao iocb->private = NULL; 270572c5052dSChristoph Hellwig 270672c5052dSChristoph Hellwig /* XXX: probably should move into the real I/O completion handler */ 270772c5052dSChristoph Hellwig inode_dio_done(inode); 27084c0425ffSMingming Cao } 2709c7064ef1SJiaying Zhang 2710744692dcSJiaying Zhang static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) 2711744692dcSJiaying Zhang { 2712744692dcSJiaying Zhang ext4_io_end_t *io_end = bh->b_private; 2713744692dcSJiaying Zhang struct workqueue_struct *wq; 2714744692dcSJiaying Zhang struct inode *inode; 2715744692dcSJiaying Zhang unsigned long flags; 2716744692dcSJiaying Zhang 2717744692dcSJiaying Zhang if (!test_clear_buffer_uninit(bh) || !io_end) 2718744692dcSJiaying Zhang goto out; 2719744692dcSJiaying Zhang 2720744692dcSJiaying Zhang if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) { 2721744692dcSJiaying Zhang printk("sb umounted, discard end_io request for inode %lu\n", 2722744692dcSJiaying Zhang io_end->inode->i_ino); 2723744692dcSJiaying Zhang ext4_free_io_end(io_end); 2724744692dcSJiaying Zhang goto out; 2725744692dcSJiaying Zhang } 2726744692dcSJiaying Zhang 272732c80b32STao Ma /* 272832c80b32STao Ma * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now, 272932c80b32STao Ma * but being more careful is always safe for the future change. 
273032c80b32STao Ma */ 2731744692dcSJiaying Zhang inode = io_end->inode; 273232c80b32STao Ma if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { 273332c80b32STao Ma io_end->flag |= EXT4_IO_END_UNWRITTEN; 273432c80b32STao Ma atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); 273532c80b32STao Ma } 2736744692dcSJiaying Zhang 2737744692dcSJiaying Zhang /* Add the io_end to per-inode completed io list*/ 2738744692dcSJiaying Zhang spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); 2739744692dcSJiaying Zhang list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list); 2740744692dcSJiaying Zhang spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags); 2741744692dcSJiaying Zhang 2742744692dcSJiaying Zhang wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq; 2743744692dcSJiaying Zhang /* queue the work to convert unwritten extents to written */ 2744744692dcSJiaying Zhang queue_work(wq, &io_end->work); 2745744692dcSJiaying Zhang out: 2746744692dcSJiaying Zhang bh->b_private = NULL; 2747744692dcSJiaying Zhang bh->b_end_io = NULL; 2748744692dcSJiaying Zhang clear_buffer_uninit(bh); 2749744692dcSJiaying Zhang end_buffer_async_write(bh, uptodate); 2750744692dcSJiaying Zhang } 2751744692dcSJiaying Zhang 2752744692dcSJiaying Zhang static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) 2753744692dcSJiaying Zhang { 2754744692dcSJiaying Zhang ext4_io_end_t *io_end; 2755744692dcSJiaying Zhang struct page *page = bh->b_page; 2756744692dcSJiaying Zhang loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT; 2757744692dcSJiaying Zhang size_t size = bh->b_size; 2758744692dcSJiaying Zhang 2759744692dcSJiaying Zhang retry: 2760744692dcSJiaying Zhang io_end = ext4_init_io_end(inode, GFP_ATOMIC); 2761744692dcSJiaying Zhang if (!io_end) { 27626db26ffcSAndrew Morton pr_warn_ratelimited("%s: allocation fail\n", __func__); 2763744692dcSJiaying Zhang schedule(); 2764744692dcSJiaying Zhang goto retry; 2765744692dcSJiaying Zhang } 2766744692dcSJiaying Zhang io_end->offset = offset; 2767744692dcSJiaying Zhang io_end->size = size; 2768744692dcSJiaying Zhang /* 2769744692dcSJiaying Zhang * We need to hold a reference to the page to make sure it 2770744692dcSJiaying Zhang * doesn't get evicted before ext4_end_io_work() has a chance 2771744692dcSJiaying Zhang * to convert the extent from unwritten to written. 2772744692dcSJiaying Zhang */ 2773744692dcSJiaying Zhang io_end->page = page; 2774744692dcSJiaying Zhang get_page(io_end->page); 2775744692dcSJiaying Zhang 2776744692dcSJiaying Zhang bh->b_private = io_end; 2777744692dcSJiaying Zhang bh->b_end_io = ext4_end_io_buffer_write; 2778744692dcSJiaying Zhang return 0; 2779744692dcSJiaying Zhang } 2780744692dcSJiaying Zhang 27814c0425ffSMingming Cao /* 27824c0425ffSMingming Cao * For ext4 extent files, ext4 will do direct-io writes to holes, 27834c0425ffSMingming Cao * preallocated extents, and writes that extend the file, with no need 27844c0425ffSMingming Cao * to fall back to buffered IO. 27854c0425ffSMingming Cao * 2786b595076aSUwe Kleine-König * For holes, we fallocate those blocks, mark them as uninitialized. 27874c0425ffSMingming Cao * If those blocks were preallocated, we make sure they are split, but 2788b595076aSUwe Kleine-König * still keep the range to write as uninitialized. 27894c0425ffSMingming Cao * 27908d5d02e6SMingming Cao * The unwritten extents will be converted to written when DIO is completed.
27918d5d02e6SMingming Cao * For async direct IO, since the IO may still be pending on return, we 279225985edcSLucas De Marchi * set up an end_io callback function, which will do the conversion 27938d5d02e6SMingming Cao * when the async direct IO is completed. 27944c0425ffSMingming Cao * 27954c0425ffSMingming Cao * If the O_DIRECT write will extend the file then add this inode to the 27964c0425ffSMingming Cao * orphan list. So recovery will truncate it back to the original size 27974c0425ffSMingming Cao * if the machine crashes during the write. 27984c0425ffSMingming Cao * 27994c0425ffSMingming Cao */ 28004c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, 28014c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 28024c0425ffSMingming Cao unsigned long nr_segs) 28034c0425ffSMingming Cao { 28044c0425ffSMingming Cao struct file *file = iocb->ki_filp; 28054c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 28064c0425ffSMingming Cao ssize_t ret; 28074c0425ffSMingming Cao size_t count = iov_length(iov, nr_segs); 28084c0425ffSMingming Cao 28094c0425ffSMingming Cao loff_t final_size = offset + count; 28104c0425ffSMingming Cao if (rw == WRITE && final_size <= inode->i_size) { 28114c0425ffSMingming Cao /* 28128d5d02e6SMingming Cao * We could direct write to holes and fallocate. 28138d5d02e6SMingming Cao * 28148d5d02e6SMingming Cao * Allocated blocks to fill the hole are marked as uninitialized 281525985edcSLucas De Marchi * to prevent a parallel buffered read from exposing the stale data 28164c0425ffSMingming Cao * before the DIO completes the data IO. 28178d5d02e6SMingming Cao * 28188d5d02e6SMingming Cao * As for previously fallocated extents, ext4's get_block 28194c0425ffSMingming Cao * will simply mark the buffer mapped but still 28204c0425ffSMingming Cao * keep the extents uninitialized. 28214c0425ffSMingming Cao * 28228d5d02e6SMingming Cao * For the non-AIO case, we will convert those unwritten extents 28238d5d02e6SMingming Cao * to written after returning from blockdev_direct_IO. 28244c0425ffSMingming Cao * 28258d5d02e6SMingming Cao * For async DIO, the conversion needs to be deferred until 28268d5d02e6SMingming Cao * the IO is completed. The ext4 end_io callback function 28278d5d02e6SMingming Cao * will be called to take care of the conversion work. 28288d5d02e6SMingming Cao * Here for the async case, we allocate an io_end structure to 28298d5d02e6SMingming Cao * hook to the iocb. 28304c0425ffSMingming Cao */ 28318d5d02e6SMingming Cao iocb->private = NULL; 28328d5d02e6SMingming Cao EXT4_I(inode)->cur_aio_dio = NULL; 28338d5d02e6SMingming Cao if (!is_sync_kiocb(iocb)) { 2834744692dcSJiaying Zhang iocb->private = ext4_init_io_end(inode, GFP_NOFS); 28354c0425ffSMingming Cao if (!iocb->private) 28364c0425ffSMingming Cao return -ENOMEM; 28378d5d02e6SMingming Cao /* 28388d5d02e6SMingming Cao * we save the io structure for the current async 283979e83036SEric Sandeen * direct IO, so that later ext4_map_blocks() 28408d5d02e6SMingming Cao * can flag in the io structure whether there 28418d5d02e6SMingming Cao * are unwritten extents that need to be converted 28428d5d02e6SMingming Cao * when the IO is completed.
28438d5d02e6SMingming Cao */ 28448d5d02e6SMingming Cao EXT4_I(inode)->cur_aio_dio = iocb->private; 28458d5d02e6SMingming Cao } 28468d5d02e6SMingming Cao 2847aacfc19cSChristoph Hellwig ret = __blockdev_direct_IO(rw, iocb, inode, 28484c0425ffSMingming Cao inode->i_sb->s_bdev, iov, 28494c0425ffSMingming Cao offset, nr_segs, 2850c7064ef1SJiaying Zhang ext4_get_block_write, 2851aacfc19cSChristoph Hellwig ext4_end_io_dio, 2852aacfc19cSChristoph Hellwig NULL, 2853aacfc19cSChristoph Hellwig DIO_LOCKING | DIO_SKIP_HOLES); 28548d5d02e6SMingming Cao if (iocb->private) 28558d5d02e6SMingming Cao EXT4_I(inode)->cur_aio_dio = NULL; 28568d5d02e6SMingming Cao /* 28578d5d02e6SMingming Cao * The io_end structure takes a reference to the inode; 28588d5d02e6SMingming Cao * that structure needs to be destroyed and the 28598d5d02e6SMingming Cao * reference to the inode needs to be dropped when the IO is 28608d5d02e6SMingming Cao * complete, even with a 0 byte write, or on failure. 28618d5d02e6SMingming Cao * 28628d5d02e6SMingming Cao * In the successful AIO DIO case, the io_end structure will be 28638d5d02e6SMingming Cao * destroyed and the reference to the inode will be dropped 28648d5d02e6SMingming Cao * after the end_io callback function is called. 28658d5d02e6SMingming Cao * 28668d5d02e6SMingming Cao * In the case of a 0 byte write, or an error, since the 28678d5d02e6SMingming Cao * VFS direct IO won't invoke the end_io callback function, 28688d5d02e6SMingming Cao * we need to free the io_end structure here. 28698d5d02e6SMingming Cao */ 28708d5d02e6SMingming Cao if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) { 28718d5d02e6SMingming Cao ext4_free_io_end(iocb->private); 28728d5d02e6SMingming Cao iocb->private = NULL; 287319f5fb7aSTheodore Ts'o } else if (ret > 0 && ext4_test_inode_state(inode, 28745f524950SMingming EXT4_STATE_DIO_UNWRITTEN)) { 2875109f5565SMingming int err; 28768d5d02e6SMingming Cao /* 28778d5d02e6SMingming Cao * for the non-AIO case, since the IO has already 287825985edcSLucas De Marchi * completed, we can do the conversion right here 28798d5d02e6SMingming Cao */ 2880109f5565SMingming err = ext4_convert_unwritten_extents(inode, 28818d5d02e6SMingming Cao offset, ret); 2882109f5565SMingming if (err < 0) 2883109f5565SMingming ret = err; 288419f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 2885109f5565SMingming } 28864c0425ffSMingming Cao return ret; 28874c0425ffSMingming Cao } 28888d5d02e6SMingming Cao 28898d5d02e6SMingming Cao /* for the case of a write extending past the end of the file, we fall back to the old way */ 28904c0425ffSMingming Cao return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 28914c0425ffSMingming Cao } 28924c0425ffSMingming Cao 28934c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb, 28944c0425ffSMingming Cao const struct iovec *iov, loff_t offset, 28954c0425ffSMingming Cao unsigned long nr_segs) 28964c0425ffSMingming Cao { 28974c0425ffSMingming Cao struct file *file = iocb->ki_filp; 28984c0425ffSMingming Cao struct inode *inode = file->f_mapping->host; 28990562e0baSJiaying Zhang ssize_t ret; 29004c0425ffSMingming Cao 290184ebd795STheodore Ts'o /* 290284ebd795STheodore Ts'o * If we are doing data journalling we don't support O_DIRECT 290384ebd795STheodore Ts'o */ 290484ebd795STheodore Ts'o if (ext4_should_journal_data(inode)) 290584ebd795STheodore Ts'o return 0; 290684ebd795STheodore Ts'o 29070562e0baSJiaying Zhang trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw); 290812e9b892SDmitry Monakhov if
(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 29090562e0baSJiaying Zhang ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 29100562e0baSJiaying Zhang else 29110562e0baSJiaying Zhang ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 29120562e0baSJiaying Zhang trace_ext4_direct_IO_exit(inode, offset, 29130562e0baSJiaying Zhang iov_length(iov, nr_segs), rw, ret); 29140562e0baSJiaying Zhang return ret; 29154c0425ffSMingming Cao } 29164c0425ffSMingming Cao 2917ac27a0ecSDave Kleikamp /* 2918617ba13bSMingming Cao * Pages can be marked dirty completely asynchronously from ext4's journalling 2919ac27a0ecSDave Kleikamp * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do 2920ac27a0ecSDave Kleikamp * much here because ->set_page_dirty is called under VFS locks. The page is 2921ac27a0ecSDave Kleikamp * not necessarily locked. 2922ac27a0ecSDave Kleikamp * 2923ac27a0ecSDave Kleikamp * We cannot just dirty the page and leave attached buffers clean, because the 2924ac27a0ecSDave Kleikamp * buffers' dirty state is "definitive". We cannot just set the buffers dirty 2925ac27a0ecSDave Kleikamp * or jbddirty because all the journalling code will explode. 2926ac27a0ecSDave Kleikamp * 2927ac27a0ecSDave Kleikamp * So what we do is to mark the page "pending dirty" and next time writepage 2928ac27a0ecSDave Kleikamp * is called, propagate that into the buffers appropriately. 2929ac27a0ecSDave Kleikamp */ 2930617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page) 2931ac27a0ecSDave Kleikamp { 2932ac27a0ecSDave Kleikamp SetPageChecked(page); 2933ac27a0ecSDave Kleikamp return __set_page_dirty_nobuffers(page); 2934ac27a0ecSDave Kleikamp } 2935ac27a0ecSDave Kleikamp 2936617ba13bSMingming Cao static const struct address_space_operations ext4_ordered_aops = { 2937617ba13bSMingming Cao .readpage = ext4_readpage, 2938617ba13bSMingming Cao .readpages = ext4_readpages, 293943ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 2940bfc1af65SNick Piggin .write_begin = ext4_write_begin, 2941bfc1af65SNick Piggin .write_end = ext4_ordered_write_end, 2942617ba13bSMingming Cao .bmap = ext4_bmap, 2943617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 2944617ba13bSMingming Cao .releasepage = ext4_releasepage, 2945617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 2946ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 29478ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 2948aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 2949ac27a0ecSDave Kleikamp }; 2950ac27a0ecSDave Kleikamp 2951617ba13bSMingming Cao static const struct address_space_operations ext4_writeback_aops = { 2952617ba13bSMingming Cao .readpage = ext4_readpage, 2953617ba13bSMingming Cao .readpages = ext4_readpages, 295443ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 2955bfc1af65SNick Piggin .write_begin = ext4_write_begin, 2956bfc1af65SNick Piggin .write_end = ext4_writeback_write_end, 2957617ba13bSMingming Cao .bmap = ext4_bmap, 2958617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 2959617ba13bSMingming Cao .releasepage = ext4_releasepage, 2960617ba13bSMingming Cao .direct_IO = ext4_direct_IO, 2961ac27a0ecSDave Kleikamp .migratepage = buffer_migrate_page, 29628ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 2963aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 2964ac27a0ecSDave Kleikamp }; 2965ac27a0ecSDave Kleikamp 2966617ba13bSMingming Cao static const struct 
address_space_operations ext4_journalled_aops = { 2967617ba13bSMingming Cao .readpage = ext4_readpage, 2968617ba13bSMingming Cao .readpages = ext4_readpages, 296943ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 2970bfc1af65SNick Piggin .write_begin = ext4_write_begin, 2971bfc1af65SNick Piggin .write_end = ext4_journalled_write_end, 2972617ba13bSMingming Cao .set_page_dirty = ext4_journalled_set_page_dirty, 2973617ba13bSMingming Cao .bmap = ext4_bmap, 2974617ba13bSMingming Cao .invalidatepage = ext4_invalidatepage, 2975617ba13bSMingming Cao .releasepage = ext4_releasepage, 297684ebd795STheodore Ts'o .direct_IO = ext4_direct_IO, 29778ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 2978aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 2979ac27a0ecSDave Kleikamp }; 2980ac27a0ecSDave Kleikamp 298164769240SAlex Tomas static const struct address_space_operations ext4_da_aops = { 298264769240SAlex Tomas .readpage = ext4_readpage, 298364769240SAlex Tomas .readpages = ext4_readpages, 298443ce1d23SAneesh Kumar K.V .writepage = ext4_writepage, 298564769240SAlex Tomas .writepages = ext4_da_writepages, 298664769240SAlex Tomas .write_begin = ext4_da_write_begin, 298764769240SAlex Tomas .write_end = ext4_da_write_end, 298864769240SAlex Tomas .bmap = ext4_bmap, 298964769240SAlex Tomas .invalidatepage = ext4_da_invalidatepage, 299064769240SAlex Tomas .releasepage = ext4_releasepage, 299164769240SAlex Tomas .direct_IO = ext4_direct_IO, 299264769240SAlex Tomas .migratepage = buffer_migrate_page, 29938ab22b9aSHisashi Hifumi .is_partially_uptodate = block_is_partially_uptodate, 2994aa261f54SAndi Kleen .error_remove_page = generic_error_remove_page, 299564769240SAlex Tomas }; 299664769240SAlex Tomas 2997617ba13bSMingming Cao void ext4_set_aops(struct inode *inode) 2998ac27a0ecSDave Kleikamp { 2999cd1aac32SAneesh Kumar K.V if (ext4_should_order_data(inode) && 3000cd1aac32SAneesh Kumar K.V test_opt(inode->i_sb, DELALLOC)) 3001cd1aac32SAneesh Kumar K.V inode->i_mapping->a_ops = &ext4_da_aops; 3002cd1aac32SAneesh Kumar K.V else if (ext4_should_order_data(inode)) 3003617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_ordered_aops; 300464769240SAlex Tomas else if (ext4_should_writeback_data(inode) && 300564769240SAlex Tomas test_opt(inode->i_sb, DELALLOC)) 300664769240SAlex Tomas inode->i_mapping->a_ops = &ext4_da_aops; 3007617ba13bSMingming Cao else if (ext4_should_writeback_data(inode)) 3008617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_writeback_aops; 3009ac27a0ecSDave Kleikamp else 3010617ba13bSMingming Cao inode->i_mapping->a_ops = &ext4_journalled_aops; 3011ac27a0ecSDave Kleikamp } 3012ac27a0ecSDave Kleikamp 30134e96b2dbSAllison Henderson 30144e96b2dbSAllison Henderson /* 30154e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers() 30164e96b2dbSAllison Henderson * Wrapper function for ext4_discard_partial_page_buffers_no_lock. 30174e96b2dbSAllison Henderson * This function finds and locks the page containing the offset 30184e96b2dbSAllison Henderson * "from" and passes it to ext4_discard_partial_page_buffers_no_lock. 30194e96b2dbSAllison Henderson * Calling functions that already have the page locked should call 30204e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock directly. 
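 * (A hypothetical call, for illustration only: to zero 512 bytes of
 * already-released blocks starting at byte offset from, a caller
 * that does not hold the page lock would do
 *
 *	err = ext4_discard_partial_page_buffers(handle, mapping, from,
 *		512, EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
 *
 * and the page containing from is found and locked internally.)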
30214e96b2dbSAllison Henderson */ 30224e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle, 30234e96b2dbSAllison Henderson struct address_space *mapping, loff_t from, 30244e96b2dbSAllison Henderson loff_t length, int flags) 30254e96b2dbSAllison Henderson { 30264e96b2dbSAllison Henderson struct inode *inode = mapping->host; 30274e96b2dbSAllison Henderson struct page *page; 30284e96b2dbSAllison Henderson int err = 0; 30294e96b2dbSAllison Henderson 30304e96b2dbSAllison Henderson page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 30314e96b2dbSAllison Henderson mapping_gfp_mask(mapping) & ~__GFP_FS); 30324e96b2dbSAllison Henderson if (!page) 30334e96b2dbSAllison Henderson return -EINVAL; 30344e96b2dbSAllison Henderson 30354e96b2dbSAllison Henderson err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page, 30364e96b2dbSAllison Henderson from, length, flags); 30374e96b2dbSAllison Henderson 30384e96b2dbSAllison Henderson unlock_page(page); 30394e96b2dbSAllison Henderson page_cache_release(page); 30404e96b2dbSAllison Henderson return err; 30414e96b2dbSAllison Henderson } 30424e96b2dbSAllison Henderson 30434e96b2dbSAllison Henderson /* 30444e96b2dbSAllison Henderson * ext4_discard_partial_page_buffers_no_lock() 30454e96b2dbSAllison Henderson * Zeros a page range of length 'length' starting from offset 'from'. 30464e96b2dbSAllison Henderson * Buffer heads that correspond to the block aligned regions of the 30474e96b2dbSAllison Henderson * zeroed range will be unmapped. Non-block-aligned regions 30484e96b2dbSAllison Henderson * will have the corresponding buffer head mapped if needed so that 30494e96b2dbSAllison Henderson * that region of the page can be updated with the partial zero out. 30504e96b2dbSAllison Henderson * 30514e96b2dbSAllison Henderson * This function assumes that the page has already been locked. 30524e96b2dbSAllison Henderson * The range to be discarded must be contained within the given page. 30534e96b2dbSAllison Henderson * If the specified range exceeds the end of the page it will be shortened 30544e96b2dbSAllison Henderson * to the end of the page that corresponds to 'from'. This function is 30554e96b2dbSAllison Henderson * appropriate for updating a page and its buffer heads to be unmapped and 30564e96b2dbSAllison Henderson * zeroed for blocks that have been either released, or are going to be 30574e96b2dbSAllison Henderson * released. 30584e96b2dbSAllison Henderson * 30594e96b2dbSAllison Henderson * handle: The journal handle 30604e96b2dbSAllison Henderson * inode: The file's inode 30614e96b2dbSAllison Henderson * page: A locked page that contains the offset "from" 30624e96b2dbSAllison Henderson * from: The starting byte offset (from the beginning of the file) 30634e96b2dbSAllison Henderson * to begin discarding 30644e96b2dbSAllison Henderson * length: The number of bytes to discard 30654e96b2dbSAllison Henderson * flags: Optional flags that may be used: 30664e96b2dbSAllison Henderson * 30674e96b2dbSAllison Henderson * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED 30684e96b2dbSAllison Henderson * Only zero the regions of the page whose buffer heads 30694e96b2dbSAllison Henderson * have already been unmapped. This flag is appropriate 30704e96b2dbSAllison Henderson * for updating the contents of a page whose blocks may 30714e96b2dbSAllison Henderson * have already been released, and we only want to zero 30724e96b2dbSAllison Henderson * out the regions that correspond to those released blocks.
* 30744e96b2dbSAllison Henderson * Returns zero on success or negative on failure. 30754e96b2dbSAllison Henderson */ 30764e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, 30774e96b2dbSAllison Henderson struct inode *inode, struct page *page, loff_t from, 30784e96b2dbSAllison Henderson loff_t length, int flags) 30794e96b2dbSAllison Henderson { 30804e96b2dbSAllison Henderson ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 30814e96b2dbSAllison Henderson unsigned int offset = from & (PAGE_CACHE_SIZE-1); 30824e96b2dbSAllison Henderson unsigned int blocksize, max, pos; 30834e96b2dbSAllison Henderson unsigned int end_of_block, range_to_discard; 30844e96b2dbSAllison Henderson ext4_lblk_t iblock; 30854e96b2dbSAllison Henderson struct buffer_head *bh; 30864e96b2dbSAllison Henderson int err = 0; 30874e96b2dbSAllison Henderson 30884e96b2dbSAllison Henderson blocksize = inode->i_sb->s_blocksize; 30894e96b2dbSAllison Henderson max = PAGE_CACHE_SIZE - offset; 30904e96b2dbSAllison Henderson 30914e96b2dbSAllison Henderson if (index != page->index) 30924e96b2dbSAllison Henderson return -EINVAL; 30934e96b2dbSAllison Henderson 30944e96b2dbSAllison Henderson /* 30954e96b2dbSAllison Henderson * Correct the length if it does not fall between 30964e96b2dbSAllison Henderson * 'from' and the end of the page 30974e96b2dbSAllison Henderson */ 30984e96b2dbSAllison Henderson if (length > max || length < 0) 30994e96b2dbSAllison Henderson length = max; 31004e96b2dbSAllison Henderson 31014e96b2dbSAllison Henderson iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 31024e96b2dbSAllison Henderson 31034e96b2dbSAllison Henderson if (!page_has_buffers(page)) { 31044e96b2dbSAllison Henderson /* 31054e96b2dbSAllison Henderson * If the range to be discarded covers a partial block 31064e96b2dbSAllison Henderson * we need to get the page buffers. This is because 31074e96b2dbSAllison Henderson * partial blocks cannot be released and the page needs 31084e96b2dbSAllison Henderson * to be updated with the contents of the block before 31094e96b2dbSAllison Henderson * we write the zeros on top of it.
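 * (Worked example with made-up numbers: with blocksize = 1024,
 * from = 512 and length = 1536, bytes 512..1023 cover only part of
 * their block, so that block must be mapped and read before being
 * zeroed, while bytes 1024..2047 cover a whole block, which is
 * simply unmapped and zeroed with no read at all.)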
31104e96b2dbSAllison Henderson */ 31114e96b2dbSAllison Henderson if (!(from & (blocksize - 1)) || 31124e96b2dbSAllison Henderson !((from + length) & (blocksize - 1))) { 31134e96b2dbSAllison Henderson create_empty_buffers(page, blocksize, 0); 31144e96b2dbSAllison Henderson } else { 31154e96b2dbSAllison Henderson /* 31164e96b2dbSAllison Henderson * If there are no partial blocks, 31174e96b2dbSAllison Henderson * there is nothing to update, 31184e96b2dbSAllison Henderson * so we can return now 31194e96b2dbSAllison Henderson */ 31204e96b2dbSAllison Henderson return 0; 31214e96b2dbSAllison Henderson } 31224e96b2dbSAllison Henderson } 31234e96b2dbSAllison Henderson 31244e96b2dbSAllison Henderson /* Find the buffer that contains "offset" */ 31254e96b2dbSAllison Henderson bh = page_buffers(page); 31264e96b2dbSAllison Henderson pos = blocksize; 31274e96b2dbSAllison Henderson while (offset >= pos) { 31284e96b2dbSAllison Henderson bh = bh->b_this_page; 31294e96b2dbSAllison Henderson iblock++; 31304e96b2dbSAllison Henderson pos += blocksize; 31314e96b2dbSAllison Henderson } 31324e96b2dbSAllison Henderson 31334e96b2dbSAllison Henderson pos = offset; 31344e96b2dbSAllison Henderson while (pos < offset + length) { 31354e96b2dbSAllison Henderson err = 0; 31364e96b2dbSAllison Henderson 31374e96b2dbSAllison Henderson /* The length of space left to zero and unmap */ 31384e96b2dbSAllison Henderson range_to_discard = offset + length - pos; 31394e96b2dbSAllison Henderson 31404e96b2dbSAllison Henderson /* The length of space until the end of the block */ 31414e96b2dbSAllison Henderson end_of_block = blocksize - (pos & (blocksize-1)); 31424e96b2dbSAllison Henderson 31434e96b2dbSAllison Henderson /* 31444e96b2dbSAllison Henderson * Do not unmap or zero past end of block 31454e96b2dbSAllison Henderson * for this buffer head 31464e96b2dbSAllison Henderson */ 31474e96b2dbSAllison Henderson if (range_to_discard > end_of_block) 31484e96b2dbSAllison Henderson range_to_discard = end_of_block; 31494e96b2dbSAllison Henderson 31504e96b2dbSAllison Henderson 31514e96b2dbSAllison Henderson /* 31524e96b2dbSAllison Henderson * Skip this buffer head if we are only zeroing unampped 31534e96b2dbSAllison Henderson * regions of the page 31544e96b2dbSAllison Henderson */ 31554e96b2dbSAllison Henderson if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED && 31564e96b2dbSAllison Henderson buffer_mapped(bh)) 31574e96b2dbSAllison Henderson goto next; 31584e96b2dbSAllison Henderson 31594e96b2dbSAllison Henderson /* If the range is block aligned, unmap */ 31604e96b2dbSAllison Henderson if (range_to_discard == blocksize) { 31614e96b2dbSAllison Henderson clear_buffer_dirty(bh); 31624e96b2dbSAllison Henderson bh->b_bdev = NULL; 31634e96b2dbSAllison Henderson clear_buffer_mapped(bh); 31644e96b2dbSAllison Henderson clear_buffer_req(bh); 31654e96b2dbSAllison Henderson clear_buffer_new(bh); 31664e96b2dbSAllison Henderson clear_buffer_delay(bh); 31674e96b2dbSAllison Henderson clear_buffer_unwritten(bh); 31684e96b2dbSAllison Henderson clear_buffer_uptodate(bh); 31694e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 31704e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Buffer discarded"); 31714e96b2dbSAllison Henderson goto next; 31724e96b2dbSAllison Henderson } 31734e96b2dbSAllison Henderson 31744e96b2dbSAllison Henderson /* 31754e96b2dbSAllison Henderson * If this block is not completely contained in the range 31764e96b2dbSAllison Henderson * to be discarded, then it is not going to be released. 
Because 31774e96b2dbSAllison Henderson * we need to keep this block, we need to make sure this part 31784e96b2dbSAllison Henderson * of the page is uptodate before we modify it by writing 31794e96b2dbSAllison Henderson * partial zeros on it. 31804e96b2dbSAllison Henderson */ 31814e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 31824e96b2dbSAllison Henderson /* 31834e96b2dbSAllison Henderson * Buffer head must be mapped before we can read 31844e96b2dbSAllison Henderson * from the block 31854e96b2dbSAllison Henderson */ 31864e96b2dbSAllison Henderson BUFFER_TRACE(bh, "unmapped"); 31874e96b2dbSAllison Henderson ext4_get_block(inode, iblock, bh, 0); 31884e96b2dbSAllison Henderson /* unmapped? It's a hole - nothing to do */ 31894e96b2dbSAllison Henderson if (!buffer_mapped(bh)) { 31904e96b2dbSAllison Henderson BUFFER_TRACE(bh, "still unmapped"); 31914e96b2dbSAllison Henderson goto next; 31924e96b2dbSAllison Henderson } 31934e96b2dbSAllison Henderson } 31944e96b2dbSAllison Henderson 31954e96b2dbSAllison Henderson /* Ok, it's mapped. Make sure it's up-to-date */ 31964e96b2dbSAllison Henderson if (PageUptodate(page)) 31974e96b2dbSAllison Henderson set_buffer_uptodate(bh); 31984e96b2dbSAllison Henderson 31994e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) { 32004e96b2dbSAllison Henderson err = -EIO; 32014e96b2dbSAllison Henderson ll_rw_block(READ, 1, &bh); 32024e96b2dbSAllison Henderson wait_on_buffer(bh); 32034e96b2dbSAllison Henderson /* Uhhuh. Read error. Complain and punt. */ 32044e96b2dbSAllison Henderson if (!buffer_uptodate(bh)) 32054e96b2dbSAllison Henderson goto next; 32064e96b2dbSAllison Henderson } 32074e96b2dbSAllison Henderson 32084e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 32094e96b2dbSAllison Henderson BUFFER_TRACE(bh, "get write access"); 32104e96b2dbSAllison Henderson err = ext4_journal_get_write_access(handle, bh); 32114e96b2dbSAllison Henderson if (err) 32124e96b2dbSAllison Henderson goto next; 32134e96b2dbSAllison Henderson } 32144e96b2dbSAllison Henderson 32154e96b2dbSAllison Henderson zero_user(page, pos, range_to_discard); 32164e96b2dbSAllison Henderson 32174e96b2dbSAllison Henderson err = 0; 32184e96b2dbSAllison Henderson if (ext4_should_journal_data(inode)) { 32194e96b2dbSAllison Henderson err = ext4_handle_dirty_metadata(handle, inode, bh); 3220decbd919STheodore Ts'o } else 32214e96b2dbSAllison Henderson mark_buffer_dirty(bh); 32224e96b2dbSAllison Henderson 32234e96b2dbSAllison Henderson BUFFER_TRACE(bh, "Partial buffer zeroed"); 32244e96b2dbSAllison Henderson next: 32254e96b2dbSAllison Henderson bh = bh->b_this_page; 32264e96b2dbSAllison Henderson iblock++; 32274e96b2dbSAllison Henderson pos += range_to_discard; 32284e96b2dbSAllison Henderson } 32294e96b2dbSAllison Henderson 32304e96b2dbSAllison Henderson return err; 32314e96b2dbSAllison Henderson } 32324e96b2dbSAllison Henderson 3233ac27a0ecSDave Kleikamp /* 3234617ba13bSMingming Cao * ext4_block_truncate_page() zeroes out a mapping from file offset `from' 3235ac27a0ecSDave Kleikamp * up to the end of the block which corresponds to `from'. 3236ac27a0ecSDave Kleikamp * This is required during truncate. We need to physically zero the tail end 3237ac27a0ecSDave Kleikamp * of that block so it doesn't yield old data if the file is later grown.
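 * (Illustration with made-up numbers: with a 4K block size, a
 * truncate to from = 5000 leaves offset 5000 & 4095 = 904 within
 * the block, so the wrapper below zeroes length = 4096 - 904 = 3192
 * bytes, i.e. from byte 5000 through the end of its block.)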
3238ac27a0ecSDave Kleikamp */ 3239cf108bcaSJan Kara int ext4_block_truncate_page(handle_t *handle, 3240ac27a0ecSDave Kleikamp struct address_space *mapping, loff_t from) 3241ac27a0ecSDave Kleikamp { 324230848851SAllison Henderson unsigned offset = from & (PAGE_CACHE_SIZE-1); 324330848851SAllison Henderson unsigned length; 324430848851SAllison Henderson unsigned blocksize; 324530848851SAllison Henderson struct inode *inode = mapping->host; 324630848851SAllison Henderson 324730848851SAllison Henderson blocksize = inode->i_sb->s_blocksize; 324830848851SAllison Henderson length = blocksize - (offset & (blocksize - 1)); 324930848851SAllison Henderson 325030848851SAllison Henderson return ext4_block_zero_page_range(handle, mapping, from, length); 325130848851SAllison Henderson } 325230848851SAllison Henderson 325330848851SAllison Henderson /* 325430848851SAllison Henderson * ext4_block_zero_page_range() zeros out a mapping of length 'length' 325530848851SAllison Henderson * starting from file offset 'from'. The range to be zero'd must 325630848851SAllison Henderson * be contained within one block. If the specified range exceeds 325730848851SAllison Henderson * the end of the block it will be shortened to the end of the block 325830848851SAllison Henderson * that corresponds to 'from' 325930848851SAllison Henderson */ 326030848851SAllison Henderson int ext4_block_zero_page_range(handle_t *handle, 326130848851SAllison Henderson struct address_space *mapping, loff_t from, loff_t length) 326230848851SAllison Henderson { 3263617ba13bSMingming Cao ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; 3264ac27a0ecSDave Kleikamp unsigned offset = from & (PAGE_CACHE_SIZE-1); 326530848851SAllison Henderson unsigned blocksize, max, pos; 3266725d26d3SAneesh Kumar K.V ext4_lblk_t iblock; 3267ac27a0ecSDave Kleikamp struct inode *inode = mapping->host; 3268ac27a0ecSDave Kleikamp struct buffer_head *bh; 3269cf108bcaSJan Kara struct page *page; 3270ac27a0ecSDave Kleikamp int err = 0; 3271ac27a0ecSDave Kleikamp 3272f4a01017STheodore Ts'o page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, 3273f4a01017STheodore Ts'o mapping_gfp_mask(mapping) & ~__GFP_FS); 3274cf108bcaSJan Kara if (!page) 3275cf108bcaSJan Kara return -EINVAL; 3276cf108bcaSJan Kara 3277ac27a0ecSDave Kleikamp blocksize = inode->i_sb->s_blocksize; 327830848851SAllison Henderson max = blocksize - (offset & (blocksize - 1)); 327930848851SAllison Henderson 328030848851SAllison Henderson /* 328130848851SAllison Henderson * correct length if it does not fall between 328230848851SAllison Henderson * 'from' and the end of the block 328330848851SAllison Henderson */ 328430848851SAllison Henderson if (length > max || length < 0) 328530848851SAllison Henderson length = max; 328630848851SAllison Henderson 3287ac27a0ecSDave Kleikamp iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 3288ac27a0ecSDave Kleikamp 3289ac27a0ecSDave Kleikamp if (!page_has_buffers(page)) 3290ac27a0ecSDave Kleikamp create_empty_buffers(page, blocksize, 0); 3291ac27a0ecSDave Kleikamp 3292ac27a0ecSDave Kleikamp /* Find the buffer that contains "offset" */ 3293ac27a0ecSDave Kleikamp bh = page_buffers(page); 3294ac27a0ecSDave Kleikamp pos = blocksize; 3295ac27a0ecSDave Kleikamp while (offset >= pos) { 3296ac27a0ecSDave Kleikamp bh = bh->b_this_page; 3297ac27a0ecSDave Kleikamp iblock++; 3298ac27a0ecSDave Kleikamp pos += blocksize; 3299ac27a0ecSDave Kleikamp } 3300ac27a0ecSDave Kleikamp 3301ac27a0ecSDave Kleikamp err = 0; 3302ac27a0ecSDave Kleikamp if (buffer_freed(bh)) {
3303ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "freed: skip"); 3304ac27a0ecSDave Kleikamp goto unlock; 3305ac27a0ecSDave Kleikamp } 3306ac27a0ecSDave Kleikamp 3307ac27a0ecSDave Kleikamp if (!buffer_mapped(bh)) { 3308ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "unmapped"); 3309617ba13bSMingming Cao ext4_get_block(inode, iblock, bh, 0); 3310ac27a0ecSDave Kleikamp /* unmapped? It's a hole - nothing to do */ 3311ac27a0ecSDave Kleikamp if (!buffer_mapped(bh)) { 3312ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "still unmapped"); 3313ac27a0ecSDave Kleikamp goto unlock; 3314ac27a0ecSDave Kleikamp } 3315ac27a0ecSDave Kleikamp } 3316ac27a0ecSDave Kleikamp 3317ac27a0ecSDave Kleikamp /* Ok, it's mapped. Make sure it's up-to-date */ 3318ac27a0ecSDave Kleikamp if (PageUptodate(page)) 3319ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3320ac27a0ecSDave Kleikamp 3321ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3322ac27a0ecSDave Kleikamp err = -EIO; 3323ac27a0ecSDave Kleikamp ll_rw_block(READ, 1, &bh); 3324ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3325ac27a0ecSDave Kleikamp /* Uhhuh. Read error. Complain and punt. */ 3326ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) 3327ac27a0ecSDave Kleikamp goto unlock; 3328ac27a0ecSDave Kleikamp } 3329ac27a0ecSDave Kleikamp 3330617ba13bSMingming Cao if (ext4_should_journal_data(inode)) { 3331ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "get write access"); 3332617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, bh); 3333ac27a0ecSDave Kleikamp if (err) 3334ac27a0ecSDave Kleikamp goto unlock; 3335ac27a0ecSDave Kleikamp } 3336ac27a0ecSDave Kleikamp 3337eebd2aa3SChristoph Lameter zero_user(page, offset, length); 3338ac27a0ecSDave Kleikamp 3339ac27a0ecSDave Kleikamp BUFFER_TRACE(bh, "zeroed end of block"); 3340ac27a0ecSDave Kleikamp 3341ac27a0ecSDave Kleikamp err = 0; 3342617ba13bSMingming Cao if (ext4_should_journal_data(inode)) { 33430390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, inode, bh); 3344decbd919STheodore Ts'o } else 3345ac27a0ecSDave Kleikamp mark_buffer_dirty(bh); 3346ac27a0ecSDave Kleikamp 3347ac27a0ecSDave Kleikamp unlock: 3348ac27a0ecSDave Kleikamp unlock_page(page); 3349ac27a0ecSDave Kleikamp page_cache_release(page); 3350ac27a0ecSDave Kleikamp return err; 3351ac27a0ecSDave Kleikamp } 3352ac27a0ecSDave Kleikamp 335391ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode) 335491ef4cafSDuane Griffin { 335591ef4cafSDuane Griffin if (S_ISREG(inode->i_mode)) 335691ef4cafSDuane Griffin return 1; 335791ef4cafSDuane Griffin if (S_ISDIR(inode->i_mode)) 335891ef4cafSDuane Griffin return 1; 335991ef4cafSDuane Griffin if (S_ISLNK(inode->i_mode)) 336091ef4cafSDuane Griffin return !ext4_inode_is_fast_symlink(inode); 336191ef4cafSDuane Griffin return 0; 336291ef4cafSDuane Griffin } 336391ef4cafSDuane Griffin 3364ac27a0ecSDave Kleikamp /* 3365a4bb6b64SAllison Henderson * ext4_punch_hole: punches a hole in a file by releasing the blocks 3366a4bb6b64SAllison Henderson * associated with the given offset and length 3367a4bb6b64SAllison Henderson * 3368a4bb6b64SAllison Henderson * @inode: File inode 3369a4bb6b64SAllison Henderson * @offset: The offset where the hole will begin 3370a4bb6b64SAllison Henderson * @length: The length of the hole 3371a4bb6b64SAllison Henderson * 3372a4bb6b64SAllison Henderson * Returns: 0 on success or negative on failure 3373a4bb6b64SAllison Henderson */ 3374a4bb6b64SAllison Henderson 3375a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) 3376a4bb6b64SAllison Henderson {
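 /*
  * (Illustrative call path, not taken from this file: userspace invokes
  * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset,
  * length) and the VFS fallocate code dispatches here once the flags
  * have been validated.)
  */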
3377a4bb6b64SAllison Henderson struct inode *inode = file->f_path.dentry->d_inode; 3378a4bb6b64SAllison Henderson if (!S_ISREG(inode->i_mode)) 3379a4bb6b64SAllison Henderson return -ENOTSUPP; 3380a4bb6b64SAllison Henderson 3381a4bb6b64SAllison Henderson if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 3382a4bb6b64SAllison Henderson /* TODO: Add support for non extent hole punching */ 3383a4bb6b64SAllison Henderson return -ENOTSUPP; 3384a4bb6b64SAllison Henderson } 3385a4bb6b64SAllison Henderson 3386bab08ab9STheodore Ts'o if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) { 3387bab08ab9STheodore Ts'o /* TODO: Add support for bigalloc file systems */ 3388bab08ab9STheodore Ts'o return -ENOTSUPP; 3389bab08ab9STheodore Ts'o } 3390bab08ab9STheodore Ts'o 3391a4bb6b64SAllison Henderson return ext4_ext_punch_hole(file, offset, length); 3392a4bb6b64SAllison Henderson } 3393a4bb6b64SAllison Henderson 3394a4bb6b64SAllison Henderson /* 3395617ba13bSMingming Cao * ext4_truncate() 3396ac27a0ecSDave Kleikamp * 3397617ba13bSMingming Cao * We block out ext4_get_block() block instantiations across the entire 3398617ba13bSMingming Cao * transaction, and VFS/VM ensures that ext4_truncate() cannot run 3399ac27a0ecSDave Kleikamp * simultaneously on behalf of the same inode. 3400ac27a0ecSDave Kleikamp * 3401ac27a0ecSDave Kleikamp * As we work through the truncate and commit bits of it to the journal there 3402ac27a0ecSDave Kleikamp * is one core, guiding principle: the file's tree must always be consistent on 3403ac27a0ecSDave Kleikamp * disk. We must be able to restart the truncate after a crash. 3404ac27a0ecSDave Kleikamp * 3405ac27a0ecSDave Kleikamp * The file's tree may be transiently inconsistent in memory (although it 3406ac27a0ecSDave Kleikamp * probably isn't), but whenever we close off and commit a journal transaction, 3407ac27a0ecSDave Kleikamp * the contents of (the filesystem + the journal) must be consistent and 3408ac27a0ecSDave Kleikamp * restartable. It's pretty simple, really: bottom up, right to left (although 3409ac27a0ecSDave Kleikamp * left-to-right works OK too). 3410ac27a0ecSDave Kleikamp * 3411ac27a0ecSDave Kleikamp * Note that at recovery time, journal replay occurs *before* the restart of 3412ac27a0ecSDave Kleikamp * truncate against the orphan inode list. 3413ac27a0ecSDave Kleikamp * 3414ac27a0ecSDave Kleikamp * The committed inode has the new, desired i_size (which is the same as 3415617ba13bSMingming Cao * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see 3416ac27a0ecSDave Kleikamp * that this inode's truncate did not complete and it will again call 3417617ba13bSMingming Cao * ext4_truncate() to have another go. So there will be instantiated blocks 3418617ba13bSMingming Cao * to the right of the truncation point in a crashed ext4 filesystem. But 3419ac27a0ecSDave Kleikamp * that's fine - as long as they are linked from the inode, the post-crash 3420617ba13bSMingming Cao * ext4_truncate() run will find them and release them.
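 * (In practice "bottom up, right to left" means, for example, freeing
 * the data blocks that an indirect block points to before freeing the
 * indirect block itself, so no committed on-disk tree ever references
 * an already-freed block.)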
3421ac27a0ecSDave Kleikamp */ 3422617ba13bSMingming Cao void ext4_truncate(struct inode *inode) 3423ac27a0ecSDave Kleikamp { 34240562e0baSJiaying Zhang trace_ext4_truncate_enter(inode); 34250562e0baSJiaying Zhang 342691ef4cafSDuane Griffin if (!ext4_can_truncate(inode)) 3427ac27a0ecSDave Kleikamp return; 3428ac27a0ecSDave Kleikamp 342912e9b892SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS); 3430c8d46e41SJiaying Zhang 34315534fb5bSTheodore Ts'o if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) 343219f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); 34337d8f9f7dSTheodore Ts'o 3434ff9893dcSAmir Goldstein if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3435cf108bcaSJan Kara ext4_ext_truncate(inode); 3436ff9893dcSAmir Goldstein else 3437ff9893dcSAmir Goldstein ext4_ind_truncate(inode); 3438a86c6181SAlex Tomas 34390562e0baSJiaying Zhang trace_ext4_truncate_exit(inode); 3440ac27a0ecSDave Kleikamp } 3441ac27a0ecSDave Kleikamp 3442ac27a0ecSDave Kleikamp /* 3443617ba13bSMingming Cao * ext4_get_inode_loc returns with an extra refcount against the inode's 3444ac27a0ecSDave Kleikamp * underlying buffer_head on success. If 'in_mem' is true, we have all 3445ac27a0ecSDave Kleikamp * data in memory that is needed to recreate the on-disk version of this 3446ac27a0ecSDave Kleikamp * inode. 3447ac27a0ecSDave Kleikamp */ 3448617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode, 3449617ba13bSMingming Cao struct ext4_iloc *iloc, int in_mem) 3450ac27a0ecSDave Kleikamp { 3451240799cdSTheodore Ts'o struct ext4_group_desc *gdp; 3452ac27a0ecSDave Kleikamp struct buffer_head *bh; 3453240799cdSTheodore Ts'o struct super_block *sb = inode->i_sb; 3454240799cdSTheodore Ts'o ext4_fsblk_t block; 3455240799cdSTheodore Ts'o int inodes_per_block, inode_offset; 3456ac27a0ecSDave Kleikamp 34573a06d778SAneesh Kumar K.V iloc->bh = NULL; 3458240799cdSTheodore Ts'o if (!ext4_valid_inum(sb, inode->i_ino)) 3459ac27a0ecSDave Kleikamp return -EIO; 3460ac27a0ecSDave Kleikamp 3461240799cdSTheodore Ts'o iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); 3462240799cdSTheodore Ts'o gdp = ext4_get_group_desc(sb, iloc->block_group, NULL); 3463240799cdSTheodore Ts'o if (!gdp) 3464240799cdSTheodore Ts'o return -EIO; 3465240799cdSTheodore Ts'o 3466240799cdSTheodore Ts'o /* 3467240799cdSTheodore Ts'o * Figure out the offset within the block group inode table 3468240799cdSTheodore Ts'o */ 346900d09882STao Ma inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; 3470240799cdSTheodore Ts'o inode_offset = ((inode->i_ino - 1) % 3471240799cdSTheodore Ts'o EXT4_INODES_PER_GROUP(sb)); 3472240799cdSTheodore Ts'o block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); 3473240799cdSTheodore Ts'o iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb); 3474240799cdSTheodore Ts'o 3475240799cdSTheodore Ts'o bh = sb_getblk(sb, block); 3476ac27a0ecSDave Kleikamp if (!bh) { 3477c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3478c398eda0STheodore Ts'o "unable to read itable block"); 3479ac27a0ecSDave Kleikamp return -EIO; 3480ac27a0ecSDave Kleikamp } 3481ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3482ac27a0ecSDave Kleikamp lock_buffer(bh); 34839c83a923SHidehiro Kawai 34849c83a923SHidehiro Kawai /* 34859c83a923SHidehiro Kawai * If the buffer has the write error flag, we have failed 34869c83a923SHidehiro Kawai * to write out another inode in the same block. 
In this 34879c83a923SHidehiro Kawai * case, we don't read the block from disk: a re-read might 34889c83a923SHidehiro Kawai * succeed but would only return the stale on-disk inode data. 34899c83a923SHidehiro Kawai */ 34909c83a923SHidehiro Kawai if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) 34919c83a923SHidehiro Kawai set_buffer_uptodate(bh); 34929c83a923SHidehiro Kawai 3493ac27a0ecSDave Kleikamp if (buffer_uptodate(bh)) { 3494ac27a0ecSDave Kleikamp /* someone brought it uptodate while we waited */ 3495ac27a0ecSDave Kleikamp unlock_buffer(bh); 3496ac27a0ecSDave Kleikamp goto has_buffer; 3497ac27a0ecSDave Kleikamp } 3498ac27a0ecSDave Kleikamp 3499ac27a0ecSDave Kleikamp /* 3500ac27a0ecSDave Kleikamp * If we have all information of the inode in memory and this 3501ac27a0ecSDave Kleikamp * is the only valid inode in the block, we need not read the 3502ac27a0ecSDave Kleikamp * block. 3503ac27a0ecSDave Kleikamp */ 3504ac27a0ecSDave Kleikamp if (in_mem) { 3505ac27a0ecSDave Kleikamp struct buffer_head *bitmap_bh; 3506240799cdSTheodore Ts'o int i, start; 3507ac27a0ecSDave Kleikamp 3508240799cdSTheodore Ts'o start = inode_offset & ~(inodes_per_block - 1); 3509ac27a0ecSDave Kleikamp 3510ac27a0ecSDave Kleikamp /* Is the inode bitmap in cache? */ 3511240799cdSTheodore Ts'o bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp)); 3512ac27a0ecSDave Kleikamp if (!bitmap_bh) 3513ac27a0ecSDave Kleikamp goto make_io; 3514ac27a0ecSDave Kleikamp 3515ac27a0ecSDave Kleikamp /* 3516ac27a0ecSDave Kleikamp * If the inode bitmap isn't in cache then the 3517ac27a0ecSDave Kleikamp * optimisation may end up performing two reads instead 3518ac27a0ecSDave Kleikamp * of one, so skip it. 3519ac27a0ecSDave Kleikamp */ 3520ac27a0ecSDave Kleikamp if (!buffer_uptodate(bitmap_bh)) { 3521ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3522ac27a0ecSDave Kleikamp goto make_io; 3523ac27a0ecSDave Kleikamp } 3524240799cdSTheodore Ts'o for (i = start; i < start + inodes_per_block; i++) { 3525ac27a0ecSDave Kleikamp if (i == inode_offset) 3526ac27a0ecSDave Kleikamp continue; 3527617ba13bSMingming Cao if (ext4_test_bit(i, bitmap_bh->b_data)) 3528ac27a0ecSDave Kleikamp break; 3529ac27a0ecSDave Kleikamp } 3530ac27a0ecSDave Kleikamp brelse(bitmap_bh); 3531240799cdSTheodore Ts'o if (i == start + inodes_per_block) { 3532ac27a0ecSDave Kleikamp /* all other inodes are free, so skip I/O */ 3533ac27a0ecSDave Kleikamp memset(bh->b_data, 0, bh->b_size); 3534ac27a0ecSDave Kleikamp set_buffer_uptodate(bh); 3535ac27a0ecSDave Kleikamp unlock_buffer(bh); 3536ac27a0ecSDave Kleikamp goto has_buffer; 3537ac27a0ecSDave Kleikamp } 3538ac27a0ecSDave Kleikamp } 3539ac27a0ecSDave Kleikamp 3540ac27a0ecSDave Kleikamp make_io: 3541ac27a0ecSDave Kleikamp /* 3542240799cdSTheodore Ts'o * If we need to do any I/O, try to pre-readahead extra 3543240799cdSTheodore Ts'o * blocks from the inode table.
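 * For example (illustrative numbers): with s_inode_readahead_blks = 32
 * and the needed itable block at 1234, b = 1234 & ~31 = 1216 and
 * end = 1216 + 32 = 1248, so an aligned window of 32 blocks is read
 * ahead, clamped below so it never extends past the inode table itself.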
3544240799cdSTheodore Ts'o */ 3545240799cdSTheodore Ts'o if (EXT4_SB(sb)->s_inode_readahead_blks) { 3546240799cdSTheodore Ts'o ext4_fsblk_t b, end, table; 3547240799cdSTheodore Ts'o unsigned num; 3548240799cdSTheodore Ts'o 3549240799cdSTheodore Ts'o table = ext4_inode_table(sb, gdp); 3550b713a5ecSTheodore Ts'o /* s_inode_readahead_blks is always a power of 2 */ 3551240799cdSTheodore Ts'o b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1); 3552240799cdSTheodore Ts'o if (table > b) 3553240799cdSTheodore Ts'o b = table; 3554240799cdSTheodore Ts'o end = b + EXT4_SB(sb)->s_inode_readahead_blks; 3555240799cdSTheodore Ts'o num = EXT4_INODES_PER_GROUP(sb); 3556240799cdSTheodore Ts'o if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3557240799cdSTheodore Ts'o EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) 3558560671a0SAneesh Kumar K.V num -= ext4_itable_unused_count(sb, gdp); 3559240799cdSTheodore Ts'o table += num / inodes_per_block; 3560240799cdSTheodore Ts'o if (end > table) 3561240799cdSTheodore Ts'o end = table; 3562240799cdSTheodore Ts'o while (b <= end) 3563240799cdSTheodore Ts'o sb_breadahead(sb, b++); 3564240799cdSTheodore Ts'o } 3565240799cdSTheodore Ts'o 3566240799cdSTheodore Ts'o /* 3567ac27a0ecSDave Kleikamp * There are other valid inodes in the buffer, this inode 3568ac27a0ecSDave Kleikamp * has in-inode xattrs, or we don't have this inode in memory. 3569ac27a0ecSDave Kleikamp * Read the block from disk. 3570ac27a0ecSDave Kleikamp */ 35710562e0baSJiaying Zhang trace_ext4_load_inode(inode); 3572ac27a0ecSDave Kleikamp get_bh(bh); 3573ac27a0ecSDave Kleikamp bh->b_end_io = end_buffer_read_sync; 3574ac27a0ecSDave Kleikamp submit_bh(READ_META, bh); 3575ac27a0ecSDave Kleikamp wait_on_buffer(bh); 3576ac27a0ecSDave Kleikamp if (!buffer_uptodate(bh)) { 3577c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, block, 3578c398eda0STheodore Ts'o "unable to read itable block"); 3579ac27a0ecSDave Kleikamp brelse(bh); 3580ac27a0ecSDave Kleikamp return -EIO; 3581ac27a0ecSDave Kleikamp } 3582ac27a0ecSDave Kleikamp } 3583ac27a0ecSDave Kleikamp has_buffer: 3584ac27a0ecSDave Kleikamp iloc->bh = bh; 3585ac27a0ecSDave Kleikamp return 0; 3586ac27a0ecSDave Kleikamp } 3587ac27a0ecSDave Kleikamp 3588617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) 3589ac27a0ecSDave Kleikamp { 3590ac27a0ecSDave Kleikamp /* We have all inode data except xattrs in memory here. 
*/ 3591617ba13bSMingming Cao return __ext4_get_inode_loc(inode, iloc, 359219f5fb7aSTheodore Ts'o !ext4_test_inode_state(inode, EXT4_STATE_XATTR)); 3593ac27a0ecSDave Kleikamp } 3594ac27a0ecSDave Kleikamp 3595617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode) 3596ac27a0ecSDave Kleikamp { 3597617ba13bSMingming Cao unsigned int flags = EXT4_I(inode)->i_flags; 3598ac27a0ecSDave Kleikamp 3599ac27a0ecSDave Kleikamp inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 3600617ba13bSMingming Cao if (flags & EXT4_SYNC_FL) 3601ac27a0ecSDave Kleikamp inode->i_flags |= S_SYNC; 3602617ba13bSMingming Cao if (flags & EXT4_APPEND_FL) 3603ac27a0ecSDave Kleikamp inode->i_flags |= S_APPEND; 3604617ba13bSMingming Cao if (flags & EXT4_IMMUTABLE_FL) 3605ac27a0ecSDave Kleikamp inode->i_flags |= S_IMMUTABLE; 3606617ba13bSMingming Cao if (flags & EXT4_NOATIME_FL) 3607ac27a0ecSDave Kleikamp inode->i_flags |= S_NOATIME; 3608617ba13bSMingming Cao if (flags & EXT4_DIRSYNC_FL) 3609ac27a0ecSDave Kleikamp inode->i_flags |= S_DIRSYNC; 3610ac27a0ecSDave Kleikamp } 3611ac27a0ecSDave Kleikamp 3612ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 3613ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei) 3614ff9ddf7eSJan Kara { 361584a8dce2SDmitry Monakhov unsigned int vfs_fl; 361684a8dce2SDmitry Monakhov unsigned long old_fl, new_fl; 3617ff9ddf7eSJan Kara 361884a8dce2SDmitry Monakhov do { 361984a8dce2SDmitry Monakhov vfs_fl = ei->vfs_inode.i_flags; 362084a8dce2SDmitry Monakhov old_fl = ei->i_flags; 362184a8dce2SDmitry Monakhov new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| 362284a8dce2SDmitry Monakhov EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| 362384a8dce2SDmitry Monakhov EXT4_DIRSYNC_FL); 362484a8dce2SDmitry Monakhov if (vfs_fl & S_SYNC) 362584a8dce2SDmitry Monakhov new_fl |= EXT4_SYNC_FL; 362684a8dce2SDmitry Monakhov if (vfs_fl & S_APPEND) 362784a8dce2SDmitry Monakhov new_fl |= EXT4_APPEND_FL; 362884a8dce2SDmitry Monakhov if (vfs_fl & S_IMMUTABLE) 362984a8dce2SDmitry Monakhov new_fl |= EXT4_IMMUTABLE_FL; 363084a8dce2SDmitry Monakhov if (vfs_fl & S_NOATIME) 363184a8dce2SDmitry Monakhov new_fl |= EXT4_NOATIME_FL; 363284a8dce2SDmitry Monakhov if (vfs_fl & S_DIRSYNC) 363384a8dce2SDmitry Monakhov new_fl |= EXT4_DIRSYNC_FL; 363484a8dce2SDmitry Monakhov } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); 3635ff9ddf7eSJan Kara } 3636de9a55b8STheodore Ts'o 36370fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, 36380fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 36390fc1b451SAneesh Kumar K.V { 36400fc1b451SAneesh Kumar K.V blkcnt_t i_blocks; 36418180a562SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 36428180a562SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 36430fc1b451SAneesh Kumar K.V 36440fc1b451SAneesh Kumar K.V if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 36450fc1b451SAneesh Kumar K.V EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { 36460fc1b451SAneesh Kumar K.V /* we are using combined 48 bit field */ 36470fc1b451SAneesh Kumar K.V i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 | 36480fc1b451SAneesh Kumar K.V le32_to_cpu(raw_inode->i_blocks_lo); 364907a03824STheodore Ts'o if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) { 36508180a562SAneesh Kumar K.V /* i_blocks is in units of filesystem blocks here */ 36518180a562SAneesh Kumar K.V return i_blocks << (inode->i_blkbits - 9); 36528180a562SAneesh Kumar K.V } else { 36530fc1b451SAneesh Kumar K.V return i_blocks; 36548180a562SAneesh Kumar K.V }
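 /*
  * e.g. i_blocks_high = 0x0001 and i_blocks_lo = 0x00000200 combine to
  * the 48-bit count 0x100000200, in 512-byte units unless the inode's
  * HUGE_FILE flag is also set, in which case the shift above converts
  * from filesystem blocks back to 512-byte units.
  */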
36550fc1b451SAneesh Kumar K.V } else { 36560fc1b451SAneesh Kumar K.V return le32_to_cpu(raw_inode->i_blocks_lo); 36570fc1b451SAneesh Kumar K.V } 36580fc1b451SAneesh Kumar K.V } 3659ff9ddf7eSJan Kara 36601d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino) 3661ac27a0ecSDave Kleikamp { 3662617ba13bSMingming Cao struct ext4_iloc iloc; 3663617ba13bSMingming Cao struct ext4_inode *raw_inode; 36641d1fe1eeSDavid Howells struct ext4_inode_info *ei; 36651d1fe1eeSDavid Howells struct inode *inode; 3666b436b9beSJan Kara journal_t *journal = EXT4_SB(sb)->s_journal; 36671d1fe1eeSDavid Howells long ret; 3668ac27a0ecSDave Kleikamp int block; 3669ac27a0ecSDave Kleikamp 36701d1fe1eeSDavid Howells inode = iget_locked(sb, ino); 36711d1fe1eeSDavid Howells if (!inode) 36721d1fe1eeSDavid Howells return ERR_PTR(-ENOMEM); 36731d1fe1eeSDavid Howells if (!(inode->i_state & I_NEW)) 36741d1fe1eeSDavid Howells return inode; 36751d1fe1eeSDavid Howells 36761d1fe1eeSDavid Howells ei = EXT4_I(inode); 36777dc57615SPeter Huewe iloc.bh = NULL; 3678ac27a0ecSDave Kleikamp 36791d1fe1eeSDavid Howells ret = __ext4_get_inode_loc(inode, &iloc, 0); 36801d1fe1eeSDavid Howells if (ret < 0) 3681ac27a0ecSDave Kleikamp goto bad_inode; 3682617ba13bSMingming Cao raw_inode = ext4_raw_inode(&iloc); 3683ac27a0ecSDave Kleikamp inode->i_mode = le16_to_cpu(raw_inode->i_mode); 3684ac27a0ecSDave Kleikamp inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 3685ac27a0ecSDave Kleikamp inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 3686ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 3687ac27a0ecSDave Kleikamp inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 3688ac27a0ecSDave Kleikamp inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 3689ac27a0ecSDave Kleikamp } 3690ac27a0ecSDave Kleikamp inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); 3691ac27a0ecSDave Kleikamp 3692353eb83cSTheodore Ts'o ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 3693ac27a0ecSDave Kleikamp ei->i_dir_start_lookup = 0; 3694ac27a0ecSDave Kleikamp ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 3695ac27a0ecSDave Kleikamp /* We now have enough fields to check if the inode was active or not. 3696ac27a0ecSDave Kleikamp * This is needed because nfsd might try to access dead inodes; 3697ac27a0ecSDave Kleikamp * the test is the same one that e2fsck uses. 3698ac27a0ecSDave Kleikamp * NeilBrown 1999oct15 3699ac27a0ecSDave Kleikamp */ 3700ac27a0ecSDave Kleikamp if (inode->i_nlink == 0) { 3701ac27a0ecSDave Kleikamp if (inode->i_mode == 0 || 3702617ba13bSMingming Cao !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { 3703ac27a0ecSDave Kleikamp /* this inode is deleted */ 37041d1fe1eeSDavid Howells ret = -ESTALE; 3705ac27a0ecSDave Kleikamp goto bad_inode; 3706ac27a0ecSDave Kleikamp } 3707ac27a0ecSDave Kleikamp /* The only unlinked inodes we let through here have 3708ac27a0ecSDave Kleikamp * valid i_mode and are being read by the orphan 3709ac27a0ecSDave Kleikamp * recovery code: that's fine, we're about to complete 3710ac27a0ecSDave Kleikamp * the process of deleting those.
*/ 3711ac27a0ecSDave Kleikamp } 3712ac27a0ecSDave Kleikamp ei->i_flags = le32_to_cpu(raw_inode->i_flags); 37130fc1b451SAneesh Kumar K.V inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 37147973c0c1SAneesh Kumar K.V ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 3715a9e81742STheodore Ts'o if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) 3716a1ddeb7eSBadari Pulavarty ei->i_file_acl |= 3717a1ddeb7eSBadari Pulavarty ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; 3718a48380f7SAneesh Kumar K.V inode->i_size = ext4_isize(raw_inode); 3719ac27a0ecSDave Kleikamp ei->i_disksize = inode->i_size; 3720a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA 3721a9e7f447SDmitry Monakhov ei->i_reserved_quota = 0; 3722a9e7f447SDmitry Monakhov #endif 3723ac27a0ecSDave Kleikamp inode->i_generation = le32_to_cpu(raw_inode->i_generation); 3724ac27a0ecSDave Kleikamp ei->i_block_group = iloc.block_group; 3725a4912123STheodore Ts'o ei->i_last_alloc_group = ~0; 3726ac27a0ecSDave Kleikamp /* 3727ac27a0ecSDave Kleikamp * NOTE! The in-memory inode i_data array is in little-endian order 3728ac27a0ecSDave Kleikamp * even on big-endian machines: we do NOT byteswap the block numbers! 3729ac27a0ecSDave Kleikamp */ 3730617ba13bSMingming Cao for (block = 0; block < EXT4_N_BLOCKS; block++) 3731ac27a0ecSDave Kleikamp ei->i_data[block] = raw_inode->i_block[block]; 3732ac27a0ecSDave Kleikamp INIT_LIST_HEAD(&ei->i_orphan); 3733ac27a0ecSDave Kleikamp 3734b436b9beSJan Kara /* 3735b436b9beSJan Kara * Set transaction ids of transactions that have to be committed 3736b436b9beSJan Kara * to finish f[data]sync. We set them to the currently running transaction 3737b436b9beSJan Kara * as we cannot be sure that the inode or some of its metadata isn't 3738b436b9beSJan Kara * part of the transaction - the inode could have been reclaimed and 3739b436b9beSJan Kara * now it is reread from disk. 3740b436b9beSJan Kara */ 3741b436b9beSJan Kara if (journal) { 3742b436b9beSJan Kara transaction_t *transaction; 3743b436b9beSJan Kara tid_t tid; 3744b436b9beSJan Kara 3745a931da6aSTheodore Ts'o read_lock(&journal->j_state_lock); 3746b436b9beSJan Kara if (journal->j_running_transaction) 3747b436b9beSJan Kara transaction = journal->j_running_transaction; 3748b436b9beSJan Kara else 3749b436b9beSJan Kara transaction = journal->j_committing_transaction; 3750b436b9beSJan Kara if (transaction) 3751b436b9beSJan Kara tid = transaction->t_tid; 3752b436b9beSJan Kara else 3753b436b9beSJan Kara tid = journal->j_commit_sequence; 3754a931da6aSTheodore Ts'o read_unlock(&journal->j_state_lock); 3755b436b9beSJan Kara ei->i_sync_tid = tid; 3756b436b9beSJan Kara ei->i_datasync_tid = tid; 3757b436b9beSJan Kara } 3758b436b9beSJan Kara 37590040d987SEric Sandeen if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3760ac27a0ecSDave Kleikamp ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); 3761617ba13bSMingming Cao if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > 3762e5d2861fSKirill Korotaev EXT4_INODE_SIZE(inode->i_sb)) { 37631d1fe1eeSDavid Howells ret = -EIO; 3764ac27a0ecSDave Kleikamp goto bad_inode; 3765e5d2861fSKirill Korotaev } 3766ac27a0ecSDave Kleikamp if (ei->i_extra_isize == 0) { 3767ac27a0ecSDave Kleikamp /* The extra space is currently unused. Use it.
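 * (Everything in struct ext4_inode beyond the 128-byte
 * EXT4_GOOD_OLD_INODE_SIZE is claimed at once, so fields such as the
 * extended timestamps fit without growing i_extra_isize again later.)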
*/ 3768617ba13bSMingming Cao ei->i_extra_isize = sizeof(struct ext4_inode) - 3769617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE; 3770ac27a0ecSDave Kleikamp } else { 3771ac27a0ecSDave Kleikamp __le32 *magic = (void *)raw_inode + 3772617ba13bSMingming Cao EXT4_GOOD_OLD_INODE_SIZE + 3773ac27a0ecSDave Kleikamp ei->i_extra_isize; 3774617ba13bSMingming Cao if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 377519f5fb7aSTheodore Ts'o ext4_set_inode_state(inode, EXT4_STATE_XATTR); 3776ac27a0ecSDave Kleikamp } 3777ac27a0ecSDave Kleikamp } else 3778ac27a0ecSDave Kleikamp ei->i_extra_isize = 0; 3779ac27a0ecSDave Kleikamp 3780ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 3781ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 3782ef7f3835SKalpak Shah EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode); 3783ef7f3835SKalpak Shah EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode); 3784ef7f3835SKalpak Shah 378525ec56b5SJean Noel Cordenner inode->i_version = le32_to_cpu(raw_inode->i_disk_version); 378625ec56b5SJean Noel Cordenner if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 378725ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 378825ec56b5SJean Noel Cordenner inode->i_version |= 378925ec56b5SJean Noel Cordenner (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32; 379025ec56b5SJean Noel Cordenner } 379125ec56b5SJean Noel Cordenner 3792c4b5a614STheodore Ts'o ret = 0; 3793485c26ecSTheodore Ts'o if (ei->i_file_acl && 37941032988cSTheodore Ts'o !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { 379524676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bad extended attribute block %llu", 379624676da4STheodore Ts'o ei->i_file_acl); 3797485c26ecSTheodore Ts'o ret = -EIO; 3798485c26ecSTheodore Ts'o goto bad_inode; 379907a03824STheodore Ts'o } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 3800c4b5a614STheodore Ts'o if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 3801c4b5a614STheodore Ts'o (S_ISLNK(inode->i_mode) && 3802c4b5a614STheodore Ts'o !ext4_inode_is_fast_symlink(inode))) 38037a262f7cSAneesh Kumar K.V /* Validate extent which is part of inode */ 38047a262f7cSAneesh Kumar K.V ret = ext4_ext_check_inode(inode); 3805fe2c8191SThiemo Nagel } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 3806fe2c8191SThiemo Nagel (S_ISLNK(inode->i_mode) && 3807fe2c8191SThiemo Nagel !ext4_inode_is_fast_symlink(inode))) { 3808fe2c8191SThiemo Nagel /* Validate block references which are part of inode */ 38091f7d1e77STheodore Ts'o ret = ext4_ind_check_inode(inode); 3810fe2c8191SThiemo Nagel } 3811567f3e9aSTheodore Ts'o if (ret) 38127a262f7cSAneesh Kumar K.V goto bad_inode; 38137a262f7cSAneesh Kumar K.V 3814ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode)) { 3815617ba13bSMingming Cao inode->i_op = &ext4_file_inode_operations; 3816617ba13bSMingming Cao inode->i_fop = &ext4_file_operations; 3817617ba13bSMingming Cao ext4_set_aops(inode); 3818ac27a0ecSDave Kleikamp } else if (S_ISDIR(inode->i_mode)) { 3819617ba13bSMingming Cao inode->i_op = &ext4_dir_inode_operations; 3820617ba13bSMingming Cao inode->i_fop = &ext4_dir_operations; 3821ac27a0ecSDave Kleikamp } else if (S_ISLNK(inode->i_mode)) { 3822e83c1397SDuane Griffin if (ext4_inode_is_fast_symlink(inode)) { 3823617ba13bSMingming Cao inode->i_op = &ext4_fast_symlink_inode_operations; 3824e83c1397SDuane Griffin nd_terminate_link(ei->i_data, inode->i_size, 3825e83c1397SDuane Griffin sizeof(ei->i_data) - 1); 3826e83c1397SDuane Griffin } else { 3827617ba13bSMingming Cao 
inode->i_op = &ext4_symlink_inode_operations; 3828617ba13bSMingming Cao ext4_set_aops(inode); 3829ac27a0ecSDave Kleikamp } 3830563bdd61STheodore Ts'o } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || 3831563bdd61STheodore Ts'o S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { 3832617ba13bSMingming Cao inode->i_op = &ext4_special_inode_operations; 3833ac27a0ecSDave Kleikamp if (raw_inode->i_block[0]) 3834ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3835ac27a0ecSDave Kleikamp old_decode_dev(le32_to_cpu(raw_inode->i_block[0]))); 3836ac27a0ecSDave Kleikamp else 3837ac27a0ecSDave Kleikamp init_special_inode(inode, inode->i_mode, 3838ac27a0ecSDave Kleikamp new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 3839563bdd61STheodore Ts'o } else { 3840563bdd61STheodore Ts'o ret = -EIO; 384124676da4STheodore Ts'o EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode); 3842563bdd61STheodore Ts'o goto bad_inode; 3843ac27a0ecSDave Kleikamp } 3844ac27a0ecSDave Kleikamp brelse(iloc.bh); 3845617ba13bSMingming Cao ext4_set_inode_flags(inode); 38461d1fe1eeSDavid Howells unlock_new_inode(inode); 38471d1fe1eeSDavid Howells return inode; 3848ac27a0ecSDave Kleikamp 3849ac27a0ecSDave Kleikamp bad_inode: 3850567f3e9aSTheodore Ts'o brelse(iloc.bh); 38511d1fe1eeSDavid Howells iget_failed(inode); 38521d1fe1eeSDavid Howells return ERR_PTR(ret); 3853ac27a0ecSDave Kleikamp } 3854ac27a0ecSDave Kleikamp 38550fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle, 38560fc1b451SAneesh Kumar K.V struct ext4_inode *raw_inode, 38570fc1b451SAneesh Kumar K.V struct ext4_inode_info *ei) 38580fc1b451SAneesh Kumar K.V { 38590fc1b451SAneesh Kumar K.V struct inode *inode = &(ei->vfs_inode); 38600fc1b451SAneesh Kumar K.V u64 i_blocks = inode->i_blocks; 38610fc1b451SAneesh Kumar K.V struct super_block *sb = inode->i_sb; 38620fc1b451SAneesh Kumar K.V 38630fc1b451SAneesh Kumar K.V if (i_blocks <= ~0U) { 38640fc1b451SAneesh Kumar K.V /* 38650fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 32 bit variable 38660fc1b451SAneesh Kumar K.V * as a multiple of 512 bytes 38670fc1b451SAneesh Kumar K.V */ 38688180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 38690fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = 0; 387084a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 3871f287a1a5STheodore Ts'o return 0; 3872f287a1a5STheodore Ts'o } 3873f287a1a5STheodore Ts'o if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) 3874f287a1a5STheodore Ts'o return -EFBIG; 3875f287a1a5STheodore Ts'o 3876f287a1a5STheodore Ts'o if (i_blocks <= 0xffffffffffffULL) { 38770fc1b451SAneesh Kumar K.V /* 38780fc1b451SAneesh Kumar K.V * i_blocks can be represented in a 48 bit variable 38790fc1b451SAneesh Kumar K.V * as a multiple of 512 bytes 38800fc1b451SAneesh Kumar K.V */ 38818180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 38820fc1b451SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); 388384a8dce2SDmitry Monakhov ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); 38840fc1b451SAneesh Kumar K.V } else { 388584a8dce2SDmitry Monakhov ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); 38868180a562SAneesh Kumar K.V /* i_blocks is stored in units of filesystem blocks */ 38878180a562SAneesh Kumar K.V i_blocks = i_blocks >> (inode->i_blkbits - 9); 38888180a562SAneesh Kumar K.V raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); 38898180a562SAneesh Kumar K.V raw_inode->i_blocks_high = cpu_to_le16(i_blocks >>
32); 38900fc1b451SAneesh Kumar K.V } 3891f287a1a5STheodore Ts'o return 0; 38920fc1b451SAneesh Kumar K.V } 38930fc1b451SAneesh Kumar K.V 3894ac27a0ecSDave Kleikamp /* 3895ac27a0ecSDave Kleikamp * Post the struct inode info into an on-disk inode location in the 3896ac27a0ecSDave Kleikamp * buffer-cache. This gobbles the caller's reference to the 3897ac27a0ecSDave Kleikamp * buffer_head in the inode location struct. 3898ac27a0ecSDave Kleikamp * 3899ac27a0ecSDave Kleikamp * The caller must have write access to iloc->bh. 3900ac27a0ecSDave Kleikamp */ 3901617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle, 3902ac27a0ecSDave Kleikamp struct inode *inode, 3903830156c7SFrank Mayhar struct ext4_iloc *iloc) 3904ac27a0ecSDave Kleikamp { 3905617ba13bSMingming Cao struct ext4_inode *raw_inode = ext4_raw_inode(iloc); 3906617ba13bSMingming Cao struct ext4_inode_info *ei = EXT4_I(inode); 3907ac27a0ecSDave Kleikamp struct buffer_head *bh = iloc->bh; 3908ac27a0ecSDave Kleikamp int err = 0, rc, block; 3909ac27a0ecSDave Kleikamp 3910ac27a0ecSDave Kleikamp /* For fields not tracked in the in-memory inode, 3911ac27a0ecSDave Kleikamp * initialise them to zero for new inodes. */ 391219f5fb7aSTheodore Ts'o if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) 3913617ba13bSMingming Cao memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); 3914ac27a0ecSDave Kleikamp 3915ff9ddf7eSJan Kara ext4_get_inode_flags(ei); 3916ac27a0ecSDave Kleikamp raw_inode->i_mode = cpu_to_le16(inode->i_mode); 3917ac27a0ecSDave Kleikamp if (!(test_opt(inode->i_sb, NO_UID32))) { 3918ac27a0ecSDave Kleikamp raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid)); 3919ac27a0ecSDave Kleikamp raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid)); 3920ac27a0ecSDave Kleikamp /* 3921ac27a0ecSDave Kleikamp * Fix up interoperability with old kernels.
Otherwise, old inodes get 3922ac27a0ecSDave Kleikamp * re-used with the upper 16 bits of the uid/gid intact 3923ac27a0ecSDave Kleikamp */ 3924ac27a0ecSDave Kleikamp if (!ei->i_dtime) { 3925ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 3926ac27a0ecSDave Kleikamp cpu_to_le16(high_16_bits(inode->i_uid)); 3927ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 3928ac27a0ecSDave Kleikamp cpu_to_le16(high_16_bits(inode->i_gid)); 3929ac27a0ecSDave Kleikamp } else { 3930ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 3931ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 3932ac27a0ecSDave Kleikamp } 3933ac27a0ecSDave Kleikamp } else { 3934ac27a0ecSDave Kleikamp raw_inode->i_uid_low = 3935ac27a0ecSDave Kleikamp cpu_to_le16(fs_high2lowuid(inode->i_uid)); 3936ac27a0ecSDave Kleikamp raw_inode->i_gid_low = 3937ac27a0ecSDave Kleikamp cpu_to_le16(fs_high2lowgid(inode->i_gid)); 3938ac27a0ecSDave Kleikamp raw_inode->i_uid_high = 0; 3939ac27a0ecSDave Kleikamp raw_inode->i_gid_high = 0; 3940ac27a0ecSDave Kleikamp } 3941ac27a0ecSDave Kleikamp raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 3942ef7f3835SKalpak Shah 3943ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode); 3944ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode); 3945ef7f3835SKalpak Shah EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 3946ef7f3835SKalpak Shah EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 3947ef7f3835SKalpak Shah 39480fc1b451SAneesh Kumar K.V if (ext4_inode_blocks_set(handle, raw_inode, ei)) 39490fc1b451SAneesh Kumar K.V goto out_brelse; 3950ac27a0ecSDave Kleikamp raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 3951353eb83cSTheodore Ts'o raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); 39529b8f1f01SMingming Cao if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 39539b8f1f01SMingming Cao cpu_to_le32(EXT4_OS_HURD)) 3954a1ddeb7eSBadari Pulavarty raw_inode->i_file_acl_high = 3955a1ddeb7eSBadari Pulavarty cpu_to_le16(ei->i_file_acl >> 32); 39567973c0c1SAneesh Kumar K.V raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); 3957a48380f7SAneesh Kumar K.V ext4_isize_set(raw_inode, ei->i_disksize); 3958ac27a0ecSDave Kleikamp if (ei->i_disksize > 0x7fffffffULL) { 3959ac27a0ecSDave Kleikamp struct super_block *sb = inode->i_sb; 3960617ba13bSMingming Cao if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 3961617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE) || 3962617ba13bSMingming Cao EXT4_SB(sb)->s_es->s_rev_level == 3963617ba13bSMingming Cao cpu_to_le32(EXT4_GOOD_OLD_REV)) { 3964ac27a0ecSDave Kleikamp /* If this is the first large file 3965ac27a0ecSDave Kleikamp * created, add a flag to the superblock. 
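 * (Unknown RO_COMPAT flags only make old kernels mount the filesystem
 * read-only, which is exactly the protection wanted here: a kernel
 * without large-file support would otherwise misinterpret the high
 * size bits of a file bigger than 2GB.)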
3966ac27a0ecSDave Kleikamp */ 3967617ba13bSMingming Cao err = ext4_journal_get_write_access(handle, 3968617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 3969ac27a0ecSDave Kleikamp if (err) 3970ac27a0ecSDave Kleikamp goto out_brelse; 3971617ba13bSMingming Cao ext4_update_dynamic_rev(sb); 3972617ba13bSMingming Cao EXT4_SET_RO_COMPAT_FEATURE(sb, 3973617ba13bSMingming Cao EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 3974ac27a0ecSDave Kleikamp sb->s_dirt = 1; 39750390131bSFrank Mayhar ext4_handle_sync(handle); 397673b50c1cSCurt Wohlgemuth err = ext4_handle_dirty_metadata(handle, NULL, 3977617ba13bSMingming Cao EXT4_SB(sb)->s_sbh); 3978ac27a0ecSDave Kleikamp } 3979ac27a0ecSDave Kleikamp } 3980ac27a0ecSDave Kleikamp raw_inode->i_generation = cpu_to_le32(inode->i_generation); 3981ac27a0ecSDave Kleikamp if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 3982ac27a0ecSDave Kleikamp if (old_valid_dev(inode->i_rdev)) { 3983ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 3984ac27a0ecSDave Kleikamp cpu_to_le32(old_encode_dev(inode->i_rdev)); 3985ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 0; 3986ac27a0ecSDave Kleikamp } else { 3987ac27a0ecSDave Kleikamp raw_inode->i_block[0] = 0; 3988ac27a0ecSDave Kleikamp raw_inode->i_block[1] = 3989ac27a0ecSDave Kleikamp cpu_to_le32(new_encode_dev(inode->i_rdev)); 3990ac27a0ecSDave Kleikamp raw_inode->i_block[2] = 0; 3991ac27a0ecSDave Kleikamp } 3992de9a55b8STheodore Ts'o } else 3993de9a55b8STheodore Ts'o for (block = 0; block < EXT4_N_BLOCKS; block++) 3994ac27a0ecSDave Kleikamp raw_inode->i_block[block] = ei->i_data[block]; 3995ac27a0ecSDave Kleikamp 399625ec56b5SJean Noel Cordenner raw_inode->i_disk_version = cpu_to_le32(inode->i_version); 399725ec56b5SJean Noel Cordenner if (ei->i_extra_isize) { 399825ec56b5SJean Noel Cordenner if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi)) 399925ec56b5SJean Noel Cordenner raw_inode->i_version_hi = 400025ec56b5SJean Noel Cordenner cpu_to_le32(inode->i_version >> 32); 4001ac27a0ecSDave Kleikamp raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 400225ec56b5SJean Noel Cordenner } 400325ec56b5SJean Noel Cordenner 40040390131bSFrank Mayhar BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 400573b50c1cSCurt Wohlgemuth rc = ext4_handle_dirty_metadata(handle, NULL, bh); 4006ac27a0ecSDave Kleikamp if (!err) 4007ac27a0ecSDave Kleikamp err = rc; 400819f5fb7aSTheodore Ts'o ext4_clear_inode_state(inode, EXT4_STATE_NEW); 4009ac27a0ecSDave Kleikamp 4010b436b9beSJan Kara ext4_update_inode_fsync_trans(handle, inode, 0); 4011ac27a0ecSDave Kleikamp out_brelse: 4012ac27a0ecSDave Kleikamp brelse(bh); 4013617ba13bSMingming Cao ext4_std_error(inode->i_sb, err); 4014ac27a0ecSDave Kleikamp return err; 4015ac27a0ecSDave Kleikamp } 4016ac27a0ecSDave Kleikamp 4017ac27a0ecSDave Kleikamp /* 4018617ba13bSMingming Cao * ext4_write_inode() 4019ac27a0ecSDave Kleikamp * 4020ac27a0ecSDave Kleikamp * We are called from a few places: 4021ac27a0ecSDave Kleikamp * 4022ac27a0ecSDave Kleikamp * - Within generic_file_write() for O_SYNC files. 4023ac27a0ecSDave Kleikamp * Here, there will be no transaction running. We wait for any running 4024ac27a0ecSDave Kleikamp * transaction to commit. 4025ac27a0ecSDave Kleikamp * 4026ac27a0ecSDave Kleikamp * - Within sys_sync(), kupdate and such. 4027ac27a0ecSDave Kleikamp * We wait on commit, if told to. 4028ac27a0ecSDave Kleikamp * 4029ac27a0ecSDave Kleikamp * - Within prune_icache() (PF_MEMALLOC == true) 4030ac27a0ecSDave Kleikamp * Here we simply return.
We can't afford to block kswapd on the 4031ac27a0ecSDave Kleikamp * journal commit. 4032ac27a0ecSDave Kleikamp * 4033ac27a0ecSDave Kleikamp * In all cases it is actually safe for us to return without doing anything, 4034ac27a0ecSDave Kleikamp * because the inode has been copied into a raw inode buffer in 4035617ba13bSMingming Cao * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for 4036ac27a0ecSDave Kleikamp * knfsd. 4037ac27a0ecSDave Kleikamp * 4038ac27a0ecSDave Kleikamp * Note that we are absolutely dependent upon all inode dirtiers doing the 4039ac27a0ecSDave Kleikamp * right thing: they *must* call mark_inode_dirty() after dirtying info in 4040ac27a0ecSDave Kleikamp * which we are interested. 4041ac27a0ecSDave Kleikamp * 4042ac27a0ecSDave Kleikamp * It would be a bug for them to not do this. The code: 4043ac27a0ecSDave Kleikamp * 4044ac27a0ecSDave Kleikamp * mark_inode_dirty(inode) 4045ac27a0ecSDave Kleikamp * stuff(); 4046ac27a0ecSDave Kleikamp * inode->i_size = expr; 4047ac27a0ecSDave Kleikamp * 4048ac27a0ecSDave Kleikamp * is in error because a kswapd-driven write_inode() could occur while 4049ac27a0ecSDave Kleikamp * `stuff()' is running, and the new i_size will be lost. Plus the inode 4050ac27a0ecSDave Kleikamp * will no longer be on the superblock's dirty inode list. 4051ac27a0ecSDave Kleikamp */ 4052a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc) 4053ac27a0ecSDave Kleikamp { 405491ac6f43SFrank Mayhar int err; 405591ac6f43SFrank Mayhar 4056ac27a0ecSDave Kleikamp if (current->flags & PF_MEMALLOC) 4057ac27a0ecSDave Kleikamp return 0; 4058ac27a0ecSDave Kleikamp 405991ac6f43SFrank Mayhar if (EXT4_SB(inode->i_sb)->s_journal) { 4060617ba13bSMingming Cao if (ext4_journal_current_handle()) { 4061b38bd33aSMingming Cao jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); 4062ac27a0ecSDave Kleikamp dump_stack(); 4063ac27a0ecSDave Kleikamp return -EIO; 4064ac27a0ecSDave Kleikamp } 4065ac27a0ecSDave Kleikamp 4066a9185b41SChristoph Hellwig if (wbc->sync_mode != WB_SYNC_ALL) 4067ac27a0ecSDave Kleikamp return 0; 4068ac27a0ecSDave Kleikamp 406991ac6f43SFrank Mayhar err = ext4_force_commit(inode->i_sb); 407091ac6f43SFrank Mayhar } else { 407191ac6f43SFrank Mayhar struct ext4_iloc iloc; 407291ac6f43SFrank Mayhar 40738b472d73SCurt Wohlgemuth err = __ext4_get_inode_loc(inode, &iloc, 0); 407491ac6f43SFrank Mayhar if (err) 407591ac6f43SFrank Mayhar return err; 4076a9185b41SChristoph Hellwig if (wbc->sync_mode == WB_SYNC_ALL) 4077830156c7SFrank Mayhar sync_dirty_buffer(iloc.bh); 4078830156c7SFrank Mayhar if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { 4079c398eda0STheodore Ts'o EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, 4080c398eda0STheodore Ts'o "IO error syncing inode"); 4081830156c7SFrank Mayhar err = -EIO; 4082830156c7SFrank Mayhar } 4083fd2dd9fbSCurt Wohlgemuth brelse(iloc.bh); 408491ac6f43SFrank Mayhar } 408591ac6f43SFrank Mayhar return err; 4086ac27a0ecSDave Kleikamp } 4087ac27a0ecSDave Kleikamp 4088ac27a0ecSDave Kleikamp /* 4089617ba13bSMingming Cao * ext4_setattr() 4090ac27a0ecSDave Kleikamp * 4091ac27a0ecSDave Kleikamp * Called from notify_change. 4092ac27a0ecSDave Kleikamp * 4093ac27a0ecSDave Kleikamp * We want to trap VFS attempts to truncate the file as soon as 4094ac27a0ecSDave Kleikamp * possible. 
In particular, we want to make sure that when the VFS 4095ac27a0ecSDave Kleikamp * shrinks i_size, we put the inode on the orphan list and modify 4096ac27a0ecSDave Kleikamp * i_disksize immediately, so that during the subsequent flushing of 4097ac27a0ecSDave Kleikamp * dirty pages and freeing of disk blocks, we can guarantee that any 4098ac27a0ecSDave Kleikamp * commit will leave the blocks being flushed in an unused state on 4099ac27a0ecSDave Kleikamp * disk. (On recovery, the inode will get truncated and the blocks will 4100ac27a0ecSDave Kleikamp * be freed, so we have a strong guarantee that no future commit will 4101ac27a0ecSDave Kleikamp * leave these blocks visible to the user.) 4102ac27a0ecSDave Kleikamp * 4103678aaf48SJan Kara * Another thing we have to assure is that if we are in ordered mode 4104678aaf48SJan Kara * and the inode is still attached to the committing transaction, we must 4105678aaf48SJan Kara * start writeout of all the dirty pages which are being truncated. 4106678aaf48SJan Kara * This way we are sure that all the data written in the previous 4107678aaf48SJan Kara * transaction are already on disk (truncate waits for pages under 4108678aaf48SJan Kara * writeback). 4109678aaf48SJan Kara * 4110678aaf48SJan Kara * Called with inode->i_mutex down. 4111ac27a0ecSDave Kleikamp */ 4112617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr) 4113ac27a0ecSDave Kleikamp { 4114ac27a0ecSDave Kleikamp struct inode *inode = dentry->d_inode; 4115ac27a0ecSDave Kleikamp int error, rc = 0; 41163d287de3SDmitry Monakhov int orphan = 0; 4117ac27a0ecSDave Kleikamp const unsigned int ia_valid = attr->ia_valid; 4118ac27a0ecSDave Kleikamp 4119ac27a0ecSDave Kleikamp error = inode_change_ok(inode, attr); 4120ac27a0ecSDave Kleikamp if (error) 4121ac27a0ecSDave Kleikamp return error; 4122ac27a0ecSDave Kleikamp 412312755627SDmitry Monakhov if (is_quota_modification(inode, attr)) 4124871a2931SChristoph Hellwig dquot_initialize(inode); 4125ac27a0ecSDave Kleikamp if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 4126ac27a0ecSDave Kleikamp (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 4127ac27a0ecSDave Kleikamp handle_t *handle; 4128ac27a0ecSDave Kleikamp 4129ac27a0ecSDave Kleikamp /* (user+group)*(old+new) structure, inode write (sb, 4130ac27a0ecSDave Kleikamp * inode block, ?
- but truncate inode update has it) */ 41315aca07ebSDmitry Monakhov handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ 4132194074acSDmitry Monakhov EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); 4133ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4134ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4135ac27a0ecSDave Kleikamp goto err_out; 4136ac27a0ecSDave Kleikamp } 4137b43fa828SChristoph Hellwig error = dquot_transfer(inode, attr); 4138ac27a0ecSDave Kleikamp if (error) { 4139617ba13bSMingming Cao ext4_journal_stop(handle); 4140ac27a0ecSDave Kleikamp return error; 4141ac27a0ecSDave Kleikamp } 4142ac27a0ecSDave Kleikamp /* Update corresponding info in inode so that everything is in 4143ac27a0ecSDave Kleikamp * one transaction */ 4144ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_UID) 4145ac27a0ecSDave Kleikamp inode->i_uid = attr->ia_uid; 4146ac27a0ecSDave Kleikamp if (attr->ia_valid & ATTR_GID) 4147ac27a0ecSDave Kleikamp inode->i_gid = attr->ia_gid; 4148617ba13bSMingming Cao error = ext4_mark_inode_dirty(handle, inode); 4149617ba13bSMingming Cao ext4_journal_stop(handle); 4150ac27a0ecSDave Kleikamp } 4151ac27a0ecSDave Kleikamp 4152e2b46574SEric Sandeen if (attr->ia_valid & ATTR_SIZE) { 4153562c72aaSChristoph Hellwig inode_dio_wait(inode); 4154562c72aaSChristoph Hellwig 415512e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { 4156e2b46574SEric Sandeen struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 4157e2b46574SEric Sandeen 41580c095c7fSTheodore Ts'o if (attr->ia_size > sbi->s_bitmap_maxbytes) 41590c095c7fSTheodore Ts'o return -EFBIG; 4160e2b46574SEric Sandeen } 4161e2b46574SEric Sandeen } 4162e2b46574SEric Sandeen 4163ac27a0ecSDave Kleikamp if (S_ISREG(inode->i_mode) && 4164c8d46e41SJiaying Zhang attr->ia_valid & ATTR_SIZE && 4165072bd7eaSTheodore Ts'o (attr->ia_size < inode->i_size)) { 4166ac27a0ecSDave Kleikamp handle_t *handle; 4167ac27a0ecSDave Kleikamp 4168617ba13bSMingming Cao handle = ext4_journal_start(inode, 3); 4169ac27a0ecSDave Kleikamp if (IS_ERR(handle)) { 4170ac27a0ecSDave Kleikamp error = PTR_ERR(handle); 4171ac27a0ecSDave Kleikamp goto err_out; 4172ac27a0ecSDave Kleikamp } 41733d287de3SDmitry Monakhov if (ext4_handle_valid(handle)) { 4174617ba13bSMingming Cao error = ext4_orphan_add(handle, inode); 41753d287de3SDmitry Monakhov orphan = 1; 41763d287de3SDmitry Monakhov } 4177617ba13bSMingming Cao EXT4_I(inode)->i_disksize = attr->ia_size; 4178617ba13bSMingming Cao rc = ext4_mark_inode_dirty(handle, inode); 4179ac27a0ecSDave Kleikamp if (!error) 4180ac27a0ecSDave Kleikamp error = rc; 4181617ba13bSMingming Cao ext4_journal_stop(handle); 4182678aaf48SJan Kara 4183678aaf48SJan Kara if (ext4_should_order_data(inode)) { 4184678aaf48SJan Kara error = ext4_begin_ordered_truncate(inode, 4185678aaf48SJan Kara attr->ia_size); 4186678aaf48SJan Kara if (error) { 4187678aaf48SJan Kara /* Do as much error cleanup as possible */ 4188678aaf48SJan Kara handle = ext4_journal_start(inode, 3); 4189678aaf48SJan Kara if (IS_ERR(handle)) { 4190678aaf48SJan Kara ext4_orphan_del(NULL, inode); 4191678aaf48SJan Kara goto err_out; 4192678aaf48SJan Kara } 4193678aaf48SJan Kara ext4_orphan_del(handle, inode); 41943d287de3SDmitry Monakhov orphan = 0; 4195678aaf48SJan Kara ext4_journal_stop(handle); 4196678aaf48SJan Kara goto err_out; 4197678aaf48SJan Kara } 4198678aaf48SJan Kara } 4199ac27a0ecSDave Kleikamp } 4200ac27a0ecSDave Kleikamp 4201072bd7eaSTheodore Ts'o if (attr->ia_valid & ATTR_SIZE) { 4202072bd7eaSTheodore Ts'o if (attr->ia_size != 
i_size_read(inode)) { 4203072bd7eaSTheodore Ts'o truncate_setsize(inode, attr->ia_size); 4204072bd7eaSTheodore Ts'o ext4_truncate(inode); 4205072bd7eaSTheodore Ts'o } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 4206072bd7eaSTheodore Ts'o ext4_truncate(inode); 4207072bd7eaSTheodore Ts'o } 4208ac27a0ecSDave Kleikamp 42091025774cSChristoph Hellwig if (!rc) { 42101025774cSChristoph Hellwig setattr_copy(inode, attr); 42111025774cSChristoph Hellwig mark_inode_dirty(inode); 42121025774cSChristoph Hellwig } 42131025774cSChristoph Hellwig 42141025774cSChristoph Hellwig /* 42151025774cSChristoph Hellwig * If the call to ext4_truncate failed to get a transaction handle at 42161025774cSChristoph Hellwig * all, we need to clean up the in-core orphan list manually. 42171025774cSChristoph Hellwig */ 42183d287de3SDmitry Monakhov if (orphan && inode->i_nlink) 4219617ba13bSMingming Cao ext4_orphan_del(NULL, inode); 4220ac27a0ecSDave Kleikamp 4221ac27a0ecSDave Kleikamp if (!rc && (ia_valid & ATTR_MODE)) 4222617ba13bSMingming Cao rc = ext4_acl_chmod(inode); 4223ac27a0ecSDave Kleikamp 4224ac27a0ecSDave Kleikamp err_out: 4225617ba13bSMingming Cao ext4_std_error(inode->i_sb, error); 4226ac27a0ecSDave Kleikamp if (!error) 4227ac27a0ecSDave Kleikamp error = rc; 4228ac27a0ecSDave Kleikamp return error; 4229ac27a0ecSDave Kleikamp } 4230ac27a0ecSDave Kleikamp 42313e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 42323e3398a0SMingming Cao struct kstat *stat) 42333e3398a0SMingming Cao { 42343e3398a0SMingming Cao struct inode *inode; 42353e3398a0SMingming Cao unsigned long delalloc_blocks; 42363e3398a0SMingming Cao 42373e3398a0SMingming Cao inode = dentry->d_inode; 42383e3398a0SMingming Cao generic_fillattr(inode, stat); 42393e3398a0SMingming Cao 42403e3398a0SMingming Cao /* 42413e3398a0SMingming Cao * We can't update i_blocks if the block allocation is delayed; 42423e3398a0SMingming Cao * otherwise, in the case of a system crash before the real block 42433e3398a0SMingming Cao * allocation is done, we would have i_blocks inconsistent with 42443e3398a0SMingming Cao * the on-disk file blocks. 42453e3398a0SMingming Cao * We always keep i_blocks updated together with the real 42463e3398a0SMingming Cao * allocation. But so as not to confuse userspace, stat 42473e3398a0SMingming Cao * will return blocks that include the delayed allocation 42483e3398a0SMingming Cao * blocks for this file.
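 * For example (illustrative): with 4KiB blocks, a file with 8 blocks
 * already allocated and 4 more reserved by delayed allocation shows
 * st_blocks = (8 + 4) * 4096 / 512 = 96 sectors, matching what it
 * will occupy once writeback allocates the reserved blocks.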
42493e3398a0SMingming Cao */ 42503e3398a0SMingming Cao delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; 42513e3398a0SMingming Cao 42523e3398a0SMingming Cao stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 42533e3398a0SMingming Cao return 0; 42543e3398a0SMingming Cao } 4255ac27a0ecSDave Kleikamp 4256a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4257a02908f1SMingming Cao { 425812e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 42598bb2b247SAmir Goldstein return ext4_ind_trans_blocks(inode, nrblocks, chunk); 4260ac51d837STheodore Ts'o return ext4_ext_index_trans_blocks(inode, nrblocks, chunk); 4261a02908f1SMingming Cao } 4262ac51d837STheodore Ts'o 4263a02908f1SMingming Cao /* 4264a02908f1SMingming Cao * Account for index blocks, block group bitmaps and block group 4265a02908f1SMingming Cao * descriptor blocks if we modify data blocks and index blocks. In the 4266a02908f1SMingming Cao * worst case, the index blocks are spread over different block groups. 4267a02908f1SMingming Cao * 4268a02908f1SMingming Cao * If data blocks are discontiguous, they may be spread over 4269af901ca1SAndré Goddard Rosa * different block groups too. Even if they are contiguous, with flexbg 4270a02908f1SMingming Cao * they could still cross a block group boundary. 4271a02908f1SMingming Cao * 4272a02908f1SMingming Cao * Also account for superblock, inode, quota and xattr blocks 4273a02908f1SMingming Cao */ 42741f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk) 4275a02908f1SMingming Cao { 42768df9675fSTheodore Ts'o ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb); 42778df9675fSTheodore Ts'o int gdpblocks; 4278a02908f1SMingming Cao int idxblocks; 4279a02908f1SMingming Cao int ret = 0; 4280a02908f1SMingming Cao 4281a02908f1SMingming Cao /* 4282a02908f1SMingming Cao * How many index blocks do we need to touch to modify nrblocks?
4263a02908f1SMingming Cao /*
4264a02908f1SMingming Cao  * Account for index blocks, block group bitmaps and block group
4265a02908f1SMingming Cao  * descriptor blocks if we modify data blocks and index blocks.  In the
4266a02908f1SMingming Cao  * worst case, the index blocks are spread over different block groups.
4267a02908f1SMingming Cao  *
4268a02908f1SMingming Cao  * If the data blocks are discontiguous, they may be spread over
4269af901ca1SAndré Goddard Rosa  * different block groups too.  Even if they are contiguous, with flexbg,
4270a02908f1SMingming Cao  * they could still cross a block group boundary.
4271a02908f1SMingming Cao  *
4272a02908f1SMingming Cao  * Also account for superblock, inode, quota and xattr blocks.
4273a02908f1SMingming Cao  */
42741f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4275a02908f1SMingming Cao {
42768df9675fSTheodore Ts'o 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
42778df9675fSTheodore Ts'o 	int gdpblocks;
4278a02908f1SMingming Cao 	int idxblocks;
4279a02908f1SMingming Cao 	int ret = 0;
4280a02908f1SMingming Cao 
4281a02908f1SMingming Cao 	/*
4282a02908f1SMingming Cao 	 * How many index blocks do we need to touch to modify nrblocks?
4283a02908f1SMingming Cao 	 * The "chunk" flag indicates whether the nrblocks are
4284a02908f1SMingming Cao 	 * physically contiguous on disk.
4285a02908f1SMingming Cao 	 *
4286a02908f1SMingming Cao 	 * Direct IO and fallocate call get_block to allocate
4287a02908f1SMingming Cao 	 * a single extent at a time, so they can set the "chunk" flag.
4288a02908f1SMingming Cao 	 */
4289a02908f1SMingming Cao 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4290a02908f1SMingming Cao 
4291a02908f1SMingming Cao 	ret = idxblocks;
4292a02908f1SMingming Cao 
4293a02908f1SMingming Cao 	/*
4294a02908f1SMingming Cao 	 * Now let's see how many group bitmaps and group descriptors need
4295a02908f1SMingming Cao 	 * to be accounted for.
4296a02908f1SMingming Cao 	 */
4297a02908f1SMingming Cao 	groups = idxblocks;
4298a02908f1SMingming Cao 	if (chunk)
4299a02908f1SMingming Cao 		groups += 1;
4300ac27a0ecSDave Kleikamp 	else
4301a02908f1SMingming Cao 		groups += nrblocks;
4302ac27a0ecSDave Kleikamp 
4303a02908f1SMingming Cao 	gdpblocks = groups;
43048df9675fSTheodore Ts'o 	if (groups > ngroups)
43058df9675fSTheodore Ts'o 		groups = ngroups;
4306a02908f1SMingming Cao 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4307a02908f1SMingming Cao 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4308a02908f1SMingming Cao 
4309a02908f1SMingming Cao 	/* bitmaps and block group descriptor blocks */
4310a02908f1SMingming Cao 	ret += groups + gdpblocks;
4311a02908f1SMingming Cao 
4312a02908f1SMingming Cao 	/* Blocks for super block, inode, quota and xattr blocks */
4313a02908f1SMingming Cao 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4314ac27a0ecSDave Kleikamp 
4315ac27a0ecSDave Kleikamp 	return ret;
4316ac27a0ecSDave Kleikamp }
4317ac27a0ecSDave Kleikamp 
4318ac27a0ecSDave Kleikamp /*
431925985edcSLucas De Marchi  * Calculate the total number of credits to reserve to fit
4320f3bd1f3fSMingming Cao  * the modification of a single page into a single transaction,
4321f3bd1f3fSMingming Cao  * which may include multiple chunks of block allocations.
4322a02908f1SMingming Cao  *
4323525f4ed8SMingming Cao  * This could be called via ext4_write_begin().
4324a02908f1SMingming Cao  *
4325525f4ed8SMingming Cao  * We need to consider the worst case, when
4326a02908f1SMingming Cao  * one new block is allocated per extent.
4327a02908f1SMingming Cao  */
4328a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode)
4329a02908f1SMingming Cao {
4330a02908f1SMingming Cao 	int bpp = ext4_journal_blocks_per_page(inode);
4331a02908f1SMingming Cao 	int ret;
4332a02908f1SMingming Cao 
4333a02908f1SMingming Cao 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
4334a02908f1SMingming Cao 
4335a02908f1SMingming Cao 	/* Account for data blocks for journalled mode */
4336a02908f1SMingming Cao 	if (ext4_should_journal_data(inode))
4337a02908f1SMingming Cao 		ret += bpp;
4338a02908f1SMingming Cao 	return ret;
4339a02908f1SMingming Cao }
4340f3bd1f3fSMingming Cao 
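/*
 * Worked example of the accounting above (added illustration): with
 * 4KiB blocks and 4KiB pages, bpp == 1.  Suppose
 * ext4_index_trans_blocks() returns 3 for that one block; then in
 * ext4_meta_trans_blocks() idxblocks == 3, groups == 3 + 1 == 4
 * (chunk == 0), gdpblocks is at most s_gdb_count, and the total is
 *	3 + 4 + gdpblocks + EXT4_META_TRANS_BLOCKS(sb)
 * credits, plus one more block (bpp) in data=journal mode.
 */
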
4341f3bd1f3fSMingming Cao /*
4342f3bd1f3fSMingming Cao  * Calculate the journal credits for a chunk of data modification.
4343f3bd1f3fSMingming Cao  *
4344f3bd1f3fSMingming Cao  * This is called from DIO, fallocate or any other caller of
434579e83036SEric Sandeen  * ext4_map_blocks() that maps/allocates a chunk of contiguous disk blocks.
4346f3bd1f3fSMingming Cao  *
4347f3bd1f3fSMingming Cao  * Journal buffers for data blocks are not included here, as DIO
4348f3bd1f3fSMingming Cao  * and fallocate do not need to journal data buffers.
4349f3bd1f3fSMingming Cao  */
4350f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4351f3bd1f3fSMingming Cao {
4352f3bd1f3fSMingming Cao 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4353f3bd1f3fSMingming Cao }
4354f3bd1f3fSMingming Cao 
4355a02908f1SMingming Cao /*
4356617ba13bSMingming Cao  * The caller must have previously called ext4_reserve_inode_write().
4357ac27a0ecSDave Kleikamp  * Given this, we know that the caller already has write access to iloc->bh.
4358ac27a0ecSDave Kleikamp  */
4359617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
4360617ba13bSMingming Cao 			 struct inode *inode, struct ext4_iloc *iloc)
4361ac27a0ecSDave Kleikamp {
4362ac27a0ecSDave Kleikamp 	int err = 0;
4363ac27a0ecSDave Kleikamp 
436425ec56b5SJean Noel Cordenner 	if (test_opt(inode->i_sb, I_VERSION))
436525ec56b5SJean Noel Cordenner 		inode_inc_iversion(inode);
436625ec56b5SJean Noel Cordenner 
4367ac27a0ecSDave Kleikamp 	/* ext4_do_update_inode() consumes one bh->b_count */
4368ac27a0ecSDave Kleikamp 	get_bh(iloc->bh);
4369ac27a0ecSDave Kleikamp 
4370dab291afSMingming Cao 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4371830156c7SFrank Mayhar 	err = ext4_do_update_inode(handle, inode, iloc);
4372ac27a0ecSDave Kleikamp 	put_bh(iloc->bh);
4373ac27a0ecSDave Kleikamp 	return err;
4374ac27a0ecSDave Kleikamp }
4375ac27a0ecSDave Kleikamp 
4376ac27a0ecSDave Kleikamp /*
4377ac27a0ecSDave Kleikamp  * On success, we end up with an outstanding reference count against
4378ac27a0ecSDave Kleikamp  * iloc->bh.  This _must_ be cleaned up later.
4379ac27a0ecSDave Kleikamp  */
4380ac27a0ecSDave Kleikamp 
4381ac27a0ecSDave Kleikamp int
4382617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4383617ba13bSMingming Cao 			 struct ext4_iloc *iloc)
4384ac27a0ecSDave Kleikamp {
43850390131bSFrank Mayhar 	int err;
43860390131bSFrank Mayhar 
4387617ba13bSMingming Cao 	err = ext4_get_inode_loc(inode, iloc);
4388ac27a0ecSDave Kleikamp 	if (!err) {
4389ac27a0ecSDave Kleikamp 		BUFFER_TRACE(iloc->bh, "get_write_access");
4390617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, iloc->bh);
4391ac27a0ecSDave Kleikamp 		if (err) {
4392ac27a0ecSDave Kleikamp 			brelse(iloc->bh);
4393ac27a0ecSDave Kleikamp 			iloc->bh = NULL;
4394ac27a0ecSDave Kleikamp 		}
4395ac27a0ecSDave Kleikamp 	}
4396617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4397ac27a0ecSDave Kleikamp 	return err;
4398ac27a0ecSDave Kleikamp }
4399ac27a0ecSDave Kleikamp 
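#if 0
/*
 * Sketch (added, not compiled) of how the two helpers above pair up;
 * the function name is made up, and ext4_mark_inode_dirty() below is
 * the real in-tree example of this pattern.
 */
static int ext4_update_inode_example(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;
	/* ... modify the in-core inode here ... */
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif
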
4400ac27a0ecSDave Kleikamp /*
44016dd4ee7cSKalpak Shah  * Expand an inode by new_extra_isize bytes.
44026dd4ee7cSKalpak Shah  * Returns 0 on success or a negative error number on failure.
44036dd4ee7cSKalpak Shah  */
44041d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode,
44051d03ec98SAneesh Kumar K.V 				   unsigned int new_extra_isize,
44061d03ec98SAneesh Kumar K.V 				   struct ext4_iloc iloc,
44071d03ec98SAneesh Kumar K.V 				   handle_t *handle)
44086dd4ee7cSKalpak Shah {
44096dd4ee7cSKalpak Shah 	struct ext4_inode *raw_inode;
44106dd4ee7cSKalpak Shah 	struct ext4_xattr_ibody_header *header;
44116dd4ee7cSKalpak Shah 
44126dd4ee7cSKalpak Shah 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
44136dd4ee7cSKalpak Shah 		return 0;
44146dd4ee7cSKalpak Shah 
44156dd4ee7cSKalpak Shah 	raw_inode = ext4_raw_inode(&iloc);
44166dd4ee7cSKalpak Shah 
44176dd4ee7cSKalpak Shah 	header = IHDR(inode, raw_inode);
44186dd4ee7cSKalpak Shah 
44196dd4ee7cSKalpak Shah 	/* No extended attributes present */
442019f5fb7aSTheodore Ts'o 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
44216dd4ee7cSKalpak Shah 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
44226dd4ee7cSKalpak Shah 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
44236dd4ee7cSKalpak Shah 		       new_extra_isize);
44246dd4ee7cSKalpak Shah 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
44256dd4ee7cSKalpak Shah 		return 0;
44266dd4ee7cSKalpak Shah 	}
44276dd4ee7cSKalpak Shah 
44286dd4ee7cSKalpak Shah 	/* try to expand with EAs present */
44296dd4ee7cSKalpak Shah 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
44306dd4ee7cSKalpak Shah 					  raw_inode, handle);
44316dd4ee7cSKalpak Shah }
44326dd4ee7cSKalpak Shah 
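/*
 * Layout example for the function above (added): with 256-byte on-disk
 * inodes, the first EXT4_GOOD_OLD_INODE_SIZE (128) bytes hold the
 * classic inode fields; i_extra_isize says how many of the following
 * bytes are used for extended fields, and any in-inode xattrs start
 * after that.  So expanding i_extra_isize from, say, 28 to 32 bytes
 * means shifting the in-inode xattr area by 4 bytes, which is the case
 * ext4_expand_extra_isize_ea() handles.
 */
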
44336dd4ee7cSKalpak Shah /*
4434ac27a0ecSDave Kleikamp  * What we do here is to mark the in-core inode as clean with respect to inode
4435ac27a0ecSDave Kleikamp  * dirtiness (it may still be data-dirty).
4436ac27a0ecSDave Kleikamp  * This means that the in-core inode may be reaped by prune_icache
4437ac27a0ecSDave Kleikamp  * without having to perform any I/O.  This is a very good thing,
4438ac27a0ecSDave Kleikamp  * because *any* task may call prune_icache - even ones which
4439ac27a0ecSDave Kleikamp  * have a transaction open against a different journal.
4440ac27a0ecSDave Kleikamp  *
4441ac27a0ecSDave Kleikamp  * Is this cheating?  Not really.  Sure, we haven't written the
4442ac27a0ecSDave Kleikamp  * inode out, but prune_icache isn't a user-visible syncing function.
4443ac27a0ecSDave Kleikamp  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4444ac27a0ecSDave Kleikamp  * we start and wait on commits.
4445ac27a0ecSDave Kleikamp  *
4446ac27a0ecSDave Kleikamp  * Is this efficient/effective?  Well, we're being nice to the system
4447ac27a0ecSDave Kleikamp  * by cleaning up our inodes proactively so they can be reaped
4448ac27a0ecSDave Kleikamp  * without I/O.  But we are potentially leaving up to five seconds'
4449ac27a0ecSDave Kleikamp  * worth of inodes floating about which prune_icache wants us to
4450ac27a0ecSDave Kleikamp  * write out.  One way to fix that would be to get prune_icache()
4451ac27a0ecSDave Kleikamp  * to do a write_super() to free up some memory.  That would have
4452ac27a0ecSDave Kleikamp  * the desired effect.
4453ac27a0ecSDave Kleikamp  */
4454617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4455ac27a0ecSDave Kleikamp {
4456617ba13bSMingming Cao 	struct ext4_iloc iloc;
44576dd4ee7cSKalpak Shah 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
44586dd4ee7cSKalpak Shah 	static unsigned int mnt_count;
44596dd4ee7cSKalpak Shah 	int err, ret;
4460ac27a0ecSDave Kleikamp 
4461ac27a0ecSDave Kleikamp 	might_sleep();
44627ff9c073STheodore Ts'o 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4463617ba13bSMingming Cao 	err = ext4_reserve_inode_write(handle, inode, &iloc);
44640390131bSFrank Mayhar 	if (ext4_handle_valid(handle) &&
44650390131bSFrank Mayhar 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
446619f5fb7aSTheodore Ts'o 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
44676dd4ee7cSKalpak Shah 		/*
44686dd4ee7cSKalpak Shah 		 * We need extra buffer credits since we may write into the EA
44696dd4ee7cSKalpak Shah 		 * block with this same handle.  If journal_extend fails, then
44706dd4ee7cSKalpak Shah 		 * it will only result in a minor loss of functionality for
44716dd4ee7cSKalpak Shah 		 * that inode.  If this is felt to be critical, then e2fsck
44726dd4ee7cSKalpak Shah 		 * should be run to force a large enough s_min_extra_isize.
44736dd4ee7cSKalpak Shah 		 */
44746dd4ee7cSKalpak Shah 		if ((jbd2_journal_extend(handle,
44756dd4ee7cSKalpak Shah 			EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
44766dd4ee7cSKalpak Shah 			ret = ext4_expand_extra_isize(inode,
44776dd4ee7cSKalpak Shah 						      sbi->s_want_extra_isize,
44786dd4ee7cSKalpak Shah 						      iloc, handle);
44796dd4ee7cSKalpak Shah 			if (ret) {
448019f5fb7aSTheodore Ts'o 				ext4_set_inode_state(inode,
448119f5fb7aSTheodore Ts'o 						     EXT4_STATE_NO_EXPAND);
4482c1bddad9SAneesh Kumar K.V 				if (mnt_count !=
4483c1bddad9SAneesh Kumar K.V 				    le16_to_cpu(sbi->s_es->s_mnt_count)) {
448412062dddSEric Sandeen 					ext4_warning(inode->i_sb,
44856dd4ee7cSKalpak Shah 					"Unable to expand inode %lu.  Delete"
44866dd4ee7cSKalpak Shah 					" some EAs or run e2fsck.",
44876dd4ee7cSKalpak Shah 						inode->i_ino);
4488c1bddad9SAneesh Kumar K.V 					mnt_count =
4489c1bddad9SAneesh Kumar K.V 					  le16_to_cpu(sbi->s_es->s_mnt_count);
44906dd4ee7cSKalpak Shah 				}
44916dd4ee7cSKalpak Shah 			}
44926dd4ee7cSKalpak Shah 		}
44936dd4ee7cSKalpak Shah 	}
4494ac27a0ecSDave Kleikamp 	if (!err)
4495617ba13bSMingming Cao 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4496ac27a0ecSDave Kleikamp 	return err;
4497ac27a0ecSDave Kleikamp }
4498ac27a0ecSDave Kleikamp 
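/*
 * Note (added) on the jbd2_journal_extend() call above: it returns 0
 * when the running transaction can still accommodate the requested
 * extra credits, so the "== 0" guard means the extra_isize expansion
 * is only attempted once EXT4_DATA_TRANS_BLOCKS extra credits have
 * been secured, and is silently skipped otherwise; as the comment
 * above says, skipping it merely delays the expansion.
 */
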
4499ac27a0ecSDave Kleikamp /*
4500617ba13bSMingming Cao  * ext4_dirty_inode() is called from __mark_inode_dirty()
4501ac27a0ecSDave Kleikamp  *
4502ac27a0ecSDave Kleikamp  * We're really interested in the case where a file is being extended.
4503ac27a0ecSDave Kleikamp  * i_size has been changed by generic_commit_write() and we thus need
4504ac27a0ecSDave Kleikamp  * to include the updated inode in the current transaction.
4505ac27a0ecSDave Kleikamp  *
45065dd4056dSChristoph Hellwig  * Also, dquot_alloc_block() will always dirty the inode when blocks
4507ac27a0ecSDave Kleikamp  * are allocated to the file.
4508ac27a0ecSDave Kleikamp  *
4509ac27a0ecSDave Kleikamp  * If the inode is marked synchronous, we don't honour that here - doing
4510ac27a0ecSDave Kleikamp  * so would cause a commit on atime updates, which we don't bother doing.
4511ac27a0ecSDave Kleikamp  * We handle synchronous inodes at the highest possible level.
4512ac27a0ecSDave Kleikamp  */
4513aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags)
4514ac27a0ecSDave Kleikamp {
4515ac27a0ecSDave Kleikamp 	handle_t *handle;
4516ac27a0ecSDave Kleikamp 
4517617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 2);
4518ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4519ac27a0ecSDave Kleikamp 		goto out;
4520f3dc272fSCurt Wohlgemuth 
4521617ba13bSMingming Cao 	ext4_mark_inode_dirty(handle, inode);
4522f3dc272fSCurt Wohlgemuth 
4523617ba13bSMingming Cao 	ext4_journal_stop(handle);
4524ac27a0ecSDave Kleikamp out:
4525ac27a0ecSDave Kleikamp 	return;
4526ac27a0ecSDave Kleikamp }
4527ac27a0ecSDave Kleikamp 
4528ac27a0ecSDave Kleikamp #if 0
4529ac27a0ecSDave Kleikamp /*
4530ac27a0ecSDave Kleikamp  * Bind an inode's backing buffer_head into this transaction, to prevent
4531ac27a0ecSDave Kleikamp  * it from being flushed to disk early.  Unlike
4532617ba13bSMingming Cao  * ext4_reserve_inode_write, this leaves behind no bh reference and
4533ac27a0ecSDave Kleikamp  * returns no iloc structure, so the caller needs to repeat the iloc
4534ac27a0ecSDave Kleikamp  * lookup to mark the inode dirty later.
4535ac27a0ecSDave Kleikamp  */
4536617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4537ac27a0ecSDave Kleikamp {
4538617ba13bSMingming Cao 	struct ext4_iloc iloc;
4539ac27a0ecSDave Kleikamp 
4540ac27a0ecSDave Kleikamp 	int err = 0;
4541ac27a0ecSDave Kleikamp 	if (handle) {
4542617ba13bSMingming Cao 		err = ext4_get_inode_loc(inode, &iloc);
4543ac27a0ecSDave Kleikamp 		if (!err) {
4544ac27a0ecSDave Kleikamp 			BUFFER_TRACE(iloc.bh, "get_write_access");
4545dab291afSMingming Cao 			err = jbd2_journal_get_write_access(handle, iloc.bh);
4546ac27a0ecSDave Kleikamp 			if (!err)
45470390131bSFrank Mayhar 				err = ext4_handle_dirty_metadata(handle,
454873b50c1cSCurt Wohlgemuth 								 NULL,
4549ac27a0ecSDave Kleikamp 								 iloc.bh);
4550ac27a0ecSDave Kleikamp 			brelse(iloc.bh);
4551ac27a0ecSDave Kleikamp 		}
4552ac27a0ecSDave Kleikamp 	}
4553617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4554ac27a0ecSDave Kleikamp 	return err;
4555ac27a0ecSDave Kleikamp }
4556ac27a0ecSDave Kleikamp #endif
4557ac27a0ecSDave Kleikamp 
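/*
 * Context note (added): the function below is reached from the
 * FS_IOC_SETFLAGS ioctl path (e.g. chattr +j / chattr -j) when the
 * per-inode data-journaling flag is toggled, so val reflects the
 * requested journalling state.
 */
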
4558617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val)
4559ac27a0ecSDave Kleikamp {
4560ac27a0ecSDave Kleikamp 	journal_t *journal;
4561ac27a0ecSDave Kleikamp 	handle_t *handle;
4562ac27a0ecSDave Kleikamp 	int err;
4563ac27a0ecSDave Kleikamp 
4564ac27a0ecSDave Kleikamp 	/*
4565ac27a0ecSDave Kleikamp 	 * We have to be very careful here: changing a data block's
4566ac27a0ecSDave Kleikamp 	 * journaling status dynamically is dangerous.  If we write a
4567ac27a0ecSDave Kleikamp 	 * data block to the journal, change the status and then delete
4568ac27a0ecSDave Kleikamp 	 * that block, we risk forgetting to revoke the old log record
4569ac27a0ecSDave Kleikamp 	 * from the journal and so a subsequent replay can corrupt data.
4570ac27a0ecSDave Kleikamp 	 * So, first we make sure that the journal is empty and that
4571ac27a0ecSDave Kleikamp 	 * nobody is changing anything.
4572ac27a0ecSDave Kleikamp 	 */
4573ac27a0ecSDave Kleikamp 
4574617ba13bSMingming Cao 	journal = EXT4_JOURNAL(inode);
45750390131bSFrank Mayhar 	if (!journal)
45760390131bSFrank Mayhar 		return 0;
4577d699594dSDave Hansen 	if (is_journal_aborted(journal))
4578ac27a0ecSDave Kleikamp 		return -EROFS;
4579ac27a0ecSDave Kleikamp 
4580dab291afSMingming Cao 	jbd2_journal_lock_updates(journal);
4581dab291afSMingming Cao 	jbd2_journal_flush(journal);
4582ac27a0ecSDave Kleikamp 
4583ac27a0ecSDave Kleikamp 	/*
4584ac27a0ecSDave Kleikamp 	 * OK, there are no updates running now, and all cached data is
4585ac27a0ecSDave Kleikamp 	 * synced to disk.  We are now in a completely consistent state
4586ac27a0ecSDave Kleikamp 	 * which doesn't have anything in the journal, and we know that
4587ac27a0ecSDave Kleikamp 	 * no filesystem updates are running, so it is safe to modify
4588ac27a0ecSDave Kleikamp 	 * the inode's in-core data-journaling state flag now.
4589ac27a0ecSDave Kleikamp 	 */
4590ac27a0ecSDave Kleikamp 
4591ac27a0ecSDave Kleikamp 	if (val)
459212e9b892SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4593ac27a0ecSDave Kleikamp 	else
459412e9b892SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4595617ba13bSMingming Cao 	ext4_set_aops(inode);
4596ac27a0ecSDave Kleikamp 
4597dab291afSMingming Cao 	jbd2_journal_unlock_updates(journal);
4598ac27a0ecSDave Kleikamp 
4599ac27a0ecSDave Kleikamp 	/* Finally we can mark the inode as dirty. */
4600ac27a0ecSDave Kleikamp 
4601617ba13bSMingming Cao 	handle = ext4_journal_start(inode, 1);
4602ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
4603ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
4604ac27a0ecSDave Kleikamp 
4605617ba13bSMingming Cao 	err = ext4_mark_inode_dirty(handle, inode);
46060390131bSFrank Mayhar 	ext4_handle_sync(handle);
4607617ba13bSMingming Cao 	ext4_journal_stop(handle);
4608617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4609ac27a0ecSDave Kleikamp 
4610ac27a0ecSDave Kleikamp 	return err;
4611ac27a0ecSDave Kleikamp }
46122e9ee850SAneesh Kumar K.V 
46132e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
46142e9ee850SAneesh Kumar K.V {
46152e9ee850SAneesh Kumar K.V 	return !buffer_mapped(bh);
46162e9ee850SAneesh Kumar K.V }
46172e9ee850SAneesh Kumar K.V 
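/*
 * Notes (added) for the fault handler below: walk_page_buffers()
 * returns the first non-zero value produced by its callback, so a zero
 * return from walk_page_buffers(..., ext4_bh_unmapped) means every
 * buffer in the range is already mapped and no block allocation is
 * needed.  Also, block_page_mkwrite_return() at the end translates the
 * 0 / -ENOMEM / other-error results of __block_page_mkwrite() into
 * VM_FAULT_LOCKED, VM_FAULT_OOM and VM_FAULT_SIGBUS respectively.
 */
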
4618c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
46192e9ee850SAneesh Kumar K.V {
4620c2ec175cSNick Piggin 	struct page *page = vmf->page;
46212e9ee850SAneesh Kumar K.V 	loff_t size;
46222e9ee850SAneesh Kumar K.V 	unsigned long len;
46239ea7df53SJan Kara 	int ret;
46242e9ee850SAneesh Kumar K.V 	struct file *file = vma->vm_file;
46252e9ee850SAneesh Kumar K.V 	struct inode *inode = file->f_path.dentry->d_inode;
46262e9ee850SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
46279ea7df53SJan Kara 	handle_t *handle;
46289ea7df53SJan Kara 	get_block_t *get_block;
46299ea7df53SJan Kara 	int retries = 0;
46302e9ee850SAneesh Kumar K.V 
46312e9ee850SAneesh Kumar K.V 	/*
46329ea7df53SJan Kara 	 * This check is racy but catches the common case.  We rely on
46339ea7df53SJan Kara 	 * __block_page_mkwrite() to do a reliable check.
46342e9ee850SAneesh Kumar K.V 	 */
46359ea7df53SJan Kara 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
46369ea7df53SJan Kara 	/* Delalloc case is easy... */
46379ea7df53SJan Kara 	if (test_opt(inode->i_sb, DELALLOC) &&
46389ea7df53SJan Kara 	    !ext4_should_journal_data(inode) &&
46399ea7df53SJan Kara 	    !ext4_nonda_switch(inode->i_sb)) {
46409ea7df53SJan Kara 		do {
46419ea7df53SJan Kara 			ret = __block_page_mkwrite(vma, vmf,
46429ea7df53SJan Kara 						   ext4_da_get_block_prep);
46439ea7df53SJan Kara 		} while (ret == -ENOSPC &&
46449ea7df53SJan Kara 			 ext4_should_retry_alloc(inode->i_sb, &retries));
46459ea7df53SJan Kara 		goto out_ret;
46462e9ee850SAneesh Kumar K.V 	}
46470e499890SDarrick J. Wong 
46480e499890SDarrick J. Wong 	lock_page(page);
46499ea7df53SJan Kara 	size = i_size_read(inode);
46509ea7df53SJan Kara 	/* Page got truncated from under us? */
46519ea7df53SJan Kara 	if (page->mapping != mapping || page_offset(page) > size) {
46529ea7df53SJan Kara 		unlock_page(page);
46539ea7df53SJan Kara 		ret = VM_FAULT_NOPAGE;
46549ea7df53SJan Kara 		goto out;
46550e499890SDarrick J. Wong 	}
46562e9ee850SAneesh Kumar K.V 
46572e9ee850SAneesh Kumar K.V 	if (page->index == size >> PAGE_CACHE_SHIFT)
46582e9ee850SAneesh Kumar K.V 		len = size & ~PAGE_CACHE_MASK;
46592e9ee850SAneesh Kumar K.V 	else
46602e9ee850SAneesh Kumar K.V 		len = PAGE_CACHE_SIZE;
4661a827eaffSAneesh Kumar K.V 	/*
46629ea7df53SJan Kara 	 * Return if we have all the buffers mapped.  This avoids the need
46639ea7df53SJan Kara 	 * to do journal_start/journal_stop, which can block and take a
4664a827eaffSAneesh Kumar K.V 	 * long time.
46652e9ee850SAneesh Kumar K.V 	 */
46662e9ee850SAneesh Kumar K.V 	if (page_has_buffers(page)) {
4667a827eaffSAneesh Kumar K.V 		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
46689ea7df53SJan Kara 				       ext4_bh_unmapped)) {
46699ea7df53SJan Kara 			/* Wait so that we don't change page under IO */
46709ea7df53SJan Kara 			wait_on_page_writeback(page);
46719ea7df53SJan Kara 			ret = VM_FAULT_LOCKED;
46722e9ee850SAneesh Kumar K.V 			goto out;
4673a827eaffSAneesh Kumar K.V 		}
4674a827eaffSAneesh Kumar K.V 	}
46759ea7df53SJan Kara 	unlock_page(page);
46769ea7df53SJan Kara 	/* OK, we need to fill the hole... */
46779ea7df53SJan Kara 	if (ext4_should_dioread_nolock(inode))
46789ea7df53SJan Kara 		get_block = ext4_get_block_write;
46799ea7df53SJan Kara 	else
46809ea7df53SJan Kara 		get_block = ext4_get_block;
46819ea7df53SJan Kara retry_alloc:
46829ea7df53SJan Kara 	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
4683c2ec175cSNick Piggin 	if (IS_ERR(handle)) {
46849ea7df53SJan Kara 		ret = VM_FAULT_SIGBUS;
46859ea7df53SJan Kara 		goto out;
46869ea7df53SJan Kara 	}
46879ea7df53SJan Kara 	ret = __block_page_mkwrite(vma, vmf, get_block);
46889ea7df53SJan Kara 	if (!ret && ext4_should_journal_data(inode)) {
46899ea7df53SJan Kara 		if (walk_page_buffers(handle, page_buffers(page), 0,
46909ea7df53SJan Kara 		    PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
46919ea7df53SJan Kara 			unlock_page(page);
46929ea7df53SJan Kara 			ret = VM_FAULT_SIGBUS;
46939ea7df53SJan Kara 			goto out;
46949ea7df53SJan Kara 		}
46959ea7df53SJan Kara 		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
46969ea7df53SJan Kara 	}
46979ea7df53SJan Kara 	ext4_journal_stop(handle);
46989ea7df53SJan Kara 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
46999ea7df53SJan Kara 		goto retry_alloc;
47009ea7df53SJan Kara out_ret:
47019ea7df53SJan Kara 	ret = block_page_mkwrite_return(ret);
47029ea7df53SJan Kara out:
47032e9ee850SAneesh Kumar K.V 	return ret;
47042e9ee850SAneesh Kumar K.V }