xref: /openbmc/linux/fs/ext4/inode.c (revision d76a3a77113db020d9bb1e894822869410450bd9)
1ac27a0ecSDave Kleikamp /*
2617ba13bSMingming Cao  *  linux/fs/ext4/inode.c
3ac27a0ecSDave Kleikamp  *
4ac27a0ecSDave Kleikamp  * Copyright (C) 1992, 1993, 1994, 1995
5ac27a0ecSDave Kleikamp  * Remy Card (card@masi.ibp.fr)
6ac27a0ecSDave Kleikamp  * Laboratoire MASI - Institut Blaise Pascal
7ac27a0ecSDave Kleikamp  * Universite Pierre et Marie Curie (Paris VI)
8ac27a0ecSDave Kleikamp  *
9ac27a0ecSDave Kleikamp  *  from
10ac27a0ecSDave Kleikamp  *
11ac27a0ecSDave Kleikamp  *  linux/fs/minix/inode.c
12ac27a0ecSDave Kleikamp  *
13ac27a0ecSDave Kleikamp  *  Copyright (C) 1991, 1992  Linus Torvalds
14ac27a0ecSDave Kleikamp  *
15ac27a0ecSDave Kleikamp  *  64-bit file support on 64-bit platforms by Jakub Jelinek
16ac27a0ecSDave Kleikamp  *	(jj@sunsite.ms.mff.cuni.cz)
17ac27a0ecSDave Kleikamp  *
18617ba13bSMingming Cao  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
19ac27a0ecSDave Kleikamp  */
20ac27a0ecSDave Kleikamp 
21ac27a0ecSDave Kleikamp #include <linux/fs.h>
22ac27a0ecSDave Kleikamp #include <linux/time.h>
23dab291afSMingming Cao #include <linux/jbd2.h>
24ac27a0ecSDave Kleikamp #include <linux/highuid.h>
25ac27a0ecSDave Kleikamp #include <linux/pagemap.h>
26ac27a0ecSDave Kleikamp #include <linux/quotaops.h>
27ac27a0ecSDave Kleikamp #include <linux/string.h>
28ac27a0ecSDave Kleikamp #include <linux/buffer_head.h>
29ac27a0ecSDave Kleikamp #include <linux/writeback.h>
3064769240SAlex Tomas #include <linux/pagevec.h>
31ac27a0ecSDave Kleikamp #include <linux/mpage.h>
32e83c1397SDuane Griffin #include <linux/namei.h>
33ac27a0ecSDave Kleikamp #include <linux/uio.h>
34ac27a0ecSDave Kleikamp #include <linux/bio.h>
354c0425ffSMingming Cao #include <linux/workqueue.h>
36744692dcSJiaying Zhang #include <linux/kernel.h>
376db26ffcSAndrew Morton #include <linux/printk.h>
385a0e3ad6STejun Heo #include <linux/slab.h>
39a8901d34STheodore Ts'o #include <linux/ratelimit.h>
409bffad1eSTheodore Ts'o 
413dcf5451SChristoph Hellwig #include "ext4_jbd2.h"
42ac27a0ecSDave Kleikamp #include "xattr.h"
43ac27a0ecSDave Kleikamp #include "acl.h"
449f125d64STheodore Ts'o #include "truncate.h"
45ac27a0ecSDave Kleikamp 
469bffad1eSTheodore Ts'o #include <trace/events/ext4.h>
479bffad1eSTheodore Ts'o 
48a1d6cc56SAneesh Kumar K.V #define MPAGE_DA_EXTENT_TAIL 0x01
49a1d6cc56SAneesh Kumar K.V 
50814525f4SDarrick J. Wong static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
51814525f4SDarrick J. Wong 			      struct ext4_inode_info *ei)
52814525f4SDarrick J. Wong {
53814525f4SDarrick J. Wong 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
54814525f4SDarrick J. Wong 	__u16 csum_lo;
55814525f4SDarrick J. Wong 	__u16 csum_hi = 0;
56814525f4SDarrick J. Wong 	__u32 csum;
57814525f4SDarrick J. Wong 
58814525f4SDarrick J. Wong 	csum_lo = raw->i_checksum_lo;
59814525f4SDarrick J. Wong 	raw->i_checksum_lo = 0;
60814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
61814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
62814525f4SDarrick J. Wong 		csum_hi = raw->i_checksum_hi;
63814525f4SDarrick J. Wong 		raw->i_checksum_hi = 0;
64814525f4SDarrick J. Wong 	}
65814525f4SDarrick J. Wong 
66814525f4SDarrick J. Wong 	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
67814525f4SDarrick J. Wong 			   EXT4_INODE_SIZE(inode->i_sb));
68814525f4SDarrick J. Wong 
69814525f4SDarrick J. Wong 	raw->i_checksum_lo = csum_lo;
70814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
71814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
72814525f4SDarrick J. Wong 		raw->i_checksum_hi = csum_hi;
73814525f4SDarrick J. Wong 
74814525f4SDarrick J. Wong 	return csum;
75814525f4SDarrick J. Wong }
76814525f4SDarrick J. Wong 
77814525f4SDarrick J. Wong static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
78814525f4SDarrick J. Wong 				  struct ext4_inode_info *ei)
79814525f4SDarrick J. Wong {
80814525f4SDarrick J. Wong 	__u32 provided, calculated;
81814525f4SDarrick J. Wong 
82814525f4SDarrick J. Wong 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
83814525f4SDarrick J. Wong 	    cpu_to_le32(EXT4_OS_LINUX) ||
84814525f4SDarrick J. Wong 	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
85814525f4SDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
86814525f4SDarrick J. Wong 		return 1;
87814525f4SDarrick J. Wong 
88814525f4SDarrick J. Wong 	provided = le16_to_cpu(raw->i_checksum_lo);
89814525f4SDarrick J. Wong 	calculated = ext4_inode_csum(inode, raw, ei);
90814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
91814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
92814525f4SDarrick J. Wong 		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
93814525f4SDarrick J. Wong 	else
94814525f4SDarrick J. Wong 		calculated &= 0xFFFF;
95814525f4SDarrick J. Wong 
96814525f4SDarrick J. Wong 	return provided == calculated;
97814525f4SDarrick J. Wong }
98814525f4SDarrick J. Wong 
99814525f4SDarrick J. Wong static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
100814525f4SDarrick J. Wong 				struct ext4_inode_info *ei)
101814525f4SDarrick J. Wong {
102814525f4SDarrick J. Wong 	__u32 csum;
103814525f4SDarrick J. Wong 
104814525f4SDarrick J. Wong 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
105814525f4SDarrick J. Wong 	    cpu_to_le32(EXT4_OS_LINUX) ||
106814525f4SDarrick J. Wong 	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
107814525f4SDarrick J. Wong 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
108814525f4SDarrick J. Wong 		return;
109814525f4SDarrick J. Wong 
110814525f4SDarrick J. Wong 	csum = ext4_inode_csum(inode, raw, ei);
111814525f4SDarrick J. Wong 	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
112814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
113814525f4SDarrick J. Wong 	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
114814525f4SDarrick J. Wong 		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
115814525f4SDarrick J. Wong }
116814525f4SDarrick J. Wong 
117678aaf48SJan Kara static inline int ext4_begin_ordered_truncate(struct inode *inode,
118678aaf48SJan Kara 					      loff_t new_size)
119678aaf48SJan Kara {
1207ff9c073STheodore Ts'o 	trace_ext4_begin_ordered_truncate(inode, new_size);
1218aefcd55STheodore Ts'o 	/*
1228aefcd55STheodore Ts'o 	 * If jinode is zero, then we never opened the file for
1238aefcd55STheodore Ts'o 	 * writing, so there's no need to call
1248aefcd55STheodore Ts'o 	 * jbd2_journal_begin_ordered_truncate() since there's no
1258aefcd55STheodore Ts'o 	 * outstanding writes we need to flush.
1268aefcd55STheodore Ts'o 	 */
1278aefcd55STheodore Ts'o 	if (!EXT4_I(inode)->jinode)
1288aefcd55STheodore Ts'o 		return 0;
1298aefcd55STheodore Ts'o 	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
1308aefcd55STheodore Ts'o 						   EXT4_I(inode)->jinode,
131678aaf48SJan Kara 						   new_size);
132678aaf48SJan Kara }
133678aaf48SJan Kara 
13464769240SAlex Tomas static void ext4_invalidatepage(struct page *page, unsigned long offset);
135cb20d518STheodore Ts'o static int __ext4_journalled_writepage(struct page *page, unsigned int len);
136cb20d518STheodore Ts'o static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
1375f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
1385f163cc7SEric Sandeen 		struct inode *inode, struct page *page, loff_t from,
1395f163cc7SEric Sandeen 		loff_t length, int flags);
14064769240SAlex Tomas 
141ac27a0ecSDave Kleikamp /*
142ac27a0ecSDave Kleikamp  * Test whether an inode is a fast symlink.
143ac27a0ecSDave Kleikamp  */
144617ba13bSMingming Cao static int ext4_inode_is_fast_symlink(struct inode *inode)
145ac27a0ecSDave Kleikamp {
146617ba13bSMingming Cao 	int ea_blocks = EXT4_I(inode)->i_file_acl ?
147ac27a0ecSDave Kleikamp 		(inode->i_sb->s_blocksize >> 9) : 0;
148ac27a0ecSDave Kleikamp 
149ac27a0ecSDave Kleikamp 	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
150ac27a0ecSDave Kleikamp }
151ac27a0ecSDave Kleikamp 
152ac27a0ecSDave Kleikamp /*
153ac27a0ecSDave Kleikamp  * Restart the transaction associated with *handle.  This does a commit,
154ac27a0ecSDave Kleikamp  * so before we call here everything must be consistently dirtied against
155ac27a0ecSDave Kleikamp  * this transaction.
156ac27a0ecSDave Kleikamp  */
157487caeefSJan Kara int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
158487caeefSJan Kara 				 int nblocks)
159ac27a0ecSDave Kleikamp {
160487caeefSJan Kara 	int ret;
161487caeefSJan Kara 
162487caeefSJan Kara 	/*
163e35fd660STheodore Ts'o 	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
164487caeefSJan Kara 	 * moment, get_block can be called only for blocks inside i_size since
165487caeefSJan Kara 	 * page cache has been already dropped and writes are blocked by
166487caeefSJan Kara 	 * i_mutex. So we can safely drop the i_data_sem here.
167487caeefSJan Kara 	 */
1680390131bSFrank Mayhar 	BUG_ON(EXT4_JOURNAL(inode) == NULL);
169ac27a0ecSDave Kleikamp 	jbd_debug(2, "restarting handle %p\n", handle);
170487caeefSJan Kara 	up_write(&EXT4_I(inode)->i_data_sem);
1718e8eaabeSAmir Goldstein 	ret = ext4_journal_restart(handle, nblocks);
172487caeefSJan Kara 	down_write(&EXT4_I(inode)->i_data_sem);
173fa5d1113SAneesh Kumar K.V 	ext4_discard_preallocations(inode);
174487caeefSJan Kara 
175487caeefSJan Kara 	return ret;
176ac27a0ecSDave Kleikamp }
177ac27a0ecSDave Kleikamp 
178ac27a0ecSDave Kleikamp /*
179ac27a0ecSDave Kleikamp  * Called at the last iput() if i_nlink is zero.
180ac27a0ecSDave Kleikamp  */
1810930fcc1SAl Viro void ext4_evict_inode(struct inode *inode)
182ac27a0ecSDave Kleikamp {
183ac27a0ecSDave Kleikamp 	handle_t *handle;
184bc965ab3STheodore Ts'o 	int err;
185ac27a0ecSDave Kleikamp 
1867ff9c073STheodore Ts'o 	trace_ext4_evict_inode(inode);
1872581fdc8SJiaying Zhang 
1880930fcc1SAl Viro 	if (inode->i_nlink) {
1892d859db3SJan Kara 		/*
1902d859db3SJan Kara 		 * When journalling data dirty buffers are tracked only in the
1912d859db3SJan Kara 		 * journal. So although mm thinks everything is clean and
1922d859db3SJan Kara 		 * ready for reaping the inode might still have some pages to
1932d859db3SJan Kara 		 * write in the running transaction or waiting to be
1942d859db3SJan Kara 		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
1952d859db3SJan Kara 		 * (via truncate_inode_pages()) to discard these buffers can
1962d859db3SJan Kara 		 * cause data loss. Also even if we did not discard these
1972d859db3SJan Kara 		 * buffers, we would have no way to find them after the inode
1982d859db3SJan Kara 		 * is reaped and thus user could see stale data if he tries to
1992d859db3SJan Kara 		 * read them before the transaction is checkpointed. So be
2002d859db3SJan Kara 		 * careful and force everything to disk here... We use
2012d859db3SJan Kara 		 * ei->i_datasync_tid to store the newest transaction
2022d859db3SJan Kara 		 * containing inode's data.
2032d859db3SJan Kara 		 *
2042d859db3SJan Kara 		 * Note that directories do not have this problem because they
2052d859db3SJan Kara 		 * don't use page cache.
2062d859db3SJan Kara 		 */
2072d859db3SJan Kara 		if (ext4_should_journal_data(inode) &&
2082b405bfaSTheodore Ts'o 		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
2092b405bfaSTheodore Ts'o 		    inode->i_ino != EXT4_JOURNAL_INO) {
2102d859db3SJan Kara 			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
2112d859db3SJan Kara 			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
2122d859db3SJan Kara 
213*d76a3a77STheodore Ts'o 			jbd2_complete_transaction(journal, commit_tid);
2142d859db3SJan Kara 			filemap_write_and_wait(&inode->i_data);
2152d859db3SJan Kara 		}
2160930fcc1SAl Viro 		truncate_inode_pages(&inode->i_data, 0);
2171ada47d9STheodore Ts'o 		ext4_ioend_shutdown(inode);
2180930fcc1SAl Viro 		goto no_delete;
2190930fcc1SAl Viro 	}
2200930fcc1SAl Viro 
221907f4554SChristoph Hellwig 	if (!is_bad_inode(inode))
222871a2931SChristoph Hellwig 		dquot_initialize(inode);
223907f4554SChristoph Hellwig 
224678aaf48SJan Kara 	if (ext4_should_order_data(inode))
225678aaf48SJan Kara 		ext4_begin_ordered_truncate(inode, 0);
226ac27a0ecSDave Kleikamp 	truncate_inode_pages(&inode->i_data, 0);
2271ada47d9STheodore Ts'o 	ext4_ioend_shutdown(inode);
228ac27a0ecSDave Kleikamp 
229ac27a0ecSDave Kleikamp 	if (is_bad_inode(inode))
230ac27a0ecSDave Kleikamp 		goto no_delete;
231ac27a0ecSDave Kleikamp 
2328e8ad8a5SJan Kara 	/*
2338e8ad8a5SJan Kara 	 * Protect us against freezing - iput() caller didn't have to have any
2348e8ad8a5SJan Kara 	 * protection against it
2358e8ad8a5SJan Kara 	 */
2368e8ad8a5SJan Kara 	sb_start_intwrite(inode->i_sb);
2379924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
2389924a92aSTheodore Ts'o 				    ext4_blocks_for_truncate(inode)+3);
239ac27a0ecSDave Kleikamp 	if (IS_ERR(handle)) {
240bc965ab3STheodore Ts'o 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
241ac27a0ecSDave Kleikamp 		/*
242ac27a0ecSDave Kleikamp 		 * If we're going to skip the normal cleanup, we still need to
243ac27a0ecSDave Kleikamp 		 * make sure that the in-core orphan linked list is properly
244ac27a0ecSDave Kleikamp 		 * cleaned up.
245ac27a0ecSDave Kleikamp 		 */
246617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
2478e8ad8a5SJan Kara 		sb_end_intwrite(inode->i_sb);
248ac27a0ecSDave Kleikamp 		goto no_delete;
249ac27a0ecSDave Kleikamp 	}
250ac27a0ecSDave Kleikamp 
251ac27a0ecSDave Kleikamp 	if (IS_SYNC(inode))
2520390131bSFrank Mayhar 		ext4_handle_sync(handle);
253ac27a0ecSDave Kleikamp 	inode->i_size = 0;
254bc965ab3STheodore Ts'o 	err = ext4_mark_inode_dirty(handle, inode);
255bc965ab3STheodore Ts'o 	if (err) {
25612062dddSEric Sandeen 		ext4_warning(inode->i_sb,
257bc965ab3STheodore Ts'o 			     "couldn't mark inode dirty (err %d)", err);
258bc965ab3STheodore Ts'o 		goto stop_handle;
259bc965ab3STheodore Ts'o 	}
260ac27a0ecSDave Kleikamp 	if (inode->i_blocks)
261617ba13bSMingming Cao 		ext4_truncate(inode);
262bc965ab3STheodore Ts'o 
263bc965ab3STheodore Ts'o 	/*
264bc965ab3STheodore Ts'o 	 * ext4_ext_truncate() doesn't reserve any slop when it
265bc965ab3STheodore Ts'o 	 * restarts journal transactions; therefore there may not be
266bc965ab3STheodore Ts'o 	 * enough credits left in the handle to remove the inode from
267bc965ab3STheodore Ts'o 	 * the orphan list and set the dtime field.
268bc965ab3STheodore Ts'o 	 */
2690390131bSFrank Mayhar 	if (!ext4_handle_has_enough_credits(handle, 3)) {
270bc965ab3STheodore Ts'o 		err = ext4_journal_extend(handle, 3);
271bc965ab3STheodore Ts'o 		if (err > 0)
272bc965ab3STheodore Ts'o 			err = ext4_journal_restart(handle, 3);
273bc965ab3STheodore Ts'o 		if (err != 0) {
27412062dddSEric Sandeen 			ext4_warning(inode->i_sb,
275bc965ab3STheodore Ts'o 				     "couldn't extend journal (err %d)", err);
276bc965ab3STheodore Ts'o 		stop_handle:
277bc965ab3STheodore Ts'o 			ext4_journal_stop(handle);
27845388219STheodore Ts'o 			ext4_orphan_del(NULL, inode);
2798e8ad8a5SJan Kara 			sb_end_intwrite(inode->i_sb);
280bc965ab3STheodore Ts'o 			goto no_delete;
281bc965ab3STheodore Ts'o 		}
282bc965ab3STheodore Ts'o 	}
283bc965ab3STheodore Ts'o 
284ac27a0ecSDave Kleikamp 	/*
285617ba13bSMingming Cao 	 * Kill off the orphan record which ext4_truncate created.
286ac27a0ecSDave Kleikamp 	 * AKPM: I think this can be inside the above `if'.
287617ba13bSMingming Cao 	 * Note that ext4_orphan_del() has to be able to cope with the
288ac27a0ecSDave Kleikamp 	 * deletion of a non-existent orphan - this is because we don't
289617ba13bSMingming Cao 	 * know if ext4_truncate() actually created an orphan record.
290ac27a0ecSDave Kleikamp 	 * (Well, we could do this if we need to, but heck - it works)
291ac27a0ecSDave Kleikamp 	 */
292617ba13bSMingming Cao 	ext4_orphan_del(handle, inode);
293617ba13bSMingming Cao 	EXT4_I(inode)->i_dtime	= get_seconds();
294ac27a0ecSDave Kleikamp 
295ac27a0ecSDave Kleikamp 	/*
296ac27a0ecSDave Kleikamp 	 * One subtle ordering requirement: if anything has gone wrong
297ac27a0ecSDave Kleikamp 	 * (transaction abort, IO errors, whatever), then we can still
298ac27a0ecSDave Kleikamp 	 * do these next steps (the fs will already have been marked as
299ac27a0ecSDave Kleikamp 	 * having errors), but we can't free the inode if the mark_dirty
300ac27a0ecSDave Kleikamp 	 * fails.
301ac27a0ecSDave Kleikamp 	 */
302617ba13bSMingming Cao 	if (ext4_mark_inode_dirty(handle, inode))
303ac27a0ecSDave Kleikamp 		/* If that failed, just do the required in-core inode clear. */
3040930fcc1SAl Viro 		ext4_clear_inode(inode);
305ac27a0ecSDave Kleikamp 	else
306617ba13bSMingming Cao 		ext4_free_inode(handle, inode);
307617ba13bSMingming Cao 	ext4_journal_stop(handle);
3088e8ad8a5SJan Kara 	sb_end_intwrite(inode->i_sb);
309ac27a0ecSDave Kleikamp 	return;
310ac27a0ecSDave Kleikamp no_delete:
3110930fcc1SAl Viro 	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
312ac27a0ecSDave Kleikamp }
313ac27a0ecSDave Kleikamp 
314a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA
315a9e7f447SDmitry Monakhov qsize_t *ext4_get_reserved_space(struct inode *inode)
31660e58e0fSMingming Cao {
317a9e7f447SDmitry Monakhov 	return &EXT4_I(inode)->i_reserved_quota;
31860e58e0fSMingming Cao }
319a9e7f447SDmitry Monakhov #endif
3209d0be502STheodore Ts'o 
32112219aeaSAneesh Kumar K.V /*
32212219aeaSAneesh Kumar K.V  * Calculate the number of metadata blocks need to reserve
3239d0be502STheodore Ts'o  * to allocate a block located at @lblock
32412219aeaSAneesh Kumar K.V  */
32501f49d0bSTheodore Ts'o static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
32612219aeaSAneesh Kumar K.V {
32712e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3289d0be502STheodore Ts'o 		return ext4_ext_calc_metadata_amount(inode, lblock);
32912219aeaSAneesh Kumar K.V 
3308bb2b247SAmir Goldstein 	return ext4_ind_calc_metadata_amount(inode, lblock);
33112219aeaSAneesh Kumar K.V }
33212219aeaSAneesh Kumar K.V 
3330637c6f4STheodore Ts'o /*
3340637c6f4STheodore Ts'o  * Called with i_data_sem down, which is important since we can call
3350637c6f4STheodore Ts'o  * ext4_discard_preallocations() from here.
3360637c6f4STheodore Ts'o  */
3375f634d06SAneesh Kumar K.V void ext4_da_update_reserve_space(struct inode *inode,
3385f634d06SAneesh Kumar K.V 					int used, int quota_claim)
33912219aeaSAneesh Kumar K.V {
34012219aeaSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3410637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
34212219aeaSAneesh Kumar K.V 
3430637c6f4STheodore Ts'o 	spin_lock(&ei->i_block_reservation_lock);
344d8990240SAditya Kali 	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
3450637c6f4STheodore Ts'o 	if (unlikely(used > ei->i_reserved_data_blocks)) {
3468de5c325STheodore Ts'o 		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
3471084f252STheodore Ts'o 			 "with only %d reserved data blocks",
3480637c6f4STheodore Ts'o 			 __func__, inode->i_ino, used,
3490637c6f4STheodore Ts'o 			 ei->i_reserved_data_blocks);
3500637c6f4STheodore Ts'o 		WARN_ON(1);
3510637c6f4STheodore Ts'o 		used = ei->i_reserved_data_blocks;
3526bc6e63fSAneesh Kumar K.V 	}
35312219aeaSAneesh Kumar K.V 
35497795d2aSBrian Foster 	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
35501a523ebSTheodore Ts'o 		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
35601a523ebSTheodore Ts'o 			"with only %d reserved metadata blocks "
35701a523ebSTheodore Ts'o 			"(releasing %d blocks with reserved %d data blocks)",
35897795d2aSBrian Foster 			inode->i_ino, ei->i_allocated_meta_blocks,
35901a523ebSTheodore Ts'o 			     ei->i_reserved_meta_blocks, used,
36001a523ebSTheodore Ts'o 			     ei->i_reserved_data_blocks);
36197795d2aSBrian Foster 		WARN_ON(1);
36297795d2aSBrian Foster 		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
36397795d2aSBrian Foster 	}
36497795d2aSBrian Foster 
3650637c6f4STheodore Ts'o 	/* Update per-inode reservations */
3660637c6f4STheodore Ts'o 	ei->i_reserved_data_blocks -= used;
3670637c6f4STheodore Ts'o 	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
36857042651STheodore Ts'o 	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
36972b8ab9dSEric Sandeen 			   used + ei->i_allocated_meta_blocks);
3700637c6f4STheodore Ts'o 	ei->i_allocated_meta_blocks = 0;
3710637c6f4STheodore Ts'o 
3720637c6f4STheodore Ts'o 	if (ei->i_reserved_data_blocks == 0) {
3730637c6f4STheodore Ts'o 		/*
3740637c6f4STheodore Ts'o 		 * We can release all of the reserved metadata blocks
3750637c6f4STheodore Ts'o 		 * only when we have written all of the delayed
3760637c6f4STheodore Ts'o 		 * allocation blocks.
3770637c6f4STheodore Ts'o 		 */
37857042651STheodore Ts'o 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
37972b8ab9dSEric Sandeen 				   ei->i_reserved_meta_blocks);
380ee5f4d9cSTheodore Ts'o 		ei->i_reserved_meta_blocks = 0;
3819d0be502STheodore Ts'o 		ei->i_da_metadata_calc_len = 0;
3820637c6f4STheodore Ts'o 	}
38312219aeaSAneesh Kumar K.V 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
38460e58e0fSMingming Cao 
38572b8ab9dSEric Sandeen 	/* Update quota subsystem for data blocks */
38672b8ab9dSEric Sandeen 	if (quota_claim)
3877b415bf6SAditya Kali 		dquot_claim_block(inode, EXT4_C2B(sbi, used));
38872b8ab9dSEric Sandeen 	else {
3895f634d06SAneesh Kumar K.V 		/*
3905f634d06SAneesh Kumar K.V 		 * We did fallocate with an offset that is already delayed
3915f634d06SAneesh Kumar K.V 		 * allocated. So on delayed allocated writeback we should
39272b8ab9dSEric Sandeen 		 * not re-claim the quota for fallocated blocks.
3935f634d06SAneesh Kumar K.V 		 */
3947b415bf6SAditya Kali 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
3955f634d06SAneesh Kumar K.V 	}
396d6014301SAneesh Kumar K.V 
397d6014301SAneesh Kumar K.V 	/*
398d6014301SAneesh Kumar K.V 	 * If we have done all the pending block allocations and if
399d6014301SAneesh Kumar K.V 	 * there aren't any writers on the inode, we can discard the
400d6014301SAneesh Kumar K.V 	 * inode's preallocations.
401d6014301SAneesh Kumar K.V 	 */
4020637c6f4STheodore Ts'o 	if ((ei->i_reserved_data_blocks == 0) &&
4030637c6f4STheodore Ts'o 	    (atomic_read(&inode->i_writecount) == 0))
404d6014301SAneesh Kumar K.V 		ext4_discard_preallocations(inode);
40512219aeaSAneesh Kumar K.V }
40612219aeaSAneesh Kumar K.V 
407e29136f8STheodore Ts'o static int __check_block_validity(struct inode *inode, const char *func,
408c398eda0STheodore Ts'o 				unsigned int line,
40924676da4STheodore Ts'o 				struct ext4_map_blocks *map)
4106fd058f7STheodore Ts'o {
41124676da4STheodore Ts'o 	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
41224676da4STheodore Ts'o 				   map->m_len)) {
413c398eda0STheodore Ts'o 		ext4_error_inode(inode, func, line, map->m_pblk,
414c398eda0STheodore Ts'o 				 "lblock %lu mapped to illegal pblock "
41524676da4STheodore Ts'o 				 "(length %d)", (unsigned long) map->m_lblk,
416c398eda0STheodore Ts'o 				 map->m_len);
4176fd058f7STheodore Ts'o 		return -EIO;
4186fd058f7STheodore Ts'o 	}
4196fd058f7STheodore Ts'o 	return 0;
4206fd058f7STheodore Ts'o }
4216fd058f7STheodore Ts'o 
422e29136f8STheodore Ts'o #define check_block_validity(inode, map)	\
423c398eda0STheodore Ts'o 	__check_block_validity((inode), __func__, __LINE__, (map))
424e29136f8STheodore Ts'o 
425f5ab0d1fSMingming Cao /*
4261f94533dSTheodore Ts'o  * Return the number of contiguous dirty pages in a given inode
4271f94533dSTheodore Ts'o  * starting at page frame idx.
42855138e0bSTheodore Ts'o  */
42955138e0bSTheodore Ts'o static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
43055138e0bSTheodore Ts'o 				    unsigned int max_pages)
43155138e0bSTheodore Ts'o {
43255138e0bSTheodore Ts'o 	struct address_space *mapping = inode->i_mapping;
43355138e0bSTheodore Ts'o 	pgoff_t	index;
43455138e0bSTheodore Ts'o 	struct pagevec pvec;
43555138e0bSTheodore Ts'o 	pgoff_t num = 0;
43655138e0bSTheodore Ts'o 	int i, nr_pages, done = 0;
43755138e0bSTheodore Ts'o 
43855138e0bSTheodore Ts'o 	if (max_pages == 0)
43955138e0bSTheodore Ts'o 		return 0;
44055138e0bSTheodore Ts'o 	pagevec_init(&pvec, 0);
44155138e0bSTheodore Ts'o 	while (!done) {
44255138e0bSTheodore Ts'o 		index = idx;
44355138e0bSTheodore Ts'o 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
44455138e0bSTheodore Ts'o 					      PAGECACHE_TAG_DIRTY,
44555138e0bSTheodore Ts'o 					      (pgoff_t)PAGEVEC_SIZE);
44655138e0bSTheodore Ts'o 		if (nr_pages == 0)
44755138e0bSTheodore Ts'o 			break;
44855138e0bSTheodore Ts'o 		for (i = 0; i < nr_pages; i++) {
44955138e0bSTheodore Ts'o 			struct page *page = pvec.pages[i];
45055138e0bSTheodore Ts'o 			struct buffer_head *bh, *head;
45155138e0bSTheodore Ts'o 
45255138e0bSTheodore Ts'o 			lock_page(page);
45355138e0bSTheodore Ts'o 			if (unlikely(page->mapping != mapping) ||
45455138e0bSTheodore Ts'o 			    !PageDirty(page) ||
45555138e0bSTheodore Ts'o 			    PageWriteback(page) ||
45655138e0bSTheodore Ts'o 			    page->index != idx) {
45755138e0bSTheodore Ts'o 				done = 1;
45855138e0bSTheodore Ts'o 				unlock_page(page);
45955138e0bSTheodore Ts'o 				break;
46055138e0bSTheodore Ts'o 			}
4611f94533dSTheodore Ts'o 			if (page_has_buffers(page)) {
4621f94533dSTheodore Ts'o 				bh = head = page_buffers(page);
46355138e0bSTheodore Ts'o 				do {
46455138e0bSTheodore Ts'o 					if (!buffer_delay(bh) &&
4651f94533dSTheodore Ts'o 					    !buffer_unwritten(bh))
46655138e0bSTheodore Ts'o 						done = 1;
4671f94533dSTheodore Ts'o 					bh = bh->b_this_page;
4681f94533dSTheodore Ts'o 				} while (!done && (bh != head));
46955138e0bSTheodore Ts'o 			}
47055138e0bSTheodore Ts'o 			unlock_page(page);
47155138e0bSTheodore Ts'o 			if (done)
47255138e0bSTheodore Ts'o 				break;
47355138e0bSTheodore Ts'o 			idx++;
47455138e0bSTheodore Ts'o 			num++;
475659c6009SEric Sandeen 			if (num >= max_pages) {
476659c6009SEric Sandeen 				done = 1;
47755138e0bSTheodore Ts'o 				break;
47855138e0bSTheodore Ts'o 			}
479659c6009SEric Sandeen 		}
48055138e0bSTheodore Ts'o 		pagevec_release(&pvec);
48155138e0bSTheodore Ts'o 	}
48255138e0bSTheodore Ts'o 	return num;
48355138e0bSTheodore Ts'o }
48455138e0bSTheodore Ts'o 
485921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST
486921f266bSDmitry Monakhov static void ext4_map_blocks_es_recheck(handle_t *handle,
487921f266bSDmitry Monakhov 				       struct inode *inode,
488921f266bSDmitry Monakhov 				       struct ext4_map_blocks *es_map,
489921f266bSDmitry Monakhov 				       struct ext4_map_blocks *map,
490921f266bSDmitry Monakhov 				       int flags)
491921f266bSDmitry Monakhov {
492921f266bSDmitry Monakhov 	int retval;
493921f266bSDmitry Monakhov 
494921f266bSDmitry Monakhov 	map->m_flags = 0;
495921f266bSDmitry Monakhov 	/*
496921f266bSDmitry Monakhov 	 * There is a race window that the result is not the same.
497921f266bSDmitry Monakhov 	 * e.g. xfstests #223 when dioread_nolock enables.  The reason
498921f266bSDmitry Monakhov 	 * is that we lookup a block mapping in extent status tree with
499921f266bSDmitry Monakhov 	 * out taking i_data_sem.  So at the time the unwritten extent
500921f266bSDmitry Monakhov 	 * could be converted.
501921f266bSDmitry Monakhov 	 */
502921f266bSDmitry Monakhov 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
503921f266bSDmitry Monakhov 		down_read((&EXT4_I(inode)->i_data_sem));
504921f266bSDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
505921f266bSDmitry Monakhov 		retval = ext4_ext_map_blocks(handle, inode, map, flags &
506921f266bSDmitry Monakhov 					     EXT4_GET_BLOCKS_KEEP_SIZE);
507921f266bSDmitry Monakhov 	} else {
508921f266bSDmitry Monakhov 		retval = ext4_ind_map_blocks(handle, inode, map, flags &
509921f266bSDmitry Monakhov 					     EXT4_GET_BLOCKS_KEEP_SIZE);
510921f266bSDmitry Monakhov 	}
511921f266bSDmitry Monakhov 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
512921f266bSDmitry Monakhov 		up_read((&EXT4_I(inode)->i_data_sem));
513921f266bSDmitry Monakhov 	/*
514921f266bSDmitry Monakhov 	 * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag
515921f266bSDmitry Monakhov 	 * because it shouldn't be marked in es_map->m_flags.
516921f266bSDmitry Monakhov 	 */
517921f266bSDmitry Monakhov 	map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
518921f266bSDmitry Monakhov 
519921f266bSDmitry Monakhov 	/*
520921f266bSDmitry Monakhov 	 * We don't check m_len because extent will be collpased in status
521921f266bSDmitry Monakhov 	 * tree.  So the m_len might not equal.
522921f266bSDmitry Monakhov 	 */
523921f266bSDmitry Monakhov 	if (es_map->m_lblk != map->m_lblk ||
524921f266bSDmitry Monakhov 	    es_map->m_flags != map->m_flags ||
525921f266bSDmitry Monakhov 	    es_map->m_pblk != map->m_pblk) {
526921f266bSDmitry Monakhov 		printk("ES cache assertation failed for inode: %lu "
527921f266bSDmitry Monakhov 		       "es_cached ex [%d/%d/%llu/%x] != "
528921f266bSDmitry Monakhov 		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
529921f266bSDmitry Monakhov 		       inode->i_ino, es_map->m_lblk, es_map->m_len,
530921f266bSDmitry Monakhov 		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
531921f266bSDmitry Monakhov 		       map->m_len, map->m_pblk, map->m_flags,
532921f266bSDmitry Monakhov 		       retval, flags);
533921f266bSDmitry Monakhov 	}
534921f266bSDmitry Monakhov }
535921f266bSDmitry Monakhov #endif /* ES_AGGRESSIVE_TEST */
536921f266bSDmitry Monakhov 
53755138e0bSTheodore Ts'o /*
538e35fd660STheodore Ts'o  * The ext4_map_blocks() function tries to look up the requested blocks,
5392b2d6d01STheodore Ts'o  * and returns if the blocks are already mapped.
540f5ab0d1fSMingming Cao  *
541f5ab0d1fSMingming Cao  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
542f5ab0d1fSMingming Cao  * and store the allocated blocks in the result buffer head and mark it
543f5ab0d1fSMingming Cao  * mapped.
544f5ab0d1fSMingming Cao  *
545e35fd660STheodore Ts'o  * If file type is extents based, it will call ext4_ext_map_blocks(),
546e35fd660STheodore Ts'o  * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
547f5ab0d1fSMingming Cao  * based files
548f5ab0d1fSMingming Cao  *
549f5ab0d1fSMingming Cao  * On success, it returns the number of blocks being mapped or allocate.
550f5ab0d1fSMingming Cao  * if create==0 and the blocks are pre-allocated and uninitialized block,
551f5ab0d1fSMingming Cao  * the result buffer head is unmapped. If the create ==1, it will make sure
552f5ab0d1fSMingming Cao  * the buffer head is mapped.
553f5ab0d1fSMingming Cao  *
554f5ab0d1fSMingming Cao  * It returns 0 if plain look up failed (blocks have not been allocated), in
555df3ab170STao Ma  * that case, buffer head is unmapped
556f5ab0d1fSMingming Cao  *
557f5ab0d1fSMingming Cao  * It returns the error in case of allocation failure.
558f5ab0d1fSMingming Cao  */
559e35fd660STheodore Ts'o int ext4_map_blocks(handle_t *handle, struct inode *inode,
560e35fd660STheodore Ts'o 		    struct ext4_map_blocks *map, int flags)
5610e855ac8SAneesh Kumar K.V {
562d100eef2SZheng Liu 	struct extent_status es;
5630e855ac8SAneesh Kumar K.V 	int retval;
564921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST
565921f266bSDmitry Monakhov 	struct ext4_map_blocks orig_map;
566921f266bSDmitry Monakhov 
567921f266bSDmitry Monakhov 	memcpy(&orig_map, map, sizeof(*map));
568921f266bSDmitry Monakhov #endif
569f5ab0d1fSMingming Cao 
570e35fd660STheodore Ts'o 	map->m_flags = 0;
571e35fd660STheodore Ts'o 	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
572e35fd660STheodore Ts'o 		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
573e35fd660STheodore Ts'o 		  (unsigned long) map->m_lblk);
574d100eef2SZheng Liu 
575d100eef2SZheng Liu 	/* Lookup extent status tree firstly */
576d100eef2SZheng Liu 	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
577d100eef2SZheng Liu 		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
578d100eef2SZheng Liu 			map->m_pblk = ext4_es_pblock(&es) +
579d100eef2SZheng Liu 					map->m_lblk - es.es_lblk;
580d100eef2SZheng Liu 			map->m_flags |= ext4_es_is_written(&es) ?
581d100eef2SZheng Liu 					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
582d100eef2SZheng Liu 			retval = es.es_len - (map->m_lblk - es.es_lblk);
583d100eef2SZheng Liu 			if (retval > map->m_len)
584d100eef2SZheng Liu 				retval = map->m_len;
585d100eef2SZheng Liu 			map->m_len = retval;
586d100eef2SZheng Liu 		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
587d100eef2SZheng Liu 			retval = 0;
588d100eef2SZheng Liu 		} else {
589d100eef2SZheng Liu 			BUG_ON(1);
590d100eef2SZheng Liu 		}
591921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST
592921f266bSDmitry Monakhov 		ext4_map_blocks_es_recheck(handle, inode, map,
593921f266bSDmitry Monakhov 					   &orig_map, flags);
594921f266bSDmitry Monakhov #endif
595d100eef2SZheng Liu 		goto found;
596d100eef2SZheng Liu 	}
597d100eef2SZheng Liu 
5984df3d265SAneesh Kumar K.V 	/*
599b920c755STheodore Ts'o 	 * Try to see if we can get the block without requesting a new
600b920c755STheodore Ts'o 	 * file system block.
6014df3d265SAneesh Kumar K.V 	 */
602729f52c6SZheng Liu 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
6030e855ac8SAneesh Kumar K.V 		down_read((&EXT4_I(inode)->i_data_sem));
60412e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
605a4e5d88bSDmitry Monakhov 		retval = ext4_ext_map_blocks(handle, inode, map, flags &
606a4e5d88bSDmitry Monakhov 					     EXT4_GET_BLOCKS_KEEP_SIZE);
6074df3d265SAneesh Kumar K.V 	} else {
608a4e5d88bSDmitry Monakhov 		retval = ext4_ind_map_blocks(handle, inode, map, flags &
609a4e5d88bSDmitry Monakhov 					     EXT4_GET_BLOCKS_KEEP_SIZE);
6100e855ac8SAneesh Kumar K.V 	}
611f7fec032SZheng Liu 	if (retval > 0) {
612f7fec032SZheng Liu 		int ret;
613f7fec032SZheng Liu 		unsigned long long status;
614f7fec032SZheng Liu 
615921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST
616921f266bSDmitry Monakhov 		if (retval != map->m_len) {
617921f266bSDmitry Monakhov 			printk("ES len assertation failed for inode: %lu "
618921f266bSDmitry Monakhov 			       "retval %d != map->m_len %d "
619921f266bSDmitry Monakhov 			       "in %s (lookup)\n", inode->i_ino, retval,
620921f266bSDmitry Monakhov 			       map->m_len, __func__);
621921f266bSDmitry Monakhov 		}
622921f266bSDmitry Monakhov #endif
623921f266bSDmitry Monakhov 
624f7fec032SZheng Liu 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
625f7fec032SZheng Liu 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
626f7fec032SZheng Liu 		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
627f7fec032SZheng Liu 		    ext4_find_delalloc_range(inode, map->m_lblk,
628f7fec032SZheng Liu 					     map->m_lblk + map->m_len - 1))
629f7fec032SZheng Liu 			status |= EXTENT_STATUS_DELAYED;
630f7fec032SZheng Liu 		ret = ext4_es_insert_extent(inode, map->m_lblk,
631f7fec032SZheng Liu 					    map->m_len, map->m_pblk, status);
632f7fec032SZheng Liu 		if (ret < 0)
633f7fec032SZheng Liu 			retval = ret;
634f7fec032SZheng Liu 	}
635729f52c6SZheng Liu 	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
6364df3d265SAneesh Kumar K.V 		up_read((&EXT4_I(inode)->i_data_sem));
637f5ab0d1fSMingming Cao 
638d100eef2SZheng Liu found:
639e35fd660STheodore Ts'o 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
640f7fec032SZheng Liu 		int ret = check_block_validity(inode, map);
6416fd058f7STheodore Ts'o 		if (ret != 0)
6426fd058f7STheodore Ts'o 			return ret;
6436fd058f7STheodore Ts'o 	}
6446fd058f7STheodore Ts'o 
645f5ab0d1fSMingming Cao 	/* If it is only a block(s) look up */
646c2177057STheodore Ts'o 	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
6474df3d265SAneesh Kumar K.V 		return retval;
6484df3d265SAneesh Kumar K.V 
6494df3d265SAneesh Kumar K.V 	/*
650f5ab0d1fSMingming Cao 	 * Returns if the blocks have already allocated
651f5ab0d1fSMingming Cao 	 *
652f5ab0d1fSMingming Cao 	 * Note that if blocks have been preallocated
653df3ab170STao Ma 	 * ext4_ext_get_block() returns the create = 0
654f5ab0d1fSMingming Cao 	 * with buffer head unmapped.
655f5ab0d1fSMingming Cao 	 */
656e35fd660STheodore Ts'o 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
657f5ab0d1fSMingming Cao 		return retval;
658f5ab0d1fSMingming Cao 
659f5ab0d1fSMingming Cao 	/*
660a25a4e1aSZheng Liu 	 * Here we clear m_flags because after allocating an new extent,
661a25a4e1aSZheng Liu 	 * it will be set again.
6622a8964d6SAneesh Kumar K.V 	 */
663a25a4e1aSZheng Liu 	map->m_flags &= ~EXT4_MAP_FLAGS;
6642a8964d6SAneesh Kumar K.V 
6652a8964d6SAneesh Kumar K.V 	/*
666f5ab0d1fSMingming Cao 	 * New blocks allocate and/or writing to uninitialized extent
667f5ab0d1fSMingming Cao 	 * will possibly result in updating i_data, so we take
668f5ab0d1fSMingming Cao 	 * the write lock of i_data_sem, and call get_blocks()
669f5ab0d1fSMingming Cao 	 * with create == 1 flag.
6704df3d265SAneesh Kumar K.V 	 */
6714df3d265SAneesh Kumar K.V 	down_write((&EXT4_I(inode)->i_data_sem));
672d2a17637SMingming Cao 
673d2a17637SMingming Cao 	/*
674d2a17637SMingming Cao 	 * if the caller is from delayed allocation writeout path
675d2a17637SMingming Cao 	 * we have already reserved fs blocks for allocation
676d2a17637SMingming Cao 	 * let the underlying get_block() function know to
677d2a17637SMingming Cao 	 * avoid double accounting
678d2a17637SMingming Cao 	 */
679c2177057STheodore Ts'o 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
680f2321097STheodore Ts'o 		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
6814df3d265SAneesh Kumar K.V 	/*
6824df3d265SAneesh Kumar K.V 	 * We need to check for EXT4 here because migrate
6834df3d265SAneesh Kumar K.V 	 * could have changed the inode type in between
6844df3d265SAneesh Kumar K.V 	 */
68512e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
686e35fd660STheodore Ts'o 		retval = ext4_ext_map_blocks(handle, inode, map, flags);
6870e855ac8SAneesh Kumar K.V 	} else {
688e35fd660STheodore Ts'o 		retval = ext4_ind_map_blocks(handle, inode, map, flags);
689267e4db9SAneesh Kumar K.V 
690e35fd660STheodore Ts'o 		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
691267e4db9SAneesh Kumar K.V 			/*
692267e4db9SAneesh Kumar K.V 			 * We allocated new blocks which will result in
693267e4db9SAneesh Kumar K.V 			 * i_data's format changing.  Force the migrate
694267e4db9SAneesh Kumar K.V 			 * to fail by clearing migrate flags
695267e4db9SAneesh Kumar K.V 			 */
69619f5fb7aSTheodore Ts'o 			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
697267e4db9SAneesh Kumar K.V 		}
6982ac3b6e0STheodore Ts'o 
699d2a17637SMingming Cao 		/*
7002ac3b6e0STheodore Ts'o 		 * Update reserved blocks/metadata blocks after successful
7015f634d06SAneesh Kumar K.V 		 * block allocation which had been deferred till now. We don't
7025f634d06SAneesh Kumar K.V 		 * support fallocate for non extent files. So we can update
7035f634d06SAneesh Kumar K.V 		 * reserve space here.
704d2a17637SMingming Cao 		 */
7055f634d06SAneesh Kumar K.V 		if ((retval > 0) &&
7061296cc85SAneesh Kumar K.V 			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
7075f634d06SAneesh Kumar K.V 			ext4_da_update_reserve_space(inode, retval, 1);
7085f634d06SAneesh Kumar K.V 	}
709f7fec032SZheng Liu 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
710f2321097STheodore Ts'o 		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
711d2a17637SMingming Cao 
712f7fec032SZheng Liu 	if (retval > 0) {
71351865fdaSZheng Liu 		int ret;
714f7fec032SZheng Liu 		unsigned long long status;
715f7fec032SZheng Liu 
716921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST
717921f266bSDmitry Monakhov 		if (retval != map->m_len) {
718921f266bSDmitry Monakhov 			printk("ES len assertation failed for inode: %lu "
719921f266bSDmitry Monakhov 			       "retval %d != map->m_len %d "
720921f266bSDmitry Monakhov 			       "in %s (allocation)\n", inode->i_ino, retval,
721921f266bSDmitry Monakhov 			       map->m_len, __func__);
722921f266bSDmitry Monakhov 		}
723921f266bSDmitry Monakhov #endif
724921f266bSDmitry Monakhov 
725adb23551SZheng Liu 		/*
726adb23551SZheng Liu 		 * If the extent has been zeroed out, we don't need to update
727adb23551SZheng Liu 		 * extent status tree.
728adb23551SZheng Liu 		 */
729adb23551SZheng Liu 		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
730adb23551SZheng Liu 		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
731adb23551SZheng Liu 			if (ext4_es_is_written(&es))
732adb23551SZheng Liu 				goto has_zeroout;
733adb23551SZheng Liu 		}
734f7fec032SZheng Liu 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
735f7fec032SZheng Liu 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
736f7fec032SZheng Liu 		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
737f7fec032SZheng Liu 		    ext4_find_delalloc_range(inode, map->m_lblk,
738f7fec032SZheng Liu 					     map->m_lblk + map->m_len - 1))
739f7fec032SZheng Liu 			status |= EXTENT_STATUS_DELAYED;
740f7fec032SZheng Liu 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
741f7fec032SZheng Liu 					    map->m_pblk, status);
74251865fdaSZheng Liu 		if (ret < 0)
74351865fdaSZheng Liu 			retval = ret;
74451865fdaSZheng Liu 	}
7455356f261SAditya Kali 
746adb23551SZheng Liu has_zeroout:
7470e855ac8SAneesh Kumar K.V 	up_write((&EXT4_I(inode)->i_data_sem));
748e35fd660STheodore Ts'o 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
749e29136f8STheodore Ts'o 		int ret = check_block_validity(inode, map);
7506fd058f7STheodore Ts'o 		if (ret != 0)
7516fd058f7STheodore Ts'o 			return ret;
7526fd058f7STheodore Ts'o 	}
7530e855ac8SAneesh Kumar K.V 	return retval;
7540e855ac8SAneesh Kumar K.V }
7550e855ac8SAneesh Kumar K.V 
756f3bd1f3fSMingming Cao /* Maximum number of blocks we map for direct IO at once. */
757f3bd1f3fSMingming Cao #define DIO_MAX_BLOCKS 4096
758f3bd1f3fSMingming Cao 
7592ed88685STheodore Ts'o static int _ext4_get_block(struct inode *inode, sector_t iblock,
7602ed88685STheodore Ts'o 			   struct buffer_head *bh, int flags)
761ac27a0ecSDave Kleikamp {
7623e4fdaf8SDmitriy Monakhov 	handle_t *handle = ext4_journal_current_handle();
7632ed88685STheodore Ts'o 	struct ext4_map_blocks map;
7647fb5409dSJan Kara 	int ret = 0, started = 0;
765f3bd1f3fSMingming Cao 	int dio_credits;
766ac27a0ecSDave Kleikamp 
76746c7f254STao Ma 	if (ext4_has_inline_data(inode))
76846c7f254STao Ma 		return -ERANGE;
76946c7f254STao Ma 
7702ed88685STheodore Ts'o 	map.m_lblk = iblock;
7712ed88685STheodore Ts'o 	map.m_len = bh->b_size >> inode->i_blkbits;
7722ed88685STheodore Ts'o 
7738b0f165fSAnatol Pomozov 	if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
7747fb5409dSJan Kara 		/* Direct IO write... */
7752ed88685STheodore Ts'o 		if (map.m_len > DIO_MAX_BLOCKS)
7762ed88685STheodore Ts'o 			map.m_len = DIO_MAX_BLOCKS;
7772ed88685STheodore Ts'o 		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
7789924a92aSTheodore Ts'o 		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
7799924a92aSTheodore Ts'o 					    dio_credits);
7807fb5409dSJan Kara 		if (IS_ERR(handle)) {
781ac27a0ecSDave Kleikamp 			ret = PTR_ERR(handle);
7822ed88685STheodore Ts'o 			return ret;
7837fb5409dSJan Kara 		}
7847fb5409dSJan Kara 		started = 1;
785ac27a0ecSDave Kleikamp 	}
786ac27a0ecSDave Kleikamp 
7872ed88685STheodore Ts'o 	ret = ext4_map_blocks(handle, inode, &map, flags);
788ac27a0ecSDave Kleikamp 	if (ret > 0) {
7892ed88685STheodore Ts'o 		map_bh(bh, inode->i_sb, map.m_pblk);
7902ed88685STheodore Ts'o 		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
7912ed88685STheodore Ts'o 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
792ac27a0ecSDave Kleikamp 		ret = 0;
793ac27a0ecSDave Kleikamp 	}
7947fb5409dSJan Kara 	if (started)
7957fb5409dSJan Kara 		ext4_journal_stop(handle);
796ac27a0ecSDave Kleikamp 	return ret;
797ac27a0ecSDave Kleikamp }
798ac27a0ecSDave Kleikamp 
7992ed88685STheodore Ts'o int ext4_get_block(struct inode *inode, sector_t iblock,
8002ed88685STheodore Ts'o 		   struct buffer_head *bh, int create)
8012ed88685STheodore Ts'o {
8022ed88685STheodore Ts'o 	return _ext4_get_block(inode, iblock, bh,
8032ed88685STheodore Ts'o 			       create ? EXT4_GET_BLOCKS_CREATE : 0);
8042ed88685STheodore Ts'o }
8052ed88685STheodore Ts'o 
806ac27a0ecSDave Kleikamp /*
807ac27a0ecSDave Kleikamp  * `handle' can be NULL if create is zero
808ac27a0ecSDave Kleikamp  */
809617ba13bSMingming Cao struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
810725d26d3SAneesh Kumar K.V 				ext4_lblk_t block, int create, int *errp)
811ac27a0ecSDave Kleikamp {
8122ed88685STheodore Ts'o 	struct ext4_map_blocks map;
8132ed88685STheodore Ts'o 	struct buffer_head *bh;
814ac27a0ecSDave Kleikamp 	int fatal = 0, err;
815ac27a0ecSDave Kleikamp 
816ac27a0ecSDave Kleikamp 	J_ASSERT(handle != NULL || create == 0);
817ac27a0ecSDave Kleikamp 
8182ed88685STheodore Ts'o 	map.m_lblk = block;
8192ed88685STheodore Ts'o 	map.m_len = 1;
8202ed88685STheodore Ts'o 	err = ext4_map_blocks(handle, inode, &map,
8212ed88685STheodore Ts'o 			      create ? EXT4_GET_BLOCKS_CREATE : 0);
8222ed88685STheodore Ts'o 
82390b0a973SCarlos Maiolino 	/* ensure we send some value back into *errp */
82490b0a973SCarlos Maiolino 	*errp = 0;
82590b0a973SCarlos Maiolino 
8260f70b406STheodore Ts'o 	if (create && err == 0)
8270f70b406STheodore Ts'o 		err = -ENOSPC;	/* should never happen */
8282ed88685STheodore Ts'o 	if (err < 0)
829ac27a0ecSDave Kleikamp 		*errp = err;
8302ed88685STheodore Ts'o 	if (err <= 0)
8312ed88685STheodore Ts'o 		return NULL;
8322ed88685STheodore Ts'o 
8332ed88685STheodore Ts'o 	bh = sb_getblk(inode->i_sb, map.m_pblk);
834aebf0243SWang Shilong 	if (unlikely(!bh)) {
835860d21e2STheodore Ts'o 		*errp = -ENOMEM;
8362ed88685STheodore Ts'o 		return NULL;
837ac27a0ecSDave Kleikamp 	}
8382ed88685STheodore Ts'o 	if (map.m_flags & EXT4_MAP_NEW) {
839ac27a0ecSDave Kleikamp 		J_ASSERT(create != 0);
840ac39849dSAneesh Kumar K.V 		J_ASSERT(handle != NULL);
841ac27a0ecSDave Kleikamp 
842ac27a0ecSDave Kleikamp 		/*
843ac27a0ecSDave Kleikamp 		 * Now that we do not always journal data, we should
844ac27a0ecSDave Kleikamp 		 * keep in mind whether this should always journal the
845ac27a0ecSDave Kleikamp 		 * new buffer as metadata.  For now, regular file
846617ba13bSMingming Cao 		 * writes use ext4_get_block instead, so it's not a
847ac27a0ecSDave Kleikamp 		 * problem.
848ac27a0ecSDave Kleikamp 		 */
849ac27a0ecSDave Kleikamp 		lock_buffer(bh);
850ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "call get_create_access");
851617ba13bSMingming Cao 		fatal = ext4_journal_get_create_access(handle, bh);
852ac27a0ecSDave Kleikamp 		if (!fatal && !buffer_uptodate(bh)) {
853ac27a0ecSDave Kleikamp 			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
854ac27a0ecSDave Kleikamp 			set_buffer_uptodate(bh);
855ac27a0ecSDave Kleikamp 		}
856ac27a0ecSDave Kleikamp 		unlock_buffer(bh);
8570390131bSFrank Mayhar 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
8580390131bSFrank Mayhar 		err = ext4_handle_dirty_metadata(handle, inode, bh);
859ac27a0ecSDave Kleikamp 		if (!fatal)
860ac27a0ecSDave Kleikamp 			fatal = err;
861ac27a0ecSDave Kleikamp 	} else {
862ac27a0ecSDave Kleikamp 		BUFFER_TRACE(bh, "not a new buffer");
863ac27a0ecSDave Kleikamp 	}
864ac27a0ecSDave Kleikamp 	if (fatal) {
865ac27a0ecSDave Kleikamp 		*errp = fatal;
866ac27a0ecSDave Kleikamp 		brelse(bh);
867ac27a0ecSDave Kleikamp 		bh = NULL;
868ac27a0ecSDave Kleikamp 	}
869ac27a0ecSDave Kleikamp 	return bh;
870ac27a0ecSDave Kleikamp }
871ac27a0ecSDave Kleikamp 
872617ba13bSMingming Cao struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
873725d26d3SAneesh Kumar K.V 			       ext4_lblk_t block, int create, int *err)
874ac27a0ecSDave Kleikamp {
875ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
876ac27a0ecSDave Kleikamp 
877617ba13bSMingming Cao 	bh = ext4_getblk(handle, inode, block, create, err);
878ac27a0ecSDave Kleikamp 	if (!bh)
879ac27a0ecSDave Kleikamp 		return bh;
880ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
881ac27a0ecSDave Kleikamp 		return bh;
88265299a3bSChristoph Hellwig 	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
883ac27a0ecSDave Kleikamp 	wait_on_buffer(bh);
884ac27a0ecSDave Kleikamp 	if (buffer_uptodate(bh))
885ac27a0ecSDave Kleikamp 		return bh;
886ac27a0ecSDave Kleikamp 	put_bh(bh);
887ac27a0ecSDave Kleikamp 	*err = -EIO;
888ac27a0ecSDave Kleikamp 	return NULL;
889ac27a0ecSDave Kleikamp }
890ac27a0ecSDave Kleikamp 
891f19d5870STao Ma int ext4_walk_page_buffers(handle_t *handle,
892ac27a0ecSDave Kleikamp 			   struct buffer_head *head,
893ac27a0ecSDave Kleikamp 			   unsigned from,
894ac27a0ecSDave Kleikamp 			   unsigned to,
895ac27a0ecSDave Kleikamp 			   int *partial,
896ac27a0ecSDave Kleikamp 			   int (*fn)(handle_t *handle,
897ac27a0ecSDave Kleikamp 				     struct buffer_head *bh))
898ac27a0ecSDave Kleikamp {
899ac27a0ecSDave Kleikamp 	struct buffer_head *bh;
900ac27a0ecSDave Kleikamp 	unsigned block_start, block_end;
901ac27a0ecSDave Kleikamp 	unsigned blocksize = head->b_size;
902ac27a0ecSDave Kleikamp 	int err, ret = 0;
903ac27a0ecSDave Kleikamp 	struct buffer_head *next;
904ac27a0ecSDave Kleikamp 
905ac27a0ecSDave Kleikamp 	for (bh = head, block_start = 0;
906ac27a0ecSDave Kleikamp 	     ret == 0 && (bh != head || !block_start);
907de9a55b8STheodore Ts'o 	     block_start = block_end, bh = next) {
908ac27a0ecSDave Kleikamp 		next = bh->b_this_page;
909ac27a0ecSDave Kleikamp 		block_end = block_start + blocksize;
910ac27a0ecSDave Kleikamp 		if (block_end <= from || block_start >= to) {
911ac27a0ecSDave Kleikamp 			if (partial && !buffer_uptodate(bh))
912ac27a0ecSDave Kleikamp 				*partial = 1;
913ac27a0ecSDave Kleikamp 			continue;
914ac27a0ecSDave Kleikamp 		}
915ac27a0ecSDave Kleikamp 		err = (*fn)(handle, bh);
916ac27a0ecSDave Kleikamp 		if (!ret)
917ac27a0ecSDave Kleikamp 			ret = err;
918ac27a0ecSDave Kleikamp 	}
919ac27a0ecSDave Kleikamp 	return ret;
920ac27a0ecSDave Kleikamp }
921ac27a0ecSDave Kleikamp 
922ac27a0ecSDave Kleikamp /*
923ac27a0ecSDave Kleikamp  * To preserve ordering, it is essential that the hole instantiation and
924ac27a0ecSDave Kleikamp  * the data write be encapsulated in a single transaction.  We cannot
925617ba13bSMingming Cao  * close off a transaction and start a new one between the ext4_get_block()
926dab291afSMingming Cao  * and the commit_write().  So doing the jbd2_journal_start at the start of
927ac27a0ecSDave Kleikamp  * prepare_write() is the right place.
928ac27a0ecSDave Kleikamp  *
92936ade451SJan Kara  * Also, this function can nest inside ext4_writepage().  In that case, we
93036ade451SJan Kara  * *know* that ext4_writepage() has generated enough buffer credits to do the
93136ade451SJan Kara  * whole page.  So we won't block on the journal in that case, which is good,
93236ade451SJan Kara  * because the caller may be PF_MEMALLOC.
933ac27a0ecSDave Kleikamp  *
934617ba13bSMingming Cao  * By accident, ext4 can be reentered when a transaction is open via
935ac27a0ecSDave Kleikamp  * quota file writes.  If we were to commit the transaction while thus
936ac27a0ecSDave Kleikamp  * reentered, there can be a deadlock - we would be holding a quota
937ac27a0ecSDave Kleikamp  * lock, and the commit would never complete if another thread had a
938ac27a0ecSDave Kleikamp  * transaction open and was blocking on the quota lock - a ranking
939ac27a0ecSDave Kleikamp  * violation.
940ac27a0ecSDave Kleikamp  *
941dab291afSMingming Cao  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
942ac27a0ecSDave Kleikamp  * will _not_ run commit under these circumstances because handle->h_ref
943ac27a0ecSDave Kleikamp  * is elevated.  We'll still have enough credits for the tiny quotafile
944ac27a0ecSDave Kleikamp  * write.
945ac27a0ecSDave Kleikamp  */
946f19d5870STao Ma int do_journal_get_write_access(handle_t *handle,
947ac27a0ecSDave Kleikamp 				struct buffer_head *bh)
948ac27a0ecSDave Kleikamp {
94956d35a4cSJan Kara 	int dirty = buffer_dirty(bh);
95056d35a4cSJan Kara 	int ret;
95156d35a4cSJan Kara 
952ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
953ac27a0ecSDave Kleikamp 		return 0;
95456d35a4cSJan Kara 	/*
955ebdec241SChristoph Hellwig 	 * __block_write_begin() could have dirtied some buffers. Clean
95656d35a4cSJan Kara 	 * the dirty bit as jbd2_journal_get_write_access() could complain
95756d35a4cSJan Kara 	 * otherwise about fs integrity issues. Setting of the dirty bit
958ebdec241SChristoph Hellwig 	 * by __block_write_begin() isn't a real problem here as we clear
95956d35a4cSJan Kara 	 * the bit before releasing a page lock and thus writeback cannot
96056d35a4cSJan Kara 	 * ever write the buffer.
96156d35a4cSJan Kara 	 */
96256d35a4cSJan Kara 	if (dirty)
96356d35a4cSJan Kara 		clear_buffer_dirty(bh);
96456d35a4cSJan Kara 	ret = ext4_journal_get_write_access(handle, bh);
96556d35a4cSJan Kara 	if (!ret && dirty)
96656d35a4cSJan Kara 		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
96756d35a4cSJan Kara 	return ret;
968ac27a0ecSDave Kleikamp }
969ac27a0ecSDave Kleikamp 
9708b0f165fSAnatol Pomozov static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
9718b0f165fSAnatol Pomozov 		   struct buffer_head *bh_result, int create);
972bfc1af65SNick Piggin static int ext4_write_begin(struct file *file, struct address_space *mapping,
973bfc1af65SNick Piggin 			    loff_t pos, unsigned len, unsigned flags,
974bfc1af65SNick Piggin 			    struct page **pagep, void **fsdata)
975ac27a0ecSDave Kleikamp {
976bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
9771938a150SAneesh Kumar K.V 	int ret, needed_blocks;
978ac27a0ecSDave Kleikamp 	handle_t *handle;
979ac27a0ecSDave Kleikamp 	int retries = 0;
980bfc1af65SNick Piggin 	struct page *page;
981bfc1af65SNick Piggin 	pgoff_t index;
982bfc1af65SNick Piggin 	unsigned from, to;
983bfc1af65SNick Piggin 
9849bffad1eSTheodore Ts'o 	trace_ext4_write_begin(inode, pos, len, flags);
9851938a150SAneesh Kumar K.V 	/*
9861938a150SAneesh Kumar K.V 	 * Reserve one block more for addition to orphan list in case
9871938a150SAneesh Kumar K.V 	 * we allocate blocks but write fails for some reason
9881938a150SAneesh Kumar K.V 	 */
9891938a150SAneesh Kumar K.V 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
990bfc1af65SNick Piggin 	index = pos >> PAGE_CACHE_SHIFT;
991bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
992bfc1af65SNick Piggin 	to = from + len;
993ac27a0ecSDave Kleikamp 
994f19d5870STao Ma 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
995f19d5870STao Ma 		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
996f19d5870STao Ma 						    flags, pagep);
997f19d5870STao Ma 		if (ret < 0)
99847564bfbSTheodore Ts'o 			return ret;
99947564bfbSTheodore Ts'o 		if (ret == 1)
100047564bfbSTheodore Ts'o 			return 0;
1001f19d5870STao Ma 	}
1002f19d5870STao Ma 
100347564bfbSTheodore Ts'o 	/*
100447564bfbSTheodore Ts'o 	 * grab_cache_page_write_begin() can take a long time if the
100547564bfbSTheodore Ts'o 	 * system is thrashing due to memory pressure, or if the page
100647564bfbSTheodore Ts'o 	 * is being written back.  So grab it first before we start
100747564bfbSTheodore Ts'o 	 * the transaction handle.  This also allows us to allocate
100847564bfbSTheodore Ts'o 	 * the page (if needed) without using GFP_NOFS.
100947564bfbSTheodore Ts'o 	 */
101047564bfbSTheodore Ts'o retry_grab:
101154566b2cSNick Piggin 	page = grab_cache_page_write_begin(mapping, index, flags);
101247564bfbSTheodore Ts'o 	if (!page)
101347564bfbSTheodore Ts'o 		return -ENOMEM;
101447564bfbSTheodore Ts'o 	unlock_page(page);
101547564bfbSTheodore Ts'o 
101647564bfbSTheodore Ts'o retry_journal:
10179924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1018ac27a0ecSDave Kleikamp 	if (IS_ERR(handle)) {
101947564bfbSTheodore Ts'o 		page_cache_release(page);
102047564bfbSTheodore Ts'o 		return PTR_ERR(handle);
1021cf108bcaSJan Kara 	}
1022f19d5870STao Ma 
102347564bfbSTheodore Ts'o 	lock_page(page);
102447564bfbSTheodore Ts'o 	if (page->mapping != mapping) {
102547564bfbSTheodore Ts'o 		/* The page got truncated from under us */
102647564bfbSTheodore Ts'o 		unlock_page(page);
102747564bfbSTheodore Ts'o 		page_cache_release(page);
1028cf108bcaSJan Kara 		ext4_journal_stop(handle);
102947564bfbSTheodore Ts'o 		goto retry_grab;
1030cf108bcaSJan Kara 	}
103147564bfbSTheodore Ts'o 	wait_on_page_writeback(page);
1032cf108bcaSJan Kara 
1033744692dcSJiaying Zhang 	if (ext4_should_dioread_nolock(inode))
10346e1db88dSChristoph Hellwig 		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
1035744692dcSJiaying Zhang 	else
10366e1db88dSChristoph Hellwig 		ret = __block_write_begin(page, pos, len, ext4_get_block);
1037bfc1af65SNick Piggin 
1038bfc1af65SNick Piggin 	if (!ret && ext4_should_journal_data(inode)) {
1039f19d5870STao Ma 		ret = ext4_walk_page_buffers(handle, page_buffers(page),
1040f19d5870STao Ma 					     from, to, NULL,
1041f19d5870STao Ma 					     do_journal_get_write_access);
1042b46be050SAndrey Savochkin 	}
1043bfc1af65SNick Piggin 
1044bfc1af65SNick Piggin 	if (ret) {
1045bfc1af65SNick Piggin 		unlock_page(page);
1046ae4d5372SAneesh Kumar K.V 		/*
10476e1db88dSChristoph Hellwig 		 * __block_write_begin may have instantiated a few blocks
1048ae4d5372SAneesh Kumar K.V 		 * outside i_size.  Trim these off again. Don't need
1049ae4d5372SAneesh Kumar K.V 		 * i_size_read because we hold i_mutex.
10501938a150SAneesh Kumar K.V 		 *
10511938a150SAneesh Kumar K.V 		 * Add inode to orphan list in case we crash before
10521938a150SAneesh Kumar K.V 		 * truncate finishes
1053ae4d5372SAneesh Kumar K.V 		 */
1054ffacfa7aSJan Kara 		if (pos + len > inode->i_size && ext4_can_truncate(inode))
10551938a150SAneesh Kumar K.V 			ext4_orphan_add(handle, inode);
10561938a150SAneesh Kumar K.V 
10571938a150SAneesh Kumar K.V 		ext4_journal_stop(handle);
10581938a150SAneesh Kumar K.V 		if (pos + len > inode->i_size) {
1059b9a4207dSJan Kara 			ext4_truncate_failed_write(inode);
10601938a150SAneesh Kumar K.V 			/*
1061ffacfa7aSJan Kara 			 * If truncate failed early the inode might
10621938a150SAneesh Kumar K.V 			 * still be on the orphan list; we need to
10631938a150SAneesh Kumar K.V 			 * make sure the inode is removed from the
10641938a150SAneesh Kumar K.V 			 * orphan list in that case.
10651938a150SAneesh Kumar K.V 			 */
10661938a150SAneesh Kumar K.V 			if (inode->i_nlink)
10671938a150SAneesh Kumar K.V 				ext4_orphan_del(NULL, inode);
10681938a150SAneesh Kumar K.V 		}
1069bfc1af65SNick Piggin 
107047564bfbSTheodore Ts'o 		if (ret == -ENOSPC &&
107147564bfbSTheodore Ts'o 		    ext4_should_retry_alloc(inode->i_sb, &retries))
107247564bfbSTheodore Ts'o 			goto retry_journal;
107347564bfbSTheodore Ts'o 		page_cache_release(page);
107447564bfbSTheodore Ts'o 		return ret;
107547564bfbSTheodore Ts'o 	}
107647564bfbSTheodore Ts'o 	*pagep = page;
1077ac27a0ecSDave Kleikamp 	return ret;
1078ac27a0ecSDave Kleikamp }
1079ac27a0ecSDave Kleikamp 
1080bfc1af65SNick Piggin /* For write_end() in data=journal mode */
1081bfc1af65SNick Piggin static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1082ac27a0ecSDave Kleikamp {
1083ac27a0ecSDave Kleikamp 	if (!buffer_mapped(bh) || buffer_freed(bh))
1084ac27a0ecSDave Kleikamp 		return 0;
1085ac27a0ecSDave Kleikamp 	set_buffer_uptodate(bh);
10860390131bSFrank Mayhar 	return ext4_handle_dirty_metadata(handle, NULL, bh);
1087ac27a0ecSDave Kleikamp }
1088ac27a0ecSDave Kleikamp 
1089eed4333fSZheng Liu /*
1090eed4333fSZheng Liu  * We need to pick up the new inode size which generic_commit_write gave us
1091eed4333fSZheng Liu  * `file' can be NULL - eg, when called from page_symlink().
1092eed4333fSZheng Liu  *
1093eed4333fSZheng Liu  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1094eed4333fSZheng Liu  * buffers are managed internally.
1095eed4333fSZheng Liu  */
1096eed4333fSZheng Liu static int ext4_write_end(struct file *file,
1097f8514083SAneesh Kumar K.V 			  struct address_space *mapping,
1098f8514083SAneesh Kumar K.V 			  loff_t pos, unsigned len, unsigned copied,
1099f8514083SAneesh Kumar K.V 			  struct page *page, void *fsdata)
1100f8514083SAneesh Kumar K.V {
1101f8514083SAneesh Kumar K.V 	handle_t *handle = ext4_journal_current_handle();
1102eed4333fSZheng Liu 	struct inode *inode = mapping->host;
1103eed4333fSZheng Liu 	int ret = 0, ret2;
1104eed4333fSZheng Liu 	int i_size_changed = 0;
1105eed4333fSZheng Liu 
1106eed4333fSZheng Liu 	trace_ext4_write_end(inode, pos, len, copied);
1107eed4333fSZheng Liu 	if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
1108eed4333fSZheng Liu 		ret = ext4_jbd2_file_inode(handle, inode);
1109eed4333fSZheng Liu 		if (ret) {
1110eed4333fSZheng Liu 			unlock_page(page);
1111eed4333fSZheng Liu 			page_cache_release(page);
1112eed4333fSZheng Liu 			goto errout;
1113eed4333fSZheng Liu 		}
1114eed4333fSZheng Liu 	}
1115f8514083SAneesh Kumar K.V 
1116f19d5870STao Ma 	if (ext4_has_inline_data(inode))
1117f19d5870STao Ma 		copied = ext4_write_inline_data_end(inode, pos, len,
1118f19d5870STao Ma 						    copied, page);
1119f19d5870STao Ma 	else
1120f19d5870STao Ma 		copied = block_write_end(file, mapping, pos,
1121f19d5870STao Ma 					 len, copied, page, fsdata);
1122f8514083SAneesh Kumar K.V 
1123f8514083SAneesh Kumar K.V 	/*
1124f8514083SAneesh Kumar K.V 	 * No need to use i_size_read() here, the i_size
1125eed4333fSZheng Liu 	 * cannot change under us because we hole i_mutex.
1126f8514083SAneesh Kumar K.V 	 *
1127f8514083SAneesh Kumar K.V 	 * But it's important to update i_size while still holding page lock:
1128f8514083SAneesh Kumar K.V 	 * page writeout could otherwise come in and zero beyond i_size.
1129f8514083SAneesh Kumar K.V 	 */
1130f8514083SAneesh Kumar K.V 	if (pos + copied > inode->i_size) {
1131f8514083SAneesh Kumar K.V 		i_size_write(inode, pos + copied);
1132f8514083SAneesh Kumar K.V 		i_size_changed = 1;
1133f8514083SAneesh Kumar K.V 	}
1134f8514083SAneesh Kumar K.V 
1135f8514083SAneesh Kumar K.V 	if (pos + copied > EXT4_I(inode)->i_disksize) {
1136f8514083SAneesh Kumar K.V 		/* We need to mark inode dirty even if
1137f8514083SAneesh Kumar K.V 		 * new_i_size is less that inode->i_size
1138eed4333fSZheng Liu 		 * but greater than i_disksize. (hint delalloc)
1139f8514083SAneesh Kumar K.V 		 */
1140f8514083SAneesh Kumar K.V 		ext4_update_i_disksize(inode, (pos + copied));
1141f8514083SAneesh Kumar K.V 		i_size_changed = 1;
1142f8514083SAneesh Kumar K.V 	}
1143f8514083SAneesh Kumar K.V 	unlock_page(page);
1144f8514083SAneesh Kumar K.V 	page_cache_release(page);
1145f8514083SAneesh Kumar K.V 
1146f8514083SAneesh Kumar K.V 	/*
1147f8514083SAneesh Kumar K.V 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
1148f8514083SAneesh Kumar K.V 	 * makes the holding time of page lock longer. Second, it forces lock
1149f8514083SAneesh Kumar K.V 	 * ordering of page lock and transaction start for journaling
1150f8514083SAneesh Kumar K.V 	 * filesystems.
1151f8514083SAneesh Kumar K.V 	 */
1152f8514083SAneesh Kumar K.V 	if (i_size_changed)
1153f8514083SAneesh Kumar K.V 		ext4_mark_inode_dirty(handle, inode);
1154f8514083SAneesh Kumar K.V 
115574d553aaSTheodore Ts'o 	if (copied < 0)
115674d553aaSTheodore Ts'o 		ret = copied;
1157ffacfa7aSJan Kara 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1158f8514083SAneesh Kumar K.V 		/* if we have allocated more blocks and copied
1159f8514083SAneesh Kumar K.V 		 * less. We will have blocks allocated outside
1160f8514083SAneesh Kumar K.V 		 * inode->i_size. So truncate them
1161f8514083SAneesh Kumar K.V 		 */
1162f8514083SAneesh Kumar K.V 		ext4_orphan_add(handle, inode);
116374d553aaSTheodore Ts'o errout:
1164617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1165ac27a0ecSDave Kleikamp 	if (!ret)
1166ac27a0ecSDave Kleikamp 		ret = ret2;
1167bfc1af65SNick Piggin 
1168f8514083SAneesh Kumar K.V 	if (pos + len > inode->i_size) {
1169b9a4207dSJan Kara 		ext4_truncate_failed_write(inode);
1170f8514083SAneesh Kumar K.V 		/*
1171ffacfa7aSJan Kara 		 * If truncate failed early the inode might still be
1172f8514083SAneesh Kumar K.V 		 * on the orphan list; we need to make sure the inode
1173f8514083SAneesh Kumar K.V 		 * is removed from the orphan list in that case.
1174f8514083SAneesh Kumar K.V 		 */
1175f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1176f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1177f8514083SAneesh Kumar K.V 	}
1178f8514083SAneesh Kumar K.V 
1179bfc1af65SNick Piggin 	return ret ? ret : copied;
1180ac27a0ecSDave Kleikamp }
1181ac27a0ecSDave Kleikamp 
1182bfc1af65SNick Piggin static int ext4_journalled_write_end(struct file *file,
1183bfc1af65SNick Piggin 				     struct address_space *mapping,
1184bfc1af65SNick Piggin 				     loff_t pos, unsigned len, unsigned copied,
1185bfc1af65SNick Piggin 				     struct page *page, void *fsdata)
1186ac27a0ecSDave Kleikamp {
1187617ba13bSMingming Cao 	handle_t *handle = ext4_journal_current_handle();
1188bfc1af65SNick Piggin 	struct inode *inode = mapping->host;
1189ac27a0ecSDave Kleikamp 	int ret = 0, ret2;
1190ac27a0ecSDave Kleikamp 	int partial = 0;
1191bfc1af65SNick Piggin 	unsigned from, to;
1192cf17fea6SAneesh Kumar K.V 	loff_t new_i_size;
1193ac27a0ecSDave Kleikamp 
11949bffad1eSTheodore Ts'o 	trace_ext4_journalled_write_end(inode, pos, len, copied);
1195bfc1af65SNick Piggin 	from = pos & (PAGE_CACHE_SIZE - 1);
1196bfc1af65SNick Piggin 	to = from + len;
1197bfc1af65SNick Piggin 
1198441c8508SCurt Wohlgemuth 	BUG_ON(!ext4_handle_valid(handle));
1199441c8508SCurt Wohlgemuth 
12003fdcfb66STao Ma 	if (ext4_has_inline_data(inode))
12013fdcfb66STao Ma 		copied = ext4_write_inline_data_end(inode, pos, len,
12023fdcfb66STao Ma 						    copied, page);
12033fdcfb66STao Ma 	else {
1204bfc1af65SNick Piggin 		if (copied < len) {
1205bfc1af65SNick Piggin 			if (!PageUptodate(page))
1206bfc1af65SNick Piggin 				copied = 0;
1207bfc1af65SNick Piggin 			page_zero_new_buffers(page, from+copied, to);
1208bfc1af65SNick Piggin 		}
1209ac27a0ecSDave Kleikamp 
1210f19d5870STao Ma 		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1211bfc1af65SNick Piggin 					     to, &partial, write_end_fn);
1212ac27a0ecSDave Kleikamp 		if (!partial)
1213ac27a0ecSDave Kleikamp 			SetPageUptodate(page);
12143fdcfb66STao Ma 	}
1215cf17fea6SAneesh Kumar K.V 	new_i_size = pos + copied;
1216cf17fea6SAneesh Kumar K.V 	if (new_i_size > inode->i_size)
1217bfc1af65SNick Piggin 		i_size_write(inode, pos+copied);
121819f5fb7aSTheodore Ts'o 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
12192d859db3SJan Kara 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1220cf17fea6SAneesh Kumar K.V 	if (new_i_size > EXT4_I(inode)->i_disksize) {
1221cf17fea6SAneesh Kumar K.V 		ext4_update_i_disksize(inode, new_i_size);
1222617ba13bSMingming Cao 		ret2 = ext4_mark_inode_dirty(handle, inode);
1223ac27a0ecSDave Kleikamp 		if (!ret)
1224ac27a0ecSDave Kleikamp 			ret = ret2;
1225ac27a0ecSDave Kleikamp 	}
1226bfc1af65SNick Piggin 
1227cf108bcaSJan Kara 	unlock_page(page);
1228f8514083SAneesh Kumar K.V 	page_cache_release(page);
1229ffacfa7aSJan Kara 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
1230f8514083SAneesh Kumar K.V 		/* if we have allocated more blocks and copied
1231f8514083SAneesh Kumar K.V 		 * less. We will have blocks allocated outside
1232f8514083SAneesh Kumar K.V 		 * inode->i_size. So truncate them
1233f8514083SAneesh Kumar K.V 		 */
1234f8514083SAneesh Kumar K.V 		ext4_orphan_add(handle, inode);
1235f8514083SAneesh Kumar K.V 
1236617ba13bSMingming Cao 	ret2 = ext4_journal_stop(handle);
1237ac27a0ecSDave Kleikamp 	if (!ret)
1238ac27a0ecSDave Kleikamp 		ret = ret2;
1239f8514083SAneesh Kumar K.V 	if (pos + len > inode->i_size) {
1240b9a4207dSJan Kara 		ext4_truncate_failed_write(inode);
1241f8514083SAneesh Kumar K.V 		/*
1242ffacfa7aSJan Kara 		 * If truncate failed early the inode might still be
1243f8514083SAneesh Kumar K.V 		 * on the orphan list; we need to make sure the inode
1244f8514083SAneesh Kumar K.V 		 * is removed from the orphan list in that case.
1245f8514083SAneesh Kumar K.V 		 */
1246f8514083SAneesh Kumar K.V 		if (inode->i_nlink)
1247f8514083SAneesh Kumar K.V 			ext4_orphan_del(NULL, inode);
1248f8514083SAneesh Kumar K.V 	}
1249bfc1af65SNick Piggin 
1250bfc1af65SNick Piggin 	return ret ? ret : copied;
1251ac27a0ecSDave Kleikamp }
1252d2a17637SMingming Cao 
12539d0be502STheodore Ts'o /*
1254386ad67cSLukas Czerner  * Reserve a metadata for a single block located at lblock
1255386ad67cSLukas Czerner  */
1256386ad67cSLukas Czerner static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
1257386ad67cSLukas Czerner {
1258386ad67cSLukas Czerner 	int retries = 0;
1259386ad67cSLukas Czerner 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1260386ad67cSLukas Czerner 	struct ext4_inode_info *ei = EXT4_I(inode);
1261386ad67cSLukas Czerner 	unsigned int md_needed;
1262386ad67cSLukas Czerner 	ext4_lblk_t save_last_lblock;
1263386ad67cSLukas Czerner 	int save_len;
1264386ad67cSLukas Czerner 
1265386ad67cSLukas Czerner 	/*
1266386ad67cSLukas Czerner 	 * recalculate the amount of metadata blocks to reserve
1267386ad67cSLukas Czerner 	 * in order to allocate nrblocks
1268386ad67cSLukas Czerner 	 * worse case is one extent per block
1269386ad67cSLukas Czerner 	 */
1270386ad67cSLukas Czerner repeat:
1271386ad67cSLukas Czerner 	spin_lock(&ei->i_block_reservation_lock);
1272386ad67cSLukas Czerner 	/*
1273386ad67cSLukas Czerner 	 * ext4_calc_metadata_amount() has side effects, which we have
1274386ad67cSLukas Czerner 	 * to be prepared undo if we fail to claim space.
1275386ad67cSLukas Czerner 	 */
1276386ad67cSLukas Czerner 	save_len = ei->i_da_metadata_calc_len;
1277386ad67cSLukas Czerner 	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
1278386ad67cSLukas Czerner 	md_needed = EXT4_NUM_B2C(sbi,
1279386ad67cSLukas Czerner 				 ext4_calc_metadata_amount(inode, lblock));
1280386ad67cSLukas Czerner 	trace_ext4_da_reserve_space(inode, md_needed);
1281386ad67cSLukas Czerner 
1282386ad67cSLukas Czerner 	/*
1283386ad67cSLukas Czerner 	 * We do still charge estimated metadata to the sb though;
1284386ad67cSLukas Czerner 	 * we cannot afford to run out of free blocks.
1285386ad67cSLukas Czerner 	 */
1286386ad67cSLukas Czerner 	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
1287386ad67cSLukas Czerner 		ei->i_da_metadata_calc_len = save_len;
1288386ad67cSLukas Czerner 		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1289386ad67cSLukas Czerner 		spin_unlock(&ei->i_block_reservation_lock);
1290386ad67cSLukas Czerner 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1291386ad67cSLukas Czerner 			cond_resched();
1292386ad67cSLukas Czerner 			goto repeat;
1293386ad67cSLukas Czerner 		}
1294386ad67cSLukas Czerner 		return -ENOSPC;
1295386ad67cSLukas Czerner 	}
1296386ad67cSLukas Czerner 	ei->i_reserved_meta_blocks += md_needed;
1297386ad67cSLukas Czerner 	spin_unlock(&ei->i_block_reservation_lock);
1298386ad67cSLukas Czerner 
1299386ad67cSLukas Czerner 	return 0;       /* success */
1300386ad67cSLukas Czerner }
1301386ad67cSLukas Czerner 
1302386ad67cSLukas Czerner /*
13037b415bf6SAditya Kali  * Reserve a single cluster located at lblock
13049d0be502STheodore Ts'o  */
130501f49d0bSTheodore Ts'o static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1306d2a17637SMingming Cao {
1307030ba6bcSAneesh Kumar K.V 	int retries = 0;
1308d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
13090637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
13107b415bf6SAditya Kali 	unsigned int md_needed;
13115dd4056dSChristoph Hellwig 	int ret;
131203179fe9STheodore Ts'o 	ext4_lblk_t save_last_lblock;
131303179fe9STheodore Ts'o 	int save_len;
1314d2a17637SMingming Cao 
131560e58e0fSMingming Cao 	/*
131672b8ab9dSEric Sandeen 	 * We will charge metadata quota at writeout time; this saves
131772b8ab9dSEric Sandeen 	 * us from metadata over-estimation, though we may go over by
131872b8ab9dSEric Sandeen 	 * a small amount in the end.  Here we just reserve for data.
131960e58e0fSMingming Cao 	 */
13207b415bf6SAditya Kali 	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
13215dd4056dSChristoph Hellwig 	if (ret)
13225dd4056dSChristoph Hellwig 		return ret;
132303179fe9STheodore Ts'o 
132403179fe9STheodore Ts'o 	/*
132503179fe9STheodore Ts'o 	 * recalculate the amount of metadata blocks to reserve
132603179fe9STheodore Ts'o 	 * in order to allocate nrblocks
132703179fe9STheodore Ts'o 	 * worse case is one extent per block
132803179fe9STheodore Ts'o 	 */
132903179fe9STheodore Ts'o repeat:
133003179fe9STheodore Ts'o 	spin_lock(&ei->i_block_reservation_lock);
133103179fe9STheodore Ts'o 	/*
133203179fe9STheodore Ts'o 	 * ext4_calc_metadata_amount() has side effects, which we have
133303179fe9STheodore Ts'o 	 * to be prepared undo if we fail to claim space.
133403179fe9STheodore Ts'o 	 */
133503179fe9STheodore Ts'o 	save_len = ei->i_da_metadata_calc_len;
133603179fe9STheodore Ts'o 	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
133703179fe9STheodore Ts'o 	md_needed = EXT4_NUM_B2C(sbi,
133803179fe9STheodore Ts'o 				 ext4_calc_metadata_amount(inode, lblock));
133903179fe9STheodore Ts'o 	trace_ext4_da_reserve_space(inode, md_needed);
134003179fe9STheodore Ts'o 
134172b8ab9dSEric Sandeen 	/*
134272b8ab9dSEric Sandeen 	 * We do still charge estimated metadata to the sb though;
134372b8ab9dSEric Sandeen 	 * we cannot afford to run out of free blocks.
134472b8ab9dSEric Sandeen 	 */
1345e7d5f315STheodore Ts'o 	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
134603179fe9STheodore Ts'o 		ei->i_da_metadata_calc_len = save_len;
134703179fe9STheodore Ts'o 		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
134803179fe9STheodore Ts'o 		spin_unlock(&ei->i_block_reservation_lock);
1349030ba6bcSAneesh Kumar K.V 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
1350bb8b20edSLukas Czerner 			cond_resched();
1351030ba6bcSAneesh Kumar K.V 			goto repeat;
1352030ba6bcSAneesh Kumar K.V 		}
135303179fe9STheodore Ts'o 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1354d2a17637SMingming Cao 		return -ENOSPC;
1355d2a17637SMingming Cao 	}
13569d0be502STheodore Ts'o 	ei->i_reserved_data_blocks++;
13570637c6f4STheodore Ts'o 	ei->i_reserved_meta_blocks += md_needed;
13580637c6f4STheodore Ts'o 	spin_unlock(&ei->i_block_reservation_lock);
135939bc680aSDmitry Monakhov 
1360d2a17637SMingming Cao 	return 0;       /* success */
1361d2a17637SMingming Cao }
1362d2a17637SMingming Cao 
136312219aeaSAneesh Kumar K.V static void ext4_da_release_space(struct inode *inode, int to_free)
1364d2a17637SMingming Cao {
1365d2a17637SMingming Cao 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
13660637c6f4STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
1367d2a17637SMingming Cao 
1368cd213226SMingming Cao 	if (!to_free)
1369cd213226SMingming Cao 		return;		/* Nothing to release, exit */
1370cd213226SMingming Cao 
1371d2a17637SMingming Cao 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1372cd213226SMingming Cao 
13735a58ec87SLi Zefan 	trace_ext4_da_release_space(inode, to_free);
13740637c6f4STheodore Ts'o 	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1375cd213226SMingming Cao 		/*
13760637c6f4STheodore Ts'o 		 * if there aren't enough reserved blocks, then the
13770637c6f4STheodore Ts'o 		 * counter is messed up somewhere.  Since this
13780637c6f4STheodore Ts'o 		 * function is called from invalidate page, it's
13790637c6f4STheodore Ts'o 		 * harmless to return without any action.
1380cd213226SMingming Cao 		 */
13818de5c325STheodore Ts'o 		ext4_warning(inode->i_sb, "ext4_da_release_space: "
13820637c6f4STheodore Ts'o 			 "ino %lu, to_free %d with only %d reserved "
13831084f252STheodore Ts'o 			 "data blocks", inode->i_ino, to_free,
13840637c6f4STheodore Ts'o 			 ei->i_reserved_data_blocks);
13850637c6f4STheodore Ts'o 		WARN_ON(1);
13860637c6f4STheodore Ts'o 		to_free = ei->i_reserved_data_blocks;
13870637c6f4STheodore Ts'o 	}
13880637c6f4STheodore Ts'o 	ei->i_reserved_data_blocks -= to_free;
13890637c6f4STheodore Ts'o 
13900637c6f4STheodore Ts'o 	if (ei->i_reserved_data_blocks == 0) {
13910637c6f4STheodore Ts'o 		/*
13920637c6f4STheodore Ts'o 		 * We can release all of the reserved metadata blocks
13930637c6f4STheodore Ts'o 		 * only when we have written all of the delayed
13940637c6f4STheodore Ts'o 		 * allocation blocks.
13957b415bf6SAditya Kali 		 * Note that in case of bigalloc, i_reserved_meta_blocks,
13967b415bf6SAditya Kali 		 * i_reserved_data_blocks, etc. refer to number of clusters.
13970637c6f4STheodore Ts'o 		 */
139857042651STheodore Ts'o 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
139972b8ab9dSEric Sandeen 				   ei->i_reserved_meta_blocks);
1400ee5f4d9cSTheodore Ts'o 		ei->i_reserved_meta_blocks = 0;
14019d0be502STheodore Ts'o 		ei->i_da_metadata_calc_len = 0;
1402cd213226SMingming Cao 	}
1403cd213226SMingming Cao 
140472b8ab9dSEric Sandeen 	/* update fs dirty data blocks counter */
140557042651STheodore Ts'o 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1406d2a17637SMingming Cao 
1407d2a17637SMingming Cao 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
140860e58e0fSMingming Cao 
14097b415bf6SAditya Kali 	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1410d2a17637SMingming Cao }
1411d2a17637SMingming Cao 
1412d2a17637SMingming Cao static void ext4_da_page_release_reservation(struct page *page,
1413d2a17637SMingming Cao 					     unsigned long offset)
1414d2a17637SMingming Cao {
1415d2a17637SMingming Cao 	int to_release = 0;
1416d2a17637SMingming Cao 	struct buffer_head *head, *bh;
1417d2a17637SMingming Cao 	unsigned int curr_off = 0;
14187b415bf6SAditya Kali 	struct inode *inode = page->mapping->host;
14197b415bf6SAditya Kali 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
14207b415bf6SAditya Kali 	int num_clusters;
142151865fdaSZheng Liu 	ext4_fsblk_t lblk;
1422d2a17637SMingming Cao 
1423d2a17637SMingming Cao 	head = page_buffers(page);
1424d2a17637SMingming Cao 	bh = head;
1425d2a17637SMingming Cao 	do {
1426d2a17637SMingming Cao 		unsigned int next_off = curr_off + bh->b_size;
1427d2a17637SMingming Cao 
1428d2a17637SMingming Cao 		if ((offset <= curr_off) && (buffer_delay(bh))) {
1429d2a17637SMingming Cao 			to_release++;
1430d2a17637SMingming Cao 			clear_buffer_delay(bh);
1431d2a17637SMingming Cao 		}
1432d2a17637SMingming Cao 		curr_off = next_off;
1433d2a17637SMingming Cao 	} while ((bh = bh->b_this_page) != head);
14347b415bf6SAditya Kali 
143551865fdaSZheng Liu 	if (to_release) {
143651865fdaSZheng Liu 		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
143751865fdaSZheng Liu 		ext4_es_remove_extent(inode, lblk, to_release);
143851865fdaSZheng Liu 	}
143951865fdaSZheng Liu 
14407b415bf6SAditya Kali 	/* If we have released all the blocks belonging to a cluster, then we
14417b415bf6SAditya Kali 	 * need to release the reserved space for that cluster. */
14427b415bf6SAditya Kali 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
14437b415bf6SAditya Kali 	while (num_clusters > 0) {
14447b415bf6SAditya Kali 		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
14457b415bf6SAditya Kali 			((num_clusters - 1) << sbi->s_cluster_bits);
14467b415bf6SAditya Kali 		if (sbi->s_cluster_ratio == 1 ||
14477d1b1fbcSZheng Liu 		    !ext4_find_delalloc_cluster(inode, lblk))
14487b415bf6SAditya Kali 			ext4_da_release_space(inode, 1);
14497b415bf6SAditya Kali 
14507b415bf6SAditya Kali 		num_clusters--;
14517b415bf6SAditya Kali 	}
1452d2a17637SMingming Cao }
1453ac27a0ecSDave Kleikamp 
1454ac27a0ecSDave Kleikamp /*
145564769240SAlex Tomas  * Delayed allocation stuff
145664769240SAlex Tomas  */
145764769240SAlex Tomas 
145864769240SAlex Tomas /*
145964769240SAlex Tomas  * mpage_da_submit_io - walks through extent of pages and try to write
1460a1d6cc56SAneesh Kumar K.V  * them with writepage() call back
146164769240SAlex Tomas  *
146264769240SAlex Tomas  * @mpd->inode: inode
146364769240SAlex Tomas  * @mpd->first_page: first page of the extent
146464769240SAlex Tomas  * @mpd->next_page: page after the last page of the extent
146564769240SAlex Tomas  *
146664769240SAlex Tomas  * By the time mpage_da_submit_io() is called we expect all blocks
146764769240SAlex Tomas  * to be allocated. this may be wrong if allocation failed.
146864769240SAlex Tomas  *
146964769240SAlex Tomas  * As pages are already locked by write_cache_pages(), we can't use it
147064769240SAlex Tomas  */
14711de3e3dfSTheodore Ts'o static int mpage_da_submit_io(struct mpage_da_data *mpd,
14721de3e3dfSTheodore Ts'o 			      struct ext4_map_blocks *map)
147364769240SAlex Tomas {
1474791b7f08SAneesh Kumar K.V 	struct pagevec pvec;
1475791b7f08SAneesh Kumar K.V 	unsigned long index, end;
1476791b7f08SAneesh Kumar K.V 	int ret = 0, err, nr_pages, i;
1477791b7f08SAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1478791b7f08SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
1479cb20d518STheodore Ts'o 	loff_t size = i_size_read(inode);
14803ecdb3a1STheodore Ts'o 	unsigned int len, block_start;
14813ecdb3a1STheodore Ts'o 	struct buffer_head *bh, *page_bufs = NULL;
14821de3e3dfSTheodore Ts'o 	sector_t pblock = 0, cur_logical = 0;
1483bd2d0210STheodore Ts'o 	struct ext4_io_submit io_submit;
148464769240SAlex Tomas 
148564769240SAlex Tomas 	BUG_ON(mpd->next_page <= mpd->first_page);
1486bd2d0210STheodore Ts'o 	memset(&io_submit, 0, sizeof(io_submit));
1487791b7f08SAneesh Kumar K.V 	/*
1488791b7f08SAneesh Kumar K.V 	 * We need to start from the first_page to the next_page - 1
1489791b7f08SAneesh Kumar K.V 	 * to make sure we also write the mapped dirty buffer_heads.
14908dc207c0STheodore Ts'o 	 * If we look at mpd->b_blocknr we would only be looking
1491791b7f08SAneesh Kumar K.V 	 * at the currently mapped buffer_heads.
1492791b7f08SAneesh Kumar K.V 	 */
149364769240SAlex Tomas 	index = mpd->first_page;
149464769240SAlex Tomas 	end = mpd->next_page - 1;
149564769240SAlex Tomas 
1496791b7f08SAneesh Kumar K.V 	pagevec_init(&pvec, 0);
149764769240SAlex Tomas 	while (index <= end) {
1498791b7f08SAneesh Kumar K.V 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
149964769240SAlex Tomas 		if (nr_pages == 0)
150064769240SAlex Tomas 			break;
150164769240SAlex Tomas 		for (i = 0; i < nr_pages; i++) {
1502f8bec370SJan Kara 			int skip_page = 0;
150364769240SAlex Tomas 			struct page *page = pvec.pages[i];
150464769240SAlex Tomas 
1505791b7f08SAneesh Kumar K.V 			index = page->index;
1506791b7f08SAneesh Kumar K.V 			if (index > end)
1507791b7f08SAneesh Kumar K.V 				break;
1508cb20d518STheodore Ts'o 
1509cb20d518STheodore Ts'o 			if (index == size >> PAGE_CACHE_SHIFT)
1510cb20d518STheodore Ts'o 				len = size & ~PAGE_CACHE_MASK;
1511cb20d518STheodore Ts'o 			else
1512cb20d518STheodore Ts'o 				len = PAGE_CACHE_SIZE;
15131de3e3dfSTheodore Ts'o 			if (map) {
15141de3e3dfSTheodore Ts'o 				cur_logical = index << (PAGE_CACHE_SHIFT -
15151de3e3dfSTheodore Ts'o 							inode->i_blkbits);
15161de3e3dfSTheodore Ts'o 				pblock = map->m_pblk + (cur_logical -
15171de3e3dfSTheodore Ts'o 							map->m_lblk);
15181de3e3dfSTheodore Ts'o 			}
1519791b7f08SAneesh Kumar K.V 			index++;
1520791b7f08SAneesh Kumar K.V 
1521791b7f08SAneesh Kumar K.V 			BUG_ON(!PageLocked(page));
1522791b7f08SAneesh Kumar K.V 			BUG_ON(PageWriteback(page));
1523791b7f08SAneesh Kumar K.V 
15243ecdb3a1STheodore Ts'o 			bh = page_bufs = page_buffers(page);
15253ecdb3a1STheodore Ts'o 			block_start = 0;
15263ecdb3a1STheodore Ts'o 			do {
15271de3e3dfSTheodore Ts'o 				if (map && (cur_logical >= map->m_lblk) &&
15281de3e3dfSTheodore Ts'o 				    (cur_logical <= (map->m_lblk +
15291de3e3dfSTheodore Ts'o 						     (map->m_len - 1)))) {
15301de3e3dfSTheodore Ts'o 					if (buffer_delay(bh)) {
15311de3e3dfSTheodore Ts'o 						clear_buffer_delay(bh);
15321de3e3dfSTheodore Ts'o 						bh->b_blocknr = pblock;
15331de3e3dfSTheodore Ts'o 					}
15341de3e3dfSTheodore Ts'o 					if (buffer_unwritten(bh) ||
15351de3e3dfSTheodore Ts'o 					    buffer_mapped(bh))
15361de3e3dfSTheodore Ts'o 						BUG_ON(bh->b_blocknr != pblock);
15371de3e3dfSTheodore Ts'o 					if (map->m_flags & EXT4_MAP_UNINIT)
15381de3e3dfSTheodore Ts'o 						set_buffer_uninit(bh);
15391de3e3dfSTheodore Ts'o 					clear_buffer_unwritten(bh);
15401de3e3dfSTheodore Ts'o 				}
15411de3e3dfSTheodore Ts'o 
154213a79a47SYongqiang Yang 				/*
154313a79a47SYongqiang Yang 				 * skip page if block allocation undone and
154413a79a47SYongqiang Yang 				 * block is dirty
154513a79a47SYongqiang Yang 				 */
154613a79a47SYongqiang Yang 				if (ext4_bh_delay_or_unwritten(NULL, bh))
154797498956STheodore Ts'o 					skip_page = 1;
15483ecdb3a1STheodore Ts'o 				bh = bh->b_this_page;
15493ecdb3a1STheodore Ts'o 				block_start += bh->b_size;
15501de3e3dfSTheodore Ts'o 				cur_logical++;
15511de3e3dfSTheodore Ts'o 				pblock++;
15521de3e3dfSTheodore Ts'o 			} while (bh != page_bufs);
15531de3e3dfSTheodore Ts'o 
1554f8bec370SJan Kara 			if (skip_page) {
1555f8bec370SJan Kara 				unlock_page(page);
1556f8bec370SJan Kara 				continue;
1557f8bec370SJan Kara 			}
1558cb20d518STheodore Ts'o 
155997498956STheodore Ts'o 			clear_page_dirty_for_io(page);
1560fe089c77SJan Kara 			err = ext4_bio_write_page(&io_submit, page, len,
1561fe089c77SJan Kara 						  mpd->wbc);
1562cb20d518STheodore Ts'o 			if (!err)
1563a1d6cc56SAneesh Kumar K.V 				mpd->pages_written++;
156464769240SAlex Tomas 			/*
156564769240SAlex Tomas 			 * In error case, we have to continue because
156664769240SAlex Tomas 			 * remaining pages are still locked
156764769240SAlex Tomas 			 */
156864769240SAlex Tomas 			if (ret == 0)
156964769240SAlex Tomas 				ret = err;
157064769240SAlex Tomas 		}
157164769240SAlex Tomas 		pagevec_release(&pvec);
157264769240SAlex Tomas 	}
1573bd2d0210STheodore Ts'o 	ext4_io_submit(&io_submit);
157464769240SAlex Tomas 	return ret;
157564769240SAlex Tomas }
157664769240SAlex Tomas 
1577c7f5938aSCurt Wohlgemuth static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
1578c4a0c46eSAneesh Kumar K.V {
1579c4a0c46eSAneesh Kumar K.V 	int nr_pages, i;
1580c4a0c46eSAneesh Kumar K.V 	pgoff_t index, end;
1581c4a0c46eSAneesh Kumar K.V 	struct pagevec pvec;
1582c4a0c46eSAneesh Kumar K.V 	struct inode *inode = mpd->inode;
1583c4a0c46eSAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
158451865fdaSZheng Liu 	ext4_lblk_t start, last;
1585c4a0c46eSAneesh Kumar K.V 
1586c7f5938aSCurt Wohlgemuth 	index = mpd->first_page;
1587c7f5938aSCurt Wohlgemuth 	end   = mpd->next_page - 1;
158851865fdaSZheng Liu 
158951865fdaSZheng Liu 	start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
159051865fdaSZheng Liu 	last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
159151865fdaSZheng Liu 	ext4_es_remove_extent(inode, start, last - start + 1);
159251865fdaSZheng Liu 
159366bea92cSEric Sandeen 	pagevec_init(&pvec, 0);
1594c4a0c46eSAneesh Kumar K.V 	while (index <= end) {
1595c4a0c46eSAneesh Kumar K.V 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1596c4a0c46eSAneesh Kumar K.V 		if (nr_pages == 0)
1597c4a0c46eSAneesh Kumar K.V 			break;
1598c4a0c46eSAneesh Kumar K.V 		for (i = 0; i < nr_pages; i++) {
1599c4a0c46eSAneesh Kumar K.V 			struct page *page = pvec.pages[i];
16009b1d0998SJan Kara 			if (page->index > end)
1601c4a0c46eSAneesh Kumar K.V 				break;
1602c4a0c46eSAneesh Kumar K.V 			BUG_ON(!PageLocked(page));
1603c4a0c46eSAneesh Kumar K.V 			BUG_ON(PageWriteback(page));
1604c4a0c46eSAneesh Kumar K.V 			block_invalidatepage(page, 0);
1605c4a0c46eSAneesh Kumar K.V 			ClearPageUptodate(page);
1606c4a0c46eSAneesh Kumar K.V 			unlock_page(page);
1607c4a0c46eSAneesh Kumar K.V 		}
16089b1d0998SJan Kara 		index = pvec.pages[nr_pages - 1]->index + 1;
16099b1d0998SJan Kara 		pagevec_release(&pvec);
1610c4a0c46eSAneesh Kumar K.V 	}
1611c4a0c46eSAneesh Kumar K.V 	return;
1612c4a0c46eSAneesh Kumar K.V }
1613c4a0c46eSAneesh Kumar K.V 
1614df22291fSAneesh Kumar K.V static void ext4_print_free_blocks(struct inode *inode)
1615df22291fSAneesh Kumar K.V {
1616df22291fSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
161792b97816STheodore Ts'o 	struct super_block *sb = inode->i_sb;
161892b97816STheodore Ts'o 
161992b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
16205dee5437STheodore Ts'o 	       EXT4_C2B(EXT4_SB(inode->i_sb),
16215dee5437STheodore Ts'o 			ext4_count_free_clusters(inode->i_sb)));
162292b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
162392b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
162457042651STheodore Ts'o 	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
162557042651STheodore Ts'o 		percpu_counter_sum(&sbi->s_freeclusters_counter)));
162692b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
16277b415bf6SAditya Kali 	       (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
16287b415bf6SAditya Kali 		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
162992b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
163092b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1631df22291fSAneesh Kumar K.V 		 EXT4_I(inode)->i_reserved_data_blocks);
163292b97816STheodore Ts'o 	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
1633df22291fSAneesh Kumar K.V 	       EXT4_I(inode)->i_reserved_meta_blocks);
1634df22291fSAneesh Kumar K.V 	return;
1635df22291fSAneesh Kumar K.V }
1636df22291fSAneesh Kumar K.V 
1637b920c755STheodore Ts'o /*
16385a87b7a5STheodore Ts'o  * mpage_da_map_and_submit - go through given space, map them
16395a87b7a5STheodore Ts'o  *       if necessary, and then submit them for I/O
164064769240SAlex Tomas  *
16418dc207c0STheodore Ts'o  * @mpd - bh describing space
164264769240SAlex Tomas  *
164364769240SAlex Tomas  * The function skips space we know is already mapped to disk blocks.
164464769240SAlex Tomas  *
164564769240SAlex Tomas  */
16465a87b7a5STheodore Ts'o static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
164764769240SAlex Tomas {
16482ac3b6e0STheodore Ts'o 	int err, blks, get_blocks_flags;
16491de3e3dfSTheodore Ts'o 	struct ext4_map_blocks map, *mapp = NULL;
16502fa3cdfbSTheodore Ts'o 	sector_t next = mpd->b_blocknr;
16512fa3cdfbSTheodore Ts'o 	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
16522fa3cdfbSTheodore Ts'o 	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
16532fa3cdfbSTheodore Ts'o 	handle_t *handle = NULL;
165464769240SAlex Tomas 
165564769240SAlex Tomas 	/*
16565a87b7a5STheodore Ts'o 	 * If the blocks are mapped already, or we couldn't accumulate
16575a87b7a5STheodore Ts'o 	 * any blocks, then proceed immediately to the submission stage.
165864769240SAlex Tomas 	 */
16595a87b7a5STheodore Ts'o 	if ((mpd->b_size == 0) ||
16605a87b7a5STheodore Ts'o 	    ((mpd->b_state  & (1 << BH_Mapped)) &&
166129fa89d0SAneesh Kumar K.V 	     !(mpd->b_state & (1 << BH_Delay)) &&
16625a87b7a5STheodore Ts'o 	     !(mpd->b_state & (1 << BH_Unwritten))))
16635a87b7a5STheodore Ts'o 		goto submit_io;
16642fa3cdfbSTheodore Ts'o 
16652fa3cdfbSTheodore Ts'o 	handle = ext4_journal_current_handle();
16662fa3cdfbSTheodore Ts'o 	BUG_ON(!handle);
16672fa3cdfbSTheodore Ts'o 
166879ffab34SAneesh Kumar K.V 	/*
166979e83036SEric Sandeen 	 * Call ext4_map_blocks() to allocate any delayed allocation
16702ac3b6e0STheodore Ts'o 	 * blocks, or to convert an uninitialized extent to be
16712ac3b6e0STheodore Ts'o 	 * initialized (in the case where we have written into
16722ac3b6e0STheodore Ts'o 	 * one or more preallocated blocks).
16732ac3b6e0STheodore Ts'o 	 *
16742ac3b6e0STheodore Ts'o 	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
16752ac3b6e0STheodore Ts'o 	 * indicate that we are on the delayed allocation path.  This
16762ac3b6e0STheodore Ts'o 	 * affects functions in many different parts of the allocation
16772ac3b6e0STheodore Ts'o 	 * call path.  This flag exists primarily because we don't
167879e83036SEric Sandeen 	 * want to change *many* call functions, so ext4_map_blocks()
1679f2321097STheodore Ts'o 	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
16802ac3b6e0STheodore Ts'o 	 * inode's allocation semaphore is taken.
16812ac3b6e0STheodore Ts'o 	 *
16822ac3b6e0STheodore Ts'o 	 * If the blocks in questions were delalloc blocks, set
16832ac3b6e0STheodore Ts'o 	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
16842ac3b6e0STheodore Ts'o 	 * variables are updated after the blocks have been allocated.
168579ffab34SAneesh Kumar K.V 	 */
16862ed88685STheodore Ts'o 	map.m_lblk = next;
16872ed88685STheodore Ts'o 	map.m_len = max_blocks;
16881296cc85SAneesh Kumar K.V 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
1689744692dcSJiaying Zhang 	if (ext4_should_dioread_nolock(mpd->inode))
1690744692dcSJiaying Zhang 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
16912ac3b6e0STheodore Ts'o 	if (mpd->b_state & (1 << BH_Delay))
16921296cc85SAneesh Kumar K.V 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
16931296cc85SAneesh Kumar K.V 
16942ed88685STheodore Ts'o 	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
16952fa3cdfbSTheodore Ts'o 	if (blks < 0) {
1696e3570639SEric Sandeen 		struct super_block *sb = mpd->inode->i_sb;
1697e3570639SEric Sandeen 
16982fa3cdfbSTheodore Ts'o 		err = blks;
1699ed5bde0bSTheodore Ts'o 		/*
17005a87b7a5STheodore Ts'o 		 * If get block returns EAGAIN or ENOSPC and there
170197498956STheodore Ts'o 		 * appears to be free blocks we will just let
170297498956STheodore Ts'o 		 * mpage_da_submit_io() unlock all of the pages.
1703c4a0c46eSAneesh Kumar K.V 		 */
1704c4a0c46eSAneesh Kumar K.V 		if (err == -EAGAIN)
17055a87b7a5STheodore Ts'o 			goto submit_io;
1706df22291fSAneesh Kumar K.V 
17075dee5437STheodore Ts'o 		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
1708df22291fSAneesh Kumar K.V 			mpd->retval = err;
17095a87b7a5STheodore Ts'o 			goto submit_io;
1710df22291fSAneesh Kumar K.V 		}
1711df22291fSAneesh Kumar K.V 
1712c4a0c46eSAneesh Kumar K.V 		/*
1713ed5bde0bSTheodore Ts'o 		 * get block failure will cause us to loop in
1714ed5bde0bSTheodore Ts'o 		 * writepages, because a_ops->writepage won't be able
1715ed5bde0bSTheodore Ts'o 		 * to make progress. The page will be redirtied by
1716ed5bde0bSTheodore Ts'o 		 * writepage and writepages will again try to write
1717ed5bde0bSTheodore Ts'o 		 * the same.
1718c4a0c46eSAneesh Kumar K.V 		 */
1719e3570639SEric Sandeen 		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
1720e3570639SEric Sandeen 			ext4_msg(sb, KERN_CRIT,
1721e3570639SEric Sandeen 				 "delayed block allocation failed for inode %lu "
1722e3570639SEric Sandeen 				 "at logical offset %llu with max blocks %zd "
1723e3570639SEric Sandeen 				 "with error %d", mpd->inode->i_ino,
1724c4a0c46eSAneesh Kumar K.V 				 (unsigned long long) next,
17258dc207c0STheodore Ts'o 				 mpd->b_size >> mpd->inode->i_blkbits, err);
1726e3570639SEric Sandeen 			ext4_msg(sb, KERN_CRIT,
172701a523ebSTheodore Ts'o 				"This should not happen!! Data will be lost");
1728e3570639SEric Sandeen 			if (err == -ENOSPC)
1729df22291fSAneesh Kumar K.V 				ext4_print_free_blocks(mpd->inode);
1730030ba6bcSAneesh Kumar K.V 		}
17312fa3cdfbSTheodore Ts'o 		/* invalidate all the pages */
1732c7f5938aSCurt Wohlgemuth 		ext4_da_block_invalidatepages(mpd);
1733e0fd9b90SCurt Wohlgemuth 
1734e0fd9b90SCurt Wohlgemuth 		/* Mark this page range as having been completed */
1735e0fd9b90SCurt Wohlgemuth 		mpd->io_done = 1;
17365a87b7a5STheodore Ts'o 		return;
1737c4a0c46eSAneesh Kumar K.V 	}
17382fa3cdfbSTheodore Ts'o 	BUG_ON(blks == 0);
17392fa3cdfbSTheodore Ts'o 
17401de3e3dfSTheodore Ts'o 	mapp = &map;
17412ed88685STheodore Ts'o 	if (map.m_flags & EXT4_MAP_NEW) {
17422ed88685STheodore Ts'o 		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
17432ed88685STheodore Ts'o 		int i;
174464769240SAlex Tomas 
17452ed88685STheodore Ts'o 		for (i = 0; i < map.m_len; i++)
17462ed88685STheodore Ts'o 			unmap_underlying_metadata(bdev, map.m_pblk + i);
17472fa3cdfbSTheodore Ts'o 	}
17482fa3cdfbSTheodore Ts'o 
17492fa3cdfbSTheodore Ts'o 	/*
175003f5d8bcSJan Kara 	 * Update on-disk size along with block allocation.
17512fa3cdfbSTheodore Ts'o 	 */
17522fa3cdfbSTheodore Ts'o 	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
17532fa3cdfbSTheodore Ts'o 	if (disksize > i_size_read(mpd->inode))
17542fa3cdfbSTheodore Ts'o 		disksize = i_size_read(mpd->inode);
17552fa3cdfbSTheodore Ts'o 	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
17562fa3cdfbSTheodore Ts'o 		ext4_update_i_disksize(mpd->inode, disksize);
17575a87b7a5STheodore Ts'o 		err = ext4_mark_inode_dirty(handle, mpd->inode);
17585a87b7a5STheodore Ts'o 		if (err)
17595a87b7a5STheodore Ts'o 			ext4_error(mpd->inode->i_sb,
17605a87b7a5STheodore Ts'o 				   "Failed to mark inode %lu dirty",
17615a87b7a5STheodore Ts'o 				   mpd->inode->i_ino);
17622fa3cdfbSTheodore Ts'o 	}
17632fa3cdfbSTheodore Ts'o 
17645a87b7a5STheodore Ts'o submit_io:
17651de3e3dfSTheodore Ts'o 	mpage_da_submit_io(mpd, mapp);
17665a87b7a5STheodore Ts'o 	mpd->io_done = 1;
176764769240SAlex Tomas }
176864769240SAlex Tomas 
1769bf068ee2SAneesh Kumar K.V #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
1770bf068ee2SAneesh Kumar K.V 		(1 << BH_Delay) | (1 << BH_Unwritten))
177164769240SAlex Tomas 
177264769240SAlex Tomas /*
177364769240SAlex Tomas  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
177464769240SAlex Tomas  *
177564769240SAlex Tomas  * @mpd->lbh - extent of blocks
177664769240SAlex Tomas  * @logical - logical number of the block in the file
1777b6a8e62fSJan Kara  * @b_state - b_state of the buffer head added
177864769240SAlex Tomas  *
177964769240SAlex Tomas  * the function is used to collect contig. blocks in same state
178064769240SAlex Tomas  */
1781b6a8e62fSJan Kara static void mpage_add_bh_to_extent(struct mpage_da_data *mpd, sector_t logical,
17828dc207c0STheodore Ts'o 				   unsigned long b_state)
178364769240SAlex Tomas {
178464769240SAlex Tomas 	sector_t next;
1785b6a8e62fSJan Kara 	int blkbits = mpd->inode->i_blkbits;
1786b6a8e62fSJan Kara 	int nrblocks = mpd->b_size >> blkbits;
178764769240SAlex Tomas 
1788c445e3e0SEric Sandeen 	/*
1789c445e3e0SEric Sandeen 	 * XXX Don't go larger than mballoc is willing to allocate
1790c445e3e0SEric Sandeen 	 * This is a stopgap solution.  We eventually need to fold
1791c445e3e0SEric Sandeen 	 * mpage_da_submit_io() into this function and then call
179279e83036SEric Sandeen 	 * ext4_map_blocks() multiple times in a loop
1793c445e3e0SEric Sandeen 	 */
1794b6a8e62fSJan Kara 	if (nrblocks >= (8*1024*1024 >> blkbits))
1795c445e3e0SEric Sandeen 		goto flush_it;
1796c445e3e0SEric Sandeen 
1797525f4ed8SMingming Cao 	/* check if the reserved journal credits might overflow */
1798b6a8e62fSJan Kara 	if (!ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS)) {
1799525f4ed8SMingming Cao 		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1800525f4ed8SMingming Cao 			/*
1801525f4ed8SMingming Cao 			 * With non-extent format we are limited by the journal
1802525f4ed8SMingming Cao 			 * credit available.  Total credit needed to insert
1803525f4ed8SMingming Cao 			 * nrblocks contiguous blocks is dependent on the
1804525f4ed8SMingming Cao 			 * nrblocks.  So limit nrblocks.
1805525f4ed8SMingming Cao 			 */
1806525f4ed8SMingming Cao 			goto flush_it;
1807525f4ed8SMingming Cao 		}
1808525f4ed8SMingming Cao 	}
180964769240SAlex Tomas 	/*
181064769240SAlex Tomas 	 * First block in the extent
181164769240SAlex Tomas 	 */
18128dc207c0STheodore Ts'o 	if (mpd->b_size == 0) {
18138dc207c0STheodore Ts'o 		mpd->b_blocknr = logical;
1814b6a8e62fSJan Kara 		mpd->b_size = 1 << blkbits;
18158dc207c0STheodore Ts'o 		mpd->b_state = b_state & BH_FLAGS;
181664769240SAlex Tomas 		return;
181764769240SAlex Tomas 	}
181864769240SAlex Tomas 
18198dc207c0STheodore Ts'o 	next = mpd->b_blocknr + nrblocks;
182064769240SAlex Tomas 	/*
182164769240SAlex Tomas 	 * Can we merge the block to our big extent?
182264769240SAlex Tomas 	 */
18238dc207c0STheodore Ts'o 	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
1824b6a8e62fSJan Kara 		mpd->b_size += 1 << blkbits;
182564769240SAlex Tomas 		return;
182664769240SAlex Tomas 	}
182764769240SAlex Tomas 
1828525f4ed8SMingming Cao flush_it:
182964769240SAlex Tomas 	/*
183064769240SAlex Tomas 	 * We couldn't merge the block to our extent, so we
183164769240SAlex Tomas 	 * need to flush current  extent and start new one
183264769240SAlex Tomas 	 */
18335a87b7a5STheodore Ts'o 	mpage_da_map_and_submit(mpd);
1834a1d6cc56SAneesh Kumar K.V 	return;
183564769240SAlex Tomas }
183664769240SAlex Tomas 
1837c364b22cSAneesh Kumar K.V static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
183829fa89d0SAneesh Kumar K.V {
1839c364b22cSAneesh Kumar K.V 	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
184029fa89d0SAneesh Kumar K.V }
184129fa89d0SAneesh Kumar K.V 
184264769240SAlex Tomas /*
18435356f261SAditya Kali  * This function is grabs code from the very beginning of
18445356f261SAditya Kali  * ext4_map_blocks, but assumes that the caller is from delayed write
18455356f261SAditya Kali  * time. This function looks up the requested blocks and sets the
18465356f261SAditya Kali  * buffer delay bit under the protection of i_data_sem.
18475356f261SAditya Kali  */
18485356f261SAditya Kali static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
18495356f261SAditya Kali 			      struct ext4_map_blocks *map,
18505356f261SAditya Kali 			      struct buffer_head *bh)
18515356f261SAditya Kali {
1852d100eef2SZheng Liu 	struct extent_status es;
18535356f261SAditya Kali 	int retval;
18545356f261SAditya Kali 	sector_t invalid_block = ~((sector_t) 0xffff);
1855921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST
1856921f266bSDmitry Monakhov 	struct ext4_map_blocks orig_map;
1857921f266bSDmitry Monakhov 
1858921f266bSDmitry Monakhov 	memcpy(&orig_map, map, sizeof(*map));
1859921f266bSDmitry Monakhov #endif
18605356f261SAditya Kali 
18615356f261SAditya Kali 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
18625356f261SAditya Kali 		invalid_block = ~0;
18635356f261SAditya Kali 
18645356f261SAditya Kali 	map->m_flags = 0;
18655356f261SAditya Kali 	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
18665356f261SAditya Kali 		  "logical block %lu\n", inode->i_ino, map->m_len,
18675356f261SAditya Kali 		  (unsigned long) map->m_lblk);
1868d100eef2SZheng Liu 
1869d100eef2SZheng Liu 	/* Lookup extent status tree firstly */
1870d100eef2SZheng Liu 	if (ext4_es_lookup_extent(inode, iblock, &es)) {
1871d100eef2SZheng Liu 
1872d100eef2SZheng Liu 		if (ext4_es_is_hole(&es)) {
1873d100eef2SZheng Liu 			retval = 0;
1874d100eef2SZheng Liu 			down_read((&EXT4_I(inode)->i_data_sem));
1875d100eef2SZheng Liu 			goto add_delayed;
1876d100eef2SZheng Liu 		}
1877d100eef2SZheng Liu 
1878d100eef2SZheng Liu 		/*
1879d100eef2SZheng Liu 		 * Delayed extent could be allocated by fallocate.
1880d100eef2SZheng Liu 		 * So we need to check it.
1881d100eef2SZheng Liu 		 */
1882d100eef2SZheng Liu 		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1883d100eef2SZheng Liu 			map_bh(bh, inode->i_sb, invalid_block);
1884d100eef2SZheng Liu 			set_buffer_new(bh);
1885d100eef2SZheng Liu 			set_buffer_delay(bh);
1886d100eef2SZheng Liu 			return 0;
1887d100eef2SZheng Liu 		}
1888d100eef2SZheng Liu 
1889d100eef2SZheng Liu 		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1890d100eef2SZheng Liu 		retval = es.es_len - (iblock - es.es_lblk);
1891d100eef2SZheng Liu 		if (retval > map->m_len)
1892d100eef2SZheng Liu 			retval = map->m_len;
1893d100eef2SZheng Liu 		map->m_len = retval;
1894d100eef2SZheng Liu 		if (ext4_es_is_written(&es))
1895d100eef2SZheng Liu 			map->m_flags |= EXT4_MAP_MAPPED;
1896d100eef2SZheng Liu 		else if (ext4_es_is_unwritten(&es))
1897d100eef2SZheng Liu 			map->m_flags |= EXT4_MAP_UNWRITTEN;
1898d100eef2SZheng Liu 		else
1899d100eef2SZheng Liu 			BUG_ON(1);
1900d100eef2SZheng Liu 
1901921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST
1902921f266bSDmitry Monakhov 		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1903921f266bSDmitry Monakhov #endif
1904d100eef2SZheng Liu 		return retval;
1905d100eef2SZheng Liu 	}
1906d100eef2SZheng Liu 
19075356f261SAditya Kali 	/*
19085356f261SAditya Kali 	 * Try to see if we can get the block without requesting a new
19095356f261SAditya Kali 	 * file system block.
19105356f261SAditya Kali 	 */
19115356f261SAditya Kali 	down_read((&EXT4_I(inode)->i_data_sem));
19129c3569b5STao Ma 	if (ext4_has_inline_data(inode)) {
19139c3569b5STao Ma 		/*
19149c3569b5STao Ma 		 * We will soon create blocks for this page, and let
19159c3569b5STao Ma 		 * us pretend as if the blocks aren't allocated yet.
19169c3569b5STao Ma 		 * In case of clusters, we have to handle the work
19179c3569b5STao Ma 		 * of mapping from cluster so that the reserved space
19189c3569b5STao Ma 		 * is calculated properly.
19199c3569b5STao Ma 		 */
19209c3569b5STao Ma 		if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
19219c3569b5STao Ma 		    ext4_find_delalloc_cluster(inode, map->m_lblk))
19229c3569b5STao Ma 			map->m_flags |= EXT4_MAP_FROM_CLUSTER;
19239c3569b5STao Ma 		retval = 0;
19249c3569b5STao Ma 	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1925d100eef2SZheng Liu 		retval = ext4_ext_map_blocks(NULL, inode, map,
1926d100eef2SZheng Liu 					     EXT4_GET_BLOCKS_NO_PUT_HOLE);
19275356f261SAditya Kali 	else
1928d100eef2SZheng Liu 		retval = ext4_ind_map_blocks(NULL, inode, map,
1929d100eef2SZheng Liu 					     EXT4_GET_BLOCKS_NO_PUT_HOLE);
19305356f261SAditya Kali 
1931d100eef2SZheng Liu add_delayed:
19325356f261SAditya Kali 	if (retval == 0) {
1933f7fec032SZheng Liu 		int ret;
19345356f261SAditya Kali 		/*
19355356f261SAditya Kali 		 * XXX: __block_prepare_write() unmaps passed block,
19365356f261SAditya Kali 		 * is it OK?
19375356f261SAditya Kali 		 */
1938386ad67cSLukas Czerner 		/*
1939386ad67cSLukas Czerner 		 * If the block was allocated from previously allocated cluster,
1940386ad67cSLukas Czerner 		 * then we don't need to reserve it again. However we still need
1941386ad67cSLukas Czerner 		 * to reserve metadata for every block we're going to write.
1942386ad67cSLukas Czerner 		 */
19435356f261SAditya Kali 		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
1944f7fec032SZheng Liu 			ret = ext4_da_reserve_space(inode, iblock);
1945f7fec032SZheng Liu 			if (ret) {
19465356f261SAditya Kali 				/* not enough space to reserve */
1947f7fec032SZheng Liu 				retval = ret;
19485356f261SAditya Kali 				goto out_unlock;
19495356f261SAditya Kali 			}
1950386ad67cSLukas Czerner 		} else {
1951386ad67cSLukas Czerner 			ret = ext4_da_reserve_metadata(inode, iblock);
1952386ad67cSLukas Czerner 			if (ret) {
1953386ad67cSLukas Czerner 				/* not enough space to reserve */
1954386ad67cSLukas Czerner 				retval = ret;
1955386ad67cSLukas Czerner 				goto out_unlock;
1956386ad67cSLukas Czerner 			}
1957f7fec032SZheng Liu 		}
19585356f261SAditya Kali 
1959f7fec032SZheng Liu 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1960fdc0212eSZheng Liu 					    ~0, EXTENT_STATUS_DELAYED);
1961f7fec032SZheng Liu 		if (ret) {
1962f7fec032SZheng Liu 			retval = ret;
196351865fdaSZheng Liu 			goto out_unlock;
1964f7fec032SZheng Liu 		}
196551865fdaSZheng Liu 
19665356f261SAditya Kali 		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
19675356f261SAditya Kali 		 * and it should not appear on the bh->b_state.
19685356f261SAditya Kali 		 */
19695356f261SAditya Kali 		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
19705356f261SAditya Kali 
19715356f261SAditya Kali 		map_bh(bh, inode->i_sb, invalid_block);
19725356f261SAditya Kali 		set_buffer_new(bh);
19735356f261SAditya Kali 		set_buffer_delay(bh);
1974f7fec032SZheng Liu 	} else if (retval > 0) {
1975f7fec032SZheng Liu 		int ret;
1976f7fec032SZheng Liu 		unsigned long long status;
1977f7fec032SZheng Liu 
1978921f266bSDmitry Monakhov #ifdef ES_AGGRESSIVE_TEST
1979921f266bSDmitry Monakhov 		if (retval != map->m_len) {
1980921f266bSDmitry Monakhov 			printk("ES len assertation failed for inode: %lu "
1981921f266bSDmitry Monakhov 			       "retval %d != map->m_len %d "
1982921f266bSDmitry Monakhov 			       "in %s (lookup)\n", inode->i_ino, retval,
1983921f266bSDmitry Monakhov 			       map->m_len, __func__);
1984921f266bSDmitry Monakhov 		}
1985921f266bSDmitry Monakhov #endif
1986921f266bSDmitry Monakhov 
1987f7fec032SZheng Liu 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1988f7fec032SZheng Liu 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1989f7fec032SZheng Liu 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1990f7fec032SZheng Liu 					    map->m_pblk, status);
1991f7fec032SZheng Liu 		if (ret != 0)
1992f7fec032SZheng Liu 			retval = ret;
19935356f261SAditya Kali 	}
19945356f261SAditya Kali 
19955356f261SAditya Kali out_unlock:
19965356f261SAditya Kali 	up_read((&EXT4_I(inode)->i_data_sem));
19975356f261SAditya Kali 
19985356f261SAditya Kali 	return retval;
19995356f261SAditya Kali }
20005356f261SAditya Kali 
20015356f261SAditya Kali /*
2002b920c755STheodore Ts'o  * This is a special get_blocks_t callback which is used by
2003b920c755STheodore Ts'o  * ext4_da_write_begin().  It will either return mapped block or
2004b920c755STheodore Ts'o  * reserve space for a single block.
200529fa89d0SAneesh Kumar K.V  *
200629fa89d0SAneesh Kumar K.V  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
200729fa89d0SAneesh Kumar K.V  * We also have b_blocknr = -1 and b_bdev initialized properly
200829fa89d0SAneesh Kumar K.V  *
200929fa89d0SAneesh Kumar K.V  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
201029fa89d0SAneesh Kumar K.V  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
201129fa89d0SAneesh Kumar K.V  * initialized properly.
201264769240SAlex Tomas  */
20139c3569b5STao Ma int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
20142ed88685STheodore Ts'o 			   struct buffer_head *bh, int create)
201564769240SAlex Tomas {
20162ed88685STheodore Ts'o 	struct ext4_map_blocks map;
201764769240SAlex Tomas 	int ret = 0;
201864769240SAlex Tomas 
201964769240SAlex Tomas 	BUG_ON(create == 0);
20202ed88685STheodore Ts'o 	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
20212ed88685STheodore Ts'o 
20222ed88685STheodore Ts'o 	map.m_lblk = iblock;
20232ed88685STheodore Ts'o 	map.m_len = 1;
202464769240SAlex Tomas 
202564769240SAlex Tomas 	/*
202664769240SAlex Tomas 	 * first, we need to know whether the block is allocated already
202764769240SAlex Tomas 	 * preallocated blocks are unmapped but should treated
202864769240SAlex Tomas 	 * the same as allocated blocks.
202964769240SAlex Tomas 	 */
20305356f261SAditya Kali 	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
20315356f261SAditya Kali 	if (ret <= 0)
20322ed88685STheodore Ts'o 		return ret;
203364769240SAlex Tomas 
20342ed88685STheodore Ts'o 	map_bh(bh, inode->i_sb, map.m_pblk);
20352ed88685STheodore Ts'o 	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
20362ed88685STheodore Ts'o 
20372ed88685STheodore Ts'o 	if (buffer_unwritten(bh)) {
20382ed88685STheodore Ts'o 		/* A delayed write to unwritten bh should be marked
20392ed88685STheodore Ts'o 		 * new and mapped.  Mapped ensures that we don't do
20402ed88685STheodore Ts'o 		 * get_block multiple times when we write to the same
20412ed88685STheodore Ts'o 		 * offset and new ensures that we do proper zero out
20422ed88685STheodore Ts'o 		 * for partial write.
20432ed88685STheodore Ts'o 		 */
20442ed88685STheodore Ts'o 		set_buffer_new(bh);
2045c8205636STheodore Ts'o 		set_buffer_mapped(bh);
20462ed88685STheodore Ts'o 	}
20472ed88685STheodore Ts'o 	return 0;
204864769240SAlex Tomas }
204961628a3fSMingming Cao 
205062e086beSAneesh Kumar K.V static int bget_one(handle_t *handle, struct buffer_head *bh)
205162e086beSAneesh Kumar K.V {
205262e086beSAneesh Kumar K.V 	get_bh(bh);
205362e086beSAneesh Kumar K.V 	return 0;
205462e086beSAneesh Kumar K.V }
205562e086beSAneesh Kumar K.V 
205662e086beSAneesh Kumar K.V static int bput_one(handle_t *handle, struct buffer_head *bh)
205762e086beSAneesh Kumar K.V {
205862e086beSAneesh Kumar K.V 	put_bh(bh);
205962e086beSAneesh Kumar K.V 	return 0;
206062e086beSAneesh Kumar K.V }
206162e086beSAneesh Kumar K.V 
206262e086beSAneesh Kumar K.V static int __ext4_journalled_writepage(struct page *page,
206362e086beSAneesh Kumar K.V 				       unsigned int len)
206462e086beSAneesh Kumar K.V {
206562e086beSAneesh Kumar K.V 	struct address_space *mapping = page->mapping;
206662e086beSAneesh Kumar K.V 	struct inode *inode = mapping->host;
20673fdcfb66STao Ma 	struct buffer_head *page_bufs = NULL;
206862e086beSAneesh Kumar K.V 	handle_t *handle = NULL;
20693fdcfb66STao Ma 	int ret = 0, err = 0;
20703fdcfb66STao Ma 	int inline_data = ext4_has_inline_data(inode);
20713fdcfb66STao Ma 	struct buffer_head *inode_bh = NULL;
207262e086beSAneesh Kumar K.V 
2073cb20d518STheodore Ts'o 	ClearPageChecked(page);
20743fdcfb66STao Ma 
20753fdcfb66STao Ma 	if (inline_data) {
20763fdcfb66STao Ma 		BUG_ON(page->index != 0);
20773fdcfb66STao Ma 		BUG_ON(len > ext4_get_max_inline_size(inode));
20783fdcfb66STao Ma 		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
20793fdcfb66STao Ma 		if (inode_bh == NULL)
20803fdcfb66STao Ma 			goto out;
20813fdcfb66STao Ma 	} else {
208262e086beSAneesh Kumar K.V 		page_bufs = page_buffers(page);
20833fdcfb66STao Ma 		if (!page_bufs) {
20843fdcfb66STao Ma 			BUG();
20853fdcfb66STao Ma 			goto out;
20863fdcfb66STao Ma 		}
20873fdcfb66STao Ma 		ext4_walk_page_buffers(handle, page_bufs, 0, len,
20883fdcfb66STao Ma 				       NULL, bget_one);
20893fdcfb66STao Ma 	}
209062e086beSAneesh Kumar K.V 	/* As soon as we unlock the page, it can go away, but we have
209162e086beSAneesh Kumar K.V 	 * references to buffers so we are safe */
209262e086beSAneesh Kumar K.V 	unlock_page(page);
209362e086beSAneesh Kumar K.V 
20949924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
20959924a92aSTheodore Ts'o 				    ext4_writepage_trans_blocks(inode));
209662e086beSAneesh Kumar K.V 	if (IS_ERR(handle)) {
209762e086beSAneesh Kumar K.V 		ret = PTR_ERR(handle);
209862e086beSAneesh Kumar K.V 		goto out;
209962e086beSAneesh Kumar K.V 	}
210062e086beSAneesh Kumar K.V 
2101441c8508SCurt Wohlgemuth 	BUG_ON(!ext4_handle_valid(handle));
2102441c8508SCurt Wohlgemuth 
21033fdcfb66STao Ma 	if (inline_data) {
21043fdcfb66STao Ma 		ret = ext4_journal_get_write_access(handle, inode_bh);
21053fdcfb66STao Ma 
21063fdcfb66STao Ma 		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
21073fdcfb66STao Ma 
21083fdcfb66STao Ma 	} else {
2109f19d5870STao Ma 		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
211062e086beSAneesh Kumar K.V 					     do_journal_get_write_access);
211162e086beSAneesh Kumar K.V 
2112f19d5870STao Ma 		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
211362e086beSAneesh Kumar K.V 					     write_end_fn);
21143fdcfb66STao Ma 	}
211562e086beSAneesh Kumar K.V 	if (ret == 0)
211662e086beSAneesh Kumar K.V 		ret = err;
21172d859db3SJan Kara 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
211862e086beSAneesh Kumar K.V 	err = ext4_journal_stop(handle);
211962e086beSAneesh Kumar K.V 	if (!ret)
212062e086beSAneesh Kumar K.V 		ret = err;
212162e086beSAneesh Kumar K.V 
21223fdcfb66STao Ma 	if (!ext4_has_inline_data(inode))
21233fdcfb66STao Ma 		ext4_walk_page_buffers(handle, page_bufs, 0, len,
21243fdcfb66STao Ma 				       NULL, bput_one);
212519f5fb7aSTheodore Ts'o 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
212662e086beSAneesh Kumar K.V out:
21273fdcfb66STao Ma 	brelse(inode_bh);
212862e086beSAneesh Kumar K.V 	return ret;
212962e086beSAneesh Kumar K.V }
213062e086beSAneesh Kumar K.V 
213161628a3fSMingming Cao /*
213243ce1d23SAneesh Kumar K.V  * Note that we don't need to start a transaction unless we're journaling data
213343ce1d23SAneesh Kumar K.V  * because we should have holes filled from ext4_page_mkwrite(). We even don't
213443ce1d23SAneesh Kumar K.V  * need to file the inode to the transaction's list in ordered mode because if
213543ce1d23SAneesh Kumar K.V  * we are writing back data added by write(), the inode is already there and if
213643ce1d23SAneesh Kumar K.V  * we are writing back data modified via mmap(), no one guarantees in which
213743ce1d23SAneesh Kumar K.V  * transaction the data will hit the disk. In case we are journaling data, we
213843ce1d23SAneesh Kumar K.V  * cannot start transaction directly because transaction start ranks above page
213943ce1d23SAneesh Kumar K.V  * lock so we have to do some magic.
214043ce1d23SAneesh Kumar K.V  *
2141b920c755STheodore Ts'o  * This function can get called via...
2142b920c755STheodore Ts'o  *   - ext4_da_writepages after taking page lock (have journal handle)
2143b920c755STheodore Ts'o  *   - journal_submit_inode_data_buffers (no journal handle)
2144f6463b0dSArtem Bityutskiy  *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
2145b920c755STheodore Ts'o  *   - grab_page_cache when doing write_begin (have journal handle)
214643ce1d23SAneesh Kumar K.V  *
214743ce1d23SAneesh Kumar K.V  * We don't do any block allocation in this function. If we have page with
214843ce1d23SAneesh Kumar K.V  * multiple blocks we need to write those buffer_heads that are mapped. This
214943ce1d23SAneesh Kumar K.V  * is important for mmaped based write. So if we do with blocksize 1K
215043ce1d23SAneesh Kumar K.V  * truncate(f, 1024);
215143ce1d23SAneesh Kumar K.V  * a = mmap(f, 0, 4096);
215243ce1d23SAneesh Kumar K.V  * a[0] = 'a';
215343ce1d23SAneesh Kumar K.V  * truncate(f, 4096);
215443ce1d23SAneesh Kumar K.V  * we have in the page first buffer_head mapped via page_mkwrite call back
215590802ed9SPaul Bolle  * but other buffer_heads would be unmapped but dirty (dirty done via the
215643ce1d23SAneesh Kumar K.V  * do_wp_page). So writepage should write the first block. If we modify
215743ce1d23SAneesh Kumar K.V  * the mmap area beyond 1024 we will again get a page_fault and the
215843ce1d23SAneesh Kumar K.V  * page_mkwrite callback will do the block allocation and mark the
215943ce1d23SAneesh Kumar K.V  * buffer_heads mapped.
216043ce1d23SAneesh Kumar K.V  *
216143ce1d23SAneesh Kumar K.V  * We redirty the page if we have any buffer_heads that is either delay or
216243ce1d23SAneesh Kumar K.V  * unwritten in the page.
216343ce1d23SAneesh Kumar K.V  *
216443ce1d23SAneesh Kumar K.V  * We can get recursively called as show below.
216543ce1d23SAneesh Kumar K.V  *
216643ce1d23SAneesh Kumar K.V  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
216743ce1d23SAneesh Kumar K.V  *		ext4_writepage()
216843ce1d23SAneesh Kumar K.V  *
216943ce1d23SAneesh Kumar K.V  * But since we don't do any block allocation we should not deadlock.
217043ce1d23SAneesh Kumar K.V  * Page also have the dirty flag cleared so we don't get recurive page_lock.
217161628a3fSMingming Cao  */
217243ce1d23SAneesh Kumar K.V static int ext4_writepage(struct page *page,
217364769240SAlex Tomas 			  struct writeback_control *wbc)
217464769240SAlex Tomas {
2175f8bec370SJan Kara 	int ret = 0;
217661628a3fSMingming Cao 	loff_t size;
2177498e5f24STheodore Ts'o 	unsigned int len;
2178744692dcSJiaying Zhang 	struct buffer_head *page_bufs = NULL;
217961628a3fSMingming Cao 	struct inode *inode = page->mapping->host;
218036ade451SJan Kara 	struct ext4_io_submit io_submit;
218164769240SAlex Tomas 
2182a9c667f8SLukas Czerner 	trace_ext4_writepage(page);
218361628a3fSMingming Cao 	size = i_size_read(inode);
218461628a3fSMingming Cao 	if (page->index == size >> PAGE_CACHE_SHIFT)
218561628a3fSMingming Cao 		len = size & ~PAGE_CACHE_MASK;
218661628a3fSMingming Cao 	else
218761628a3fSMingming Cao 		len = PAGE_CACHE_SIZE;
218861628a3fSMingming Cao 
2189f0e6c985SAneesh Kumar K.V 	page_bufs = page_buffers(page);
219064769240SAlex Tomas 	/*
2191fe386132SJan Kara 	 * We cannot do block allocation or other extent handling in this
2192fe386132SJan Kara 	 * function. If there are buffers needing that, we have to redirty
2193fe386132SJan Kara 	 * the page. But we may reach here when we do a journal commit via
2194fe386132SJan Kara 	 * journal_submit_inode_data_buffers() and in that case we must write
2195fe386132SJan Kara 	 * allocated buffers to achieve data=ordered mode guarantees.
219664769240SAlex Tomas 	 */
2197f19d5870STao Ma 	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2198c364b22cSAneesh Kumar K.V 				   ext4_bh_delay_or_unwritten)) {
219961628a3fSMingming Cao 		redirty_page_for_writepage(wbc, page);
2200fe386132SJan Kara 		if (current->flags & PF_MEMALLOC) {
2201fe386132SJan Kara 			/*
2202fe386132SJan Kara 			 * For memory cleaning there's no point in writing only
2203fe386132SJan Kara 			 * some buffers. So just bail out. Warn if we came here
2204fe386132SJan Kara 			 * from direct reclaim.
2205fe386132SJan Kara 			 */
2206fe386132SJan Kara 			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
2207fe386132SJan Kara 							== PF_MEMALLOC);
220861628a3fSMingming Cao 			unlock_page(page);
220961628a3fSMingming Cao 			return 0;
221061628a3fSMingming Cao 		}
2211f0e6c985SAneesh Kumar K.V 	}
221264769240SAlex Tomas 
2213cb20d518STheodore Ts'o 	if (PageChecked(page) && ext4_should_journal_data(inode))
221443ce1d23SAneesh Kumar K.V 		/*
221543ce1d23SAneesh Kumar K.V 		 * It's mmapped pagecache.  Add buffers and journal it.  There
221643ce1d23SAneesh Kumar K.V 		 * doesn't seem much point in redirtying the page here.
221743ce1d23SAneesh Kumar K.V 		 */
22183f0ca309SWu Fengguang 		return __ext4_journalled_writepage(page, len);
221943ce1d23SAneesh Kumar K.V 
222036ade451SJan Kara 	memset(&io_submit, 0, sizeof(io_submit));
222136ade451SJan Kara 	ret = ext4_bio_write_page(&io_submit, page, len, wbc);
222236ade451SJan Kara 	ext4_io_submit(&io_submit);
222364769240SAlex Tomas 	return ret;
222464769240SAlex Tomas }
222564769240SAlex Tomas 
222661628a3fSMingming Cao /*
2227525f4ed8SMingming Cao  * This is called via ext4_da_writepages() to
222825985edcSLucas De Marchi  * calculate the total number of credits to reserve to fit
2229525f4ed8SMingming Cao  * a single extent allocation into a single transaction,
2230525f4ed8SMingming Cao  * ext4_da_writpeages() will loop calling this before
2231525f4ed8SMingming Cao  * the block allocation.
223261628a3fSMingming Cao  */
2233525f4ed8SMingming Cao 
2234525f4ed8SMingming Cao static int ext4_da_writepages_trans_blocks(struct inode *inode)
2235525f4ed8SMingming Cao {
2236525f4ed8SMingming Cao 	int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2237525f4ed8SMingming Cao 
2238525f4ed8SMingming Cao 	/*
2239525f4ed8SMingming Cao 	 * With non-extent format the journal credit needed to
2240525f4ed8SMingming Cao 	 * insert nrblocks contiguous block is dependent on
2241525f4ed8SMingming Cao 	 * number of contiguous block. So we will limit
2242525f4ed8SMingming Cao 	 * number of contiguous block to a sane value
2243525f4ed8SMingming Cao 	 */
224412e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2245525f4ed8SMingming Cao 	    (max_blocks > EXT4_MAX_TRANS_DATA))
2246525f4ed8SMingming Cao 		max_blocks = EXT4_MAX_TRANS_DATA;
2247525f4ed8SMingming Cao 
2248525f4ed8SMingming Cao 	return ext4_chunk_trans_blocks(inode, max_blocks);
2249525f4ed8SMingming Cao }
225061628a3fSMingming Cao 
22518e48dcfbSTheodore Ts'o /*
22528e48dcfbSTheodore Ts'o  * write_cache_pages_da - walk the list of dirty pages of the given
22538eb9e5ceSTheodore Ts'o  * address space and accumulate pages that need writing, and call
2254168fc022STheodore Ts'o  * mpage_da_map_and_submit to map a single contiguous memory region
2255168fc022STheodore Ts'o  * and then write them.
22568e48dcfbSTheodore Ts'o  */
22579c3569b5STao Ma static int write_cache_pages_da(handle_t *handle,
22589c3569b5STao Ma 				struct address_space *mapping,
22598e48dcfbSTheodore Ts'o 				struct writeback_control *wbc,
226072f84e65SEric Sandeen 				struct mpage_da_data *mpd,
226172f84e65SEric Sandeen 				pgoff_t *done_index)
22628e48dcfbSTheodore Ts'o {
22638eb9e5ceSTheodore Ts'o 	struct buffer_head	*bh, *head;
2264168fc022STheodore Ts'o 	struct inode		*inode = mapping->host;
22658e48dcfbSTheodore Ts'o 	struct pagevec		pvec;
22664f01b02cSTheodore Ts'o 	unsigned int		nr_pages;
22674f01b02cSTheodore Ts'o 	sector_t		logical;
22684f01b02cSTheodore Ts'o 	pgoff_t			index, end;
22698e48dcfbSTheodore Ts'o 	long			nr_to_write = wbc->nr_to_write;
22704f01b02cSTheodore Ts'o 	int			i, tag, ret = 0;
22718e48dcfbSTheodore Ts'o 
2272168fc022STheodore Ts'o 	memset(mpd, 0, sizeof(struct mpage_da_data));
2273168fc022STheodore Ts'o 	mpd->wbc = wbc;
2274168fc022STheodore Ts'o 	mpd->inode = inode;
22758e48dcfbSTheodore Ts'o 	pagevec_init(&pvec, 0);
22768e48dcfbSTheodore Ts'o 	index = wbc->range_start >> PAGE_CACHE_SHIFT;
22778e48dcfbSTheodore Ts'o 	end = wbc->range_end >> PAGE_CACHE_SHIFT;
22788e48dcfbSTheodore Ts'o 
22796e6938b6SWu Fengguang 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
22805b41d924SEric Sandeen 		tag = PAGECACHE_TAG_TOWRITE;
22815b41d924SEric Sandeen 	else
22825b41d924SEric Sandeen 		tag = PAGECACHE_TAG_DIRTY;
22835b41d924SEric Sandeen 
228472f84e65SEric Sandeen 	*done_index = index;
22854f01b02cSTheodore Ts'o 	while (index <= end) {
22865b41d924SEric Sandeen 		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
22878e48dcfbSTheodore Ts'o 			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
22888e48dcfbSTheodore Ts'o 		if (nr_pages == 0)
22894f01b02cSTheodore Ts'o 			return 0;
22908e48dcfbSTheodore Ts'o 
22918e48dcfbSTheodore Ts'o 		for (i = 0; i < nr_pages; i++) {
22928e48dcfbSTheodore Ts'o 			struct page *page = pvec.pages[i];
22938e48dcfbSTheodore Ts'o 
22948e48dcfbSTheodore Ts'o 			/*
22958e48dcfbSTheodore Ts'o 			 * At this point, the page may be truncated or
22968e48dcfbSTheodore Ts'o 			 * invalidated (changing page->mapping to NULL), or
22978e48dcfbSTheodore Ts'o 			 * even swizzled back from swapper_space to tmpfs file
22988e48dcfbSTheodore Ts'o 			 * mapping. However, page->index will not change
22998e48dcfbSTheodore Ts'o 			 * because we have a reference on the page.
23008e48dcfbSTheodore Ts'o 			 */
23014f01b02cSTheodore Ts'o 			if (page->index > end)
23024f01b02cSTheodore Ts'o 				goto out;
23038e48dcfbSTheodore Ts'o 
230472f84e65SEric Sandeen 			*done_index = page->index + 1;
230572f84e65SEric Sandeen 
230678aaced3STheodore Ts'o 			/*
230778aaced3STheodore Ts'o 			 * If we can't merge this page, and we have
230878aaced3STheodore Ts'o 			 * accumulated an contiguous region, write it
230978aaced3STheodore Ts'o 			 */
231078aaced3STheodore Ts'o 			if ((mpd->next_page != page->index) &&
231178aaced3STheodore Ts'o 			    (mpd->next_page != mpd->first_page)) {
231278aaced3STheodore Ts'o 				mpage_da_map_and_submit(mpd);
231378aaced3STheodore Ts'o 				goto ret_extent_tail;
231478aaced3STheodore Ts'o 			}
231578aaced3STheodore Ts'o 
23168e48dcfbSTheodore Ts'o 			lock_page(page);
23178e48dcfbSTheodore Ts'o 
23188e48dcfbSTheodore Ts'o 			/*
23194f01b02cSTheodore Ts'o 			 * If the page is no longer dirty, or its
23204f01b02cSTheodore Ts'o 			 * mapping no longer corresponds to inode we
23214f01b02cSTheodore Ts'o 			 * are writing (which means it has been
23224f01b02cSTheodore Ts'o 			 * truncated or invalidated), or the page is
23234f01b02cSTheodore Ts'o 			 * already under writeback and we are not
23244f01b02cSTheodore Ts'o 			 * doing a data integrity writeback, skip the page
23258e48dcfbSTheodore Ts'o 			 */
23264f01b02cSTheodore Ts'o 			if (!PageDirty(page) ||
23274f01b02cSTheodore Ts'o 			    (PageWriteback(page) &&
23284f01b02cSTheodore Ts'o 			     (wbc->sync_mode == WB_SYNC_NONE)) ||
23294f01b02cSTheodore Ts'o 			    unlikely(page->mapping != mapping)) {
23308e48dcfbSTheodore Ts'o 				unlock_page(page);
23318e48dcfbSTheodore Ts'o 				continue;
23328e48dcfbSTheodore Ts'o 			}
23338e48dcfbSTheodore Ts'o 
23348e48dcfbSTheodore Ts'o 			wait_on_page_writeback(page);
23358e48dcfbSTheodore Ts'o 			BUG_ON(PageWriteback(page));
23368e48dcfbSTheodore Ts'o 
23379c3569b5STao Ma 			/*
23389c3569b5STao Ma 			 * If we have inline data and arrive here, it means that
23399c3569b5STao Ma 			 * we will soon create the block for the 1st page, so
23409c3569b5STao Ma 			 * we'd better clear the inline data here.
23419c3569b5STao Ma 			 */
23429c3569b5STao Ma 			if (ext4_has_inline_data(inode)) {
23439c3569b5STao Ma 				BUG_ON(ext4_test_inode_state(inode,
23449c3569b5STao Ma 						EXT4_STATE_MAY_INLINE_DATA));
23459c3569b5STao Ma 				ext4_destroy_inline_data(handle, inode);
23469c3569b5STao Ma 			}
23479c3569b5STao Ma 
2348168fc022STheodore Ts'o 			if (mpd->next_page != page->index)
23498eb9e5ceSTheodore Ts'o 				mpd->first_page = page->index;
23508eb9e5ceSTheodore Ts'o 			mpd->next_page = page->index + 1;
23518eb9e5ceSTheodore Ts'o 			logical = (sector_t) page->index <<
23528eb9e5ceSTheodore Ts'o 				(PAGE_CACHE_SHIFT - inode->i_blkbits);
23538eb9e5ceSTheodore Ts'o 
2354f8bec370SJan Kara 			/* Add all dirty buffers to mpd */
23558eb9e5ceSTheodore Ts'o 			head = page_buffers(page);
23568eb9e5ceSTheodore Ts'o 			bh = head;
23578eb9e5ceSTheodore Ts'o 			do {
23588eb9e5ceSTheodore Ts'o 				BUG_ON(buffer_locked(bh));
23598eb9e5ceSTheodore Ts'o 				/*
2360f8bec370SJan Kara 				 * We need to try to allocate unmapped blocks
2361f8bec370SJan Kara 				 * in the same page.  Otherwise we won't make
2362f8bec370SJan Kara 				 * progress with the page in ext4_writepage
23638eb9e5ceSTheodore Ts'o 				 */
23648eb9e5ceSTheodore Ts'o 				if (ext4_bh_delay_or_unwritten(NULL, bh)) {
23658eb9e5ceSTheodore Ts'o 					mpage_add_bh_to_extent(mpd, logical,
23668eb9e5ceSTheodore Ts'o 							       bh->b_state);
23674f01b02cSTheodore Ts'o 					if (mpd->io_done)
23684f01b02cSTheodore Ts'o 						goto ret_extent_tail;
2369f8bec370SJan Kara 				} else if (buffer_dirty(bh) &&
2370f8bec370SJan Kara 					   buffer_mapped(bh)) {
23718eb9e5ceSTheodore Ts'o 					/*
2372f8bec370SJan Kara 					 * mapped dirty buffer. We need to
2373f8bec370SJan Kara 					 * update the b_state because we look
2374f8bec370SJan Kara 					 * at b_state in mpage_da_map_blocks.
2375f8bec370SJan Kara 					 * We don't update b_size because if we
2376f8bec370SJan Kara 					 * find an unmapped buffer_head later
2377f8bec370SJan Kara 					 * we need to use the b_state flag of
2378f8bec370SJan Kara 					 * that buffer_head.
23798eb9e5ceSTheodore Ts'o 					 */
23808eb9e5ceSTheodore Ts'o 					if (mpd->b_size == 0)
2381f8bec370SJan Kara 						mpd->b_state =
2382f8bec370SJan Kara 							bh->b_state & BH_FLAGS;
23838e48dcfbSTheodore Ts'o 				}
23848eb9e5ceSTheodore Ts'o 				logical++;
23858eb9e5ceSTheodore Ts'o 			} while ((bh = bh->b_this_page) != head);
23868e48dcfbSTheodore Ts'o 
23878e48dcfbSTheodore Ts'o 			if (nr_to_write > 0) {
23888e48dcfbSTheodore Ts'o 				nr_to_write--;
23898e48dcfbSTheodore Ts'o 				if (nr_to_write == 0 &&
23904f01b02cSTheodore Ts'o 				    wbc->sync_mode == WB_SYNC_NONE)
23918e48dcfbSTheodore Ts'o 					/*
23928e48dcfbSTheodore Ts'o 					 * We stop writing back only if we are
23938e48dcfbSTheodore Ts'o 					 * not doing integrity sync. In case of
23948e48dcfbSTheodore Ts'o 					 * integrity sync we have to keep going
23958e48dcfbSTheodore Ts'o 					 * because someone may be concurrently
23968e48dcfbSTheodore Ts'o 					 * dirtying pages, and we might have
23978e48dcfbSTheodore Ts'o 					 * synced a lot of newly appeared dirty
23988e48dcfbSTheodore Ts'o 					 * pages, but have not synced all of the
23998e48dcfbSTheodore Ts'o 					 * old dirty pages.
24008e48dcfbSTheodore Ts'o 					 */
24014f01b02cSTheodore Ts'o 					goto out;
24028e48dcfbSTheodore Ts'o 			}
24038e48dcfbSTheodore Ts'o 		}
24048e48dcfbSTheodore Ts'o 		pagevec_release(&pvec);
24058e48dcfbSTheodore Ts'o 		cond_resched();
24068e48dcfbSTheodore Ts'o 	}
24074f01b02cSTheodore Ts'o 	return 0;
24084f01b02cSTheodore Ts'o ret_extent_tail:
24094f01b02cSTheodore Ts'o 	ret = MPAGE_DA_EXTENT_TAIL;
24108eb9e5ceSTheodore Ts'o out:
24118eb9e5ceSTheodore Ts'o 	pagevec_release(&pvec);
24128eb9e5ceSTheodore Ts'o 	cond_resched();
24138e48dcfbSTheodore Ts'o 	return ret;
24148e48dcfbSTheodore Ts'o }
24158e48dcfbSTheodore Ts'o 
24168e48dcfbSTheodore Ts'o 
241764769240SAlex Tomas static int ext4_da_writepages(struct address_space *mapping,
241864769240SAlex Tomas 			      struct writeback_control *wbc)
241964769240SAlex Tomas {
242022208dedSAneesh Kumar K.V 	pgoff_t	index;
242122208dedSAneesh Kumar K.V 	int range_whole = 0;
242261628a3fSMingming Cao 	handle_t *handle = NULL;
2423df22291fSAneesh Kumar K.V 	struct mpage_da_data mpd;
24245e745b04SAneesh Kumar K.V 	struct inode *inode = mapping->host;
2425498e5f24STheodore Ts'o 	int pages_written = 0;
242655138e0bSTheodore Ts'o 	unsigned int max_pages;
24272acf2c26SAneesh Kumar K.V 	int range_cyclic, cycled = 1, io_done = 0;
242855138e0bSTheodore Ts'o 	int needed_blocks, ret = 0;
242955138e0bSTheodore Ts'o 	long desired_nr_to_write, nr_to_writebump = 0;
2430de89de6eSTheodore Ts'o 	loff_t range_start = wbc->range_start;
24315e745b04SAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
243272f84e65SEric Sandeen 	pgoff_t done_index = 0;
24335b41d924SEric Sandeen 	pgoff_t end;
24341bce63d1SShaohua Li 	struct blk_plug plug;
243561628a3fSMingming Cao 
24369bffad1eSTheodore Ts'o 	trace_ext4_da_writepages(inode, wbc);
2437ba80b101STheodore Ts'o 
243861628a3fSMingming Cao 	/*
243961628a3fSMingming Cao 	 * No pages to write? This is mainly a kludge to avoid starting
244061628a3fSMingming Cao 	 * a transaction for special inodes like journal inode on last iput()
244161628a3fSMingming Cao 	 * because that could violate lock ordering on umount
244261628a3fSMingming Cao 	 */
2443a1d6cc56SAneesh Kumar K.V 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
244461628a3fSMingming Cao 		return 0;
24452a21e37eSTheodore Ts'o 
24462a21e37eSTheodore Ts'o 	/*
24472a21e37eSTheodore Ts'o 	 * If the filesystem has aborted, it is read-only, so return
24482a21e37eSTheodore Ts'o 	 * right away instead of dumping stack traces later on that
24492a21e37eSTheodore Ts'o 	 * will obscure the real source of the problem.  We test
24504ab2f15bSTheodore Ts'o 	 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
24512a21e37eSTheodore Ts'o 	 * the latter could be true if the filesystem is mounted
24522a21e37eSTheodore Ts'o 	 * read-only, and in that case, ext4_da_writepages should
24532a21e37eSTheodore Ts'o 	 * *never* be called, so if that ever happens, we would want
24542a21e37eSTheodore Ts'o 	 * the stack trace.
24552a21e37eSTheodore Ts'o 	 */
24564ab2f15bSTheodore Ts'o 	if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
24572a21e37eSTheodore Ts'o 		return -EROFS;
24582a21e37eSTheodore Ts'o 
245922208dedSAneesh Kumar K.V 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
246022208dedSAneesh Kumar K.V 		range_whole = 1;
246161628a3fSMingming Cao 
24622acf2c26SAneesh Kumar K.V 	range_cyclic = wbc->range_cyclic;
24632acf2c26SAneesh Kumar K.V 	if (wbc->range_cyclic) {
246422208dedSAneesh Kumar K.V 		index = mapping->writeback_index;
24652acf2c26SAneesh Kumar K.V 		if (index)
24662acf2c26SAneesh Kumar K.V 			cycled = 0;
24672acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
24682acf2c26SAneesh Kumar K.V 		wbc->range_end  = LLONG_MAX;
24692acf2c26SAneesh Kumar K.V 		wbc->range_cyclic = 0;
24705b41d924SEric Sandeen 		end = -1;
24715b41d924SEric Sandeen 	} else {
247222208dedSAneesh Kumar K.V 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
24735b41d924SEric Sandeen 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
24745b41d924SEric Sandeen 	}
2475a1d6cc56SAneesh Kumar K.V 
247655138e0bSTheodore Ts'o 	/*
247755138e0bSTheodore Ts'o 	 * This works around two forms of stupidity.  The first is in
247855138e0bSTheodore Ts'o 	 * the writeback code, which caps the maximum number of pages
247955138e0bSTheodore Ts'o 	 * written to be 1024 pages.  This is wrong on multiple
248055138e0bSTheodore Ts'o 	 * levels; different architectues have a different page size,
248155138e0bSTheodore Ts'o 	 * which changes the maximum amount of data which gets
248255138e0bSTheodore Ts'o 	 * written.  Secondly, 4 megabytes is way too small.  XFS
248355138e0bSTheodore Ts'o 	 * forces this value to be 16 megabytes by multiplying
248455138e0bSTheodore Ts'o 	 * nr_to_write parameter by four, and then relies on its
248555138e0bSTheodore Ts'o 	 * allocator to allocate larger extents to make them
248655138e0bSTheodore Ts'o 	 * contiguous.  Unfortunately this brings us to the second
248755138e0bSTheodore Ts'o 	 * stupidity, which is that ext4's mballoc code only allocates
248855138e0bSTheodore Ts'o 	 * at most 2048 blocks.  So we force contiguous writes up to
248955138e0bSTheodore Ts'o 	 * the number of dirty blocks in the inode, or
249055138e0bSTheodore Ts'o 	 * sbi->max_writeback_mb_bump whichever is smaller.
249155138e0bSTheodore Ts'o 	 */
249255138e0bSTheodore Ts'o 	max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2493b443e733SEric Sandeen 	if (!range_cyclic && range_whole) {
2494b443e733SEric Sandeen 		if (wbc->nr_to_write == LONG_MAX)
2495b443e733SEric Sandeen 			desired_nr_to_write = wbc->nr_to_write;
249655138e0bSTheodore Ts'o 		else
2497b443e733SEric Sandeen 			desired_nr_to_write = wbc->nr_to_write * 8;
2498b443e733SEric Sandeen 	} else
249955138e0bSTheodore Ts'o 		desired_nr_to_write = ext4_num_dirty_pages(inode, index,
250055138e0bSTheodore Ts'o 							   max_pages);
250155138e0bSTheodore Ts'o 	if (desired_nr_to_write > max_pages)
250255138e0bSTheodore Ts'o 		desired_nr_to_write = max_pages;
250355138e0bSTheodore Ts'o 
250455138e0bSTheodore Ts'o 	if (wbc->nr_to_write < desired_nr_to_write) {
250555138e0bSTheodore Ts'o 		nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
250655138e0bSTheodore Ts'o 		wbc->nr_to_write = desired_nr_to_write;
250755138e0bSTheodore Ts'o 	}
250855138e0bSTheodore Ts'o 
25092acf2c26SAneesh Kumar K.V retry:
25106e6938b6SWu Fengguang 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
25115b41d924SEric Sandeen 		tag_pages_for_writeback(mapping, index, end);
25125b41d924SEric Sandeen 
25131bce63d1SShaohua Li 	blk_start_plug(&plug);
251422208dedSAneesh Kumar K.V 	while (!ret && wbc->nr_to_write > 0) {
2515a1d6cc56SAneesh Kumar K.V 
2516a1d6cc56SAneesh Kumar K.V 		/*
2517a1d6cc56SAneesh Kumar K.V 		 * we  insert one extent at a time. So we need
2518a1d6cc56SAneesh Kumar K.V 		 * credit needed for single extent allocation.
2519a1d6cc56SAneesh Kumar K.V 		 * journalled mode is currently not supported
2520a1d6cc56SAneesh Kumar K.V 		 * by delalloc
2521a1d6cc56SAneesh Kumar K.V 		 */
2522a1d6cc56SAneesh Kumar K.V 		BUG_ON(ext4_should_journal_data(inode));
2523525f4ed8SMingming Cao 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2524a1d6cc56SAneesh Kumar K.V 
252561628a3fSMingming Cao 		/* start a new transaction*/
25269924a92aSTheodore Ts'o 		handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
25279924a92aSTheodore Ts'o 					    needed_blocks);
252861628a3fSMingming Cao 		if (IS_ERR(handle)) {
252961628a3fSMingming Cao 			ret = PTR_ERR(handle);
25301693918eSTheodore Ts'o 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2531fbe845ddSCurt Wohlgemuth 			       "%ld pages, ino %lu; err %d", __func__,
2532a1d6cc56SAneesh Kumar K.V 				wbc->nr_to_write, inode->i_ino, ret);
25333c1fcb2cSNamjae Jeon 			blk_finish_plug(&plug);
253461628a3fSMingming Cao 			goto out_writepages;
253561628a3fSMingming Cao 		}
2536f63e6005STheodore Ts'o 
2537f63e6005STheodore Ts'o 		/*
25388eb9e5ceSTheodore Ts'o 		 * Now call write_cache_pages_da() to find the next
2539f63e6005STheodore Ts'o 		 * contiguous region of logical blocks that need
25408eb9e5ceSTheodore Ts'o 		 * blocks to be allocated by ext4 and submit them.
2541f63e6005STheodore Ts'o 		 */
25429c3569b5STao Ma 		ret = write_cache_pages_da(handle, mapping,
25439c3569b5STao Ma 					   wbc, &mpd, &done_index);
2544f63e6005STheodore Ts'o 		/*
2545af901ca1SAndré Goddard Rosa 		 * If we have a contiguous extent of pages and we
2546f63e6005STheodore Ts'o 		 * haven't done the I/O yet, map the blocks and submit
2547f63e6005STheodore Ts'o 		 * them for I/O.
2548f63e6005STheodore Ts'o 		 */
2549f63e6005STheodore Ts'o 		if (!mpd.io_done && mpd.next_page != mpd.first_page) {
25505a87b7a5STheodore Ts'o 			mpage_da_map_and_submit(&mpd);
2551f63e6005STheodore Ts'o 			ret = MPAGE_DA_EXTENT_TAIL;
2552f63e6005STheodore Ts'o 		}
2553b3a3ca8cSTheodore Ts'o 		trace_ext4_da_write_pages(inode, &mpd);
2554f63e6005STheodore Ts'o 		wbc->nr_to_write -= mpd.pages_written;
2555df22291fSAneesh Kumar K.V 
255661628a3fSMingming Cao 		ext4_journal_stop(handle);
2557df22291fSAneesh Kumar K.V 
25588f64b32eSEric Sandeen 		if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
255922208dedSAneesh Kumar K.V 			/* commit the transaction which would
256022208dedSAneesh Kumar K.V 			 * free blocks released in the transaction
256122208dedSAneesh Kumar K.V 			 * and try again
256222208dedSAneesh Kumar K.V 			 */
2563df22291fSAneesh Kumar K.V 			jbd2_journal_force_commit_nested(sbi->s_journal);
256422208dedSAneesh Kumar K.V 			ret = 0;
256522208dedSAneesh Kumar K.V 		} else if (ret == MPAGE_DA_EXTENT_TAIL) {
2566a1d6cc56SAneesh Kumar K.V 			/*
25678de49e67SKazuya Mio 			 * Got one extent now try with rest of the pages.
25688de49e67SKazuya Mio 			 * If mpd.retval is set -EIO, journal is aborted.
25698de49e67SKazuya Mio 			 * So we don't need to write any more.
2570a1d6cc56SAneesh Kumar K.V 			 */
257122208dedSAneesh Kumar K.V 			pages_written += mpd.pages_written;
25728de49e67SKazuya Mio 			ret = mpd.retval;
25732acf2c26SAneesh Kumar K.V 			io_done = 1;
257422208dedSAneesh Kumar K.V 		} else if (wbc->nr_to_write)
257561628a3fSMingming Cao 			/*
257661628a3fSMingming Cao 			 * There is no more writeout needed
257761628a3fSMingming Cao 			 * or we requested for a noblocking writeout
257861628a3fSMingming Cao 			 * and we found the device congested
257961628a3fSMingming Cao 			 */
258061628a3fSMingming Cao 			break;
258161628a3fSMingming Cao 	}
25821bce63d1SShaohua Li 	blk_finish_plug(&plug);
25832acf2c26SAneesh Kumar K.V 	if (!io_done && !cycled) {
25842acf2c26SAneesh Kumar K.V 		cycled = 1;
25852acf2c26SAneesh Kumar K.V 		index = 0;
25862acf2c26SAneesh Kumar K.V 		wbc->range_start = index << PAGE_CACHE_SHIFT;
25872acf2c26SAneesh Kumar K.V 		wbc->range_end  = mapping->writeback_index - 1;
25882acf2c26SAneesh Kumar K.V 		goto retry;
25892acf2c26SAneesh Kumar K.V 	}
259061628a3fSMingming Cao 
259122208dedSAneesh Kumar K.V 	/* Update index */
25922acf2c26SAneesh Kumar K.V 	wbc->range_cyclic = range_cyclic;
259322208dedSAneesh Kumar K.V 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
259422208dedSAneesh Kumar K.V 		/*
259522208dedSAneesh Kumar K.V 		 * set the writeback_index so that range_cyclic
259622208dedSAneesh Kumar K.V 		 * mode will write it back later
259722208dedSAneesh Kumar K.V 		 */
259872f84e65SEric Sandeen 		mapping->writeback_index = done_index;
2599a1d6cc56SAneesh Kumar K.V 
260061628a3fSMingming Cao out_writepages:
260122208dedSAneesh Kumar K.V 	wbc->nr_to_write -= nr_to_writebump;
2602de89de6eSTheodore Ts'o 	wbc->range_start = range_start;
26039bffad1eSTheodore Ts'o 	trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
260461628a3fSMingming Cao 	return ret;
260564769240SAlex Tomas }
260664769240SAlex Tomas 
260779f0be8dSAneesh Kumar K.V static int ext4_nonda_switch(struct super_block *sb)
260879f0be8dSAneesh Kumar K.V {
260979f0be8dSAneesh Kumar K.V 	s64 free_blocks, dirty_blocks;
261079f0be8dSAneesh Kumar K.V 	struct ext4_sb_info *sbi = EXT4_SB(sb);
261179f0be8dSAneesh Kumar K.V 
261279f0be8dSAneesh Kumar K.V 	/*
261379f0be8dSAneesh Kumar K.V 	 * switch to non delalloc mode if we are running low
261479f0be8dSAneesh Kumar K.V 	 * on free block. The free block accounting via percpu
2615179f7ebfSEric Dumazet 	 * counters can get slightly wrong with percpu_counter_batch getting
261679f0be8dSAneesh Kumar K.V 	 * accumulated on each CPU without updating global counters
261779f0be8dSAneesh Kumar K.V 	 * Delalloc need an accurate free block accounting. So switch
261879f0be8dSAneesh Kumar K.V 	 * to non delalloc when we are near to error range.
261979f0be8dSAneesh Kumar K.V 	 */
262057042651STheodore Ts'o 	free_blocks  = EXT4_C2B(sbi,
262157042651STheodore Ts'o 		percpu_counter_read_positive(&sbi->s_freeclusters_counter));
262257042651STheodore Ts'o 	dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
262300d4e736STheodore Ts'o 	/*
262400d4e736STheodore Ts'o 	 * Start pushing delalloc when 1/2 of free blocks are dirty.
262500d4e736STheodore Ts'o 	 */
262610ee27a0SMiao Xie 	if (dirty_blocks && (free_blocks < 2 * dirty_blocks))
262710ee27a0SMiao Xie 		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
262800d4e736STheodore Ts'o 
262979f0be8dSAneesh Kumar K.V 	if (2 * free_blocks < 3 * dirty_blocks ||
2630df55c99dSTheodore Ts'o 		free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
263179f0be8dSAneesh Kumar K.V 		/*
2632c8afb446SEric Sandeen 		 * free block count is less than 150% of dirty blocks
2633c8afb446SEric Sandeen 		 * or free blocks is less than watermark
263479f0be8dSAneesh Kumar K.V 		 */
263579f0be8dSAneesh Kumar K.V 		return 1;
263679f0be8dSAneesh Kumar K.V 	}
263779f0be8dSAneesh Kumar K.V 	return 0;
263879f0be8dSAneesh Kumar K.V }
263979f0be8dSAneesh Kumar K.V 
264064769240SAlex Tomas static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
264164769240SAlex Tomas 			       loff_t pos, unsigned len, unsigned flags,
264264769240SAlex Tomas 			       struct page **pagep, void **fsdata)
264364769240SAlex Tomas {
264472b8ab9dSEric Sandeen 	int ret, retries = 0;
264564769240SAlex Tomas 	struct page *page;
264664769240SAlex Tomas 	pgoff_t index;
264764769240SAlex Tomas 	struct inode *inode = mapping->host;
264864769240SAlex Tomas 	handle_t *handle;
264964769240SAlex Tomas 
265064769240SAlex Tomas 	index = pos >> PAGE_CACHE_SHIFT;
265179f0be8dSAneesh Kumar K.V 
265279f0be8dSAneesh Kumar K.V 	if (ext4_nonda_switch(inode->i_sb)) {
265379f0be8dSAneesh Kumar K.V 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
265479f0be8dSAneesh Kumar K.V 		return ext4_write_begin(file, mapping, pos,
265579f0be8dSAneesh Kumar K.V 					len, flags, pagep, fsdata);
265679f0be8dSAneesh Kumar K.V 	}
265779f0be8dSAneesh Kumar K.V 	*fsdata = (void *)0;
26589bffad1eSTheodore Ts'o 	trace_ext4_da_write_begin(inode, pos, len, flags);
26599c3569b5STao Ma 
26609c3569b5STao Ma 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
26619c3569b5STao Ma 		ret = ext4_da_write_inline_data_begin(mapping, inode,
26629c3569b5STao Ma 						      pos, len, flags,
26639c3569b5STao Ma 						      pagep, fsdata);
26649c3569b5STao Ma 		if (ret < 0)
266547564bfbSTheodore Ts'o 			return ret;
266647564bfbSTheodore Ts'o 		if (ret == 1)
266747564bfbSTheodore Ts'o 			return 0;
26689c3569b5STao Ma 	}
26699c3569b5STao Ma 
267047564bfbSTheodore Ts'o 	/*
267147564bfbSTheodore Ts'o 	 * grab_cache_page_write_begin() can take a long time if the
267247564bfbSTheodore Ts'o 	 * system is thrashing due to memory pressure, or if the page
267347564bfbSTheodore Ts'o 	 * is being written back.  So grab it first before we start
267447564bfbSTheodore Ts'o 	 * the transaction handle.  This also allows us to allocate
267547564bfbSTheodore Ts'o 	 * the page (if needed) without using GFP_NOFS.
267647564bfbSTheodore Ts'o 	 */
267747564bfbSTheodore Ts'o retry_grab:
267847564bfbSTheodore Ts'o 	page = grab_cache_page_write_begin(mapping, index, flags);
267947564bfbSTheodore Ts'o 	if (!page)
268047564bfbSTheodore Ts'o 		return -ENOMEM;
268147564bfbSTheodore Ts'o 	unlock_page(page);
268247564bfbSTheodore Ts'o 
268364769240SAlex Tomas 	/*
268464769240SAlex Tomas 	 * With delayed allocation, we don't log the i_disksize update
268564769240SAlex Tomas 	 * if there is delayed block allocation. But we still need
268664769240SAlex Tomas 	 * to journalling the i_disksize update if writes to the end
268764769240SAlex Tomas 	 * of file which has an already mapped buffer.
268864769240SAlex Tomas 	 */
268947564bfbSTheodore Ts'o retry_journal:
26909924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
269164769240SAlex Tomas 	if (IS_ERR(handle)) {
269247564bfbSTheodore Ts'o 		page_cache_release(page);
269347564bfbSTheodore Ts'o 		return PTR_ERR(handle);
269464769240SAlex Tomas 	}
269564769240SAlex Tomas 
269647564bfbSTheodore Ts'o 	lock_page(page);
269747564bfbSTheodore Ts'o 	if (page->mapping != mapping) {
269847564bfbSTheodore Ts'o 		/* The page got truncated from under us */
269947564bfbSTheodore Ts'o 		unlock_page(page);
270047564bfbSTheodore Ts'o 		page_cache_release(page);
2701d5a0d4f7SEric Sandeen 		ext4_journal_stop(handle);
270247564bfbSTheodore Ts'o 		goto retry_grab;
2703d5a0d4f7SEric Sandeen 	}
270447564bfbSTheodore Ts'o 	/* In case writeback began while the page was unlocked */
270547564bfbSTheodore Ts'o 	wait_on_page_writeback(page);
270664769240SAlex Tomas 
27076e1db88dSChristoph Hellwig 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
270864769240SAlex Tomas 	if (ret < 0) {
270964769240SAlex Tomas 		unlock_page(page);
271064769240SAlex Tomas 		ext4_journal_stop(handle);
2711ae4d5372SAneesh Kumar K.V 		/*
2712ae4d5372SAneesh Kumar K.V 		 * block_write_begin may have instantiated a few blocks
2713ae4d5372SAneesh Kumar K.V 		 * outside i_size.  Trim these off again. Don't need
2714ae4d5372SAneesh Kumar K.V 		 * i_size_read because we hold i_mutex.
2715ae4d5372SAneesh Kumar K.V 		 */
2716ae4d5372SAneesh Kumar K.V 		if (pos + len > inode->i_size)
2717b9a4207dSJan Kara 			ext4_truncate_failed_write(inode);
271847564bfbSTheodore Ts'o 
271947564bfbSTheodore Ts'o 		if (ret == -ENOSPC &&
272047564bfbSTheodore Ts'o 		    ext4_should_retry_alloc(inode->i_sb, &retries))
272147564bfbSTheodore Ts'o 			goto retry_journal;
272247564bfbSTheodore Ts'o 
272347564bfbSTheodore Ts'o 		page_cache_release(page);
272447564bfbSTheodore Ts'o 		return ret;
272564769240SAlex Tomas 	}
272664769240SAlex Tomas 
272747564bfbSTheodore Ts'o 	*pagep = page;
272864769240SAlex Tomas 	return ret;
272964769240SAlex Tomas }
273064769240SAlex Tomas 
2731632eaeabSMingming Cao /*
2732632eaeabSMingming Cao  * Check if we should update i_disksize
2733632eaeabSMingming Cao  * when write to the end of file but not require block allocation
2734632eaeabSMingming Cao  */
2735632eaeabSMingming Cao static int ext4_da_should_update_i_disksize(struct page *page,
2736632eaeabSMingming Cao 					    unsigned long offset)
2737632eaeabSMingming Cao {
2738632eaeabSMingming Cao 	struct buffer_head *bh;
2739632eaeabSMingming Cao 	struct inode *inode = page->mapping->host;
2740632eaeabSMingming Cao 	unsigned int idx;
2741632eaeabSMingming Cao 	int i;
2742632eaeabSMingming Cao 
2743632eaeabSMingming Cao 	bh = page_buffers(page);
2744632eaeabSMingming Cao 	idx = offset >> inode->i_blkbits;
2745632eaeabSMingming Cao 
2746632eaeabSMingming Cao 	for (i = 0; i < idx; i++)
2747632eaeabSMingming Cao 		bh = bh->b_this_page;
2748632eaeabSMingming Cao 
274929fa89d0SAneesh Kumar K.V 	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2750632eaeabSMingming Cao 		return 0;
2751632eaeabSMingming Cao 	return 1;
2752632eaeabSMingming Cao }
2753632eaeabSMingming Cao 
275464769240SAlex Tomas static int ext4_da_write_end(struct file *file,
275564769240SAlex Tomas 			     struct address_space *mapping,
275664769240SAlex Tomas 			     loff_t pos, unsigned len, unsigned copied,
275764769240SAlex Tomas 			     struct page *page, void *fsdata)
275864769240SAlex Tomas {
275964769240SAlex Tomas 	struct inode *inode = mapping->host;
276064769240SAlex Tomas 	int ret = 0, ret2;
276164769240SAlex Tomas 	handle_t *handle = ext4_journal_current_handle();
276264769240SAlex Tomas 	loff_t new_i_size;
2763632eaeabSMingming Cao 	unsigned long start, end;
276479f0be8dSAneesh Kumar K.V 	int write_mode = (int)(unsigned long)fsdata;
276579f0be8dSAneesh Kumar K.V 
276674d553aaSTheodore Ts'o 	if (write_mode == FALL_BACK_TO_NONDELALLOC)
276774d553aaSTheodore Ts'o 		return ext4_write_end(file, mapping, pos,
276879f0be8dSAneesh Kumar K.V 				      len, copied, page, fsdata);
2769632eaeabSMingming Cao 
27709bffad1eSTheodore Ts'o 	trace_ext4_da_write_end(inode, pos, len, copied);
2771632eaeabSMingming Cao 	start = pos & (PAGE_CACHE_SIZE - 1);
2772632eaeabSMingming Cao 	end = start + copied - 1;
277364769240SAlex Tomas 
277464769240SAlex Tomas 	/*
277564769240SAlex Tomas 	 * generic_write_end() will run mark_inode_dirty() if i_size
277664769240SAlex Tomas 	 * changes.  So let's piggyback the i_disksize mark_inode_dirty
277764769240SAlex Tomas 	 * into that.
277864769240SAlex Tomas 	 */
277964769240SAlex Tomas 	new_i_size = pos + copied;
2780ea51d132SAndrea Arcangeli 	if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
27819c3569b5STao Ma 		if (ext4_has_inline_data(inode) ||
27829c3569b5STao Ma 		    ext4_da_should_update_i_disksize(page, end)) {
2783632eaeabSMingming Cao 			down_write(&EXT4_I(inode)->i_data_sem);
2784f3b59291STheodore Ts'o 			if (new_i_size > EXT4_I(inode)->i_disksize)
278564769240SAlex Tomas 				EXT4_I(inode)->i_disksize = new_i_size;
2786632eaeabSMingming Cao 			up_write(&EXT4_I(inode)->i_data_sem);
2787cf17fea6SAneesh Kumar K.V 			/* We need to mark inode dirty even if
2788cf17fea6SAneesh Kumar K.V 			 * new_i_size is less that inode->i_size
2789cf17fea6SAneesh Kumar K.V 			 * bu greater than i_disksize.(hint delalloc)
2790cf17fea6SAneesh Kumar K.V 			 */
2791cf17fea6SAneesh Kumar K.V 			ext4_mark_inode_dirty(handle, inode);
2792632eaeabSMingming Cao 		}
2793632eaeabSMingming Cao 	}
27949c3569b5STao Ma 
27959c3569b5STao Ma 	if (write_mode != CONVERT_INLINE_DATA &&
27969c3569b5STao Ma 	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
27979c3569b5STao Ma 	    ext4_has_inline_data(inode))
27989c3569b5STao Ma 		ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
27999c3569b5STao Ma 						     page);
28009c3569b5STao Ma 	else
280164769240SAlex Tomas 		ret2 = generic_write_end(file, mapping, pos, len, copied,
280264769240SAlex Tomas 							page, fsdata);
28039c3569b5STao Ma 
280464769240SAlex Tomas 	copied = ret2;
280564769240SAlex Tomas 	if (ret2 < 0)
280664769240SAlex Tomas 		ret = ret2;
280764769240SAlex Tomas 	ret2 = ext4_journal_stop(handle);
280864769240SAlex Tomas 	if (!ret)
280964769240SAlex Tomas 		ret = ret2;
281064769240SAlex Tomas 
281164769240SAlex Tomas 	return ret ? ret : copied;
281264769240SAlex Tomas }
281364769240SAlex Tomas 
281464769240SAlex Tomas static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
281564769240SAlex Tomas {
281664769240SAlex Tomas 	/*
281764769240SAlex Tomas 	 * Drop reserved blocks
281864769240SAlex Tomas 	 */
281964769240SAlex Tomas 	BUG_ON(!PageLocked(page));
282064769240SAlex Tomas 	if (!page_has_buffers(page))
282164769240SAlex Tomas 		goto out;
282264769240SAlex Tomas 
2823d2a17637SMingming Cao 	ext4_da_page_release_reservation(page, offset);
282464769240SAlex Tomas 
282564769240SAlex Tomas out:
282664769240SAlex Tomas 	ext4_invalidatepage(page, offset);
282764769240SAlex Tomas 
282864769240SAlex Tomas 	return;
282964769240SAlex Tomas }
283064769240SAlex Tomas 
2831ccd2506bSTheodore Ts'o /*
2832ccd2506bSTheodore Ts'o  * Force all delayed allocation blocks to be allocated for a given inode.
2833ccd2506bSTheodore Ts'o  */
2834ccd2506bSTheodore Ts'o int ext4_alloc_da_blocks(struct inode *inode)
2835ccd2506bSTheodore Ts'o {
2836fb40ba0dSTheodore Ts'o 	trace_ext4_alloc_da_blocks(inode);
2837fb40ba0dSTheodore Ts'o 
2838ccd2506bSTheodore Ts'o 	if (!EXT4_I(inode)->i_reserved_data_blocks &&
2839ccd2506bSTheodore Ts'o 	    !EXT4_I(inode)->i_reserved_meta_blocks)
2840ccd2506bSTheodore Ts'o 		return 0;
2841ccd2506bSTheodore Ts'o 
2842ccd2506bSTheodore Ts'o 	/*
2843ccd2506bSTheodore Ts'o 	 * We do something simple for now.  The filemap_flush() will
2844ccd2506bSTheodore Ts'o 	 * also start triggering a write of the data blocks, which is
2845ccd2506bSTheodore Ts'o 	 * not strictly speaking necessary (and for users of
2846ccd2506bSTheodore Ts'o 	 * laptop_mode, not even desirable).  However, to do otherwise
2847ccd2506bSTheodore Ts'o 	 * would require replicating code paths in:
2848ccd2506bSTheodore Ts'o 	 *
2849ccd2506bSTheodore Ts'o 	 * ext4_da_writepages() ->
2850ccd2506bSTheodore Ts'o 	 *    write_cache_pages() ---> (via passed in callback function)
2851ccd2506bSTheodore Ts'o 	 *        __mpage_da_writepage() -->
2852ccd2506bSTheodore Ts'o 	 *           mpage_add_bh_to_extent()
2853ccd2506bSTheodore Ts'o 	 *           mpage_da_map_blocks()
2854ccd2506bSTheodore Ts'o 	 *
2855ccd2506bSTheodore Ts'o 	 * The problem is that write_cache_pages(), located in
2856ccd2506bSTheodore Ts'o 	 * mm/page-writeback.c, marks pages clean in preparation for
2857ccd2506bSTheodore Ts'o 	 * doing I/O, which is not desirable if we're not planning on
2858ccd2506bSTheodore Ts'o 	 * doing I/O at all.
2859ccd2506bSTheodore Ts'o 	 *
2860ccd2506bSTheodore Ts'o 	 * We could call write_cache_pages(), and then redirty all of
2861380cf090SWu Fengguang 	 * the pages by calling redirty_page_for_writepage() but that
2862ccd2506bSTheodore Ts'o 	 * would be ugly in the extreme.  So instead we would need to
2863ccd2506bSTheodore Ts'o 	 * replicate parts of the code in the above functions,
286425985edcSLucas De Marchi 	 * simplifying them because we wouldn't actually intend to
2865ccd2506bSTheodore Ts'o 	 * write out the pages, but rather only collect contiguous
2866ccd2506bSTheodore Ts'o 	 * logical block extents, call the multi-block allocator, and
2867ccd2506bSTheodore Ts'o 	 * then update the buffer heads with the block allocations.
2868ccd2506bSTheodore Ts'o 	 *
2869ccd2506bSTheodore Ts'o 	 * For now, though, we'll cheat by calling filemap_flush(),
2870ccd2506bSTheodore Ts'o 	 * which will map the blocks, and start the I/O, but not
2871ccd2506bSTheodore Ts'o 	 * actually wait for the I/O to complete.
2872ccd2506bSTheodore Ts'o 	 */
2873ccd2506bSTheodore Ts'o 	return filemap_flush(inode->i_mapping);
2874ccd2506bSTheodore Ts'o }
287564769240SAlex Tomas 
287664769240SAlex Tomas /*
2877ac27a0ecSDave Kleikamp  * bmap() is special.  It gets used by applications such as lilo and by
2878ac27a0ecSDave Kleikamp  * the swapper to find the on-disk block of a specific piece of data.
2879ac27a0ecSDave Kleikamp  *
2880ac27a0ecSDave Kleikamp  * Naturally, this is dangerous if the block concerned is still in the
2881617ba13bSMingming Cao  * journal.  If somebody makes a swapfile on an ext4 data-journaling
2882ac27a0ecSDave Kleikamp  * filesystem and enables swap, then they may get a nasty shock when the
2883ac27a0ecSDave Kleikamp  * data getting swapped to that swapfile suddenly gets overwritten by
2884ac27a0ecSDave Kleikamp  * the original zero's written out previously to the journal and
2885ac27a0ecSDave Kleikamp  * awaiting writeback in the kernel's buffer cache.
2886ac27a0ecSDave Kleikamp  *
2887ac27a0ecSDave Kleikamp  * So, if we see any bmap calls here on a modified, data-journaled file,
2888ac27a0ecSDave Kleikamp  * take extra steps to flush any blocks which might be in the cache.
2889ac27a0ecSDave Kleikamp  */
2890617ba13bSMingming Cao static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2891ac27a0ecSDave Kleikamp {
2892ac27a0ecSDave Kleikamp 	struct inode *inode = mapping->host;
2893ac27a0ecSDave Kleikamp 	journal_t *journal;
2894ac27a0ecSDave Kleikamp 	int err;
2895ac27a0ecSDave Kleikamp 
289646c7f254STao Ma 	/*
289746c7f254STao Ma 	 * We can get here for an inline file via the FIBMAP ioctl
289846c7f254STao Ma 	 */
289946c7f254STao Ma 	if (ext4_has_inline_data(inode))
290046c7f254STao Ma 		return 0;
290146c7f254STao Ma 
290264769240SAlex Tomas 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
290364769240SAlex Tomas 			test_opt(inode->i_sb, DELALLOC)) {
290464769240SAlex Tomas 		/*
290564769240SAlex Tomas 		 * With delalloc we want to sync the file
290664769240SAlex Tomas 		 * so that we can make sure we allocate
290764769240SAlex Tomas 		 * blocks for file
290864769240SAlex Tomas 		 */
290964769240SAlex Tomas 		filemap_write_and_wait(mapping);
291064769240SAlex Tomas 	}
291164769240SAlex Tomas 
291219f5fb7aSTheodore Ts'o 	if (EXT4_JOURNAL(inode) &&
291319f5fb7aSTheodore Ts'o 	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2914ac27a0ecSDave Kleikamp 		/*
2915ac27a0ecSDave Kleikamp 		 * This is a REALLY heavyweight approach, but the use of
2916ac27a0ecSDave Kleikamp 		 * bmap on dirty files is expected to be extremely rare:
2917ac27a0ecSDave Kleikamp 		 * only if we run lilo or swapon on a freshly made file
2918ac27a0ecSDave Kleikamp 		 * do we expect this to happen.
2919ac27a0ecSDave Kleikamp 		 *
2920ac27a0ecSDave Kleikamp 		 * (bmap requires CAP_SYS_RAWIO so this does not
2921ac27a0ecSDave Kleikamp 		 * represent an unprivileged user DOS attack --- we'd be
2922ac27a0ecSDave Kleikamp 		 * in trouble if mortal users could trigger this path at
2923ac27a0ecSDave Kleikamp 		 * will.)
2924ac27a0ecSDave Kleikamp 		 *
2925617ba13bSMingming Cao 		 * NB. EXT4_STATE_JDATA is not set on files other than
2926ac27a0ecSDave Kleikamp 		 * regular files.  If somebody wants to bmap a directory
2927ac27a0ecSDave Kleikamp 		 * or symlink and gets confused because the buffer
2928ac27a0ecSDave Kleikamp 		 * hasn't yet been flushed to disk, they deserve
2929ac27a0ecSDave Kleikamp 		 * everything they get.
2930ac27a0ecSDave Kleikamp 		 */
2931ac27a0ecSDave Kleikamp 
293219f5fb7aSTheodore Ts'o 		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2933617ba13bSMingming Cao 		journal = EXT4_JOURNAL(inode);
2934dab291afSMingming Cao 		jbd2_journal_lock_updates(journal);
2935dab291afSMingming Cao 		err = jbd2_journal_flush(journal);
2936dab291afSMingming Cao 		jbd2_journal_unlock_updates(journal);
2937ac27a0ecSDave Kleikamp 
2938ac27a0ecSDave Kleikamp 		if (err)
2939ac27a0ecSDave Kleikamp 			return 0;
2940ac27a0ecSDave Kleikamp 	}
2941ac27a0ecSDave Kleikamp 
2942617ba13bSMingming Cao 	return generic_block_bmap(mapping, block, ext4_get_block);
2943ac27a0ecSDave Kleikamp }
2944ac27a0ecSDave Kleikamp 
2945617ba13bSMingming Cao static int ext4_readpage(struct file *file, struct page *page)
2946ac27a0ecSDave Kleikamp {
294746c7f254STao Ma 	int ret = -EAGAIN;
294846c7f254STao Ma 	struct inode *inode = page->mapping->host;
294946c7f254STao Ma 
29500562e0baSJiaying Zhang 	trace_ext4_readpage(page);
295146c7f254STao Ma 
295246c7f254STao Ma 	if (ext4_has_inline_data(inode))
295346c7f254STao Ma 		ret = ext4_readpage_inline(inode, page);
295446c7f254STao Ma 
295546c7f254STao Ma 	if (ret == -EAGAIN)
2956617ba13bSMingming Cao 		return mpage_readpage(page, ext4_get_block);
295746c7f254STao Ma 
295846c7f254STao Ma 	return ret;
2959ac27a0ecSDave Kleikamp }
2960ac27a0ecSDave Kleikamp 
2961ac27a0ecSDave Kleikamp static int
2962617ba13bSMingming Cao ext4_readpages(struct file *file, struct address_space *mapping,
2963ac27a0ecSDave Kleikamp 		struct list_head *pages, unsigned nr_pages)
2964ac27a0ecSDave Kleikamp {
296546c7f254STao Ma 	struct inode *inode = mapping->host;
296646c7f254STao Ma 
296746c7f254STao Ma 	/* If the file has inline data, no need to do readpages. */
296846c7f254STao Ma 	if (ext4_has_inline_data(inode))
296946c7f254STao Ma 		return 0;
297046c7f254STao Ma 
2971617ba13bSMingming Cao 	return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2972ac27a0ecSDave Kleikamp }
2973ac27a0ecSDave Kleikamp 
2974617ba13bSMingming Cao static void ext4_invalidatepage(struct page *page, unsigned long offset)
2975ac27a0ecSDave Kleikamp {
29760562e0baSJiaying Zhang 	trace_ext4_invalidatepage(page, offset);
29770562e0baSJiaying Zhang 
29784520fb3cSJan Kara 	/* No journalling happens on data buffers when this function is used */
29794520fb3cSJan Kara 	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
29804520fb3cSJan Kara 
29814520fb3cSJan Kara 	block_invalidatepage(page, offset);
29824520fb3cSJan Kara }
29834520fb3cSJan Kara 
298453e87268SJan Kara static int __ext4_journalled_invalidatepage(struct page *page,
29854520fb3cSJan Kara 					    unsigned long offset)
29864520fb3cSJan Kara {
29874520fb3cSJan Kara 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
29884520fb3cSJan Kara 
29894520fb3cSJan Kara 	trace_ext4_journalled_invalidatepage(page, offset);
29904520fb3cSJan Kara 
2991744692dcSJiaying Zhang 	/*
2992ac27a0ecSDave Kleikamp 	 * If it's a full truncate we just forget about the pending dirtying
2993ac27a0ecSDave Kleikamp 	 */
2994ac27a0ecSDave Kleikamp 	if (offset == 0)
2995ac27a0ecSDave Kleikamp 		ClearPageChecked(page);
2996ac27a0ecSDave Kleikamp 
299753e87268SJan Kara 	return jbd2_journal_invalidatepage(journal, page, offset);
299853e87268SJan Kara }
299953e87268SJan Kara 
300053e87268SJan Kara /* Wrapper for aops... */
300153e87268SJan Kara static void ext4_journalled_invalidatepage(struct page *page,
300253e87268SJan Kara 					   unsigned long offset)
300353e87268SJan Kara {
300453e87268SJan Kara 	WARN_ON(__ext4_journalled_invalidatepage(page, offset) < 0);
3005ac27a0ecSDave Kleikamp }
3006ac27a0ecSDave Kleikamp 
3007617ba13bSMingming Cao static int ext4_releasepage(struct page *page, gfp_t wait)
3008ac27a0ecSDave Kleikamp {
3009617ba13bSMingming Cao 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3010ac27a0ecSDave Kleikamp 
30110562e0baSJiaying Zhang 	trace_ext4_releasepage(page);
30120562e0baSJiaying Zhang 
3013e1c36595SJan Kara 	/* Page has dirty journalled data -> cannot release */
3014e1c36595SJan Kara 	if (PageChecked(page))
3015ac27a0ecSDave Kleikamp 		return 0;
30160390131bSFrank Mayhar 	if (journal)
3017dab291afSMingming Cao 		return jbd2_journal_try_to_free_buffers(journal, page, wait);
30180390131bSFrank Mayhar 	else
30190390131bSFrank Mayhar 		return try_to_free_buffers(page);
3020ac27a0ecSDave Kleikamp }
3021ac27a0ecSDave Kleikamp 
3022ac27a0ecSDave Kleikamp /*
30232ed88685STheodore Ts'o  * ext4_get_block used when preparing for a DIO write or buffer write.
30242ed88685STheodore Ts'o  * We allocate an uinitialized extent if blocks haven't been allocated.
30252ed88685STheodore Ts'o  * The extent will be converted to initialized after the IO is complete.
30262ed88685STheodore Ts'o  */
3027f19d5870STao Ma int ext4_get_block_write(struct inode *inode, sector_t iblock,
30284c0425ffSMingming Cao 		   struct buffer_head *bh_result, int create)
30294c0425ffSMingming Cao {
3030c7064ef1SJiaying Zhang 	ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
30318d5d02e6SMingming Cao 		   inode->i_ino, create);
30322ed88685STheodore Ts'o 	return _ext4_get_block(inode, iblock, bh_result,
30332ed88685STheodore Ts'o 			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
30344c0425ffSMingming Cao }
30354c0425ffSMingming Cao 
3036729f52c6SZheng Liu static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
30378b0f165fSAnatol Pomozov 		   struct buffer_head *bh_result, int create)
3038729f52c6SZheng Liu {
30398b0f165fSAnatol Pomozov 	ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
30408b0f165fSAnatol Pomozov 		   inode->i_ino, create);
30418b0f165fSAnatol Pomozov 	return _ext4_get_block(inode, iblock, bh_result,
30428b0f165fSAnatol Pomozov 			       EXT4_GET_BLOCKS_NO_LOCK);
3043729f52c6SZheng Liu }
3044729f52c6SZheng Liu 
30454c0425ffSMingming Cao static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3046552ef802SChristoph Hellwig 			    ssize_t size, void *private, int ret,
3047552ef802SChristoph Hellwig 			    bool is_async)
30484c0425ffSMingming Cao {
3049496ad9aaSAl Viro 	struct inode *inode = file_inode(iocb->ki_filp);
30504c0425ffSMingming Cao         ext4_io_end_t *io_end = iocb->private;
30514c0425ffSMingming Cao 
30524b70df18SMingming 	/* if not async direct IO or dio with 0 bytes write, just return */
30534b70df18SMingming 	if (!io_end || !size)
3054552ef802SChristoph Hellwig 		goto out;
30554b70df18SMingming 
30568d5d02e6SMingming Cao 	ext_debug("ext4_end_io_dio(): io_end 0x%p "
3057ace36ad4SJoe Perches 		  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
30588d5d02e6SMingming Cao  		  iocb->private, io_end->inode->i_ino, iocb, offset,
30598d5d02e6SMingming Cao 		  size);
30608d5d02e6SMingming Cao 
3061b5a7e970STheodore Ts'o 	iocb->private = NULL;
3062b5a7e970STheodore Ts'o 
30638d5d02e6SMingming Cao 	/* if not aio dio with unwritten extents, just free io and return */
3064bd2d0210STheodore Ts'o 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
30658d5d02e6SMingming Cao 		ext4_free_io_end(io_end);
30665b3ff237Sjiayingz@google.com (Jiaying Zhang) out:
3067091e26dfSJan Kara 		inode_dio_done(inode);
30685b3ff237Sjiayingz@google.com (Jiaying Zhang) 		if (is_async)
30695b3ff237Sjiayingz@google.com (Jiaying Zhang) 			aio_complete(iocb, ret, 0);
30705b3ff237Sjiayingz@google.com (Jiaying Zhang) 		return;
30718d5d02e6SMingming Cao 	}
30728d5d02e6SMingming Cao 
30734c0425ffSMingming Cao 	io_end->offset = offset;
30744c0425ffSMingming Cao 	io_end->size = size;
30755b3ff237Sjiayingz@google.com (Jiaying Zhang) 	if (is_async) {
30765b3ff237Sjiayingz@google.com (Jiaying Zhang) 		io_end->iocb = iocb;
30775b3ff237Sjiayingz@google.com (Jiaying Zhang) 		io_end->result = ret;
30785b3ff237Sjiayingz@google.com (Jiaying Zhang) 	}
30794c0425ffSMingming Cao 
308028a535f9SDmitry Monakhov 	ext4_add_complete_io(io_end);
30814c0425ffSMingming Cao }
3082c7064ef1SJiaying Zhang 
30834c0425ffSMingming Cao /*
30844c0425ffSMingming Cao  * For ext4 extent files, ext4 will do direct-io write to holes,
30854c0425ffSMingming Cao  * preallocated extents, and those write extend the file, no need to
30864c0425ffSMingming Cao  * fall back to buffered IO.
30874c0425ffSMingming Cao  *
3088b595076aSUwe Kleine-König  * For holes, we fallocate those blocks, mark them as uninitialized
308969c499d1STheodore Ts'o  * If those blocks were preallocated, we mark sure they are split, but
3090b595076aSUwe Kleine-König  * still keep the range to write as uninitialized.
30914c0425ffSMingming Cao  *
309269c499d1STheodore Ts'o  * The unwritten extents will be converted to written when DIO is completed.
30938d5d02e6SMingming Cao  * For async direct IO, since the IO may still pending when return, we
309425985edcSLucas De Marchi  * set up an end_io call back function, which will do the conversion
30958d5d02e6SMingming Cao  * when async direct IO completed.
30964c0425ffSMingming Cao  *
30974c0425ffSMingming Cao  * If the O_DIRECT write will extend the file then add this inode to the
30984c0425ffSMingming Cao  * orphan list.  So recovery will truncate it back to the original size
30994c0425ffSMingming Cao  * if the machine crashes during the write.
31004c0425ffSMingming Cao  *
31014c0425ffSMingming Cao  */
31024c0425ffSMingming Cao static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
31034c0425ffSMingming Cao 			      const struct iovec *iov, loff_t offset,
31044c0425ffSMingming Cao 			      unsigned long nr_segs)
31054c0425ffSMingming Cao {
31064c0425ffSMingming Cao 	struct file *file = iocb->ki_filp;
31074c0425ffSMingming Cao 	struct inode *inode = file->f_mapping->host;
31084c0425ffSMingming Cao 	ssize_t ret;
31094c0425ffSMingming Cao 	size_t count = iov_length(iov, nr_segs);
3110729f52c6SZheng Liu 	int overwrite = 0;
31118b0f165fSAnatol Pomozov 	get_block_t *get_block_func = NULL;
31128b0f165fSAnatol Pomozov 	int dio_flags = 0;
311369c499d1STheodore Ts'o 	loff_t final_size = offset + count;
311469c499d1STheodore Ts'o 
311569c499d1STheodore Ts'o 	/* Use the old path for reads and writes beyond i_size. */
311669c499d1STheodore Ts'o 	if (rw != WRITE || final_size > inode->i_size)
311769c499d1STheodore Ts'o 		return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3118729f52c6SZheng Liu 
31194bd809dbSZheng Liu 	BUG_ON(iocb->private == NULL);
31204bd809dbSZheng Liu 
31214bd809dbSZheng Liu 	/* If we do a overwrite dio, i_mutex locking can be released */
31224bd809dbSZheng Liu 	overwrite = *((int *)iocb->private);
31234bd809dbSZheng Liu 
31244bd809dbSZheng Liu 	if (overwrite) {
31251f555cfaSDmitry Monakhov 		atomic_inc(&inode->i_dio_count);
31264bd809dbSZheng Liu 		down_read(&EXT4_I(inode)->i_data_sem);
31274bd809dbSZheng Liu 		mutex_unlock(&inode->i_mutex);
31284bd809dbSZheng Liu 	}
31294bd809dbSZheng Liu 
31304c0425ffSMingming Cao 	/*
31318d5d02e6SMingming Cao 	 * We could direct write to holes and fallocate.
31328d5d02e6SMingming Cao 	 *
313369c499d1STheodore Ts'o 	 * Allocated blocks to fill the hole are marked as
313469c499d1STheodore Ts'o 	 * uninitialized to prevent parallel buffered read to expose
313569c499d1STheodore Ts'o 	 * the stale data before DIO complete the data IO.
31368d5d02e6SMingming Cao 	 *
313769c499d1STheodore Ts'o 	 * As to previously fallocated extents, ext4 get_block will
313869c499d1STheodore Ts'o 	 * just simply mark the buffer mapped but still keep the
313969c499d1STheodore Ts'o 	 * extents uninitialized.
31404c0425ffSMingming Cao 	 *
314169c499d1STheodore Ts'o 	 * For non AIO case, we will convert those unwritten extents
31428d5d02e6SMingming Cao 	 * to written after return back from blockdev_direct_IO.
31434c0425ffSMingming Cao 	 *
314469c499d1STheodore Ts'o 	 * For async DIO, the conversion needs to be deferred when the
314569c499d1STheodore Ts'o 	 * IO is completed. The ext4 end_io callback function will be
314669c499d1STheodore Ts'o 	 * called to take care of the conversion work.  Here for async
314769c499d1STheodore Ts'o 	 * case, we allocate an io_end structure to hook to the iocb.
31484c0425ffSMingming Cao 	 */
31498d5d02e6SMingming Cao 	iocb->private = NULL;
3150f45ee3a1SDmitry Monakhov 	ext4_inode_aio_set(inode, NULL);
31518d5d02e6SMingming Cao 	if (!is_sync_kiocb(iocb)) {
315269c499d1STheodore Ts'o 		ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
31534bd809dbSZheng Liu 		if (!io_end) {
31544bd809dbSZheng Liu 			ret = -ENOMEM;
31554bd809dbSZheng Liu 			goto retake_lock;
31564bd809dbSZheng Liu 		}
3157266991b1SJeff Moyer 		io_end->flag |= EXT4_IO_END_DIRECT;
3158266991b1SJeff Moyer 		iocb->private = io_end;
31598d5d02e6SMingming Cao 		/*
316069c499d1STheodore Ts'o 		 * we save the io structure for current async direct
316169c499d1STheodore Ts'o 		 * IO, so that later ext4_map_blocks() could flag the
316269c499d1STheodore Ts'o 		 * io structure whether there is a unwritten extents
316369c499d1STheodore Ts'o 		 * needs to be converted when IO is completed.
31648d5d02e6SMingming Cao 		 */
3165f45ee3a1SDmitry Monakhov 		ext4_inode_aio_set(inode, io_end);
31668d5d02e6SMingming Cao 	}
31678d5d02e6SMingming Cao 
31688b0f165fSAnatol Pomozov 	if (overwrite) {
31698b0f165fSAnatol Pomozov 		get_block_func = ext4_get_block_write_nolock;
31708b0f165fSAnatol Pomozov 	} else {
31718b0f165fSAnatol Pomozov 		get_block_func = ext4_get_block_write;
31728b0f165fSAnatol Pomozov 		dio_flags = DIO_LOCKING;
31738b0f165fSAnatol Pomozov 	}
3174729f52c6SZheng Liu 	ret = __blockdev_direct_IO(rw, iocb, inode,
3175729f52c6SZheng Liu 				   inode->i_sb->s_bdev, iov,
3176729f52c6SZheng Liu 				   offset, nr_segs,
31778b0f165fSAnatol Pomozov 				   get_block_func,
3178729f52c6SZheng Liu 				   ext4_end_io_dio,
3179729f52c6SZheng Liu 				   NULL,
31808b0f165fSAnatol Pomozov 				   dio_flags);
31818b0f165fSAnatol Pomozov 
31828d5d02e6SMingming Cao 	if (iocb->private)
3183f45ee3a1SDmitry Monakhov 		ext4_inode_aio_set(inode, NULL);
31848d5d02e6SMingming Cao 	/*
318569c499d1STheodore Ts'o 	 * The io_end structure takes a reference to the inode, that
318669c499d1STheodore Ts'o 	 * structure needs to be destroyed and the reference to the
318769c499d1STheodore Ts'o 	 * inode need to be dropped, when IO is complete, even with 0
318869c499d1STheodore Ts'o 	 * byte write, or failed.
31898d5d02e6SMingming Cao 	 *
319069c499d1STheodore Ts'o 	 * In the successful AIO DIO case, the io_end structure will
319169c499d1STheodore Ts'o 	 * be destroyed and the reference to the inode will be dropped
31928d5d02e6SMingming Cao 	 * after the end_io call back function is called.
31938d5d02e6SMingming Cao 	 *
319469c499d1STheodore Ts'o 	 * In the case there is 0 byte write, or error case, since VFS
319569c499d1STheodore Ts'o 	 * direct IO won't invoke the end_io call back function, we
319669c499d1STheodore Ts'o 	 * need to free the end_io structure here.
31978d5d02e6SMingming Cao 	 */
31988d5d02e6SMingming Cao 	if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
31998d5d02e6SMingming Cao 		ext4_free_io_end(iocb->private);
32008d5d02e6SMingming Cao 		iocb->private = NULL;
3201729f52c6SZheng Liu 	} else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
32025f524950SMingming 						EXT4_STATE_DIO_UNWRITTEN)) {
3203109f5565SMingming 		int err;
32048d5d02e6SMingming Cao 		/*
32058d5d02e6SMingming Cao 		 * for non AIO case, since the IO is already
320625985edcSLucas De Marchi 		 * completed, we could do the conversion right here
32078d5d02e6SMingming Cao 		 */
3208109f5565SMingming 		err = ext4_convert_unwritten_extents(inode,
32098d5d02e6SMingming Cao 						     offset, ret);
3210109f5565SMingming 		if (err < 0)
3211109f5565SMingming 			ret = err;
321219f5fb7aSTheodore Ts'o 		ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3213109f5565SMingming 	}
32144bd809dbSZheng Liu 
32154bd809dbSZheng Liu retake_lock:
32164bd809dbSZheng Liu 	/* take i_mutex locking again if we do a ovewrite dio */
32174bd809dbSZheng Liu 	if (overwrite) {
32181f555cfaSDmitry Monakhov 		inode_dio_done(inode);
32194bd809dbSZheng Liu 		up_read(&EXT4_I(inode)->i_data_sem);
32204bd809dbSZheng Liu 		mutex_lock(&inode->i_mutex);
32214bd809dbSZheng Liu 	}
32224bd809dbSZheng Liu 
32234c0425ffSMingming Cao 	return ret;
32244c0425ffSMingming Cao }
32258d5d02e6SMingming Cao 
32264c0425ffSMingming Cao static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
32274c0425ffSMingming Cao 			      const struct iovec *iov, loff_t offset,
32284c0425ffSMingming Cao 			      unsigned long nr_segs)
32294c0425ffSMingming Cao {
32304c0425ffSMingming Cao 	struct file *file = iocb->ki_filp;
32314c0425ffSMingming Cao 	struct inode *inode = file->f_mapping->host;
32320562e0baSJiaying Zhang 	ssize_t ret;
32334c0425ffSMingming Cao 
323484ebd795STheodore Ts'o 	/*
323584ebd795STheodore Ts'o 	 * If we are doing data journalling we don't support O_DIRECT
323684ebd795STheodore Ts'o 	 */
323784ebd795STheodore Ts'o 	if (ext4_should_journal_data(inode))
323884ebd795STheodore Ts'o 		return 0;
323984ebd795STheodore Ts'o 
324046c7f254STao Ma 	/* Let buffer I/O handle the inline data case. */
324146c7f254STao Ma 	if (ext4_has_inline_data(inode))
324246c7f254STao Ma 		return 0;
324346c7f254STao Ma 
32440562e0baSJiaying Zhang 	trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
324512e9b892SDmitry Monakhov 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
32460562e0baSJiaying Zhang 		ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
32470562e0baSJiaying Zhang 	else
32480562e0baSJiaying Zhang 		ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
32490562e0baSJiaying Zhang 	trace_ext4_direct_IO_exit(inode, offset,
32500562e0baSJiaying Zhang 				iov_length(iov, nr_segs), rw, ret);
32510562e0baSJiaying Zhang 	return ret;
32524c0425ffSMingming Cao }
32534c0425ffSMingming Cao 
3254ac27a0ecSDave Kleikamp /*
3255617ba13bSMingming Cao  * Pages can be marked dirty completely asynchronously from ext4's journalling
3256ac27a0ecSDave Kleikamp  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3257ac27a0ecSDave Kleikamp  * much here because ->set_page_dirty is called under VFS locks.  The page is
3258ac27a0ecSDave Kleikamp  * not necessarily locked.
3259ac27a0ecSDave Kleikamp  *
3260ac27a0ecSDave Kleikamp  * We cannot just dirty the page and leave attached buffers clean, because the
3261ac27a0ecSDave Kleikamp  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3262ac27a0ecSDave Kleikamp  * or jbddirty because all the journalling code will explode.
3263ac27a0ecSDave Kleikamp  *
3264ac27a0ecSDave Kleikamp  * So what we do is to mark the page "pending dirty" and next time writepage
3265ac27a0ecSDave Kleikamp  * is called, propagate that into the buffers appropriately.
3266ac27a0ecSDave Kleikamp  */
3267617ba13bSMingming Cao static int ext4_journalled_set_page_dirty(struct page *page)
3268ac27a0ecSDave Kleikamp {
3269ac27a0ecSDave Kleikamp 	SetPageChecked(page);
3270ac27a0ecSDave Kleikamp 	return __set_page_dirty_nobuffers(page);
3271ac27a0ecSDave Kleikamp }
3272ac27a0ecSDave Kleikamp 
327374d553aaSTheodore Ts'o static const struct address_space_operations ext4_aops = {
3274617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3275617ba13bSMingming Cao 	.readpages		= ext4_readpages,
327643ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3277bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
327874d553aaSTheodore Ts'o 	.write_end		= ext4_write_end,
3279617ba13bSMingming Cao 	.bmap			= ext4_bmap,
3280617ba13bSMingming Cao 	.invalidatepage		= ext4_invalidatepage,
3281617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
3282617ba13bSMingming Cao 	.direct_IO		= ext4_direct_IO,
3283ac27a0ecSDave Kleikamp 	.migratepage		= buffer_migrate_page,
32848ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3285aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3286ac27a0ecSDave Kleikamp };
3287ac27a0ecSDave Kleikamp 
3288617ba13bSMingming Cao static const struct address_space_operations ext4_journalled_aops = {
3289617ba13bSMingming Cao 	.readpage		= ext4_readpage,
3290617ba13bSMingming Cao 	.readpages		= ext4_readpages,
329143ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
3292bfc1af65SNick Piggin 	.write_begin		= ext4_write_begin,
3293bfc1af65SNick Piggin 	.write_end		= ext4_journalled_write_end,
3294617ba13bSMingming Cao 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3295617ba13bSMingming Cao 	.bmap			= ext4_bmap,
32964520fb3cSJan Kara 	.invalidatepage		= ext4_journalled_invalidatepage,
3297617ba13bSMingming Cao 	.releasepage		= ext4_releasepage,
329884ebd795STheodore Ts'o 	.direct_IO		= ext4_direct_IO,
32998ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3300aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
3301ac27a0ecSDave Kleikamp };
3302ac27a0ecSDave Kleikamp 
330364769240SAlex Tomas static const struct address_space_operations ext4_da_aops = {
330464769240SAlex Tomas 	.readpage		= ext4_readpage,
330564769240SAlex Tomas 	.readpages		= ext4_readpages,
330643ce1d23SAneesh Kumar K.V 	.writepage		= ext4_writepage,
330764769240SAlex Tomas 	.writepages		= ext4_da_writepages,
330864769240SAlex Tomas 	.write_begin		= ext4_da_write_begin,
330964769240SAlex Tomas 	.write_end		= ext4_da_write_end,
331064769240SAlex Tomas 	.bmap			= ext4_bmap,
331164769240SAlex Tomas 	.invalidatepage		= ext4_da_invalidatepage,
331264769240SAlex Tomas 	.releasepage		= ext4_releasepage,
331364769240SAlex Tomas 	.direct_IO		= ext4_direct_IO,
331464769240SAlex Tomas 	.migratepage		= buffer_migrate_page,
33158ab22b9aSHisashi Hifumi 	.is_partially_uptodate  = block_is_partially_uptodate,
3316aa261f54SAndi Kleen 	.error_remove_page	= generic_error_remove_page,
331764769240SAlex Tomas };
331864769240SAlex Tomas 
3319617ba13bSMingming Cao void ext4_set_aops(struct inode *inode)
3320ac27a0ecSDave Kleikamp {
33213d2b1582SLukas Czerner 	switch (ext4_inode_journal_mode(inode)) {
33223d2b1582SLukas Czerner 	case EXT4_INODE_ORDERED_DATA_MODE:
332374d553aaSTheodore Ts'o 		ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE);
33243d2b1582SLukas Czerner 		break;
33253d2b1582SLukas Czerner 	case EXT4_INODE_WRITEBACK_DATA_MODE:
332674d553aaSTheodore Ts'o 		ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE);
33273d2b1582SLukas Czerner 		break;
33283d2b1582SLukas Czerner 	case EXT4_INODE_JOURNAL_DATA_MODE:
3329617ba13bSMingming Cao 		inode->i_mapping->a_ops = &ext4_journalled_aops;
333074d553aaSTheodore Ts'o 		return;
33313d2b1582SLukas Czerner 	default:
33323d2b1582SLukas Czerner 		BUG();
33333d2b1582SLukas Czerner 	}
333474d553aaSTheodore Ts'o 	if (test_opt(inode->i_sb, DELALLOC))
333574d553aaSTheodore Ts'o 		inode->i_mapping->a_ops = &ext4_da_aops;
333674d553aaSTheodore Ts'o 	else
333774d553aaSTheodore Ts'o 		inode->i_mapping->a_ops = &ext4_aops;
3338ac27a0ecSDave Kleikamp }
3339ac27a0ecSDave Kleikamp 
33404e96b2dbSAllison Henderson 
33414e96b2dbSAllison Henderson /*
33424e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers()
33434e96b2dbSAllison Henderson  * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
33444e96b2dbSAllison Henderson  * This function finds and locks the page containing the offset
33454e96b2dbSAllison Henderson  * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
33464e96b2dbSAllison Henderson  * Calling functions that already have the page locked should call
33474e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers_no_lock directly.
33484e96b2dbSAllison Henderson  */
33494e96b2dbSAllison Henderson int ext4_discard_partial_page_buffers(handle_t *handle,
33504e96b2dbSAllison Henderson 		struct address_space *mapping, loff_t from,
33514e96b2dbSAllison Henderson 		loff_t length, int flags)
33524e96b2dbSAllison Henderson {
33534e96b2dbSAllison Henderson 	struct inode *inode = mapping->host;
33544e96b2dbSAllison Henderson 	struct page *page;
33554e96b2dbSAllison Henderson 	int err = 0;
33564e96b2dbSAllison Henderson 
33574e96b2dbSAllison Henderson 	page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
33584e96b2dbSAllison Henderson 				   mapping_gfp_mask(mapping) & ~__GFP_FS);
33594e96b2dbSAllison Henderson 	if (!page)
33605129d05fSYongqiang Yang 		return -ENOMEM;
33614e96b2dbSAllison Henderson 
33624e96b2dbSAllison Henderson 	err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
33634e96b2dbSAllison Henderson 		from, length, flags);
33644e96b2dbSAllison Henderson 
33654e96b2dbSAllison Henderson 	unlock_page(page);
33664e96b2dbSAllison Henderson 	page_cache_release(page);
33674e96b2dbSAllison Henderson 	return err;
33684e96b2dbSAllison Henderson }
33694e96b2dbSAllison Henderson 
33704e96b2dbSAllison Henderson /*
33714e96b2dbSAllison Henderson  * ext4_discard_partial_page_buffers_no_lock()
33724e96b2dbSAllison Henderson  * Zeros a page range of length 'length' starting from offset 'from'.
33734e96b2dbSAllison Henderson  * Buffer heads that correspond to the block aligned regions of the
33744e96b2dbSAllison Henderson  * zeroed range will be unmapped.  Unblock aligned regions
33754e96b2dbSAllison Henderson  * will have the corresponding buffer head mapped if needed so that
33764e96b2dbSAllison Henderson  * that region of the page can be updated with the partial zero out.
33774e96b2dbSAllison Henderson  *
33784e96b2dbSAllison Henderson  * This function assumes that the page has already been  locked.  The
33794e96b2dbSAllison Henderson  * The range to be discarded must be contained with in the given page.
33804e96b2dbSAllison Henderson  * If the specified range exceeds the end of the page it will be shortened
33814e96b2dbSAllison Henderson  * to the end of the page that corresponds to 'from'.  This function is
33824e96b2dbSAllison Henderson  * appropriate for updating a page and it buffer heads to be unmapped and
33834e96b2dbSAllison Henderson  * zeroed for blocks that have been either released, or are going to be
33844e96b2dbSAllison Henderson  * released.
33854e96b2dbSAllison Henderson  *
33864e96b2dbSAllison Henderson  * handle: The journal handle
33874e96b2dbSAllison Henderson  * inode:  The files inode
33884e96b2dbSAllison Henderson  * page:   A locked page that contains the offset "from"
33894907cb7bSAnatol Pomozov  * from:   The starting byte offset (from the beginning of the file)
33904e96b2dbSAllison Henderson  *         to begin discarding
33914e96b2dbSAllison Henderson  * len:    The length of bytes to discard
33924e96b2dbSAllison Henderson  * flags:  Optional flags that may be used:
33934e96b2dbSAllison Henderson  *
33944e96b2dbSAllison Henderson  *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
33954e96b2dbSAllison Henderson  *         Only zero the regions of the page whose buffer heads
33964e96b2dbSAllison Henderson  *         have already been unmapped.  This flag is appropriate
33974907cb7bSAnatol Pomozov  *         for updating the contents of a page whose blocks may
33984e96b2dbSAllison Henderson  *         have already been released, and we only want to zero
33994e96b2dbSAllison Henderson  *         out the regions that correspond to those released blocks.
34004e96b2dbSAllison Henderson  *
34014907cb7bSAnatol Pomozov  * Returns zero on success or negative on failure.
34024e96b2dbSAllison Henderson  */
34035f163cc7SEric Sandeen static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
34044e96b2dbSAllison Henderson 		struct inode *inode, struct page *page, loff_t from,
34054e96b2dbSAllison Henderson 		loff_t length, int flags)
34064e96b2dbSAllison Henderson {
34074e96b2dbSAllison Henderson 	ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
34084e96b2dbSAllison Henderson 	unsigned int offset = from & (PAGE_CACHE_SIZE-1);
34094e96b2dbSAllison Henderson 	unsigned int blocksize, max, pos;
34104e96b2dbSAllison Henderson 	ext4_lblk_t iblock;
34114e96b2dbSAllison Henderson 	struct buffer_head *bh;
34124e96b2dbSAllison Henderson 	int err = 0;
34134e96b2dbSAllison Henderson 
34144e96b2dbSAllison Henderson 	blocksize = inode->i_sb->s_blocksize;
34154e96b2dbSAllison Henderson 	max = PAGE_CACHE_SIZE - offset;
34164e96b2dbSAllison Henderson 
34174e96b2dbSAllison Henderson 	if (index != page->index)
34184e96b2dbSAllison Henderson 		return -EINVAL;
34194e96b2dbSAllison Henderson 
34204e96b2dbSAllison Henderson 	/*
34214e96b2dbSAllison Henderson 	 * correct length if it does not fall between
34224e96b2dbSAllison Henderson 	 * 'from' and the end of the page
34234e96b2dbSAllison Henderson 	 */
34244e96b2dbSAllison Henderson 	if (length > max || length < 0)
34254e96b2dbSAllison Henderson 		length = max;
34264e96b2dbSAllison Henderson 
34274e96b2dbSAllison Henderson 	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
34284e96b2dbSAllison Henderson 
3429093e6e36SYongqiang Yang 	if (!page_has_buffers(page))
34304e96b2dbSAllison Henderson 		create_empty_buffers(page, blocksize, 0);
34314e96b2dbSAllison Henderson 
34324e96b2dbSAllison Henderson 	/* Find the buffer that contains "offset" */
34334e96b2dbSAllison Henderson 	bh = page_buffers(page);
34344e96b2dbSAllison Henderson 	pos = blocksize;
34354e96b2dbSAllison Henderson 	while (offset >= pos) {
34364e96b2dbSAllison Henderson 		bh = bh->b_this_page;
34374e96b2dbSAllison Henderson 		iblock++;
34384e96b2dbSAllison Henderson 		pos += blocksize;
34394e96b2dbSAllison Henderson 	}
34404e96b2dbSAllison Henderson 
34414e96b2dbSAllison Henderson 	pos = offset;
34424e96b2dbSAllison Henderson 	while (pos < offset + length) {
3443e260daf2SYongqiang Yang 		unsigned int end_of_block, range_to_discard;
3444e260daf2SYongqiang Yang 
34454e96b2dbSAllison Henderson 		err = 0;
34464e96b2dbSAllison Henderson 
34474e96b2dbSAllison Henderson 		/* The length of space left to zero and unmap */
34484e96b2dbSAllison Henderson 		range_to_discard = offset + length - pos;
34494e96b2dbSAllison Henderson 
34504e96b2dbSAllison Henderson 		/* The length of space until the end of the block */
34514e96b2dbSAllison Henderson 		end_of_block = blocksize - (pos & (blocksize-1));
34524e96b2dbSAllison Henderson 
34534e96b2dbSAllison Henderson 		/*
34544e96b2dbSAllison Henderson 		 * Do not unmap or zero past end of block
34554e96b2dbSAllison Henderson 		 * for this buffer head
34564e96b2dbSAllison Henderson 		 */
34574e96b2dbSAllison Henderson 		if (range_to_discard > end_of_block)
34584e96b2dbSAllison Henderson 			range_to_discard = end_of_block;
34594e96b2dbSAllison Henderson 
34604e96b2dbSAllison Henderson 
34614e96b2dbSAllison Henderson 		/*
34624e96b2dbSAllison Henderson 		 * Skip this buffer head if we are only zeroing unampped
34634e96b2dbSAllison Henderson 		 * regions of the page
34644e96b2dbSAllison Henderson 		 */
34654e96b2dbSAllison Henderson 		if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
34664e96b2dbSAllison Henderson 			buffer_mapped(bh))
34674e96b2dbSAllison Henderson 				goto next;
34684e96b2dbSAllison Henderson 
34694e96b2dbSAllison Henderson 		/* If the range is block aligned, unmap */
34704e96b2dbSAllison Henderson 		if (range_to_discard == blocksize) {
34714e96b2dbSAllison Henderson 			clear_buffer_dirty(bh);
34724e96b2dbSAllison Henderson 			bh->b_bdev = NULL;
34734e96b2dbSAllison Henderson 			clear_buffer_mapped(bh);
34744e96b2dbSAllison Henderson 			clear_buffer_req(bh);
34754e96b2dbSAllison Henderson 			clear_buffer_new(bh);
34764e96b2dbSAllison Henderson 			clear_buffer_delay(bh);
34774e96b2dbSAllison Henderson 			clear_buffer_unwritten(bh);
34784e96b2dbSAllison Henderson 			clear_buffer_uptodate(bh);
34794e96b2dbSAllison Henderson 			zero_user(page, pos, range_to_discard);
34804e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "Buffer discarded");
34814e96b2dbSAllison Henderson 			goto next;
34824e96b2dbSAllison Henderson 		}
34834e96b2dbSAllison Henderson 
34844e96b2dbSAllison Henderson 		/*
34854e96b2dbSAllison Henderson 		 * If this block is not completely contained in the range
34864e96b2dbSAllison Henderson 		 * to be discarded, then it is not going to be released. Because
34874e96b2dbSAllison Henderson 		 * we need to keep this block, we need to make sure this part
34884e96b2dbSAllison Henderson 		 * of the page is uptodate before we modify it by writeing
34894e96b2dbSAllison Henderson 		 * partial zeros on it.
34904e96b2dbSAllison Henderson 		 */
34914e96b2dbSAllison Henderson 		if (!buffer_mapped(bh)) {
34924e96b2dbSAllison Henderson 			/*
34934e96b2dbSAllison Henderson 			 * Buffer head must be mapped before we can read
34944e96b2dbSAllison Henderson 			 * from the block
34954e96b2dbSAllison Henderson 			 */
34964e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "unmapped");
34974e96b2dbSAllison Henderson 			ext4_get_block(inode, iblock, bh, 0);
34984e96b2dbSAllison Henderson 			/* unmapped? It's a hole - nothing to do */
34994e96b2dbSAllison Henderson 			if (!buffer_mapped(bh)) {
35004e96b2dbSAllison Henderson 				BUFFER_TRACE(bh, "still unmapped");
35014e96b2dbSAllison Henderson 				goto next;
35024e96b2dbSAllison Henderson 			}
35034e96b2dbSAllison Henderson 		}
35044e96b2dbSAllison Henderson 
35054e96b2dbSAllison Henderson 		/* Ok, it's mapped. Make sure it's up-to-date */
35064e96b2dbSAllison Henderson 		if (PageUptodate(page))
35074e96b2dbSAllison Henderson 			set_buffer_uptodate(bh);
35084e96b2dbSAllison Henderson 
35094e96b2dbSAllison Henderson 		if (!buffer_uptodate(bh)) {
35104e96b2dbSAllison Henderson 			err = -EIO;
35114e96b2dbSAllison Henderson 			ll_rw_block(READ, 1, &bh);
35124e96b2dbSAllison Henderson 			wait_on_buffer(bh);
35134e96b2dbSAllison Henderson 			/* Uhhuh. Read error. Complain and punt.*/
35144e96b2dbSAllison Henderson 			if (!buffer_uptodate(bh))
35154e96b2dbSAllison Henderson 				goto next;
35164e96b2dbSAllison Henderson 		}
35174e96b2dbSAllison Henderson 
35184e96b2dbSAllison Henderson 		if (ext4_should_journal_data(inode)) {
35194e96b2dbSAllison Henderson 			BUFFER_TRACE(bh, "get write access");
35204e96b2dbSAllison Henderson 			err = ext4_journal_get_write_access(handle, bh);
35214e96b2dbSAllison Henderson 			if (err)
35224e96b2dbSAllison Henderson 				goto next;
35234e96b2dbSAllison Henderson 		}
35244e96b2dbSAllison Henderson 
35254e96b2dbSAllison Henderson 		zero_user(page, pos, range_to_discard);
35264e96b2dbSAllison Henderson 
35274e96b2dbSAllison Henderson 		err = 0;
35284e96b2dbSAllison Henderson 		if (ext4_should_journal_data(inode)) {
35294e96b2dbSAllison Henderson 			err = ext4_handle_dirty_metadata(handle, inode, bh);
3530decbd919STheodore Ts'o 		} else
35314e96b2dbSAllison Henderson 			mark_buffer_dirty(bh);
35324e96b2dbSAllison Henderson 
35334e96b2dbSAllison Henderson 		BUFFER_TRACE(bh, "Partial buffer zeroed");
35344e96b2dbSAllison Henderson next:
35354e96b2dbSAllison Henderson 		bh = bh->b_this_page;
35364e96b2dbSAllison Henderson 		iblock++;
35374e96b2dbSAllison Henderson 		pos += range_to_discard;
35384e96b2dbSAllison Henderson 	}
35394e96b2dbSAllison Henderson 
35404e96b2dbSAllison Henderson 	return err;
35414e96b2dbSAllison Henderson }
35424e96b2dbSAllison Henderson 
354391ef4cafSDuane Griffin int ext4_can_truncate(struct inode *inode)
354491ef4cafSDuane Griffin {
354591ef4cafSDuane Griffin 	if (S_ISREG(inode->i_mode))
354691ef4cafSDuane Griffin 		return 1;
354791ef4cafSDuane Griffin 	if (S_ISDIR(inode->i_mode))
354891ef4cafSDuane Griffin 		return 1;
354991ef4cafSDuane Griffin 	if (S_ISLNK(inode->i_mode))
355091ef4cafSDuane Griffin 		return !ext4_inode_is_fast_symlink(inode);
355191ef4cafSDuane Griffin 	return 0;
355291ef4cafSDuane Griffin }
355391ef4cafSDuane Griffin 
3554ac27a0ecSDave Kleikamp /*
3555a4bb6b64SAllison Henderson  * ext4_punch_hole: punches a hole in a file by releaseing the blocks
3556a4bb6b64SAllison Henderson  * associated with the given offset and length
3557a4bb6b64SAllison Henderson  *
3558a4bb6b64SAllison Henderson  * @inode:  File inode
3559a4bb6b64SAllison Henderson  * @offset: The offset where the hole will begin
3560a4bb6b64SAllison Henderson  * @len:    The length of the hole
3561a4bb6b64SAllison Henderson  *
35624907cb7bSAnatol Pomozov  * Returns: 0 on success or negative on failure
3563a4bb6b64SAllison Henderson  */
3564a4bb6b64SAllison Henderson 
3565a4bb6b64SAllison Henderson int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3566a4bb6b64SAllison Henderson {
3567496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
356826a4c0c6STheodore Ts'o 	struct super_block *sb = inode->i_sb;
356926a4c0c6STheodore Ts'o 	ext4_lblk_t first_block, stop_block;
357026a4c0c6STheodore Ts'o 	struct address_space *mapping = inode->i_mapping;
357126a4c0c6STheodore Ts'o 	loff_t first_page, last_page, page_len;
357226a4c0c6STheodore Ts'o 	loff_t first_page_offset, last_page_offset;
357326a4c0c6STheodore Ts'o 	handle_t *handle;
357426a4c0c6STheodore Ts'o 	unsigned int credits;
357526a4c0c6STheodore Ts'o 	int ret = 0;
357626a4c0c6STheodore Ts'o 
3577a4bb6b64SAllison Henderson 	if (!S_ISREG(inode->i_mode))
357873355192SAllison Henderson 		return -EOPNOTSUPP;
3579a4bb6b64SAllison Henderson 
358026a4c0c6STheodore Ts'o 	if (EXT4_SB(sb)->s_cluster_ratio > 1) {
3581bab08ab9STheodore Ts'o 		/* TODO: Add support for bigalloc file systems */
358273355192SAllison Henderson 		return -EOPNOTSUPP;
3583bab08ab9STheodore Ts'o 	}
3584bab08ab9STheodore Ts'o 
3585aaddea81SZheng Liu 	trace_ext4_punch_hole(inode, offset, length);
3586aaddea81SZheng Liu 
358726a4c0c6STheodore Ts'o 	/*
358826a4c0c6STheodore Ts'o 	 * Write out all dirty pages to avoid race conditions
358926a4c0c6STheodore Ts'o 	 * Then release them.
359026a4c0c6STheodore Ts'o 	 */
359126a4c0c6STheodore Ts'o 	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
359226a4c0c6STheodore Ts'o 		ret = filemap_write_and_wait_range(mapping, offset,
359326a4c0c6STheodore Ts'o 						   offset + length - 1);
359426a4c0c6STheodore Ts'o 		if (ret)
359526a4c0c6STheodore Ts'o 			return ret;
359626a4c0c6STheodore Ts'o 	}
359726a4c0c6STheodore Ts'o 
359826a4c0c6STheodore Ts'o 	mutex_lock(&inode->i_mutex);
359926a4c0c6STheodore Ts'o 	/* It's not possible punch hole on append only file */
360026a4c0c6STheodore Ts'o 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
360126a4c0c6STheodore Ts'o 		ret = -EPERM;
360226a4c0c6STheodore Ts'o 		goto out_mutex;
360326a4c0c6STheodore Ts'o 	}
360426a4c0c6STheodore Ts'o 	if (IS_SWAPFILE(inode)) {
360526a4c0c6STheodore Ts'o 		ret = -ETXTBSY;
360626a4c0c6STheodore Ts'o 		goto out_mutex;
360726a4c0c6STheodore Ts'o 	}
360826a4c0c6STheodore Ts'o 
360926a4c0c6STheodore Ts'o 	/* No need to punch hole beyond i_size */
361026a4c0c6STheodore Ts'o 	if (offset >= inode->i_size)
361126a4c0c6STheodore Ts'o 		goto out_mutex;
361226a4c0c6STheodore Ts'o 
361326a4c0c6STheodore Ts'o 	/*
361426a4c0c6STheodore Ts'o 	 * If the hole extends beyond i_size, set the hole
361526a4c0c6STheodore Ts'o 	 * to end after the page that contains i_size
361626a4c0c6STheodore Ts'o 	 */
361726a4c0c6STheodore Ts'o 	if (offset + length > inode->i_size) {
361826a4c0c6STheodore Ts'o 		length = inode->i_size +
361926a4c0c6STheodore Ts'o 		   PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
362026a4c0c6STheodore Ts'o 		   offset;
362126a4c0c6STheodore Ts'o 	}
362226a4c0c6STheodore Ts'o 
362326a4c0c6STheodore Ts'o 	first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
362426a4c0c6STheodore Ts'o 	last_page = (offset + length) >> PAGE_CACHE_SHIFT;
362526a4c0c6STheodore Ts'o 
362626a4c0c6STheodore Ts'o 	first_page_offset = first_page << PAGE_CACHE_SHIFT;
362726a4c0c6STheodore Ts'o 	last_page_offset = last_page << PAGE_CACHE_SHIFT;
362826a4c0c6STheodore Ts'o 
362926a4c0c6STheodore Ts'o 	/* Now release the pages */
363026a4c0c6STheodore Ts'o 	if (last_page_offset > first_page_offset) {
363126a4c0c6STheodore Ts'o 		truncate_pagecache_range(inode, first_page_offset,
363226a4c0c6STheodore Ts'o 					 last_page_offset - 1);
363326a4c0c6STheodore Ts'o 	}
363426a4c0c6STheodore Ts'o 
363526a4c0c6STheodore Ts'o 	/* Wait all existing dio workers, newcomers will block on i_mutex */
363626a4c0c6STheodore Ts'o 	ext4_inode_block_unlocked_dio(inode);
363726a4c0c6STheodore Ts'o 	ret = ext4_flush_unwritten_io(inode);
363826a4c0c6STheodore Ts'o 	if (ret)
363926a4c0c6STheodore Ts'o 		goto out_dio;
364026a4c0c6STheodore Ts'o 	inode_dio_wait(inode);
364126a4c0c6STheodore Ts'o 
364226a4c0c6STheodore Ts'o 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
364326a4c0c6STheodore Ts'o 		credits = ext4_writepage_trans_blocks(inode);
364426a4c0c6STheodore Ts'o 	else
364526a4c0c6STheodore Ts'o 		credits = ext4_blocks_for_truncate(inode);
364626a4c0c6STheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
364726a4c0c6STheodore Ts'o 	if (IS_ERR(handle)) {
364826a4c0c6STheodore Ts'o 		ret = PTR_ERR(handle);
364926a4c0c6STheodore Ts'o 		ext4_std_error(sb, ret);
365026a4c0c6STheodore Ts'o 		goto out_dio;
365126a4c0c6STheodore Ts'o 	}
365226a4c0c6STheodore Ts'o 
365326a4c0c6STheodore Ts'o 	/*
365426a4c0c6STheodore Ts'o 	 * Now we need to zero out the non-page-aligned data in the
365526a4c0c6STheodore Ts'o 	 * pages at the start and tail of the hole, and unmap the
365626a4c0c6STheodore Ts'o 	 * buffer heads for the block aligned regions of the page that
365726a4c0c6STheodore Ts'o 	 * were completely zeroed.
365826a4c0c6STheodore Ts'o 	 */
365926a4c0c6STheodore Ts'o 	if (first_page > last_page) {
366026a4c0c6STheodore Ts'o 		/*
366126a4c0c6STheodore Ts'o 		 * If the file space being truncated is contained
366226a4c0c6STheodore Ts'o 		 * within a page just zero out and unmap the middle of
366326a4c0c6STheodore Ts'o 		 * that page
366426a4c0c6STheodore Ts'o 		 */
366526a4c0c6STheodore Ts'o 		ret = ext4_discard_partial_page_buffers(handle,
366626a4c0c6STheodore Ts'o 			mapping, offset, length, 0);
366726a4c0c6STheodore Ts'o 
366826a4c0c6STheodore Ts'o 		if (ret)
366926a4c0c6STheodore Ts'o 			goto out_stop;
367026a4c0c6STheodore Ts'o 	} else {
367126a4c0c6STheodore Ts'o 		/*
367226a4c0c6STheodore Ts'o 		 * zero out and unmap the partial page that contains
367326a4c0c6STheodore Ts'o 		 * the start of the hole
367426a4c0c6STheodore Ts'o 		 */
367526a4c0c6STheodore Ts'o 		page_len = first_page_offset - offset;
367626a4c0c6STheodore Ts'o 		if (page_len > 0) {
367726a4c0c6STheodore Ts'o 			ret = ext4_discard_partial_page_buffers(handle, mapping,
367826a4c0c6STheodore Ts'o 						offset, page_len, 0);
367926a4c0c6STheodore Ts'o 			if (ret)
368026a4c0c6STheodore Ts'o 				goto out_stop;
368126a4c0c6STheodore Ts'o 		}
368226a4c0c6STheodore Ts'o 
368326a4c0c6STheodore Ts'o 		/*
368426a4c0c6STheodore Ts'o 		 * zero out and unmap the partial page that contains
368526a4c0c6STheodore Ts'o 		 * the end of the hole
368626a4c0c6STheodore Ts'o 		 */
368726a4c0c6STheodore Ts'o 		page_len = offset + length - last_page_offset;
368826a4c0c6STheodore Ts'o 		if (page_len > 0) {
368926a4c0c6STheodore Ts'o 			ret = ext4_discard_partial_page_buffers(handle, mapping,
369026a4c0c6STheodore Ts'o 					last_page_offset, page_len, 0);
369126a4c0c6STheodore Ts'o 			if (ret)
369226a4c0c6STheodore Ts'o 				goto out_stop;
369326a4c0c6STheodore Ts'o 		}
369426a4c0c6STheodore Ts'o 	}
369526a4c0c6STheodore Ts'o 
369626a4c0c6STheodore Ts'o 	/*
369726a4c0c6STheodore Ts'o 	 * If i_size is contained in the last page, we need to
369826a4c0c6STheodore Ts'o 	 * unmap and zero the partial page after i_size
369926a4c0c6STheodore Ts'o 	 */
370026a4c0c6STheodore Ts'o 	if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
370126a4c0c6STheodore Ts'o 	   inode->i_size % PAGE_CACHE_SIZE != 0) {
370226a4c0c6STheodore Ts'o 		page_len = PAGE_CACHE_SIZE -
370326a4c0c6STheodore Ts'o 			(inode->i_size & (PAGE_CACHE_SIZE - 1));
370426a4c0c6STheodore Ts'o 
370526a4c0c6STheodore Ts'o 		if (page_len > 0) {
370626a4c0c6STheodore Ts'o 			ret = ext4_discard_partial_page_buffers(handle,
370726a4c0c6STheodore Ts'o 					mapping, inode->i_size, page_len, 0);
370826a4c0c6STheodore Ts'o 
370926a4c0c6STheodore Ts'o 			if (ret)
371026a4c0c6STheodore Ts'o 				goto out_stop;
371126a4c0c6STheodore Ts'o 		}
371226a4c0c6STheodore Ts'o 	}
371326a4c0c6STheodore Ts'o 
371426a4c0c6STheodore Ts'o 	first_block = (offset + sb->s_blocksize - 1) >>
371526a4c0c6STheodore Ts'o 		EXT4_BLOCK_SIZE_BITS(sb);
371626a4c0c6STheodore Ts'o 	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
371726a4c0c6STheodore Ts'o 
371826a4c0c6STheodore Ts'o 	/* If there are no blocks to remove, return now */
371926a4c0c6STheodore Ts'o 	if (first_block >= stop_block)
372026a4c0c6STheodore Ts'o 		goto out_stop;
372126a4c0c6STheodore Ts'o 
372226a4c0c6STheodore Ts'o 	down_write(&EXT4_I(inode)->i_data_sem);
372326a4c0c6STheodore Ts'o 	ext4_discard_preallocations(inode);
372426a4c0c6STheodore Ts'o 
372526a4c0c6STheodore Ts'o 	ret = ext4_es_remove_extent(inode, first_block,
372626a4c0c6STheodore Ts'o 				    stop_block - first_block);
372726a4c0c6STheodore Ts'o 	if (ret) {
372826a4c0c6STheodore Ts'o 		up_write(&EXT4_I(inode)->i_data_sem);
372926a4c0c6STheodore Ts'o 		goto out_stop;
373026a4c0c6STheodore Ts'o 	}
373126a4c0c6STheodore Ts'o 
373226a4c0c6STheodore Ts'o 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
373326a4c0c6STheodore Ts'o 		ret = ext4_ext_remove_space(inode, first_block,
373426a4c0c6STheodore Ts'o 					    stop_block - 1);
373526a4c0c6STheodore Ts'o 	else
373626a4c0c6STheodore Ts'o 		ret = ext4_free_hole_blocks(handle, inode, first_block,
373726a4c0c6STheodore Ts'o 					    stop_block);
373826a4c0c6STheodore Ts'o 
373926a4c0c6STheodore Ts'o 	ext4_discard_preallocations(inode);
3740819c4920STheodore Ts'o 	up_write(&EXT4_I(inode)->i_data_sem);
374126a4c0c6STheodore Ts'o 	if (IS_SYNC(inode))
374226a4c0c6STheodore Ts'o 		ext4_handle_sync(handle);
374326a4c0c6STheodore Ts'o 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
374426a4c0c6STheodore Ts'o 	ext4_mark_inode_dirty(handle, inode);
374526a4c0c6STheodore Ts'o out_stop:
374626a4c0c6STheodore Ts'o 	ext4_journal_stop(handle);
374726a4c0c6STheodore Ts'o out_dio:
374826a4c0c6STheodore Ts'o 	ext4_inode_resume_unlocked_dio(inode);
374926a4c0c6STheodore Ts'o out_mutex:
375026a4c0c6STheodore Ts'o 	mutex_unlock(&inode->i_mutex);
375126a4c0c6STheodore Ts'o 	return ret;
3752a4bb6b64SAllison Henderson }
3753a4bb6b64SAllison Henderson 
3754a4bb6b64SAllison Henderson /*
3755617ba13bSMingming Cao  * ext4_truncate()
3756ac27a0ecSDave Kleikamp  *
3757617ba13bSMingming Cao  * We block out ext4_get_block() block instantiations across the entire
3758617ba13bSMingming Cao  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3759ac27a0ecSDave Kleikamp  * simultaneously on behalf of the same inode.
3760ac27a0ecSDave Kleikamp  *
376142b2aa86SJustin P. Mattock  * As we work through the truncate and commit bits of it to the journal there
3762ac27a0ecSDave Kleikamp  * is one core, guiding principle: the file's tree must always be consistent on
3763ac27a0ecSDave Kleikamp  * disk.  We must be able to restart the truncate after a crash.
3764ac27a0ecSDave Kleikamp  *
3765ac27a0ecSDave Kleikamp  * The file's tree may be transiently inconsistent in memory (although it
3766ac27a0ecSDave Kleikamp  * probably isn't), but whenever we close off and commit a journal transaction,
3767ac27a0ecSDave Kleikamp  * the contents of (the filesystem + the journal) must be consistent and
3768ac27a0ecSDave Kleikamp  * restartable.  It's pretty simple, really: bottom up, right to left (although
3769ac27a0ecSDave Kleikamp  * left-to-right works OK too).
3770ac27a0ecSDave Kleikamp  *
3771ac27a0ecSDave Kleikamp  * Note that at recovery time, journal replay occurs *before* the restart of
3772ac27a0ecSDave Kleikamp  * truncate against the orphan inode list.
3773ac27a0ecSDave Kleikamp  *
3774ac27a0ecSDave Kleikamp  * The committed inode has the new, desired i_size (which is the same as
3775617ba13bSMingming Cao  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3776ac27a0ecSDave Kleikamp  * that this inode's truncate did not complete and it will again call
3777617ba13bSMingming Cao  * ext4_truncate() to have another go.  So there will be instantiated blocks
3778617ba13bSMingming Cao  * to the right of the truncation point in a crashed ext4 filesystem.  But
3779ac27a0ecSDave Kleikamp  * that's fine - as long as they are linked from the inode, the post-crash
3780617ba13bSMingming Cao  * ext4_truncate() run will find them and release them.
3781ac27a0ecSDave Kleikamp  */
3782617ba13bSMingming Cao void ext4_truncate(struct inode *inode)
3783ac27a0ecSDave Kleikamp {
3784819c4920STheodore Ts'o 	struct ext4_inode_info *ei = EXT4_I(inode);
3785819c4920STheodore Ts'o 	unsigned int credits;
3786819c4920STheodore Ts'o 	handle_t *handle;
3787819c4920STheodore Ts'o 	struct address_space *mapping = inode->i_mapping;
3788819c4920STheodore Ts'o 	loff_t page_len;
3789819c4920STheodore Ts'o 
379019b5ef61STheodore Ts'o 	/*
379119b5ef61STheodore Ts'o 	 * There is a possibility that we're either freeing the inode
379219b5ef61STheodore Ts'o 	 * or it completely new indode. In those cases we might not
379319b5ef61STheodore Ts'o 	 * have i_mutex locked because it's not necessary.
379419b5ef61STheodore Ts'o 	 */
379519b5ef61STheodore Ts'o 	if (!(inode->i_state & (I_NEW|I_FREEING)))
379619b5ef61STheodore Ts'o 		WARN_ON(!mutex_is_locked(&inode->i_mutex));
37970562e0baSJiaying Zhang 	trace_ext4_truncate_enter(inode);
37980562e0baSJiaying Zhang 
379991ef4cafSDuane Griffin 	if (!ext4_can_truncate(inode))
3800ac27a0ecSDave Kleikamp 		return;
3801ac27a0ecSDave Kleikamp 
380212e9b892SDmitry Monakhov 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3803c8d46e41SJiaying Zhang 
38045534fb5bSTheodore Ts'o 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
380519f5fb7aSTheodore Ts'o 		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
38067d8f9f7dSTheodore Ts'o 
3807aef1c851STao Ma 	if (ext4_has_inline_data(inode)) {
3808aef1c851STao Ma 		int has_inline = 1;
3809aef1c851STao Ma 
3810aef1c851STao Ma 		ext4_inline_data_truncate(inode, &has_inline);
3811aef1c851STao Ma 		if (has_inline)
3812aef1c851STao Ma 			return;
3813aef1c851STao Ma 	}
3814aef1c851STao Ma 
3815819c4920STheodore Ts'o 	/*
3816819c4920STheodore Ts'o 	 * finish any pending end_io work so we won't run the risk of
3817819c4920STheodore Ts'o 	 * converting any truncated blocks to initialized later
3818819c4920STheodore Ts'o 	 */
3819819c4920STheodore Ts'o 	ext4_flush_unwritten_io(inode);
3820819c4920STheodore Ts'o 
3821ff9893dcSAmir Goldstein 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3822819c4920STheodore Ts'o 		credits = ext4_writepage_trans_blocks(inode);
3823ff9893dcSAmir Goldstein 	else
3824819c4920STheodore Ts'o 		credits = ext4_blocks_for_truncate(inode);
3825819c4920STheodore Ts'o 
3826819c4920STheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3827819c4920STheodore Ts'o 	if (IS_ERR(handle)) {
3828819c4920STheodore Ts'o 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
3829819c4920STheodore Ts'o 		return;
3830819c4920STheodore Ts'o 	}
3831819c4920STheodore Ts'o 
3832819c4920STheodore Ts'o 	if (inode->i_size % PAGE_CACHE_SIZE != 0) {
3833819c4920STheodore Ts'o 		page_len = PAGE_CACHE_SIZE -
3834819c4920STheodore Ts'o 			(inode->i_size & (PAGE_CACHE_SIZE - 1));
3835819c4920STheodore Ts'o 
3836819c4920STheodore Ts'o 		if (ext4_discard_partial_page_buffers(handle,
3837819c4920STheodore Ts'o 				mapping, inode->i_size, page_len, 0))
3838819c4920STheodore Ts'o 			goto out_stop;
3839819c4920STheodore Ts'o 	}
3840819c4920STheodore Ts'o 
3841819c4920STheodore Ts'o 	/*
3842819c4920STheodore Ts'o 	 * We add the inode to the orphan list, so that if this
3843819c4920STheodore Ts'o 	 * truncate spans multiple transactions, and we crash, we will
3844819c4920STheodore Ts'o 	 * resume the truncate when the filesystem recovers.  It also
3845819c4920STheodore Ts'o 	 * marks the inode dirty, to catch the new size.
3846819c4920STheodore Ts'o 	 *
3847819c4920STheodore Ts'o 	 * Implication: the file must always be in a sane, consistent
3848819c4920STheodore Ts'o 	 * truncatable state while each transaction commits.
3849819c4920STheodore Ts'o 	 */
3850819c4920STheodore Ts'o 	if (ext4_orphan_add(handle, inode))
3851819c4920STheodore Ts'o 		goto out_stop;
3852819c4920STheodore Ts'o 
3853819c4920STheodore Ts'o 	down_write(&EXT4_I(inode)->i_data_sem);
3854819c4920STheodore Ts'o 
3855819c4920STheodore Ts'o 	ext4_discard_preallocations(inode);
3856819c4920STheodore Ts'o 
3857819c4920STheodore Ts'o 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3858819c4920STheodore Ts'o 		ext4_ext_truncate(handle, inode);
3859819c4920STheodore Ts'o 	else
3860819c4920STheodore Ts'o 		ext4_ind_truncate(handle, inode);
3861819c4920STheodore Ts'o 
3862819c4920STheodore Ts'o 	up_write(&ei->i_data_sem);
3863819c4920STheodore Ts'o 
3864819c4920STheodore Ts'o 	if (IS_SYNC(inode))
3865819c4920STheodore Ts'o 		ext4_handle_sync(handle);
3866819c4920STheodore Ts'o 
3867819c4920STheodore Ts'o out_stop:
3868819c4920STheodore Ts'o 	/*
3869819c4920STheodore Ts'o 	 * If this was a simple ftruncate() and the file will remain alive,
3870819c4920STheodore Ts'o 	 * then we need to clear up the orphan record which we created above.
3871819c4920STheodore Ts'o 	 * However, if this was a real unlink then we were called by
3872819c4920STheodore Ts'o 	 * ext4_delete_inode(), and we allow that function to clean up the
3873819c4920STheodore Ts'o 	 * orphan info for us.
3874819c4920STheodore Ts'o 	 */
3875819c4920STheodore Ts'o 	if (inode->i_nlink)
3876819c4920STheodore Ts'o 		ext4_orphan_del(handle, inode);
3877819c4920STheodore Ts'o 
3878819c4920STheodore Ts'o 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3879819c4920STheodore Ts'o 	ext4_mark_inode_dirty(handle, inode);
3880819c4920STheodore Ts'o 	ext4_journal_stop(handle);
3881a86c6181SAlex Tomas 
38820562e0baSJiaying Zhang 	trace_ext4_truncate_exit(inode);
3883ac27a0ecSDave Kleikamp }
3884ac27a0ecSDave Kleikamp 
3885ac27a0ecSDave Kleikamp /*
3886617ba13bSMingming Cao  * ext4_get_inode_loc returns with an extra refcount against the inode's
3887ac27a0ecSDave Kleikamp  * underlying buffer_head on success. If 'in_mem' is true, we have all
3888ac27a0ecSDave Kleikamp  * data in memory that is needed to recreate the on-disk version of this
3889ac27a0ecSDave Kleikamp  * inode.
3890ac27a0ecSDave Kleikamp  */
3891617ba13bSMingming Cao static int __ext4_get_inode_loc(struct inode *inode,
3892617ba13bSMingming Cao 				struct ext4_iloc *iloc, int in_mem)
3893ac27a0ecSDave Kleikamp {
3894240799cdSTheodore Ts'o 	struct ext4_group_desc	*gdp;
3895ac27a0ecSDave Kleikamp 	struct buffer_head	*bh;
3896240799cdSTheodore Ts'o 	struct super_block	*sb = inode->i_sb;
3897240799cdSTheodore Ts'o 	ext4_fsblk_t		block;
3898240799cdSTheodore Ts'o 	int			inodes_per_block, inode_offset;
3899ac27a0ecSDave Kleikamp 
39003a06d778SAneesh Kumar K.V 	iloc->bh = NULL;
3901240799cdSTheodore Ts'o 	if (!ext4_valid_inum(sb, inode->i_ino))
3902ac27a0ecSDave Kleikamp 		return -EIO;
3903ac27a0ecSDave Kleikamp 
3904240799cdSTheodore Ts'o 	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3905240799cdSTheodore Ts'o 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3906240799cdSTheodore Ts'o 	if (!gdp)
3907240799cdSTheodore Ts'o 		return -EIO;
3908240799cdSTheodore Ts'o 
3909240799cdSTheodore Ts'o 	/*
3910240799cdSTheodore Ts'o 	 * Figure out the offset within the block group inode table
3911240799cdSTheodore Ts'o 	 */
391200d09882STao Ma 	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3913240799cdSTheodore Ts'o 	inode_offset = ((inode->i_ino - 1) %
3914240799cdSTheodore Ts'o 			EXT4_INODES_PER_GROUP(sb));
3915240799cdSTheodore Ts'o 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3916240799cdSTheodore Ts'o 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3917240799cdSTheodore Ts'o 
3918240799cdSTheodore Ts'o 	bh = sb_getblk(sb, block);
3919aebf0243SWang Shilong 	if (unlikely(!bh))
3920860d21e2STheodore Ts'o 		return -ENOMEM;
3921ac27a0ecSDave Kleikamp 	if (!buffer_uptodate(bh)) {
3922ac27a0ecSDave Kleikamp 		lock_buffer(bh);
39239c83a923SHidehiro Kawai 
39249c83a923SHidehiro Kawai 		/*
39259c83a923SHidehiro Kawai 		 * If the buffer has the write error flag, we have failed
39269c83a923SHidehiro Kawai 		 * to write out another inode in the same block.  In this
39279c83a923SHidehiro Kawai 		 * case, we don't have to read the block because we may
39289c83a923SHidehiro Kawai 		 * read the old inode data successfully.
39299c83a923SHidehiro Kawai 		 */
39309c83a923SHidehiro Kawai 		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
39319c83a923SHidehiro Kawai 			set_buffer_uptodate(bh);
39329c83a923SHidehiro Kawai 
3933ac27a0ecSDave Kleikamp 		if (buffer_uptodate(bh)) {
3934ac27a0ecSDave Kleikamp 			/* someone brought it uptodate while we waited */
3935ac27a0ecSDave Kleikamp 			unlock_buffer(bh);
3936ac27a0ecSDave Kleikamp 			goto has_buffer;
3937ac27a0ecSDave Kleikamp 		}
3938ac27a0ecSDave Kleikamp 
3939ac27a0ecSDave Kleikamp 		/*
3940ac27a0ecSDave Kleikamp 		 * If we have all information of the inode in memory and this
3941ac27a0ecSDave Kleikamp 		 * is the only valid inode in the block, we need not read the
3942ac27a0ecSDave Kleikamp 		 * block.
3943ac27a0ecSDave Kleikamp 		 */
3944ac27a0ecSDave Kleikamp 		if (in_mem) {
3945ac27a0ecSDave Kleikamp 			struct buffer_head *bitmap_bh;
3946240799cdSTheodore Ts'o 			int i, start;
3947ac27a0ecSDave Kleikamp 
3948240799cdSTheodore Ts'o 			start = inode_offset & ~(inodes_per_block - 1);
3949ac27a0ecSDave Kleikamp 
3950ac27a0ecSDave Kleikamp 			/* Is the inode bitmap in cache? */
3951240799cdSTheodore Ts'o 			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3952aebf0243SWang Shilong 			if (unlikely(!bitmap_bh))
3953ac27a0ecSDave Kleikamp 				goto make_io;
3954ac27a0ecSDave Kleikamp 
3955ac27a0ecSDave Kleikamp 			/*
3956ac27a0ecSDave Kleikamp 			 * If the inode bitmap isn't in cache then the
3957ac27a0ecSDave Kleikamp 			 * optimisation may end up performing two reads instead
3958ac27a0ecSDave Kleikamp 			 * of one, so skip it.
3959ac27a0ecSDave Kleikamp 			 */
3960ac27a0ecSDave Kleikamp 			if (!buffer_uptodate(bitmap_bh)) {
3961ac27a0ecSDave Kleikamp 				brelse(bitmap_bh);
3962ac27a0ecSDave Kleikamp 				goto make_io;
3963ac27a0ecSDave Kleikamp 			}
3964240799cdSTheodore Ts'o 			for (i = start; i < start + inodes_per_block; i++) {
3965ac27a0ecSDave Kleikamp 				if (i == inode_offset)
3966ac27a0ecSDave Kleikamp 					continue;
3967617ba13bSMingming Cao 				if (ext4_test_bit(i, bitmap_bh->b_data))
3968ac27a0ecSDave Kleikamp 					break;
3969ac27a0ecSDave Kleikamp 			}
3970ac27a0ecSDave Kleikamp 			brelse(bitmap_bh);
3971240799cdSTheodore Ts'o 			if (i == start + inodes_per_block) {
3972ac27a0ecSDave Kleikamp 				/* all other inodes are free, so skip I/O */
3973ac27a0ecSDave Kleikamp 				memset(bh->b_data, 0, bh->b_size);
3974ac27a0ecSDave Kleikamp 				set_buffer_uptodate(bh);
3975ac27a0ecSDave Kleikamp 				unlock_buffer(bh);
3976ac27a0ecSDave Kleikamp 				goto has_buffer;
3977ac27a0ecSDave Kleikamp 			}
3978ac27a0ecSDave Kleikamp 		}
3979ac27a0ecSDave Kleikamp 
3980ac27a0ecSDave Kleikamp make_io:
3981ac27a0ecSDave Kleikamp 		/*
3982240799cdSTheodore Ts'o 		 * If we need to do any I/O, try to pre-readahead extra
3983240799cdSTheodore Ts'o 		 * blocks from the inode table.
3984240799cdSTheodore Ts'o 		 */
3985240799cdSTheodore Ts'o 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
3986240799cdSTheodore Ts'o 			ext4_fsblk_t b, end, table;
3987240799cdSTheodore Ts'o 			unsigned num;
3988240799cdSTheodore Ts'o 
3989240799cdSTheodore Ts'o 			table = ext4_inode_table(sb, gdp);
3990b713a5ecSTheodore Ts'o 			/* s_inode_readahead_blks is always a power of 2 */
3991240799cdSTheodore Ts'o 			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
3992240799cdSTheodore Ts'o 			if (table > b)
3993240799cdSTheodore Ts'o 				b = table;
3994240799cdSTheodore Ts'o 			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3995240799cdSTheodore Ts'o 			num = EXT4_INODES_PER_GROUP(sb);
3996feb0ab32SDarrick J. Wong 			if (ext4_has_group_desc_csum(sb))
3997560671a0SAneesh Kumar K.V 				num -= ext4_itable_unused_count(sb, gdp);
3998240799cdSTheodore Ts'o 			table += num / inodes_per_block;
3999240799cdSTheodore Ts'o 			if (end > table)
4000240799cdSTheodore Ts'o 				end = table;
4001240799cdSTheodore Ts'o 			while (b <= end)
4002240799cdSTheodore Ts'o 				sb_breadahead(sb, b++);
4003240799cdSTheodore Ts'o 		}
4004240799cdSTheodore Ts'o 
4005240799cdSTheodore Ts'o 		/*
4006ac27a0ecSDave Kleikamp 		 * There are other valid inodes in the buffer, this inode
4007ac27a0ecSDave Kleikamp 		 * has in-inode xattrs, or we don't have this inode in memory.
4008ac27a0ecSDave Kleikamp 		 * Read the block from disk.
4009ac27a0ecSDave Kleikamp 		 */
40100562e0baSJiaying Zhang 		trace_ext4_load_inode(inode);
4011ac27a0ecSDave Kleikamp 		get_bh(bh);
4012ac27a0ecSDave Kleikamp 		bh->b_end_io = end_buffer_read_sync;
401365299a3bSChristoph Hellwig 		submit_bh(READ | REQ_META | REQ_PRIO, bh);
4014ac27a0ecSDave Kleikamp 		wait_on_buffer(bh);
4015ac27a0ecSDave Kleikamp 		if (!buffer_uptodate(bh)) {
4016c398eda0STheodore Ts'o 			EXT4_ERROR_INODE_BLOCK(inode, block,
4017c398eda0STheodore Ts'o 					       "unable to read itable block");
4018ac27a0ecSDave Kleikamp 			brelse(bh);
4019ac27a0ecSDave Kleikamp 			return -EIO;
4020ac27a0ecSDave Kleikamp 		}
4021ac27a0ecSDave Kleikamp 	}
4022ac27a0ecSDave Kleikamp has_buffer:
4023ac27a0ecSDave Kleikamp 	iloc->bh = bh;
4024ac27a0ecSDave Kleikamp 	return 0;
4025ac27a0ecSDave Kleikamp }
4026ac27a0ecSDave Kleikamp 
4027617ba13bSMingming Cao int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4028ac27a0ecSDave Kleikamp {
4029ac27a0ecSDave Kleikamp 	/* We have all inode data except xattrs in memory here. */
4030617ba13bSMingming Cao 	return __ext4_get_inode_loc(inode, iloc,
403119f5fb7aSTheodore Ts'o 		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
4032ac27a0ecSDave Kleikamp }
4033ac27a0ecSDave Kleikamp 
4034617ba13bSMingming Cao void ext4_set_inode_flags(struct inode *inode)
4035ac27a0ecSDave Kleikamp {
4036617ba13bSMingming Cao 	unsigned int flags = EXT4_I(inode)->i_flags;
4037ac27a0ecSDave Kleikamp 
4038ac27a0ecSDave Kleikamp 	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
4039617ba13bSMingming Cao 	if (flags & EXT4_SYNC_FL)
4040ac27a0ecSDave Kleikamp 		inode->i_flags |= S_SYNC;
4041617ba13bSMingming Cao 	if (flags & EXT4_APPEND_FL)
4042ac27a0ecSDave Kleikamp 		inode->i_flags |= S_APPEND;
4043617ba13bSMingming Cao 	if (flags & EXT4_IMMUTABLE_FL)
4044ac27a0ecSDave Kleikamp 		inode->i_flags |= S_IMMUTABLE;
4045617ba13bSMingming Cao 	if (flags & EXT4_NOATIME_FL)
4046ac27a0ecSDave Kleikamp 		inode->i_flags |= S_NOATIME;
4047617ba13bSMingming Cao 	if (flags & EXT4_DIRSYNC_FL)
4048ac27a0ecSDave Kleikamp 		inode->i_flags |= S_DIRSYNC;
4049ac27a0ecSDave Kleikamp }
4050ac27a0ecSDave Kleikamp 
4051ff9ddf7eSJan Kara /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4052ff9ddf7eSJan Kara void ext4_get_inode_flags(struct ext4_inode_info *ei)
4053ff9ddf7eSJan Kara {
405484a8dce2SDmitry Monakhov 	unsigned int vfs_fl;
405584a8dce2SDmitry Monakhov 	unsigned long old_fl, new_fl;
4056ff9ddf7eSJan Kara 
405784a8dce2SDmitry Monakhov 	do {
405884a8dce2SDmitry Monakhov 		vfs_fl = ei->vfs_inode.i_flags;
405984a8dce2SDmitry Monakhov 		old_fl = ei->i_flags;
406084a8dce2SDmitry Monakhov 		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
406184a8dce2SDmitry Monakhov 				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
406284a8dce2SDmitry Monakhov 				EXT4_DIRSYNC_FL);
406384a8dce2SDmitry Monakhov 		if (vfs_fl & S_SYNC)
406484a8dce2SDmitry Monakhov 			new_fl |= EXT4_SYNC_FL;
406584a8dce2SDmitry Monakhov 		if (vfs_fl & S_APPEND)
406684a8dce2SDmitry Monakhov 			new_fl |= EXT4_APPEND_FL;
406784a8dce2SDmitry Monakhov 		if (vfs_fl & S_IMMUTABLE)
406884a8dce2SDmitry Monakhov 			new_fl |= EXT4_IMMUTABLE_FL;
406984a8dce2SDmitry Monakhov 		if (vfs_fl & S_NOATIME)
407084a8dce2SDmitry Monakhov 			new_fl |= EXT4_NOATIME_FL;
407184a8dce2SDmitry Monakhov 		if (vfs_fl & S_DIRSYNC)
407284a8dce2SDmitry Monakhov 			new_fl |= EXT4_DIRSYNC_FL;
407384a8dce2SDmitry Monakhov 	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
4074ff9ddf7eSJan Kara }
4075de9a55b8STheodore Ts'o 
40760fc1b451SAneesh Kumar K.V static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
40770fc1b451SAneesh Kumar K.V 				  struct ext4_inode_info *ei)
40780fc1b451SAneesh Kumar K.V {
40790fc1b451SAneesh Kumar K.V 	blkcnt_t i_blocks ;
40808180a562SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
40818180a562SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
40820fc1b451SAneesh Kumar K.V 
40830fc1b451SAneesh Kumar K.V 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
40840fc1b451SAneesh Kumar K.V 				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
40850fc1b451SAneesh Kumar K.V 		/* we are using combined 48 bit field */
40860fc1b451SAneesh Kumar K.V 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
40870fc1b451SAneesh Kumar K.V 					le32_to_cpu(raw_inode->i_blocks_lo);
408807a03824STheodore Ts'o 		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
40898180a562SAneesh Kumar K.V 			/* i_blocks represent file system block size */
40908180a562SAneesh Kumar K.V 			return i_blocks  << (inode->i_blkbits - 9);
40918180a562SAneesh Kumar K.V 		} else {
40920fc1b451SAneesh Kumar K.V 			return i_blocks;
40938180a562SAneesh Kumar K.V 		}
40940fc1b451SAneesh Kumar K.V 	} else {
40950fc1b451SAneesh Kumar K.V 		return le32_to_cpu(raw_inode->i_blocks_lo);
40960fc1b451SAneesh Kumar K.V 	}
40970fc1b451SAneesh Kumar K.V }
4098ff9ddf7eSJan Kara 
4099152a7b0aSTao Ma static inline void ext4_iget_extra_inode(struct inode *inode,
4100152a7b0aSTao Ma 					 struct ext4_inode *raw_inode,
4101152a7b0aSTao Ma 					 struct ext4_inode_info *ei)
4102152a7b0aSTao Ma {
4103152a7b0aSTao Ma 	__le32 *magic = (void *)raw_inode +
4104152a7b0aSTao Ma 			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
410567cf5b09STao Ma 	if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4106152a7b0aSTao Ma 		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
410767cf5b09STao Ma 		ext4_find_inline_data_nolock(inode);
4108f19d5870STao Ma 	} else
4109f19d5870STao Ma 		EXT4_I(inode)->i_inline_off = 0;
4110152a7b0aSTao Ma }
4111152a7b0aSTao Ma 
41121d1fe1eeSDavid Howells struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4113ac27a0ecSDave Kleikamp {
4114617ba13bSMingming Cao 	struct ext4_iloc iloc;
4115617ba13bSMingming Cao 	struct ext4_inode *raw_inode;
41161d1fe1eeSDavid Howells 	struct ext4_inode_info *ei;
41171d1fe1eeSDavid Howells 	struct inode *inode;
4118b436b9beSJan Kara 	journal_t *journal = EXT4_SB(sb)->s_journal;
41191d1fe1eeSDavid Howells 	long ret;
4120ac27a0ecSDave Kleikamp 	int block;
412108cefc7aSEric W. Biederman 	uid_t i_uid;
412208cefc7aSEric W. Biederman 	gid_t i_gid;
4123ac27a0ecSDave Kleikamp 
41241d1fe1eeSDavid Howells 	inode = iget_locked(sb, ino);
41251d1fe1eeSDavid Howells 	if (!inode)
41261d1fe1eeSDavid Howells 		return ERR_PTR(-ENOMEM);
41271d1fe1eeSDavid Howells 	if (!(inode->i_state & I_NEW))
41281d1fe1eeSDavid Howells 		return inode;
41291d1fe1eeSDavid Howells 
41301d1fe1eeSDavid Howells 	ei = EXT4_I(inode);
41317dc57615SPeter Huewe 	iloc.bh = NULL;
4132ac27a0ecSDave Kleikamp 
41331d1fe1eeSDavid Howells 	ret = __ext4_get_inode_loc(inode, &iloc, 0);
41341d1fe1eeSDavid Howells 	if (ret < 0)
4135ac27a0ecSDave Kleikamp 		goto bad_inode;
4136617ba13bSMingming Cao 	raw_inode = ext4_raw_inode(&iloc);
4137814525f4SDarrick J. Wong 
4138814525f4SDarrick J. Wong 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4139814525f4SDarrick J. Wong 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4140814525f4SDarrick J. Wong 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4141814525f4SDarrick J. Wong 		    EXT4_INODE_SIZE(inode->i_sb)) {
4142814525f4SDarrick J. Wong 			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
4143814525f4SDarrick J. Wong 				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
4144814525f4SDarrick J. Wong 				EXT4_INODE_SIZE(inode->i_sb));
4145814525f4SDarrick J. Wong 			ret = -EIO;
4146814525f4SDarrick J. Wong 			goto bad_inode;
4147814525f4SDarrick J. Wong 		}
4148814525f4SDarrick J. Wong 	} else
4149814525f4SDarrick J. Wong 		ei->i_extra_isize = 0;
4150814525f4SDarrick J. Wong 
4151814525f4SDarrick J. Wong 	/* Precompute checksum seed for inode metadata */
4152814525f4SDarrick J. Wong 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4153814525f4SDarrick J. Wong 			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
4154814525f4SDarrick J. Wong 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4155814525f4SDarrick J. Wong 		__u32 csum;
4156814525f4SDarrick J. Wong 		__le32 inum = cpu_to_le32(inode->i_ino);
4157814525f4SDarrick J. Wong 		__le32 gen = raw_inode->i_generation;
4158814525f4SDarrick J. Wong 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4159814525f4SDarrick J. Wong 				   sizeof(inum));
4160814525f4SDarrick J. Wong 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4161814525f4SDarrick J. Wong 					      sizeof(gen));
4162814525f4SDarrick J. Wong 	}
4163814525f4SDarrick J. Wong 
4164814525f4SDarrick J. Wong 	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4165814525f4SDarrick J. Wong 		EXT4_ERROR_INODE(inode, "checksum invalid");
4166814525f4SDarrick J. Wong 		ret = -EIO;
4167814525f4SDarrick J. Wong 		goto bad_inode;
4168814525f4SDarrick J. Wong 	}
4169814525f4SDarrick J. Wong 
4170ac27a0ecSDave Kleikamp 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
417108cefc7aSEric W. Biederman 	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
417208cefc7aSEric W. Biederman 	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4173ac27a0ecSDave Kleikamp 	if (!(test_opt(inode->i_sb, NO_UID32))) {
417408cefc7aSEric W. Biederman 		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
417508cefc7aSEric W. Biederman 		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4176ac27a0ecSDave Kleikamp 	}
417708cefc7aSEric W. Biederman 	i_uid_write(inode, i_uid);
417808cefc7aSEric W. Biederman 	i_gid_write(inode, i_gid);
4179bfe86848SMiklos Szeredi 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4180ac27a0ecSDave Kleikamp 
4181353eb83cSTheodore Ts'o 	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
418267cf5b09STao Ma 	ei->i_inline_off = 0;
4183ac27a0ecSDave Kleikamp 	ei->i_dir_start_lookup = 0;
4184ac27a0ecSDave Kleikamp 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4185ac27a0ecSDave Kleikamp 	/* We now have enough fields to check if the inode was active or not.
4186ac27a0ecSDave Kleikamp 	 * This is needed because nfsd might try to access dead inodes
4187ac27a0ecSDave Kleikamp 	 * the test is that same one that e2fsck uses
4188ac27a0ecSDave Kleikamp 	 * NeilBrown 1999oct15
4189ac27a0ecSDave Kleikamp 	 */
4190ac27a0ecSDave Kleikamp 	if (inode->i_nlink == 0) {
4191ac27a0ecSDave Kleikamp 		if (inode->i_mode == 0 ||
4192617ba13bSMingming Cao 		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
4193ac27a0ecSDave Kleikamp 			/* this inode is deleted */
41941d1fe1eeSDavid Howells 			ret = -ESTALE;
4195ac27a0ecSDave Kleikamp 			goto bad_inode;
4196ac27a0ecSDave Kleikamp 		}
4197ac27a0ecSDave Kleikamp 		/* The only unlinked inodes we let through here have
4198ac27a0ecSDave Kleikamp 		 * valid i_mode and are being read by the orphan
4199ac27a0ecSDave Kleikamp 		 * recovery code: that's fine, we're about to complete
4200ac27a0ecSDave Kleikamp 		 * the process of deleting those. */
4201ac27a0ecSDave Kleikamp 	}
4202ac27a0ecSDave Kleikamp 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
42030fc1b451SAneesh Kumar K.V 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
42047973c0c1SAneesh Kumar K.V 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4205a9e81742STheodore Ts'o 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4206a1ddeb7eSBadari Pulavarty 		ei->i_file_acl |=
4207a1ddeb7eSBadari Pulavarty 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4208a48380f7SAneesh Kumar K.V 	inode->i_size = ext4_isize(raw_inode);
4209ac27a0ecSDave Kleikamp 	ei->i_disksize = inode->i_size;
4210a9e7f447SDmitry Monakhov #ifdef CONFIG_QUOTA
4211a9e7f447SDmitry Monakhov 	ei->i_reserved_quota = 0;
4212a9e7f447SDmitry Monakhov #endif
4213ac27a0ecSDave Kleikamp 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4214ac27a0ecSDave Kleikamp 	ei->i_block_group = iloc.block_group;
4215a4912123STheodore Ts'o 	ei->i_last_alloc_group = ~0;
4216ac27a0ecSDave Kleikamp 	/*
4217ac27a0ecSDave Kleikamp 	 * NOTE! The in-memory inode i_data array is in little-endian order
4218ac27a0ecSDave Kleikamp 	 * even on big-endian machines: we do NOT byteswap the block numbers!
4219ac27a0ecSDave Kleikamp 	 */
4220617ba13bSMingming Cao 	for (block = 0; block < EXT4_N_BLOCKS; block++)
4221ac27a0ecSDave Kleikamp 		ei->i_data[block] = raw_inode->i_block[block];
4222ac27a0ecSDave Kleikamp 	INIT_LIST_HEAD(&ei->i_orphan);
4223ac27a0ecSDave Kleikamp 
4224b436b9beSJan Kara 	/*
4225b436b9beSJan Kara 	 * Set transaction id's of transactions that have to be committed
4226b436b9beSJan Kara 	 * to finish f[data]sync. We set them to currently running transaction
4227b436b9beSJan Kara 	 * as we cannot be sure that the inode or some of its metadata isn't
4228b436b9beSJan Kara 	 * part of the transaction - the inode could have been reclaimed and
4229b436b9beSJan Kara 	 * now it is reread from disk.
4230b436b9beSJan Kara 	 */
4231b436b9beSJan Kara 	if (journal) {
4232b436b9beSJan Kara 		transaction_t *transaction;
4233b436b9beSJan Kara 		tid_t tid;
4234b436b9beSJan Kara 
4235a931da6aSTheodore Ts'o 		read_lock(&journal->j_state_lock);
4236b436b9beSJan Kara 		if (journal->j_running_transaction)
4237b436b9beSJan Kara 			transaction = journal->j_running_transaction;
4238b436b9beSJan Kara 		else
4239b436b9beSJan Kara 			transaction = journal->j_committing_transaction;
4240b436b9beSJan Kara 		if (transaction)
4241b436b9beSJan Kara 			tid = transaction->t_tid;
4242b436b9beSJan Kara 		else
4243b436b9beSJan Kara 			tid = journal->j_commit_sequence;
4244a931da6aSTheodore Ts'o 		read_unlock(&journal->j_state_lock);
4245b436b9beSJan Kara 		ei->i_sync_tid = tid;
4246b436b9beSJan Kara 		ei->i_datasync_tid = tid;
4247b436b9beSJan Kara 	}
4248b436b9beSJan Kara 
42490040d987SEric Sandeen 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4250ac27a0ecSDave Kleikamp 		if (ei->i_extra_isize == 0) {
4251ac27a0ecSDave Kleikamp 			/* The extra space is currently unused. Use it. */
4252617ba13bSMingming Cao 			ei->i_extra_isize = sizeof(struct ext4_inode) -
4253617ba13bSMingming Cao 					    EXT4_GOOD_OLD_INODE_SIZE;
4254ac27a0ecSDave Kleikamp 		} else {
4255152a7b0aSTao Ma 			ext4_iget_extra_inode(inode, raw_inode, ei);
4256ac27a0ecSDave Kleikamp 		}
4257814525f4SDarrick J. Wong 	}
4258ac27a0ecSDave Kleikamp 
4259ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4260ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4261ef7f3835SKalpak Shah 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4262ef7f3835SKalpak Shah 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4263ef7f3835SKalpak Shah 
426425ec56b5SJean Noel Cordenner 	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
426525ec56b5SJean Noel Cordenner 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
426625ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
426725ec56b5SJean Noel Cordenner 			inode->i_version |=
426825ec56b5SJean Noel Cordenner 			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
426925ec56b5SJean Noel Cordenner 	}
427025ec56b5SJean Noel Cordenner 
4271c4b5a614STheodore Ts'o 	ret = 0;
4272485c26ecSTheodore Ts'o 	if (ei->i_file_acl &&
42731032988cSTheodore Ts'o 	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
427424676da4STheodore Ts'o 		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
427524676da4STheodore Ts'o 				 ei->i_file_acl);
4276485c26ecSTheodore Ts'o 		ret = -EIO;
4277485c26ecSTheodore Ts'o 		goto bad_inode;
4278f19d5870STao Ma 	} else if (!ext4_has_inline_data(inode)) {
4279f19d5870STao Ma 		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4280f19d5870STao Ma 			if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4281c4b5a614STheodore Ts'o 			    (S_ISLNK(inode->i_mode) &&
4282f19d5870STao Ma 			     !ext4_inode_is_fast_symlink(inode))))
42837a262f7cSAneesh Kumar K.V 				/* Validate extent which is part of inode */
42847a262f7cSAneesh Kumar K.V 				ret = ext4_ext_check_inode(inode);
4285fe2c8191SThiemo Nagel 		} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4286fe2c8191SThiemo Nagel 			   (S_ISLNK(inode->i_mode) &&
4287fe2c8191SThiemo Nagel 			    !ext4_inode_is_fast_symlink(inode))) {
4288fe2c8191SThiemo Nagel 			/* Validate block references which are part of inode */
42891f7d1e77STheodore Ts'o 			ret = ext4_ind_check_inode(inode);
4290fe2c8191SThiemo Nagel 		}
4291f19d5870STao Ma 	}
4292567f3e9aSTheodore Ts'o 	if (ret)
42937a262f7cSAneesh Kumar K.V 		goto bad_inode;
42947a262f7cSAneesh Kumar K.V 
4295ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode)) {
4296617ba13bSMingming Cao 		inode->i_op = &ext4_file_inode_operations;
4297617ba13bSMingming Cao 		inode->i_fop = &ext4_file_operations;
4298617ba13bSMingming Cao 		ext4_set_aops(inode);
4299ac27a0ecSDave Kleikamp 	} else if (S_ISDIR(inode->i_mode)) {
4300617ba13bSMingming Cao 		inode->i_op = &ext4_dir_inode_operations;
4301617ba13bSMingming Cao 		inode->i_fop = &ext4_dir_operations;
4302ac27a0ecSDave Kleikamp 	} else if (S_ISLNK(inode->i_mode)) {
4303e83c1397SDuane Griffin 		if (ext4_inode_is_fast_symlink(inode)) {
4304617ba13bSMingming Cao 			inode->i_op = &ext4_fast_symlink_inode_operations;
4305e83c1397SDuane Griffin 			nd_terminate_link(ei->i_data, inode->i_size,
4306e83c1397SDuane Griffin 				sizeof(ei->i_data) - 1);
4307e83c1397SDuane Griffin 		} else {
4308617ba13bSMingming Cao 			inode->i_op = &ext4_symlink_inode_operations;
4309617ba13bSMingming Cao 			ext4_set_aops(inode);
4310ac27a0ecSDave Kleikamp 		}
4311563bdd61STheodore Ts'o 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4312563bdd61STheodore Ts'o 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4313617ba13bSMingming Cao 		inode->i_op = &ext4_special_inode_operations;
4314ac27a0ecSDave Kleikamp 		if (raw_inode->i_block[0])
4315ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
4316ac27a0ecSDave Kleikamp 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4317ac27a0ecSDave Kleikamp 		else
4318ac27a0ecSDave Kleikamp 			init_special_inode(inode, inode->i_mode,
4319ac27a0ecSDave Kleikamp 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4320563bdd61STheodore Ts'o 	} else {
4321563bdd61STheodore Ts'o 		ret = -EIO;
432224676da4STheodore Ts'o 		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
4323563bdd61STheodore Ts'o 		goto bad_inode;
4324ac27a0ecSDave Kleikamp 	}
4325ac27a0ecSDave Kleikamp 	brelse(iloc.bh);
4326617ba13bSMingming Cao 	ext4_set_inode_flags(inode);
43271d1fe1eeSDavid Howells 	unlock_new_inode(inode);
43281d1fe1eeSDavid Howells 	return inode;
4329ac27a0ecSDave Kleikamp 
4330ac27a0ecSDave Kleikamp bad_inode:
4331567f3e9aSTheodore Ts'o 	brelse(iloc.bh);
43321d1fe1eeSDavid Howells 	iget_failed(inode);
43331d1fe1eeSDavid Howells 	return ERR_PTR(ret);
4334ac27a0ecSDave Kleikamp }
4335ac27a0ecSDave Kleikamp 
43360fc1b451SAneesh Kumar K.V static int ext4_inode_blocks_set(handle_t *handle,
43370fc1b451SAneesh Kumar K.V 				struct ext4_inode *raw_inode,
43380fc1b451SAneesh Kumar K.V 				struct ext4_inode_info *ei)
43390fc1b451SAneesh Kumar K.V {
43400fc1b451SAneesh Kumar K.V 	struct inode *inode = &(ei->vfs_inode);
43410fc1b451SAneesh Kumar K.V 	u64 i_blocks = inode->i_blocks;
43420fc1b451SAneesh Kumar K.V 	struct super_block *sb = inode->i_sb;
43430fc1b451SAneesh Kumar K.V 
43440fc1b451SAneesh Kumar K.V 	if (i_blocks <= ~0U) {
43450fc1b451SAneesh Kumar K.V 		/*
43464907cb7bSAnatol Pomozov 		 * i_blocks can be represented in a 32 bit variable
43470fc1b451SAneesh Kumar K.V 		 * as multiple of 512 bytes
43480fc1b451SAneesh Kumar K.V 		 */
43498180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
43500fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = 0;
435184a8dce2SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4352f287a1a5STheodore Ts'o 		return 0;
4353f287a1a5STheodore Ts'o 	}
4354f287a1a5STheodore Ts'o 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4355f287a1a5STheodore Ts'o 		return -EFBIG;
4356f287a1a5STheodore Ts'o 
4357f287a1a5STheodore Ts'o 	if (i_blocks <= 0xffffffffffffULL) {
43580fc1b451SAneesh Kumar K.V 		/*
43590fc1b451SAneesh Kumar K.V 		 * i_blocks can be represented in a 48 bit variable
43600fc1b451SAneesh Kumar K.V 		 * as multiple of 512 bytes
43610fc1b451SAneesh Kumar K.V 		 */
43628180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
43630fc1b451SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
436484a8dce2SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
43650fc1b451SAneesh Kumar K.V 	} else {
436684a8dce2SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
43678180a562SAneesh Kumar K.V 		/* i_block is stored in file system block size */
43688180a562SAneesh Kumar K.V 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
43698180a562SAneesh Kumar K.V 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
43708180a562SAneesh Kumar K.V 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
43710fc1b451SAneesh Kumar K.V 	}
4372f287a1a5STheodore Ts'o 	return 0;
43730fc1b451SAneesh Kumar K.V }
43740fc1b451SAneesh Kumar K.V 
4375ac27a0ecSDave Kleikamp /*
4376ac27a0ecSDave Kleikamp  * Post the struct inode info into an on-disk inode location in the
4377ac27a0ecSDave Kleikamp  * buffer-cache.  This gobbles the caller's reference to the
4378ac27a0ecSDave Kleikamp  * buffer_head in the inode location struct.
4379ac27a0ecSDave Kleikamp  *
4380ac27a0ecSDave Kleikamp  * The caller must have write access to iloc->bh.
4381ac27a0ecSDave Kleikamp  */
4382617ba13bSMingming Cao static int ext4_do_update_inode(handle_t *handle,
4383ac27a0ecSDave Kleikamp 				struct inode *inode,
4384830156c7SFrank Mayhar 				struct ext4_iloc *iloc)
4385ac27a0ecSDave Kleikamp {
4386617ba13bSMingming Cao 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4387617ba13bSMingming Cao 	struct ext4_inode_info *ei = EXT4_I(inode);
4388ac27a0ecSDave Kleikamp 	struct buffer_head *bh = iloc->bh;
4389ac27a0ecSDave Kleikamp 	int err = 0, rc, block;
4390b71fc079SJan Kara 	int need_datasync = 0;
439108cefc7aSEric W. Biederman 	uid_t i_uid;
439208cefc7aSEric W. Biederman 	gid_t i_gid;
4393ac27a0ecSDave Kleikamp 
4394ac27a0ecSDave Kleikamp 	/* For fields not not tracking in the in-memory inode,
4395ac27a0ecSDave Kleikamp 	 * initialise them to zero for new inodes. */
439619f5fb7aSTheodore Ts'o 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4397617ba13bSMingming Cao 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4398ac27a0ecSDave Kleikamp 
4399ff9ddf7eSJan Kara 	ext4_get_inode_flags(ei);
4400ac27a0ecSDave Kleikamp 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
440108cefc7aSEric W. Biederman 	i_uid = i_uid_read(inode);
440208cefc7aSEric W. Biederman 	i_gid = i_gid_read(inode);
4403ac27a0ecSDave Kleikamp 	if (!(test_opt(inode->i_sb, NO_UID32))) {
440408cefc7aSEric W. Biederman 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
440508cefc7aSEric W. Biederman 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4406ac27a0ecSDave Kleikamp /*
4407ac27a0ecSDave Kleikamp  * Fix up interoperability with old kernels. Otherwise, old inodes get
4408ac27a0ecSDave Kleikamp  * re-used with the upper 16 bits of the uid/gid intact
4409ac27a0ecSDave Kleikamp  */
4410ac27a0ecSDave Kleikamp 		if (!ei->i_dtime) {
4411ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high =
441208cefc7aSEric W. Biederman 				cpu_to_le16(high_16_bits(i_uid));
4413ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high =
441408cefc7aSEric W. Biederman 				cpu_to_le16(high_16_bits(i_gid));
4415ac27a0ecSDave Kleikamp 		} else {
4416ac27a0ecSDave Kleikamp 			raw_inode->i_uid_high = 0;
4417ac27a0ecSDave Kleikamp 			raw_inode->i_gid_high = 0;
4418ac27a0ecSDave Kleikamp 		}
4419ac27a0ecSDave Kleikamp 	} else {
442008cefc7aSEric W. Biederman 		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
442108cefc7aSEric W. Biederman 		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4422ac27a0ecSDave Kleikamp 		raw_inode->i_uid_high = 0;
4423ac27a0ecSDave Kleikamp 		raw_inode->i_gid_high = 0;
4424ac27a0ecSDave Kleikamp 	}
4425ac27a0ecSDave Kleikamp 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4426ef7f3835SKalpak Shah 
4427ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4428ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4429ef7f3835SKalpak Shah 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4430ef7f3835SKalpak Shah 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4431ef7f3835SKalpak Shah 
44320fc1b451SAneesh Kumar K.V 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
44330fc1b451SAneesh Kumar K.V 		goto out_brelse;
4434ac27a0ecSDave Kleikamp 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4435353eb83cSTheodore Ts'o 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
44369b8f1f01SMingming Cao 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
44379b8f1f01SMingming Cao 	    cpu_to_le32(EXT4_OS_HURD))
4438a1ddeb7eSBadari Pulavarty 		raw_inode->i_file_acl_high =
4439a1ddeb7eSBadari Pulavarty 			cpu_to_le16(ei->i_file_acl >> 32);
44407973c0c1SAneesh Kumar K.V 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4441b71fc079SJan Kara 	if (ei->i_disksize != ext4_isize(raw_inode)) {
4442a48380f7SAneesh Kumar K.V 		ext4_isize_set(raw_inode, ei->i_disksize);
4443b71fc079SJan Kara 		need_datasync = 1;
4444b71fc079SJan Kara 	}
4445ac27a0ecSDave Kleikamp 	if (ei->i_disksize > 0x7fffffffULL) {
4446ac27a0ecSDave Kleikamp 		struct super_block *sb = inode->i_sb;
4447617ba13bSMingming Cao 		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4448617ba13bSMingming Cao 				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4449617ba13bSMingming Cao 				EXT4_SB(sb)->s_es->s_rev_level ==
4450617ba13bSMingming Cao 				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4451ac27a0ecSDave Kleikamp 			/* If this is the first large file
4452ac27a0ecSDave Kleikamp 			 * created, add a flag to the superblock.
4453ac27a0ecSDave Kleikamp 			 */
4454617ba13bSMingming Cao 			err = ext4_journal_get_write_access(handle,
4455617ba13bSMingming Cao 					EXT4_SB(sb)->s_sbh);
4456ac27a0ecSDave Kleikamp 			if (err)
4457ac27a0ecSDave Kleikamp 				goto out_brelse;
4458617ba13bSMingming Cao 			ext4_update_dynamic_rev(sb);
4459617ba13bSMingming Cao 			EXT4_SET_RO_COMPAT_FEATURE(sb,
4460617ba13bSMingming Cao 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
44610390131bSFrank Mayhar 			ext4_handle_sync(handle);
4462b50924c2SArtem Bityutskiy 			err = ext4_handle_dirty_super(handle, sb);
4463ac27a0ecSDave Kleikamp 		}
4464ac27a0ecSDave Kleikamp 	}
4465ac27a0ecSDave Kleikamp 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4466ac27a0ecSDave Kleikamp 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4467ac27a0ecSDave Kleikamp 		if (old_valid_dev(inode->i_rdev)) {
4468ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] =
4469ac27a0ecSDave Kleikamp 				cpu_to_le32(old_encode_dev(inode->i_rdev));
4470ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] = 0;
4471ac27a0ecSDave Kleikamp 		} else {
4472ac27a0ecSDave Kleikamp 			raw_inode->i_block[0] = 0;
4473ac27a0ecSDave Kleikamp 			raw_inode->i_block[1] =
4474ac27a0ecSDave Kleikamp 				cpu_to_le32(new_encode_dev(inode->i_rdev));
4475ac27a0ecSDave Kleikamp 			raw_inode->i_block[2] = 0;
4476ac27a0ecSDave Kleikamp 		}
4477f19d5870STao Ma 	} else if (!ext4_has_inline_data(inode)) {
4478de9a55b8STheodore Ts'o 		for (block = 0; block < EXT4_N_BLOCKS; block++)
4479ac27a0ecSDave Kleikamp 			raw_inode->i_block[block] = ei->i_data[block];
4480f19d5870STao Ma 	}
4481ac27a0ecSDave Kleikamp 
448225ec56b5SJean Noel Cordenner 	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
448325ec56b5SJean Noel Cordenner 	if (ei->i_extra_isize) {
448425ec56b5SJean Noel Cordenner 		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
448525ec56b5SJean Noel Cordenner 			raw_inode->i_version_hi =
448625ec56b5SJean Noel Cordenner 			cpu_to_le32(inode->i_version >> 32);
4487ac27a0ecSDave Kleikamp 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
448825ec56b5SJean Noel Cordenner 	}
448925ec56b5SJean Noel Cordenner 
4490814525f4SDarrick J. Wong 	ext4_inode_csum_set(inode, raw_inode, ei);
4491814525f4SDarrick J. Wong 
44920390131bSFrank Mayhar 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
449373b50c1cSCurt Wohlgemuth 	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4494ac27a0ecSDave Kleikamp 	if (!err)
4495ac27a0ecSDave Kleikamp 		err = rc;
449619f5fb7aSTheodore Ts'o 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4497ac27a0ecSDave Kleikamp 
4498b71fc079SJan Kara 	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
4499ac27a0ecSDave Kleikamp out_brelse:
4500ac27a0ecSDave Kleikamp 	brelse(bh);
4501617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4502ac27a0ecSDave Kleikamp 	return err;
4503ac27a0ecSDave Kleikamp }
4504ac27a0ecSDave Kleikamp 
4505ac27a0ecSDave Kleikamp /*
4506617ba13bSMingming Cao  * ext4_write_inode()
4507ac27a0ecSDave Kleikamp  *
4508ac27a0ecSDave Kleikamp  * We are called from a few places:
4509ac27a0ecSDave Kleikamp  *
4510ac27a0ecSDave Kleikamp  * - Within generic_file_write() for O_SYNC files.
4511ac27a0ecSDave Kleikamp  *   Here, there will be no transaction running. We wait for any running
45124907cb7bSAnatol Pomozov  *   transaction to commit.
4513ac27a0ecSDave Kleikamp  *
4514ac27a0ecSDave Kleikamp  * - Within sys_sync(), kupdate and such.
4515ac27a0ecSDave Kleikamp  *   We wait on commit, if tol to.
4516ac27a0ecSDave Kleikamp  *
4517ac27a0ecSDave Kleikamp  * - Within prune_icache() (PF_MEMALLOC == true)
4518ac27a0ecSDave Kleikamp  *   Here we simply return.  We can't afford to block kswapd on the
4519ac27a0ecSDave Kleikamp  *   journal commit.
4520ac27a0ecSDave Kleikamp  *
4521ac27a0ecSDave Kleikamp  * In all cases it is actually safe for us to return without doing anything,
4522ac27a0ecSDave Kleikamp  * because the inode has been copied into a raw inode buffer in
4523617ba13bSMingming Cao  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4524ac27a0ecSDave Kleikamp  * knfsd.
4525ac27a0ecSDave Kleikamp  *
4526ac27a0ecSDave Kleikamp  * Note that we are absolutely dependent upon all inode dirtiers doing the
4527ac27a0ecSDave Kleikamp  * right thing: they *must* call mark_inode_dirty() after dirtying info in
4528ac27a0ecSDave Kleikamp  * which we are interested.
4529ac27a0ecSDave Kleikamp  *
4530ac27a0ecSDave Kleikamp  * It would be a bug for them to not do this.  The code:
4531ac27a0ecSDave Kleikamp  *
4532ac27a0ecSDave Kleikamp  *	mark_inode_dirty(inode)
4533ac27a0ecSDave Kleikamp  *	stuff();
4534ac27a0ecSDave Kleikamp  *	inode->i_size = expr;
4535ac27a0ecSDave Kleikamp  *
4536ac27a0ecSDave Kleikamp  * is in error because a kswapd-driven write_inode() could occur while
4537ac27a0ecSDave Kleikamp  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4538ac27a0ecSDave Kleikamp  * will no longer be on the superblock's dirty inode list.
4539ac27a0ecSDave Kleikamp  */
4540a9185b41SChristoph Hellwig int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4541ac27a0ecSDave Kleikamp {
454291ac6f43SFrank Mayhar 	int err;
454391ac6f43SFrank Mayhar 
4544ac27a0ecSDave Kleikamp 	if (current->flags & PF_MEMALLOC)
4545ac27a0ecSDave Kleikamp 		return 0;
4546ac27a0ecSDave Kleikamp 
454791ac6f43SFrank Mayhar 	if (EXT4_SB(inode->i_sb)->s_journal) {
4548617ba13bSMingming Cao 		if (ext4_journal_current_handle()) {
4549b38bd33aSMingming Cao 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4550ac27a0ecSDave Kleikamp 			dump_stack();
4551ac27a0ecSDave Kleikamp 			return -EIO;
4552ac27a0ecSDave Kleikamp 		}
4553ac27a0ecSDave Kleikamp 
4554a9185b41SChristoph Hellwig 		if (wbc->sync_mode != WB_SYNC_ALL)
4555ac27a0ecSDave Kleikamp 			return 0;
4556ac27a0ecSDave Kleikamp 
455791ac6f43SFrank Mayhar 		err = ext4_force_commit(inode->i_sb);
455891ac6f43SFrank Mayhar 	} else {
455991ac6f43SFrank Mayhar 		struct ext4_iloc iloc;
456091ac6f43SFrank Mayhar 
45618b472d73SCurt Wohlgemuth 		err = __ext4_get_inode_loc(inode, &iloc, 0);
456291ac6f43SFrank Mayhar 		if (err)
456391ac6f43SFrank Mayhar 			return err;
4564a9185b41SChristoph Hellwig 		if (wbc->sync_mode == WB_SYNC_ALL)
4565830156c7SFrank Mayhar 			sync_dirty_buffer(iloc.bh);
4566830156c7SFrank Mayhar 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4567c398eda0STheodore Ts'o 			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4568c398eda0STheodore Ts'o 					 "IO error syncing inode");
4569830156c7SFrank Mayhar 			err = -EIO;
4570830156c7SFrank Mayhar 		}
4571fd2dd9fbSCurt Wohlgemuth 		brelse(iloc.bh);
457291ac6f43SFrank Mayhar 	}
457391ac6f43SFrank Mayhar 	return err;
4574ac27a0ecSDave Kleikamp }
4575ac27a0ecSDave Kleikamp 
4576ac27a0ecSDave Kleikamp /*
457753e87268SJan Kara  * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
457853e87268SJan Kara  * buffers that are attached to a page stradding i_size and are undergoing
457953e87268SJan Kara  * commit. In that case we have to wait for commit to finish and try again.
458053e87268SJan Kara  */
458153e87268SJan Kara static void ext4_wait_for_tail_page_commit(struct inode *inode)
458253e87268SJan Kara {
458353e87268SJan Kara 	struct page *page;
458453e87268SJan Kara 	unsigned offset;
458553e87268SJan Kara 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
458653e87268SJan Kara 	tid_t commit_tid = 0;
458753e87268SJan Kara 	int ret;
458853e87268SJan Kara 
458953e87268SJan Kara 	offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
459053e87268SJan Kara 	/*
459153e87268SJan Kara 	 * All buffers in the last page remain valid? Then there's nothing to
459253e87268SJan Kara 	 * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
459353e87268SJan Kara 	 * blocksize case
459453e87268SJan Kara 	 */
459553e87268SJan Kara 	if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
459653e87268SJan Kara 		return;
459753e87268SJan Kara 	while (1) {
459853e87268SJan Kara 		page = find_lock_page(inode->i_mapping,
459953e87268SJan Kara 				      inode->i_size >> PAGE_CACHE_SHIFT);
460053e87268SJan Kara 		if (!page)
460153e87268SJan Kara 			return;
460253e87268SJan Kara 		ret = __ext4_journalled_invalidatepage(page, offset);
460353e87268SJan Kara 		unlock_page(page);
460453e87268SJan Kara 		page_cache_release(page);
460553e87268SJan Kara 		if (ret != -EBUSY)
460653e87268SJan Kara 			return;
460753e87268SJan Kara 		commit_tid = 0;
460853e87268SJan Kara 		read_lock(&journal->j_state_lock);
460953e87268SJan Kara 		if (journal->j_committing_transaction)
461053e87268SJan Kara 			commit_tid = journal->j_committing_transaction->t_tid;
461153e87268SJan Kara 		read_unlock(&journal->j_state_lock);
461253e87268SJan Kara 		if (commit_tid)
461353e87268SJan Kara 			jbd2_log_wait_commit(journal, commit_tid);
461453e87268SJan Kara 	}
461553e87268SJan Kara }
461653e87268SJan Kara 
461753e87268SJan Kara /*
4618617ba13bSMingming Cao  * ext4_setattr()
4619ac27a0ecSDave Kleikamp  *
4620ac27a0ecSDave Kleikamp  * Called from notify_change.
4621ac27a0ecSDave Kleikamp  *
4622ac27a0ecSDave Kleikamp  * We want to trap VFS attempts to truncate the file as soon as
4623ac27a0ecSDave Kleikamp  * possible.  In particular, we want to make sure that when the VFS
4624ac27a0ecSDave Kleikamp  * shrinks i_size, we put the inode on the orphan list and modify
4625ac27a0ecSDave Kleikamp  * i_disksize immediately, so that during the subsequent flushing of
4626ac27a0ecSDave Kleikamp  * dirty pages and freeing of disk blocks, we can guarantee that any
4627ac27a0ecSDave Kleikamp  * commit will leave the blocks being flushed in an unused state on
4628ac27a0ecSDave Kleikamp  * disk.  (On recovery, the inode will get truncated and the blocks will
4629ac27a0ecSDave Kleikamp  * be freed, so we have a strong guarantee that no future commit will
4630ac27a0ecSDave Kleikamp  * leave these blocks visible to the user.)
4631ac27a0ecSDave Kleikamp  *
4632678aaf48SJan Kara  * Another thing we have to assure is that if we are in ordered mode
4633678aaf48SJan Kara  * and inode is still attached to the committing transaction, we must
4634678aaf48SJan Kara  * we start writeout of all the dirty pages which are being truncated.
4635678aaf48SJan Kara  * This way we are sure that all the data written in the previous
4636678aaf48SJan Kara  * transaction are already on disk (truncate waits for pages under
4637678aaf48SJan Kara  * writeback).
4638678aaf48SJan Kara  *
4639678aaf48SJan Kara  * Called with inode->i_mutex down.
4640ac27a0ecSDave Kleikamp  */
4641617ba13bSMingming Cao int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4642ac27a0ecSDave Kleikamp {
4643ac27a0ecSDave Kleikamp 	struct inode *inode = dentry->d_inode;
4644ac27a0ecSDave Kleikamp 	int error, rc = 0;
46453d287de3SDmitry Monakhov 	int orphan = 0;
4646ac27a0ecSDave Kleikamp 	const unsigned int ia_valid = attr->ia_valid;
4647ac27a0ecSDave Kleikamp 
4648ac27a0ecSDave Kleikamp 	error = inode_change_ok(inode, attr);
4649ac27a0ecSDave Kleikamp 	if (error)
4650ac27a0ecSDave Kleikamp 		return error;
4651ac27a0ecSDave Kleikamp 
465212755627SDmitry Monakhov 	if (is_quota_modification(inode, attr))
4653871a2931SChristoph Hellwig 		dquot_initialize(inode);
465408cefc7aSEric W. Biederman 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
465508cefc7aSEric W. Biederman 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4656ac27a0ecSDave Kleikamp 		handle_t *handle;
4657ac27a0ecSDave Kleikamp 
4658ac27a0ecSDave Kleikamp 		/* (user+group)*(old+new) structure, inode write (sb,
4659ac27a0ecSDave Kleikamp 		 * inode block, ? - but truncate inode update has it) */
46609924a92aSTheodore Ts'o 		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
46619924a92aSTheodore Ts'o 			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
4662194074acSDmitry Monakhov 			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
4663ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4664ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4665ac27a0ecSDave Kleikamp 			goto err_out;
4666ac27a0ecSDave Kleikamp 		}
4667b43fa828SChristoph Hellwig 		error = dquot_transfer(inode, attr);
4668ac27a0ecSDave Kleikamp 		if (error) {
4669617ba13bSMingming Cao 			ext4_journal_stop(handle);
4670ac27a0ecSDave Kleikamp 			return error;
4671ac27a0ecSDave Kleikamp 		}
4672ac27a0ecSDave Kleikamp 		/* Update corresponding info in inode so that everything is in
4673ac27a0ecSDave Kleikamp 		 * one transaction */
4674ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_UID)
4675ac27a0ecSDave Kleikamp 			inode->i_uid = attr->ia_uid;
4676ac27a0ecSDave Kleikamp 		if (attr->ia_valid & ATTR_GID)
4677ac27a0ecSDave Kleikamp 			inode->i_gid = attr->ia_gid;
4678617ba13bSMingming Cao 		error = ext4_mark_inode_dirty(handle, inode);
4679617ba13bSMingming Cao 		ext4_journal_stop(handle);
4680ac27a0ecSDave Kleikamp 	}
4681ac27a0ecSDave Kleikamp 
4682e2b46574SEric Sandeen 	if (attr->ia_valid & ATTR_SIZE) {
4683562c72aaSChristoph Hellwig 
468412e9b892SDmitry Monakhov 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4685e2b46574SEric Sandeen 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4686e2b46574SEric Sandeen 
46870c095c7fSTheodore Ts'o 			if (attr->ia_size > sbi->s_bitmap_maxbytes)
46880c095c7fSTheodore Ts'o 				return -EFBIG;
4689e2b46574SEric Sandeen 		}
4690e2b46574SEric Sandeen 	}
4691e2b46574SEric Sandeen 
4692ac27a0ecSDave Kleikamp 	if (S_ISREG(inode->i_mode) &&
4693c8d46e41SJiaying Zhang 	    attr->ia_valid & ATTR_SIZE &&
4694072bd7eaSTheodore Ts'o 	    (attr->ia_size < inode->i_size)) {
4695ac27a0ecSDave Kleikamp 		handle_t *handle;
4696ac27a0ecSDave Kleikamp 
46979924a92aSTheodore Ts'o 		handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
4698ac27a0ecSDave Kleikamp 		if (IS_ERR(handle)) {
4699ac27a0ecSDave Kleikamp 			error = PTR_ERR(handle);
4700ac27a0ecSDave Kleikamp 			goto err_out;
4701ac27a0ecSDave Kleikamp 		}
47023d287de3SDmitry Monakhov 		if (ext4_handle_valid(handle)) {
4703617ba13bSMingming Cao 			error = ext4_orphan_add(handle, inode);
47043d287de3SDmitry Monakhov 			orphan = 1;
47053d287de3SDmitry Monakhov 		}
4706617ba13bSMingming Cao 		EXT4_I(inode)->i_disksize = attr->ia_size;
4707617ba13bSMingming Cao 		rc = ext4_mark_inode_dirty(handle, inode);
4708ac27a0ecSDave Kleikamp 		if (!error)
4709ac27a0ecSDave Kleikamp 			error = rc;
4710617ba13bSMingming Cao 		ext4_journal_stop(handle);
4711678aaf48SJan Kara 
4712678aaf48SJan Kara 		if (ext4_should_order_data(inode)) {
4713678aaf48SJan Kara 			error = ext4_begin_ordered_truncate(inode,
4714678aaf48SJan Kara 							    attr->ia_size);
4715678aaf48SJan Kara 			if (error) {
4716678aaf48SJan Kara 				/* Do as much error cleanup as possible */
47179924a92aSTheodore Ts'o 				handle = ext4_journal_start(inode,
47189924a92aSTheodore Ts'o 							    EXT4_HT_INODE, 3);
4719678aaf48SJan Kara 				if (IS_ERR(handle)) {
4720678aaf48SJan Kara 					ext4_orphan_del(NULL, inode);
4721678aaf48SJan Kara 					goto err_out;
4722678aaf48SJan Kara 				}
4723678aaf48SJan Kara 				ext4_orphan_del(handle, inode);
47243d287de3SDmitry Monakhov 				orphan = 0;
4725678aaf48SJan Kara 				ext4_journal_stop(handle);
4726678aaf48SJan Kara 				goto err_out;
4727678aaf48SJan Kara 			}
4728678aaf48SJan Kara 		}
4729ac27a0ecSDave Kleikamp 	}
4730ac27a0ecSDave Kleikamp 
4731072bd7eaSTheodore Ts'o 	if (attr->ia_valid & ATTR_SIZE) {
473253e87268SJan Kara 		if (attr->ia_size != inode->i_size) {
473353e87268SJan Kara 			loff_t oldsize = inode->i_size;
473453e87268SJan Kara 
473553e87268SJan Kara 			i_size_write(inode, attr->ia_size);
473653e87268SJan Kara 			/*
473753e87268SJan Kara 			 * Blocks are going to be removed from the inode. Wait
473853e87268SJan Kara 			 * for dio in flight.  Temporarily disable
473953e87268SJan Kara 			 * dioread_nolock to prevent livelock.
474053e87268SJan Kara 			 */
47411b65007eSDmitry Monakhov 			if (orphan) {
474253e87268SJan Kara 				if (!ext4_should_journal_data(inode)) {
47431b65007eSDmitry Monakhov 					ext4_inode_block_unlocked_dio(inode);
47441c9114f9SDmitry Monakhov 					inode_dio_wait(inode);
47451b65007eSDmitry Monakhov 					ext4_inode_resume_unlocked_dio(inode);
474653e87268SJan Kara 				} else
474753e87268SJan Kara 					ext4_wait_for_tail_page_commit(inode);
47481b65007eSDmitry Monakhov 			}
474953e87268SJan Kara 			/*
475053e87268SJan Kara 			 * Truncate pagecache after we've waited for commit
475153e87268SJan Kara 			 * in data=journal mode to make pages freeable.
475253e87268SJan Kara 			 */
475353e87268SJan Kara 			truncate_pagecache(inode, oldsize, inode->i_size);
47541c9114f9SDmitry Monakhov 		}
4755072bd7eaSTheodore Ts'o 		ext4_truncate(inode);
4756072bd7eaSTheodore Ts'o 	}
4757ac27a0ecSDave Kleikamp 
47581025774cSChristoph Hellwig 	if (!rc) {
47591025774cSChristoph Hellwig 		setattr_copy(inode, attr);
47601025774cSChristoph Hellwig 		mark_inode_dirty(inode);
47611025774cSChristoph Hellwig 	}
47621025774cSChristoph Hellwig 
47631025774cSChristoph Hellwig 	/*
47641025774cSChristoph Hellwig 	 * If the call to ext4_truncate failed to get a transaction handle at
47651025774cSChristoph Hellwig 	 * all, we need to clean up the in-core orphan list manually.
47661025774cSChristoph Hellwig 	 */
47673d287de3SDmitry Monakhov 	if (orphan && inode->i_nlink)
4768617ba13bSMingming Cao 		ext4_orphan_del(NULL, inode);
4769ac27a0ecSDave Kleikamp 
4770ac27a0ecSDave Kleikamp 	if (!rc && (ia_valid & ATTR_MODE))
4771617ba13bSMingming Cao 		rc = ext4_acl_chmod(inode);
4772ac27a0ecSDave Kleikamp 
4773ac27a0ecSDave Kleikamp err_out:
4774617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, error);
4775ac27a0ecSDave Kleikamp 	if (!error)
4776ac27a0ecSDave Kleikamp 		error = rc;
4777ac27a0ecSDave Kleikamp 	return error;
4778ac27a0ecSDave Kleikamp }
4779ac27a0ecSDave Kleikamp 
47803e3398a0SMingming Cao int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
47813e3398a0SMingming Cao 		 struct kstat *stat)
47823e3398a0SMingming Cao {
47833e3398a0SMingming Cao 	struct inode *inode;
47843e3398a0SMingming Cao 	unsigned long delalloc_blocks;
47853e3398a0SMingming Cao 
47863e3398a0SMingming Cao 	inode = dentry->d_inode;
47873e3398a0SMingming Cao 	generic_fillattr(inode, stat);
47883e3398a0SMingming Cao 
47893e3398a0SMingming Cao 	/*
47903e3398a0SMingming Cao 	 * We can't update i_blocks if the block allocation is delayed
47913e3398a0SMingming Cao 	 * otherwise in the case of system crash before the real block
47923e3398a0SMingming Cao 	 * allocation is done, we will have i_blocks inconsistent with
47933e3398a0SMingming Cao 	 * on-disk file blocks.
47943e3398a0SMingming Cao 	 * We always keep i_blocks updated together with real
47953e3398a0SMingming Cao 	 * allocation. But to not confuse with user, stat
47963e3398a0SMingming Cao 	 * will return the blocks that include the delayed allocation
47973e3398a0SMingming Cao 	 * blocks for this file.
47983e3398a0SMingming Cao 	 */
479996607551STao Ma 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
480096607551STao Ma 				EXT4_I(inode)->i_reserved_data_blocks);
48013e3398a0SMingming Cao 
48023e3398a0SMingming Cao 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
48033e3398a0SMingming Cao 	return 0;
48043e3398a0SMingming Cao }
4805ac27a0ecSDave Kleikamp 
4806a02908f1SMingming Cao static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4807a02908f1SMingming Cao {
480812e9b892SDmitry Monakhov 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
48098bb2b247SAmir Goldstein 		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4810ac51d837STheodore Ts'o 	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4811a02908f1SMingming Cao }
4812ac51d837STheodore Ts'o 
4813a02908f1SMingming Cao /*
4814a02908f1SMingming Cao  * Account for index blocks, block groups bitmaps and block group
4815a02908f1SMingming Cao  * descriptor blocks if modify datablocks and index blocks
4816a02908f1SMingming Cao  * worse case, the indexs blocks spread over different block groups
4817a02908f1SMingming Cao  *
4818a02908f1SMingming Cao  * If datablocks are discontiguous, they are possible to spread over
48194907cb7bSAnatol Pomozov  * different block groups too. If they are contiguous, with flexbg,
4820a02908f1SMingming Cao  * they could still across block group boundary.
4821a02908f1SMingming Cao  *
4822a02908f1SMingming Cao  * Also account for superblock, inode, quota and xattr blocks
4823a02908f1SMingming Cao  */
48241f109d5aSTheodore Ts'o static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4825a02908f1SMingming Cao {
48268df9675fSTheodore Ts'o 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
48278df9675fSTheodore Ts'o 	int gdpblocks;
4828a02908f1SMingming Cao 	int idxblocks;
4829a02908f1SMingming Cao 	int ret = 0;
4830a02908f1SMingming Cao 
4831a02908f1SMingming Cao 	/*
4832a02908f1SMingming Cao 	 * How many index blocks need to touch to modify nrblocks?
4833a02908f1SMingming Cao 	 * The "Chunk" flag indicating whether the nrblocks is
4834a02908f1SMingming Cao 	 * physically contiguous on disk
4835a02908f1SMingming Cao 	 *
4836a02908f1SMingming Cao 	 * For Direct IO and fallocate, they calls get_block to allocate
4837a02908f1SMingming Cao 	 * one single extent at a time, so they could set the "Chunk" flag
4838a02908f1SMingming Cao 	 */
4839a02908f1SMingming Cao 	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4840a02908f1SMingming Cao 
4841a02908f1SMingming Cao 	ret = idxblocks;
4842a02908f1SMingming Cao 
4843a02908f1SMingming Cao 	/*
4844a02908f1SMingming Cao 	 * Now let's see how many group bitmaps and group descriptors need
4845a02908f1SMingming Cao 	 * to account
4846a02908f1SMingming Cao 	 */
4847a02908f1SMingming Cao 	groups = idxblocks;
4848a02908f1SMingming Cao 	if (chunk)
4849a02908f1SMingming Cao 		groups += 1;
4850ac27a0ecSDave Kleikamp 	else
4851a02908f1SMingming Cao 		groups += nrblocks;
4852ac27a0ecSDave Kleikamp 
4853a02908f1SMingming Cao 	gdpblocks = groups;
48548df9675fSTheodore Ts'o 	if (groups > ngroups)
48558df9675fSTheodore Ts'o 		groups = ngroups;
4856a02908f1SMingming Cao 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4857a02908f1SMingming Cao 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4858a02908f1SMingming Cao 
4859a02908f1SMingming Cao 	/* bitmaps and block group descriptor blocks */
4860a02908f1SMingming Cao 	ret += groups + gdpblocks;
4861a02908f1SMingming Cao 
4862a02908f1SMingming Cao 	/* Blocks for super block, inode, quota and xattr blocks */
4863a02908f1SMingming Cao 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4864ac27a0ecSDave Kleikamp 
4865ac27a0ecSDave Kleikamp 	return ret;
4866ac27a0ecSDave Kleikamp }
4867ac27a0ecSDave Kleikamp 
4868ac27a0ecSDave Kleikamp /*
486925985edcSLucas De Marchi  * Calculate the total number of credits to reserve to fit
4870f3bd1f3fSMingming Cao  * the modification of a single pages into a single transaction,
4871f3bd1f3fSMingming Cao  * which may include multiple chunks of block allocations.
4872a02908f1SMingming Cao  *
4873525f4ed8SMingming Cao  * This could be called via ext4_write_begin()
4874a02908f1SMingming Cao  *
4875525f4ed8SMingming Cao  * We need to consider the worse case, when
4876a02908f1SMingming Cao  * one new block per extent.
4877a02908f1SMingming Cao  */
4878a02908f1SMingming Cao int ext4_writepage_trans_blocks(struct inode *inode)
4879a02908f1SMingming Cao {
4880a02908f1SMingming Cao 	int bpp = ext4_journal_blocks_per_page(inode);
4881a02908f1SMingming Cao 	int ret;
4882a02908f1SMingming Cao 
4883a02908f1SMingming Cao 	ret = ext4_meta_trans_blocks(inode, bpp, 0);
4884a02908f1SMingming Cao 
4885a02908f1SMingming Cao 	/* Account for data blocks for journalled mode */
4886a02908f1SMingming Cao 	if (ext4_should_journal_data(inode))
4887a02908f1SMingming Cao 		ret += bpp;
4888a02908f1SMingming Cao 	return ret;
4889a02908f1SMingming Cao }
4890f3bd1f3fSMingming Cao 
4891f3bd1f3fSMingming Cao /*
4892f3bd1f3fSMingming Cao  * Calculate the journal credits for a chunk of data modification.
4893f3bd1f3fSMingming Cao  *
4894f3bd1f3fSMingming Cao  * This is called from DIO, fallocate or whoever calling
489579e83036SEric Sandeen  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4896f3bd1f3fSMingming Cao  *
4897f3bd1f3fSMingming Cao  * journal buffers for data blocks are not included here, as DIO
4898f3bd1f3fSMingming Cao  * and fallocate do no need to journal data buffers.
4899f3bd1f3fSMingming Cao  */
4900f3bd1f3fSMingming Cao int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4901f3bd1f3fSMingming Cao {
4902f3bd1f3fSMingming Cao 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
4903f3bd1f3fSMingming Cao }
4904f3bd1f3fSMingming Cao 
4905a02908f1SMingming Cao /*
4906617ba13bSMingming Cao  * The caller must have previously called ext4_reserve_inode_write().
4907ac27a0ecSDave Kleikamp  * Give this, we know that the caller already has write access to iloc->bh.
4908ac27a0ecSDave Kleikamp  */
4909617ba13bSMingming Cao int ext4_mark_iloc_dirty(handle_t *handle,
4910617ba13bSMingming Cao 			 struct inode *inode, struct ext4_iloc *iloc)
4911ac27a0ecSDave Kleikamp {
4912ac27a0ecSDave Kleikamp 	int err = 0;
4913ac27a0ecSDave Kleikamp 
4914c64db50eSTheodore Ts'o 	if (IS_I_VERSION(inode))
491525ec56b5SJean Noel Cordenner 		inode_inc_iversion(inode);
491625ec56b5SJean Noel Cordenner 
4917ac27a0ecSDave Kleikamp 	/* the do_update_inode consumes one bh->b_count */
4918ac27a0ecSDave Kleikamp 	get_bh(iloc->bh);
4919ac27a0ecSDave Kleikamp 
4920dab291afSMingming Cao 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4921830156c7SFrank Mayhar 	err = ext4_do_update_inode(handle, inode, iloc);
4922ac27a0ecSDave Kleikamp 	put_bh(iloc->bh);
4923ac27a0ecSDave Kleikamp 	return err;
4924ac27a0ecSDave Kleikamp }
4925ac27a0ecSDave Kleikamp 
4926ac27a0ecSDave Kleikamp /*
4927ac27a0ecSDave Kleikamp  * On success, We end up with an outstanding reference count against
4928ac27a0ecSDave Kleikamp  * iloc->bh.  This _must_ be cleaned up later.
4929ac27a0ecSDave Kleikamp  */
4930ac27a0ecSDave Kleikamp 
4931ac27a0ecSDave Kleikamp int
4932617ba13bSMingming Cao ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4933617ba13bSMingming Cao 			 struct ext4_iloc *iloc)
4934ac27a0ecSDave Kleikamp {
49350390131bSFrank Mayhar 	int err;
49360390131bSFrank Mayhar 
4937617ba13bSMingming Cao 	err = ext4_get_inode_loc(inode, iloc);
4938ac27a0ecSDave Kleikamp 	if (!err) {
4939ac27a0ecSDave Kleikamp 		BUFFER_TRACE(iloc->bh, "get_write_access");
4940617ba13bSMingming Cao 		err = ext4_journal_get_write_access(handle, iloc->bh);
4941ac27a0ecSDave Kleikamp 		if (err) {
4942ac27a0ecSDave Kleikamp 			brelse(iloc->bh);
4943ac27a0ecSDave Kleikamp 			iloc->bh = NULL;
4944ac27a0ecSDave Kleikamp 		}
4945ac27a0ecSDave Kleikamp 	}
4946617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
4947ac27a0ecSDave Kleikamp 	return err;
4948ac27a0ecSDave Kleikamp }
4949ac27a0ecSDave Kleikamp 
4950ac27a0ecSDave Kleikamp /*
49516dd4ee7cSKalpak Shah  * Expand an inode by new_extra_isize bytes.
49526dd4ee7cSKalpak Shah  * Returns 0 on success or negative error number on failure.
49536dd4ee7cSKalpak Shah  */
49541d03ec98SAneesh Kumar K.V static int ext4_expand_extra_isize(struct inode *inode,
49551d03ec98SAneesh Kumar K.V 				   unsigned int new_extra_isize,
49561d03ec98SAneesh Kumar K.V 				   struct ext4_iloc iloc,
49571d03ec98SAneesh Kumar K.V 				   handle_t *handle)
49586dd4ee7cSKalpak Shah {
49596dd4ee7cSKalpak Shah 	struct ext4_inode *raw_inode;
49606dd4ee7cSKalpak Shah 	struct ext4_xattr_ibody_header *header;
49616dd4ee7cSKalpak Shah 
49626dd4ee7cSKalpak Shah 	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
49636dd4ee7cSKalpak Shah 		return 0;
49646dd4ee7cSKalpak Shah 
49656dd4ee7cSKalpak Shah 	raw_inode = ext4_raw_inode(&iloc);
49666dd4ee7cSKalpak Shah 
49676dd4ee7cSKalpak Shah 	header = IHDR(inode, raw_inode);
49686dd4ee7cSKalpak Shah 
49696dd4ee7cSKalpak Shah 	/* No extended attributes present */
497019f5fb7aSTheodore Ts'o 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
49716dd4ee7cSKalpak Shah 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
49726dd4ee7cSKalpak Shah 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
49736dd4ee7cSKalpak Shah 			new_extra_isize);
49746dd4ee7cSKalpak Shah 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
49756dd4ee7cSKalpak Shah 		return 0;
49766dd4ee7cSKalpak Shah 	}
49776dd4ee7cSKalpak Shah 
49786dd4ee7cSKalpak Shah 	/* try to expand with EAs present */
49796dd4ee7cSKalpak Shah 	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
49806dd4ee7cSKalpak Shah 					  raw_inode, handle);
49816dd4ee7cSKalpak Shah }
49826dd4ee7cSKalpak Shah 
49836dd4ee7cSKalpak Shah /*
4984ac27a0ecSDave Kleikamp  * What we do here is to mark the in-core inode as clean with respect to inode
4985ac27a0ecSDave Kleikamp  * dirtiness (it may still be data-dirty).
4986ac27a0ecSDave Kleikamp  * This means that the in-core inode may be reaped by prune_icache
4987ac27a0ecSDave Kleikamp  * without having to perform any I/O.  This is a very good thing,
4988ac27a0ecSDave Kleikamp  * because *any* task may call prune_icache - even ones which
4989ac27a0ecSDave Kleikamp  * have a transaction open against a different journal.
4990ac27a0ecSDave Kleikamp  *
4991ac27a0ecSDave Kleikamp  * Is this cheating?  Not really.  Sure, we haven't written the
4992ac27a0ecSDave Kleikamp  * inode out, but prune_icache isn't a user-visible syncing function.
4993ac27a0ecSDave Kleikamp  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4994ac27a0ecSDave Kleikamp  * we start and wait on commits.
4995ac27a0ecSDave Kleikamp  */
4996617ba13bSMingming Cao int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4997ac27a0ecSDave Kleikamp {
4998617ba13bSMingming Cao 	struct ext4_iloc iloc;
49996dd4ee7cSKalpak Shah 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
50006dd4ee7cSKalpak Shah 	static unsigned int mnt_count;
50016dd4ee7cSKalpak Shah 	int err, ret;
5002ac27a0ecSDave Kleikamp 
5003ac27a0ecSDave Kleikamp 	might_sleep();
50047ff9c073STheodore Ts'o 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5005617ba13bSMingming Cao 	err = ext4_reserve_inode_write(handle, inode, &iloc);
50060390131bSFrank Mayhar 	if (ext4_handle_valid(handle) &&
50070390131bSFrank Mayhar 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
500819f5fb7aSTheodore Ts'o 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
50096dd4ee7cSKalpak Shah 		/*
50106dd4ee7cSKalpak Shah 		 * We need extra buffer credits since we may write into EA block
50116dd4ee7cSKalpak Shah 		 * with this same handle. If journal_extend fails, then it will
50126dd4ee7cSKalpak Shah 		 * only result in a minor loss of functionality for that inode.
50136dd4ee7cSKalpak Shah 		 * If this is felt to be critical, then e2fsck should be run to
50146dd4ee7cSKalpak Shah 		 * force a large enough s_min_extra_isize.
50156dd4ee7cSKalpak Shah 		 */
50166dd4ee7cSKalpak Shah 		if ((jbd2_journal_extend(handle,
50176dd4ee7cSKalpak Shah 			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
50186dd4ee7cSKalpak Shah 			ret = ext4_expand_extra_isize(inode,
50196dd4ee7cSKalpak Shah 						      sbi->s_want_extra_isize,
50206dd4ee7cSKalpak Shah 						      iloc, handle);
50216dd4ee7cSKalpak Shah 			if (ret) {
502219f5fb7aSTheodore Ts'o 				ext4_set_inode_state(inode,
502319f5fb7aSTheodore Ts'o 						     EXT4_STATE_NO_EXPAND);
5024c1bddad9SAneesh Kumar K.V 				if (mnt_count !=
5025c1bddad9SAneesh Kumar K.V 					le16_to_cpu(sbi->s_es->s_mnt_count)) {
502612062dddSEric Sandeen 					ext4_warning(inode->i_sb,
50276dd4ee7cSKalpak Shah 					"Unable to expand inode %lu. Delete"
50286dd4ee7cSKalpak Shah 					" some EAs or run e2fsck.",
50296dd4ee7cSKalpak Shah 					inode->i_ino);
5030c1bddad9SAneesh Kumar K.V 					mnt_count =
5031c1bddad9SAneesh Kumar K.V 					  le16_to_cpu(sbi->s_es->s_mnt_count);
50326dd4ee7cSKalpak Shah 				}
50336dd4ee7cSKalpak Shah 			}
50346dd4ee7cSKalpak Shah 		}
50356dd4ee7cSKalpak Shah 	}
5036ac27a0ecSDave Kleikamp 	if (!err)
5037617ba13bSMingming Cao 		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
5038ac27a0ecSDave Kleikamp 	return err;
5039ac27a0ecSDave Kleikamp }
5040ac27a0ecSDave Kleikamp 
5041ac27a0ecSDave Kleikamp /*
5042617ba13bSMingming Cao  * ext4_dirty_inode() is called from __mark_inode_dirty()
5043ac27a0ecSDave Kleikamp  *
5044ac27a0ecSDave Kleikamp  * We're really interested in the case where a file is being extended.
5045ac27a0ecSDave Kleikamp  * i_size has been changed by generic_commit_write() and we thus need
5046ac27a0ecSDave Kleikamp  * to include the updated inode in the current transaction.
5047ac27a0ecSDave Kleikamp  *
50485dd4056dSChristoph Hellwig  * Also, dquot_alloc_block() will always dirty the inode when blocks
5049ac27a0ecSDave Kleikamp  * are allocated to the file.
5050ac27a0ecSDave Kleikamp  *
5051ac27a0ecSDave Kleikamp  * If the inode is marked synchronous, we don't honour that here - doing
5052ac27a0ecSDave Kleikamp  * so would cause a commit on atime updates, which we don't bother doing.
5053ac27a0ecSDave Kleikamp  * We handle synchronous inodes at the highest possible level.
5054ac27a0ecSDave Kleikamp  */
5055aa385729SChristoph Hellwig void ext4_dirty_inode(struct inode *inode, int flags)
5056ac27a0ecSDave Kleikamp {
5057ac27a0ecSDave Kleikamp 	handle_t *handle;
5058ac27a0ecSDave Kleikamp 
50599924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
5060ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
5061ac27a0ecSDave Kleikamp 		goto out;
5062f3dc272fSCurt Wohlgemuth 
5063617ba13bSMingming Cao 	ext4_mark_inode_dirty(handle, inode);
5064f3dc272fSCurt Wohlgemuth 
5065617ba13bSMingming Cao 	ext4_journal_stop(handle);
5066ac27a0ecSDave Kleikamp out:
5067ac27a0ecSDave Kleikamp 	return;
5068ac27a0ecSDave Kleikamp }
5069ac27a0ecSDave Kleikamp 
5070ac27a0ecSDave Kleikamp #if 0
5071ac27a0ecSDave Kleikamp /*
5072ac27a0ecSDave Kleikamp  * Bind an inode's backing buffer_head into this transaction, to prevent
5073ac27a0ecSDave Kleikamp  * it from being flushed to disk early.  Unlike
5074617ba13bSMingming Cao  * ext4_reserve_inode_write, this leaves behind no bh reference and
5075ac27a0ecSDave Kleikamp  * returns no iloc structure, so the caller needs to repeat the iloc
5076ac27a0ecSDave Kleikamp  * lookup to mark the inode dirty later.
5077ac27a0ecSDave Kleikamp  */
5078617ba13bSMingming Cao static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5079ac27a0ecSDave Kleikamp {
5080617ba13bSMingming Cao 	struct ext4_iloc iloc;
5081ac27a0ecSDave Kleikamp 
5082ac27a0ecSDave Kleikamp 	int err = 0;
5083ac27a0ecSDave Kleikamp 	if (handle) {
5084617ba13bSMingming Cao 		err = ext4_get_inode_loc(inode, &iloc);
5085ac27a0ecSDave Kleikamp 		if (!err) {
5086ac27a0ecSDave Kleikamp 			BUFFER_TRACE(iloc.bh, "get_write_access");
5087dab291afSMingming Cao 			err = jbd2_journal_get_write_access(handle, iloc.bh);
5088ac27a0ecSDave Kleikamp 			if (!err)
50890390131bSFrank Mayhar 				err = ext4_handle_dirty_metadata(handle,
509073b50c1cSCurt Wohlgemuth 								 NULL,
5091ac27a0ecSDave Kleikamp 								 iloc.bh);
5092ac27a0ecSDave Kleikamp 			brelse(iloc.bh);
5093ac27a0ecSDave Kleikamp 		}
5094ac27a0ecSDave Kleikamp 	}
5095617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
5096ac27a0ecSDave Kleikamp 	return err;
5097ac27a0ecSDave Kleikamp }
5098ac27a0ecSDave Kleikamp #endif
5099ac27a0ecSDave Kleikamp 
5100617ba13bSMingming Cao int ext4_change_inode_journal_flag(struct inode *inode, int val)
5101ac27a0ecSDave Kleikamp {
5102ac27a0ecSDave Kleikamp 	journal_t *journal;
5103ac27a0ecSDave Kleikamp 	handle_t *handle;
5104ac27a0ecSDave Kleikamp 	int err;
5105ac27a0ecSDave Kleikamp 
5106ac27a0ecSDave Kleikamp 	/*
5107ac27a0ecSDave Kleikamp 	 * We have to be very careful here: changing a data block's
5108ac27a0ecSDave Kleikamp 	 * journaling status dynamically is dangerous.  If we write a
5109ac27a0ecSDave Kleikamp 	 * data block to the journal, change the status and then delete
5110ac27a0ecSDave Kleikamp 	 * that block, we risk forgetting to revoke the old log record
5111ac27a0ecSDave Kleikamp 	 * from the journal and so a subsequent replay can corrupt data.
5112ac27a0ecSDave Kleikamp 	 * So, first we make sure that the journal is empty and that
5113ac27a0ecSDave Kleikamp 	 * nobody is changing anything.
5114ac27a0ecSDave Kleikamp 	 */
5115ac27a0ecSDave Kleikamp 
5116617ba13bSMingming Cao 	journal = EXT4_JOURNAL(inode);
51170390131bSFrank Mayhar 	if (!journal)
51180390131bSFrank Mayhar 		return 0;
5119d699594dSDave Hansen 	if (is_journal_aborted(journal))
5120ac27a0ecSDave Kleikamp 		return -EROFS;
51212aff57b0SYongqiang Yang 	/* We have to allocate physical blocks for delalloc blocks
51222aff57b0SYongqiang Yang 	 * before flushing journal. otherwise delalloc blocks can not
51232aff57b0SYongqiang Yang 	 * be allocated any more. even more truncate on delalloc blocks
51242aff57b0SYongqiang Yang 	 * could trigger BUG by flushing delalloc blocks in journal.
51252aff57b0SYongqiang Yang 	 * There is no delalloc block in non-journal data mode.
51262aff57b0SYongqiang Yang 	 */
51272aff57b0SYongqiang Yang 	if (val && test_opt(inode->i_sb, DELALLOC)) {
51282aff57b0SYongqiang Yang 		err = ext4_alloc_da_blocks(inode);
51292aff57b0SYongqiang Yang 		if (err < 0)
51302aff57b0SYongqiang Yang 			return err;
51312aff57b0SYongqiang Yang 	}
5132ac27a0ecSDave Kleikamp 
513317335dccSDmitry Monakhov 	/* Wait for all existing dio workers */
513417335dccSDmitry Monakhov 	ext4_inode_block_unlocked_dio(inode);
513517335dccSDmitry Monakhov 	inode_dio_wait(inode);
513617335dccSDmitry Monakhov 
5137dab291afSMingming Cao 	jbd2_journal_lock_updates(journal);
5138ac27a0ecSDave Kleikamp 
5139ac27a0ecSDave Kleikamp 	/*
5140ac27a0ecSDave Kleikamp 	 * OK, there are no updates running now, and all cached data is
5141ac27a0ecSDave Kleikamp 	 * synced to disk.  We are now in a completely consistent state
5142ac27a0ecSDave Kleikamp 	 * which doesn't have anything in the journal, and we know that
5143ac27a0ecSDave Kleikamp 	 * no filesystem updates are running, so it is safe to modify
5144ac27a0ecSDave Kleikamp 	 * the inode's in-core data-journaling state flag now.
5145ac27a0ecSDave Kleikamp 	 */
5146ac27a0ecSDave Kleikamp 
5147ac27a0ecSDave Kleikamp 	if (val)
514812e9b892SDmitry Monakhov 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
51495872ddaaSYongqiang Yang 	else {
51505872ddaaSYongqiang Yang 		jbd2_journal_flush(journal);
515112e9b892SDmitry Monakhov 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
51525872ddaaSYongqiang Yang 	}
5153617ba13bSMingming Cao 	ext4_set_aops(inode);
5154ac27a0ecSDave Kleikamp 
5155dab291afSMingming Cao 	jbd2_journal_unlock_updates(journal);
515617335dccSDmitry Monakhov 	ext4_inode_resume_unlocked_dio(inode);
5157ac27a0ecSDave Kleikamp 
5158ac27a0ecSDave Kleikamp 	/* Finally we can mark the inode as dirty. */
5159ac27a0ecSDave Kleikamp 
51609924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
5161ac27a0ecSDave Kleikamp 	if (IS_ERR(handle))
5162ac27a0ecSDave Kleikamp 		return PTR_ERR(handle);
5163ac27a0ecSDave Kleikamp 
5164617ba13bSMingming Cao 	err = ext4_mark_inode_dirty(handle, inode);
51650390131bSFrank Mayhar 	ext4_handle_sync(handle);
5166617ba13bSMingming Cao 	ext4_journal_stop(handle);
5167617ba13bSMingming Cao 	ext4_std_error(inode->i_sb, err);
5168ac27a0ecSDave Kleikamp 
5169ac27a0ecSDave Kleikamp 	return err;
5170ac27a0ecSDave Kleikamp }
51712e9ee850SAneesh Kumar K.V 
51722e9ee850SAneesh Kumar K.V static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
51732e9ee850SAneesh Kumar K.V {
51742e9ee850SAneesh Kumar K.V 	return !buffer_mapped(bh);
51752e9ee850SAneesh Kumar K.V }
51762e9ee850SAneesh Kumar K.V 
5177c2ec175cSNick Piggin int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
51782e9ee850SAneesh Kumar K.V {
5179c2ec175cSNick Piggin 	struct page *page = vmf->page;
51802e9ee850SAneesh Kumar K.V 	loff_t size;
51812e9ee850SAneesh Kumar K.V 	unsigned long len;
51829ea7df53SJan Kara 	int ret;
51832e9ee850SAneesh Kumar K.V 	struct file *file = vma->vm_file;
5184496ad9aaSAl Viro 	struct inode *inode = file_inode(file);
51852e9ee850SAneesh Kumar K.V 	struct address_space *mapping = inode->i_mapping;
51869ea7df53SJan Kara 	handle_t *handle;
51879ea7df53SJan Kara 	get_block_t *get_block;
51889ea7df53SJan Kara 	int retries = 0;
51892e9ee850SAneesh Kumar K.V 
51908e8ad8a5SJan Kara 	sb_start_pagefault(inode->i_sb);
5191041bbb6dSTheodore Ts'o 	file_update_time(vma->vm_file);
51929ea7df53SJan Kara 	/* Delalloc case is easy... */
51939ea7df53SJan Kara 	if (test_opt(inode->i_sb, DELALLOC) &&
51949ea7df53SJan Kara 	    !ext4_should_journal_data(inode) &&
51959ea7df53SJan Kara 	    !ext4_nonda_switch(inode->i_sb)) {
51969ea7df53SJan Kara 		do {
51979ea7df53SJan Kara 			ret = __block_page_mkwrite(vma, vmf,
51989ea7df53SJan Kara 						   ext4_da_get_block_prep);
51999ea7df53SJan Kara 		} while (ret == -ENOSPC &&
52009ea7df53SJan Kara 		       ext4_should_retry_alloc(inode->i_sb, &retries));
52019ea7df53SJan Kara 		goto out_ret;
52022e9ee850SAneesh Kumar K.V 	}
52030e499890SDarrick J. Wong 
52040e499890SDarrick J. Wong 	lock_page(page);
52059ea7df53SJan Kara 	size = i_size_read(inode);
52069ea7df53SJan Kara 	/* Page got truncated from under us? */
52079ea7df53SJan Kara 	if (page->mapping != mapping || page_offset(page) > size) {
52089ea7df53SJan Kara 		unlock_page(page);
52099ea7df53SJan Kara 		ret = VM_FAULT_NOPAGE;
52109ea7df53SJan Kara 		goto out;
52110e499890SDarrick J. Wong 	}
52122e9ee850SAneesh Kumar K.V 
52132e9ee850SAneesh Kumar K.V 	if (page->index == size >> PAGE_CACHE_SHIFT)
52142e9ee850SAneesh Kumar K.V 		len = size & ~PAGE_CACHE_MASK;
52152e9ee850SAneesh Kumar K.V 	else
52162e9ee850SAneesh Kumar K.V 		len = PAGE_CACHE_SIZE;
5217a827eaffSAneesh Kumar K.V 	/*
52189ea7df53SJan Kara 	 * Return if we have all the buffers mapped. This avoids the need to do
52199ea7df53SJan Kara 	 * journal_start/journal_stop which can block and take a long time
5220a827eaffSAneesh Kumar K.V 	 */
52212e9ee850SAneesh Kumar K.V 	if (page_has_buffers(page)) {
5222f19d5870STao Ma 		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
5223f19d5870STao Ma 					    0, len, NULL,
5224a827eaffSAneesh Kumar K.V 					    ext4_bh_unmapped)) {
52259ea7df53SJan Kara 			/* Wait so that we don't change page under IO */
52261d1d1a76SDarrick J. Wong 			wait_for_stable_page(page);
52279ea7df53SJan Kara 			ret = VM_FAULT_LOCKED;
52289ea7df53SJan Kara 			goto out;
52292e9ee850SAneesh Kumar K.V 		}
5230a827eaffSAneesh Kumar K.V 	}
5231a827eaffSAneesh Kumar K.V 	unlock_page(page);
52329ea7df53SJan Kara 	/* OK, we need to fill the hole... */
52339ea7df53SJan Kara 	if (ext4_should_dioread_nolock(inode))
52349ea7df53SJan Kara 		get_block = ext4_get_block_write;
52359ea7df53SJan Kara 	else
52369ea7df53SJan Kara 		get_block = ext4_get_block;
52379ea7df53SJan Kara retry_alloc:
52389924a92aSTheodore Ts'o 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
52399924a92aSTheodore Ts'o 				    ext4_writepage_trans_blocks(inode));
52409ea7df53SJan Kara 	if (IS_ERR(handle)) {
5241c2ec175cSNick Piggin 		ret = VM_FAULT_SIGBUS;
52429ea7df53SJan Kara 		goto out;
52439ea7df53SJan Kara 	}
52449ea7df53SJan Kara 	ret = __block_page_mkwrite(vma, vmf, get_block);
52459ea7df53SJan Kara 	if (!ret && ext4_should_journal_data(inode)) {
5246f19d5870STao Ma 		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
52479ea7df53SJan Kara 			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
52489ea7df53SJan Kara 			unlock_page(page);
52499ea7df53SJan Kara 			ret = VM_FAULT_SIGBUS;
5250fcbb5515SYongqiang Yang 			ext4_journal_stop(handle);
52519ea7df53SJan Kara 			goto out;
52529ea7df53SJan Kara 		}
52539ea7df53SJan Kara 		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
52549ea7df53SJan Kara 	}
52559ea7df53SJan Kara 	ext4_journal_stop(handle);
52569ea7df53SJan Kara 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
52579ea7df53SJan Kara 		goto retry_alloc;
52589ea7df53SJan Kara out_ret:
52599ea7df53SJan Kara 	ret = block_page_mkwrite_return(ret);
52609ea7df53SJan Kara out:
52618e8ad8a5SJan Kara 	sb_end_pagefault(inode->i_sb);
52622e9ee850SAneesh Kumar K.V 	return ret;
52632e9ee850SAneesh Kumar K.V }
5264